diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.go new file mode 100644 index 0000000000000000000000000000000000000000..843398d3b0a65829953d8170c679c710f427a5d9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.go @@ -0,0 +1,133 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build riscv64 +// +build riscv64 + +package testbranch + +import ( + "testing" +) + +func testBEQZ(a int64) (r bool) +func testBGE(a, b int64) (r bool) +func testBGEU(a, b int64) (r bool) +func testBGEZ(a int64) (r bool) +func testBGT(a, b int64) (r bool) +func testBGTU(a, b int64) (r bool) +func testBGTZ(a int64) (r bool) +func testBLE(a, b int64) (r bool) +func testBLEU(a, b int64) (r bool) +func testBLEZ(a int64) (r bool) +func testBLT(a, b int64) (r bool) +func testBLTU(a, b int64) (r bool) +func testBLTZ(a int64) (r bool) +func testBNEZ(a int64) (r bool) + +func testGoBGE(a, b int64) bool { return a >= b } +func testGoBGEU(a, b int64) bool { return uint64(a) >= uint64(b) } +func testGoBGT(a, b int64) bool { return a > b } +func testGoBGTU(a, b int64) bool { return uint64(a) > uint64(b) } +func testGoBLE(a, b int64) bool { return a <= b } +func testGoBLEU(a, b int64) bool { return uint64(a) <= uint64(b) } +func testGoBLT(a, b int64) bool { return a < b } +func testGoBLTU(a, b int64) bool { return uint64(a) < uint64(b) } + +func TestBranchCondition(t *testing.T) { + tests := []struct { + ins string + a int64 + b int64 + fn func(a, b int64) bool + goFn func(a, b int64) bool + want bool + }{ + {"BGE", 0, 1, testBGE, testGoBGE, false}, + {"BGE", 0, 0, testBGE, testGoBGE, true}, + {"BGE", 0, -1, testBGE, testGoBGE, true}, + {"BGE", -1, 0, testBGE, testGoBGE, false}, + 
{"BGE", 1, 0, testBGE, testGoBGE, true}, + {"BGEU", 0, 1, testBGEU, testGoBGEU, false}, + {"BGEU", 0, 0, testBGEU, testGoBGEU, true}, + {"BGEU", 0, -1, testBGEU, testGoBGEU, false}, + {"BGEU", -1, 0, testBGEU, testGoBGEU, true}, + {"BGEU", 1, 0, testBGEU, testGoBGEU, true}, + {"BGT", 0, 1, testBGT, testGoBGT, false}, + {"BGT", 0, 0, testBGT, testGoBGT, false}, + {"BGT", 0, -1, testBGT, testGoBGT, true}, + {"BGT", -1, 0, testBGT, testGoBGT, false}, + {"BGT", 1, 0, testBGT, testGoBGT, true}, + {"BGTU", 0, 1, testBGTU, testGoBGTU, false}, + {"BGTU", 0, 0, testBGTU, testGoBGTU, false}, + {"BGTU", 0, -1, testBGTU, testGoBGTU, false}, + {"BGTU", -1, 0, testBGTU, testGoBGTU, true}, + {"BGTU", 1, 0, testBGTU, testGoBGTU, true}, + {"BLE", 0, 1, testBLE, testGoBLE, true}, + {"BLE", 0, 0, testBLE, testGoBLE, true}, + {"BLE", 0, -1, testBLE, testGoBLE, false}, + {"BLE", -1, 0, testBLE, testGoBLE, true}, + {"BLE", 1, 0, testBLE, testGoBLE, false}, + {"BLEU", 0, 1, testBLEU, testGoBLEU, true}, + {"BLEU", 0, 0, testBLEU, testGoBLEU, true}, + {"BLEU", 0, -1, testBLEU, testGoBLEU, true}, + {"BLEU", -1, 0, testBLEU, testGoBLEU, false}, + {"BLEU", 1, 0, testBLEU, testGoBLEU, false}, + {"BLT", 0, 1, testBLT, testGoBLT, true}, + {"BLT", 0, 0, testBLT, testGoBLT, false}, + {"BLT", 0, -1, testBLT, testGoBLT, false}, + {"BLT", -1, 0, testBLT, testGoBLT, true}, + {"BLT", 1, 0, testBLT, testGoBLT, false}, + {"BLTU", 0, 1, testBLTU, testGoBLTU, true}, + {"BLTU", 0, 0, testBLTU, testGoBLTU, false}, + {"BLTU", 0, -1, testBLTU, testGoBLTU, true}, + {"BLTU", -1, 0, testBLTU, testGoBLTU, false}, + {"BLTU", 1, 0, testBLTU, testGoBLTU, false}, + } + for _, test := range tests { + t.Run(test.ins, func(t *testing.T) { + if got := test.fn(test.a, test.b); got != test.want { + t.Errorf("Assembly %v %v, %v = %v, want %v", test.ins, test.a, test.b, got, test.want) + } + if got := test.goFn(test.a, test.b); got != test.want { + t.Errorf("Go %v %v, %v = %v, want %v", test.ins, test.a, test.b, got, 
test.want) + } + }) + } +} + +func TestBranchZero(t *testing.T) { + tests := []struct { + ins string + a int64 + fn func(a int64) bool + want bool + }{ + {"BEQZ", -1, testBEQZ, false}, + {"BEQZ", 0, testBEQZ, true}, + {"BEQZ", 1, testBEQZ, false}, + {"BGEZ", -1, testBGEZ, false}, + {"BGEZ", 0, testBGEZ, true}, + {"BGEZ", 1, testBGEZ, true}, + {"BGTZ", -1, testBGTZ, false}, + {"BGTZ", 0, testBGTZ, false}, + {"BGTZ", 1, testBGTZ, true}, + {"BLEZ", -1, testBLEZ, true}, + {"BLEZ", 0, testBLEZ, true}, + {"BLEZ", 1, testBLEZ, false}, + {"BLTZ", -1, testBLTZ, true}, + {"BLTZ", 0, testBLTZ, false}, + {"BLTZ", 1, testBLTZ, false}, + {"BNEZ", -1, testBNEZ, true}, + {"BNEZ", 0, testBNEZ, false}, + {"BNEZ", 1, testBNEZ, true}, + } + for _, test := range tests { + t.Run(test.ins, func(t *testing.T) { + if got := test.fn(test.a); got != test.want { + t.Errorf("%v %v = %v, want %v", test.ins, test.a, got, test.want) + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.s b/platform/dbops/binaries/go/go/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.s new file mode 100644 index 0000000000000000000000000000000000000000..d7141e38c1655f269aa873997e4837fb72f0ed94 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.s @@ -0,0 +1,156 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build riscv64 +// +build riscv64 + +#include "textflag.h" + +// func testBEQZ(a int64) (r bool) +TEXT ·testBEQZ(SB),NOSPLIT,$0-9 + MOV a+0(FP), X5 + MOV $1, X6 + BEQZ X5, b + MOV $0, X6 +b: + MOV X6, r+8(FP) + RET + +// func testBGE(a, b int64) (r bool) +TEXT ·testBGE(SB),NOSPLIT,$0-17 + MOV a+0(FP), X5 + MOV b+8(FP), X6 + MOV $1, X7 + BGE X5, X6, b + MOV $0, X7 +b: + MOV X7, r+16(FP) + RET + +// func testBGEU(a, b int64) (r bool) +TEXT ·testBGEU(SB),NOSPLIT,$0-17 + MOV a+0(FP), X5 + MOV b+8(FP), X6 + MOV $1, X7 + BGEU X5, X6, b + MOV $0, X7 +b: + MOV X7, r+16(FP) + RET + +// func testBGEZ(a int64) (r bool) +TEXT ·testBGEZ(SB),NOSPLIT,$0-9 + MOV a+0(FP), X5 + MOV $1, X6 + BGEZ X5, b + MOV $0, X6 +b: + MOV X6, r+8(FP) + RET + +// func testBGT(a, b int64) (r bool) +TEXT ·testBGT(SB),NOSPLIT,$0-17 + MOV a+0(FP), X5 + MOV b+8(FP), X6 + MOV $1, X7 + BGT X5, X6, b + MOV $0, X7 +b: + MOV X7, r+16(FP) + RET + +// func testBGTU(a, b int64) (r bool) +TEXT ·testBGTU(SB),NOSPLIT,$0-17 + MOV a+0(FP), X5 + MOV b+8(FP), X6 + MOV $1, X7 + BGTU X5, X6, b + MOV $0, X7 +b: + MOV X7, r+16(FP) + RET + +// func testBGTZ(a int64) (r bool) +TEXT ·testBGTZ(SB),NOSPLIT,$0-9 + MOV a+0(FP), X5 + MOV $1, X6 + BGTZ X5, b + MOV $0, X6 +b: + MOV X6, r+8(FP) + RET + +// func testBLE(a, b int64) (r bool) +TEXT ·testBLE(SB),NOSPLIT,$0-17 + MOV a+0(FP), X5 + MOV b+8(FP), X6 + MOV $1, X7 + BLE X5, X6, b + MOV $0, X7 +b: + MOV X7, r+16(FP) + RET + +// func testBLEU(a, b int64) (r bool) +TEXT ·testBLEU(SB),NOSPLIT,$0-17 + MOV a+0(FP), X5 + MOV b+8(FP), X6 + MOV $1, X7 + BLEU X5, X6, b + MOV $0, X7 +b: + MOV X7, r+16(FP) + RET + +// func testBLEZ(a int64) (r bool) +TEXT ·testBLEZ(SB),NOSPLIT,$0-9 + MOV a+0(FP), X5 + MOV $1, X6 + BLEZ X5, b + MOV $0, X6 +b: + MOV X6, r+8(FP) + RET + +// func testBLT(a, b int64) (r bool) +TEXT ·testBLT(SB),NOSPLIT,$0-17 + MOV a+0(FP), X5 + MOV b+8(FP), X6 + MOV $1, X7 + BLT X5, X6, b + MOV $0, X7 +b: + MOV X7, r+16(FP) + RET + +// func testBLTU(a, b int64) (r bool) 
+TEXT ·testBLTU(SB),NOSPLIT,$0-17 + MOV a+0(FP), X5 + MOV b+8(FP), X6 + MOV $1, X7 + BLTU X5, X6, b + MOV $0, X7 +b: + MOV X7, r+16(FP) + RET + +// func testBLTZ(a int64) (r bool) +TEXT ·testBLTZ(SB),NOSPLIT,$0-9 + MOV a+0(FP), X5 + MOV $1, X6 + BLTZ X5, b + MOV $0, X6 +b: + MOV X6, r+8(FP) + RET + +// func testBNEZ(a int64) (r bool) +TEXT ·testBNEZ(SB),NOSPLIT,$0-9 + MOV a+0(FP), X5 + MOV $1, X6 + BNEZ X5, b + MOV $0, X6 +b: + MOV X6, r+8(FP) + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/s390x/rotate.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/s390x/rotate.go new file mode 100644 index 0000000000000000000000000000000000000000..5407c8df1109993f2b86f32323b2f288036b108a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/s390x/rotate.go @@ -0,0 +1,117 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s390x + +import ( + "math/bits" +) + +// RotateParams represents the immediates required for a "rotate +// then ... selected bits instruction". +// +// The Start and End values are the indexes that represent +// the masked region. They are inclusive and are in big- +// endian order (bit 0 is the MSB, bit 63 is the LSB). They +// may wrap around. +// +// Some examples: +// +// Masked region | Start | End +// --------------------------+-------+---- +// 0x00_00_00_00_00_00_00_0f | 60 | 63 +// 0xf0_00_00_00_00_00_00_00 | 0 | 3 +// 0xf0_00_00_00_00_00_00_0f | 60 | 3 +// +// The Amount value represents the amount to rotate the +// input left by. Note that this rotation is performed +// before the masked region is used. 
+type RotateParams struct { + Start uint8 // big-endian start bit index [0..63] + End uint8 // big-endian end bit index [0..63] + Amount uint8 // amount to rotate left +} + +// NewRotateParams creates a set of parameters representing a +// rotation left by the amount provided and a selection of the bits +// between the provided start and end indexes (inclusive). +// +// The start and end indexes and the rotation amount must all +// be in the range 0-63 inclusive or this function will panic. +func NewRotateParams(start, end, amount uint8) RotateParams { + if start&^63 != 0 { + panic("start out of bounds") + } + if end&^63 != 0 { + panic("end out of bounds") + } + if amount&^63 != 0 { + panic("amount out of bounds") + } + return RotateParams{ + Start: start, + End: end, + Amount: amount, + } +} + +// RotateLeft generates a new set of parameters with the rotation amount +// increased by the given value. The selected bits are left unchanged. +func (r RotateParams) RotateLeft(amount uint8) RotateParams { + r.Amount += amount + r.Amount &= 63 + return r +} + +// OutMask provides a mask representing the selected bits. 
+func (r RotateParams) OutMask() uint64 { + // Note: z must be unsigned for bootstrap compiler + z := uint8(63-r.End+r.Start) & 63 // number of zero bits in mask + return bits.RotateLeft64(^uint64(0)<> 1, outMask: ^uint64(0) >> 1}, + {start: 0, end: 62, amount: 0, inMask: ^uint64(1), outMask: ^uint64(1)}, + {start: 1, end: 62, amount: 0, inMask: ^uint64(3) >> 1, outMask: ^uint64(3) >> 1}, + + // end before start, no rotation + {start: 63, end: 0, amount: 0, inMask: 1<<63 | 1, outMask: 1<<63 | 1}, + {start: 62, end: 0, amount: 0, inMask: 1<<63 | 3, outMask: 1<<63 | 3}, + {start: 63, end: 1, amount: 0, inMask: 3<<62 | 1, outMask: 3<<62 | 1}, + {start: 62, end: 1, amount: 0, inMask: 3<<62 | 3, outMask: 3<<62 | 3}, + + // rotation + {start: 32, end: 63, amount: 32, inMask: 0xffffffff00000000, outMask: 0x00000000ffffffff}, + {start: 48, end: 15, amount: 16, inMask: 0xffffffff00000000, outMask: 0xffff00000000ffff}, + {start: 0, end: 7, amount: -8 & 63, inMask: 0xff, outMask: 0xff << 56}, + } + for i, test := range tests { + r := NewRotateParams(test.start, test.end, test.amount) + if m := r.OutMask(); m != test.outMask { + t.Errorf("out mask %v: want %#x, got %#x", i, test.outMask, m) + } + if m := r.InMask(); m != test.inMask { + t.Errorf("in mask %v: want %#x, got %#x", i, test.inMask, m) + } + } +} + +func TestRotateParamsMerge(t *testing.T) { + tests := []struct { + // inputs + src RotateParams + mask uint64 + + // results + in *RotateParams + out *RotateParams + }{ + { + src: RotateParams{Start: 48, End: 15, Amount: 16}, + mask: 0xffffffffffffffff, + in: &RotateParams{Start: 48, End: 15, Amount: 16}, + out: &RotateParams{Start: 48, End: 15, Amount: 16}, + }, + { + src: RotateParams{Start: 16, End: 47, Amount: 0}, + mask: 0x00000000ffffffff, + in: &RotateParams{Start: 32, End: 47, Amount: 0}, + out: &RotateParams{Start: 32, End: 47, Amount: 0}, + }, + { + src: RotateParams{Start: 16, End: 47, Amount: 0}, + mask: 0xffff00000000ffff, + in: nil, + out: nil, + }, + { + 
src: RotateParams{Start: 0, End: 63, Amount: 0}, + mask: 0xf7f0000000000000, + in: nil, + out: nil, + }, + { + src: RotateParams{Start: 0, End: 63, Amount: 1}, + mask: 0x000000000000ff00, + in: &RotateParams{Start: 47, End: 54, Amount: 1}, + out: &RotateParams{Start: 48, End: 55, Amount: 1}, + }, + { + src: RotateParams{Start: 32, End: 63, Amount: 32}, + mask: 0xffff00000000ffff, + in: &RotateParams{Start: 32, End: 47, Amount: 32}, + out: &RotateParams{Start: 48, End: 63, Amount: 32}, + }, + { + src: RotateParams{Start: 0, End: 31, Amount: 32}, + mask: 0x8000000000000000, + in: nil, + out: &RotateParams{Start: 0, End: 0, Amount: 32}, + }, + { + src: RotateParams{Start: 0, End: 31, Amount: 32}, + mask: 0x0000000080000000, + in: &RotateParams{Start: 0, End: 0, Amount: 32}, + out: nil, + }, + } + + eq := func(x, y *RotateParams) bool { + if x == nil && y == nil { + return true + } + if x == nil || y == nil { + return false + } + return *x == *y + } + + for _, test := range tests { + if r := test.src.InMerge(test.mask); !eq(r, test.in) { + t.Errorf("%v merged with %#x (input): want %v, got %v", test.src, test.mask, test.in, r) + } + if r := test.src.OutMerge(test.mask); !eq(r, test.out) { + t.Errorf("%v merged with %#x (output): want %v, got %v", test.src, test.mask, test.out, r) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/s390x/vector.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/s390x/vector.go new file mode 100644 index 0000000000000000000000000000000000000000..00f578340f9db69bc43451d23a074c48af0c93af --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/s390x/vector.go @@ -0,0 +1,1069 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s390x + +import ( + "cmd/internal/obj" +) + +// This file contains utility functions for use when +// assembling vector instructions. 
+ +// vop returns the opcode, element size and condition +// setting for the given (possibly extended) mnemonic. +func vop(as obj.As) (opcode, es, cs uint32) { + switch as { + default: + return 0, 0, 0 + case AVA: + return op_VA, 0, 0 + case AVAB: + return op_VA, 0, 0 + case AVAH: + return op_VA, 1, 0 + case AVAF: + return op_VA, 2, 0 + case AVAG: + return op_VA, 3, 0 + case AVAQ: + return op_VA, 4, 0 + case AVACC: + return op_VACC, 0, 0 + case AVACCB: + return op_VACC, 0, 0 + case AVACCH: + return op_VACC, 1, 0 + case AVACCF: + return op_VACC, 2, 0 + case AVACCG: + return op_VACC, 3, 0 + case AVACCQ: + return op_VACC, 4, 0 + case AVAC: + return op_VAC, 0, 0 + case AVACQ: + return op_VAC, 4, 0 + case AVMSLG, AVMSLEG, AVMSLOG, AVMSLEOG: + return op_VMSL, 3, 0 + case AVACCC: + return op_VACCC, 0, 0 + case AVACCCQ: + return op_VACCC, 4, 0 + case AVN: + return op_VN, 0, 0 + case AVNC: + return op_VNC, 0, 0 + case AVAVG: + return op_VAVG, 0, 0 + case AVAVGB: + return op_VAVG, 0, 0 + case AVAVGH: + return op_VAVG, 1, 0 + case AVAVGF: + return op_VAVG, 2, 0 + case AVAVGG: + return op_VAVG, 3, 0 + case AVAVGL: + return op_VAVGL, 0, 0 + case AVAVGLB: + return op_VAVGL, 0, 0 + case AVAVGLH: + return op_VAVGL, 1, 0 + case AVAVGLF: + return op_VAVGL, 2, 0 + case AVAVGLG: + return op_VAVGL, 3, 0 + case AVCKSM: + return op_VCKSM, 0, 0 + case AVCEQ: + return op_VCEQ, 0, 0 + case AVCEQB: + return op_VCEQ, 0, 0 + case AVCEQH: + return op_VCEQ, 1, 0 + case AVCEQF: + return op_VCEQ, 2, 0 + case AVCEQG: + return op_VCEQ, 3, 0 + case AVCEQBS: + return op_VCEQ, 0, 1 + case AVCEQHS: + return op_VCEQ, 1, 1 + case AVCEQFS: + return op_VCEQ, 2, 1 + case AVCEQGS: + return op_VCEQ, 3, 1 + case AVCH: + return op_VCH, 0, 0 + case AVCHB: + return op_VCH, 0, 0 + case AVCHH: + return op_VCH, 1, 0 + case AVCHF: + return op_VCH, 2, 0 + case AVCHG: + return op_VCH, 3, 0 + case AVCHBS: + return op_VCH, 0, 1 + case AVCHHS: + return op_VCH, 1, 1 + case AVCHFS: + return op_VCH, 2, 1 + case AVCHGS: + 
return op_VCH, 3, 1 + case AVCHL: + return op_VCHL, 0, 0 + case AVCHLB: + return op_VCHL, 0, 0 + case AVCHLH: + return op_VCHL, 1, 0 + case AVCHLF: + return op_VCHL, 2, 0 + case AVCHLG: + return op_VCHL, 3, 0 + case AVCHLBS: + return op_VCHL, 0, 1 + case AVCHLHS: + return op_VCHL, 1, 1 + case AVCHLFS: + return op_VCHL, 2, 1 + case AVCHLGS: + return op_VCHL, 3, 1 + case AVCLZ: + return op_VCLZ, 0, 0 + case AVCLZB: + return op_VCLZ, 0, 0 + case AVCLZH: + return op_VCLZ, 1, 0 + case AVCLZF: + return op_VCLZ, 2, 0 + case AVCLZG: + return op_VCLZ, 3, 0 + case AVCTZ: + return op_VCTZ, 0, 0 + case AVCTZB: + return op_VCTZ, 0, 0 + case AVCTZH: + return op_VCTZ, 1, 0 + case AVCTZF: + return op_VCTZ, 2, 0 + case AVCTZG: + return op_VCTZ, 3, 0 + case AVEC: + return op_VEC, 0, 0 + case AVECB: + return op_VEC, 0, 0 + case AVECH: + return op_VEC, 1, 0 + case AVECF: + return op_VEC, 2, 0 + case AVECG: + return op_VEC, 3, 0 + case AVECL: + return op_VECL, 0, 0 + case AVECLB: + return op_VECL, 0, 0 + case AVECLH: + return op_VECL, 1, 0 + case AVECLF: + return op_VECL, 2, 0 + case AVECLG: + return op_VECL, 3, 0 + case AVERIM: + return op_VERIM, 0, 0 + case AVERIMB: + return op_VERIM, 0, 0 + case AVERIMH: + return op_VERIM, 1, 0 + case AVERIMF: + return op_VERIM, 2, 0 + case AVERIMG: + return op_VERIM, 3, 0 + case AVERLL: + return op_VERLL, 0, 0 + case AVERLLB: + return op_VERLL, 0, 0 + case AVERLLH: + return op_VERLL, 1, 0 + case AVERLLF: + return op_VERLL, 2, 0 + case AVERLLG: + return op_VERLL, 3, 0 + case AVERLLV: + return op_VERLLV, 0, 0 + case AVERLLVB: + return op_VERLLV, 0, 0 + case AVERLLVH: + return op_VERLLV, 1, 0 + case AVERLLVF: + return op_VERLLV, 2, 0 + case AVERLLVG: + return op_VERLLV, 3, 0 + case AVESLV: + return op_VESLV, 0, 0 + case AVESLVB: + return op_VESLV, 0, 0 + case AVESLVH: + return op_VESLV, 1, 0 + case AVESLVF: + return op_VESLV, 2, 0 + case AVESLVG: + return op_VESLV, 3, 0 + case AVESL: + return op_VESL, 0, 0 + case AVESLB: + return op_VESL, 0, 0 + case 
AVESLH: + return op_VESL, 1, 0 + case AVESLF: + return op_VESL, 2, 0 + case AVESLG: + return op_VESL, 3, 0 + case AVESRA: + return op_VESRA, 0, 0 + case AVESRAB: + return op_VESRA, 0, 0 + case AVESRAH: + return op_VESRA, 1, 0 + case AVESRAF: + return op_VESRA, 2, 0 + case AVESRAG: + return op_VESRA, 3, 0 + case AVESRAV: + return op_VESRAV, 0, 0 + case AVESRAVB: + return op_VESRAV, 0, 0 + case AVESRAVH: + return op_VESRAV, 1, 0 + case AVESRAVF: + return op_VESRAV, 2, 0 + case AVESRAVG: + return op_VESRAV, 3, 0 + case AVESRL: + return op_VESRL, 0, 0 + case AVESRLB: + return op_VESRL, 0, 0 + case AVESRLH: + return op_VESRL, 1, 0 + case AVESRLF: + return op_VESRL, 2, 0 + case AVESRLG: + return op_VESRL, 3, 0 + case AVESRLV: + return op_VESRLV, 0, 0 + case AVESRLVB: + return op_VESRLV, 0, 0 + case AVESRLVH: + return op_VESRLV, 1, 0 + case AVESRLVF: + return op_VESRLV, 2, 0 + case AVESRLVG: + return op_VESRLV, 3, 0 + case AVX: + return op_VX, 0, 0 + case AVFAE: + return op_VFAE, 0, 0 + case AVFAEB: + return op_VFAE, 0, 0 + case AVFAEH: + return op_VFAE, 1, 0 + case AVFAEF: + return op_VFAE, 2, 0 + case AVFAEBS: + return op_VFAE, 0, 1 + case AVFAEHS: + return op_VFAE, 1, 1 + case AVFAEFS: + return op_VFAE, 2, 1 + case AVFAEZB: + return op_VFAE, 0, 2 + case AVFAEZH: + return op_VFAE, 1, 2 + case AVFAEZF: + return op_VFAE, 2, 2 + case AVFAEZBS: + return op_VFAE, 0, 3 + case AVFAEZHS: + return op_VFAE, 1, 3 + case AVFAEZFS: + return op_VFAE, 2, 3 + case AVFEE: + return op_VFEE, 0, 0 + case AVFEEB: + return op_VFEE, 0, 0 + case AVFEEH: + return op_VFEE, 1, 0 + case AVFEEF: + return op_VFEE, 2, 0 + case AVFEEBS: + return op_VFEE, 0, 1 + case AVFEEHS: + return op_VFEE, 1, 1 + case AVFEEFS: + return op_VFEE, 2, 1 + case AVFEEZB: + return op_VFEE, 0, 2 + case AVFEEZH: + return op_VFEE, 1, 2 + case AVFEEZF: + return op_VFEE, 2, 2 + case AVFEEZBS: + return op_VFEE, 0, 3 + case AVFEEZHS: + return op_VFEE, 1, 3 + case AVFEEZFS: + return op_VFEE, 2, 3 + case AVFENE: + return op_VFENE, 
0, 0 + case AVFENEB: + return op_VFENE, 0, 0 + case AVFENEH: + return op_VFENE, 1, 0 + case AVFENEF: + return op_VFENE, 2, 0 + case AVFENEBS: + return op_VFENE, 0, 1 + case AVFENEHS: + return op_VFENE, 1, 1 + case AVFENEFS: + return op_VFENE, 2, 1 + case AVFENEZB: + return op_VFENE, 0, 2 + case AVFENEZH: + return op_VFENE, 1, 2 + case AVFENEZF: + return op_VFENE, 2, 2 + case AVFENEZBS: + return op_VFENE, 0, 3 + case AVFENEZHS: + return op_VFENE, 1, 3 + case AVFENEZFS: + return op_VFENE, 2, 3 + case AVFA: + return op_VFA, 0, 0 + case AVFADB: + return op_VFA, 3, 0 + case AWFADB: + return op_VFA, 3, 0 + case AWFK: + return op_WFK, 0, 0 + case AWFKDB: + return op_WFK, 3, 0 + case AVFCE: + return op_VFCE, 0, 0 + case AVFCEDB: + return op_VFCE, 3, 0 + case AVFCEDBS: + return op_VFCE, 3, 1 + case AWFCEDB: + return op_VFCE, 3, 0 + case AWFCEDBS: + return op_VFCE, 3, 1 + case AVFCH: + return op_VFCH, 0, 0 + case AVFCHDB: + return op_VFCH, 3, 0 + case AVFCHDBS: + return op_VFCH, 3, 1 + case AWFCHDB: + return op_VFCH, 3, 0 + case AWFCHDBS: + return op_VFCH, 3, 1 + case AVFCHE: + return op_VFCHE, 0, 0 + case AVFCHEDB: + return op_VFCHE, 3, 0 + case AVFCHEDBS: + return op_VFCHE, 3, 1 + case AWFCHEDB: + return op_VFCHE, 3, 0 + case AWFCHEDBS: + return op_VFCHE, 3, 1 + case AWFC: + return op_WFC, 0, 0 + case AWFCDB: + return op_WFC, 3, 0 + case AVCDG: + return op_VCDG, 0, 0 + case AVCDGB: + return op_VCDG, 3, 0 + case AWCDGB: + return op_VCDG, 3, 0 + case AVCDLG: + return op_VCDLG, 0, 0 + case AVCDLGB: + return op_VCDLG, 3, 0 + case AWCDLGB: + return op_VCDLG, 3, 0 + case AVCGD: + return op_VCGD, 0, 0 + case AVCGDB: + return op_VCGD, 3, 0 + case AWCGDB: + return op_VCGD, 3, 0 + case AVCLGD: + return op_VCLGD, 0, 0 + case AVCLGDB: + return op_VCLGD, 3, 0 + case AWCLGDB: + return op_VCLGD, 3, 0 + case AVFD: + return op_VFD, 0, 0 + case AVFDDB: + return op_VFD, 3, 0 + case AWFDDB: + return op_VFD, 3, 0 + case AVLDE: + return op_VLDE, 0, 0 + case AVLDEB: + return op_VLDE, 2, 0 + case 
AWLDEB: + return op_VLDE, 2, 0 + case AVLED: + return op_VLED, 0, 0 + case AVLEDB: + return op_VLED, 3, 0 + case AWLEDB: + return op_VLED, 3, 0 + case AVFM: + return op_VFM, 0, 0 + case AVFMDB: + return op_VFM, 3, 0 + case AWFMDB: + return op_VFM, 3, 0 + case AVFMA: + return op_VFMA, 0, 0 + case AVFMADB: + return op_VFMA, 3, 0 + case AWFMADB: + return op_VFMA, 3, 0 + case AVFMS: + return op_VFMS, 0, 0 + case AVFMSDB: + return op_VFMS, 3, 0 + case AWFMSDB: + return op_VFMS, 3, 0 + case AVFPSO: + return op_VFPSO, 0, 0 + case AVFPSODB: + return op_VFPSO, 3, 0 + case AWFPSODB: + return op_VFPSO, 3, 0 + case AVFLCDB: + return op_VFPSO, 3, 0 + case AWFLCDB: + return op_VFPSO, 3, 0 + case AVFLNDB: + return op_VFPSO, 3, 1 + case AWFLNDB: + return op_VFPSO, 3, 1 + case AVFLPDB: + return op_VFPSO, 3, 2 + case AWFLPDB: + return op_VFPSO, 3, 2 + case AVFSQ: + return op_VFSQ, 0, 0 + case AVFSQDB: + return op_VFSQ, 3, 0 + case AWFSQDB: + return op_VFSQ, 3, 0 + case AVFS: + return op_VFS, 0, 0 + case AVFSDB: + return op_VFS, 3, 0 + case AWFSDB: + return op_VFS, 3, 0 + case AVFTCI: + return op_VFTCI, 0, 0 + case AVFTCIDB: + return op_VFTCI, 3, 0 + case AWFTCIDB: + return op_VFTCI, 3, 0 + case AVGFM: + return op_VGFM, 0, 0 + case AVGFMB: + return op_VGFM, 0, 0 + case AVGFMH: + return op_VGFM, 1, 0 + case AVGFMF: + return op_VGFM, 2, 0 + case AVGFMG: + return op_VGFM, 3, 0 + case AVGFMA: + return op_VGFMA, 0, 0 + case AVGFMAB: + return op_VGFMA, 0, 0 + case AVGFMAH: + return op_VGFMA, 1, 0 + case AVGFMAF: + return op_VGFMA, 2, 0 + case AVGFMAG: + return op_VGFMA, 3, 0 + case AVGEF: + return op_VGEF, 0, 0 + case AVGEG: + return op_VGEG, 0, 0 + case AVGBM: + return op_VGBM, 0, 0 + case AVZERO: + return op_VGBM, 0, 0 + case AVONE: + return op_VGBM, 0, 0 + case AVGM: + return op_VGM, 0, 0 + case AVGMB: + return op_VGM, 0, 0 + case AVGMH: + return op_VGM, 1, 0 + case AVGMF: + return op_VGM, 2, 0 + case AVGMG: + return op_VGM, 3, 0 + case AVISTR: + return op_VISTR, 0, 0 + case AVISTRB: + 
return op_VISTR, 0, 0 + case AVISTRH: + return op_VISTR, 1, 0 + case AVISTRF: + return op_VISTR, 2, 0 + case AVISTRBS: + return op_VISTR, 0, 1 + case AVISTRHS: + return op_VISTR, 1, 1 + case AVISTRFS: + return op_VISTR, 2, 1 + case AVL: + return op_VL, 0, 0 + case AVLR: + return op_VLR, 0, 0 + case AVLREP: + return op_VLREP, 0, 0 + case AVLREPB: + return op_VLREP, 0, 0 + case AVLREPH: + return op_VLREP, 1, 0 + case AVLREPF: + return op_VLREP, 2, 0 + case AVLREPG: + return op_VLREP, 3, 0 + case AVLC: + return op_VLC, 0, 0 + case AVLCB: + return op_VLC, 0, 0 + case AVLCH: + return op_VLC, 1, 0 + case AVLCF: + return op_VLC, 2, 0 + case AVLCG: + return op_VLC, 3, 0 + case AVLEH: + return op_VLEH, 0, 0 + case AVLEF: + return op_VLEF, 0, 0 + case AVLEG: + return op_VLEG, 0, 0 + case AVLEB: + return op_VLEB, 0, 0 + case AVLEIH: + return op_VLEIH, 0, 0 + case AVLEIF: + return op_VLEIF, 0, 0 + case AVLEIG: + return op_VLEIG, 0, 0 + case AVLEIB: + return op_VLEIB, 0, 0 + case AVFI: + return op_VFI, 0, 0 + case AVFIDB: + return op_VFI, 3, 0 + case AWFIDB: + return op_VFI, 3, 0 + case AVLGV: + return op_VLGV, 0, 0 + case AVLGVB: + return op_VLGV, 0, 0 + case AVLGVH: + return op_VLGV, 1, 0 + case AVLGVF: + return op_VLGV, 2, 0 + case AVLGVG: + return op_VLGV, 3, 0 + case AVLLEZ: + return op_VLLEZ, 0, 0 + case AVLLEZB: + return op_VLLEZ, 0, 0 + case AVLLEZH: + return op_VLLEZ, 1, 0 + case AVLLEZF: + return op_VLLEZ, 2, 0 + case AVLLEZG: + return op_VLLEZ, 3, 0 + case AVLM: + return op_VLM, 0, 0 + case AVLP: + return op_VLP, 0, 0 + case AVLPB: + return op_VLP, 0, 0 + case AVLPH: + return op_VLP, 1, 0 + case AVLPF: + return op_VLP, 2, 0 + case AVLPG: + return op_VLP, 3, 0 + case AVLBB: + return op_VLBB, 0, 0 + case AVLVG: + return op_VLVG, 0, 0 + case AVLVGB: + return op_VLVG, 0, 0 + case AVLVGH: + return op_VLVG, 1, 0 + case AVLVGF: + return op_VLVG, 2, 0 + case AVLVGG: + return op_VLVG, 3, 0 + case AVLVGP: + return op_VLVGP, 0, 0 + case AVLL: + return op_VLL, 0, 0 + case AVMX: 
+ return op_VMX, 0, 0 + case AVMXB: + return op_VMX, 0, 0 + case AVMXH: + return op_VMX, 1, 0 + case AVMXF: + return op_VMX, 2, 0 + case AVMXG: + return op_VMX, 3, 0 + case AVMXL: + return op_VMXL, 0, 0 + case AVMXLB: + return op_VMXL, 0, 0 + case AVMXLH: + return op_VMXL, 1, 0 + case AVMXLF: + return op_VMXL, 2, 0 + case AVMXLG: + return op_VMXL, 3, 0 + case AVMRH: + return op_VMRH, 0, 0 + case AVMRHB: + return op_VMRH, 0, 0 + case AVMRHH: + return op_VMRH, 1, 0 + case AVMRHF: + return op_VMRH, 2, 0 + case AVMRHG: + return op_VMRH, 3, 0 + case AVMRL: + return op_VMRL, 0, 0 + case AVMRLB: + return op_VMRL, 0, 0 + case AVMRLH: + return op_VMRL, 1, 0 + case AVMRLF: + return op_VMRL, 2, 0 + case AVMRLG: + return op_VMRL, 3, 0 + case AVMN: + return op_VMN, 0, 0 + case AVMNB: + return op_VMN, 0, 0 + case AVMNH: + return op_VMN, 1, 0 + case AVMNF: + return op_VMN, 2, 0 + case AVMNG: + return op_VMN, 3, 0 + case AVMNL: + return op_VMNL, 0, 0 + case AVMNLB: + return op_VMNL, 0, 0 + case AVMNLH: + return op_VMNL, 1, 0 + case AVMNLF: + return op_VMNL, 2, 0 + case AVMNLG: + return op_VMNL, 3, 0 + case AVMAE: + return op_VMAE, 0, 0 + case AVMAEB: + return op_VMAE, 0, 0 + case AVMAEH: + return op_VMAE, 1, 0 + case AVMAEF: + return op_VMAE, 2, 0 + case AVMAH: + return op_VMAH, 0, 0 + case AVMAHB: + return op_VMAH, 0, 0 + case AVMAHH: + return op_VMAH, 1, 0 + case AVMAHF: + return op_VMAH, 2, 0 + case AVMALE: + return op_VMALE, 0, 0 + case AVMALEB: + return op_VMALE, 0, 0 + case AVMALEH: + return op_VMALE, 1, 0 + case AVMALEF: + return op_VMALE, 2, 0 + case AVMALH: + return op_VMALH, 0, 0 + case AVMALHB: + return op_VMALH, 0, 0 + case AVMALHH: + return op_VMALH, 1, 0 + case AVMALHF: + return op_VMALH, 2, 0 + case AVMALO: + return op_VMALO, 0, 0 + case AVMALOB: + return op_VMALO, 0, 0 + case AVMALOH: + return op_VMALO, 1, 0 + case AVMALOF: + return op_VMALO, 2, 0 + case AVMAL: + return op_VMAL, 0, 0 + case AVMALB: + return op_VMAL, 0, 0 + case AVMALHW: + return op_VMAL, 1, 0 + 
case AVMALF: + return op_VMAL, 2, 0 + case AVMAO: + return op_VMAO, 0, 0 + case AVMAOB: + return op_VMAO, 0, 0 + case AVMAOH: + return op_VMAO, 1, 0 + case AVMAOF: + return op_VMAO, 2, 0 + case AVME: + return op_VME, 0, 0 + case AVMEB: + return op_VME, 0, 0 + case AVMEH: + return op_VME, 1, 0 + case AVMEF: + return op_VME, 2, 0 + case AVMH: + return op_VMH, 0, 0 + case AVMHB: + return op_VMH, 0, 0 + case AVMHH: + return op_VMH, 1, 0 + case AVMHF: + return op_VMH, 2, 0 + case AVMLE: + return op_VMLE, 0, 0 + case AVMLEB: + return op_VMLE, 0, 0 + case AVMLEH: + return op_VMLE, 1, 0 + case AVMLEF: + return op_VMLE, 2, 0 + case AVMLH: + return op_VMLH, 0, 0 + case AVMLHB: + return op_VMLH, 0, 0 + case AVMLHH: + return op_VMLH, 1, 0 + case AVMLHF: + return op_VMLH, 2, 0 + case AVMLO: + return op_VMLO, 0, 0 + case AVMLOB: + return op_VMLO, 0, 0 + case AVMLOH: + return op_VMLO, 1, 0 + case AVMLOF: + return op_VMLO, 2, 0 + case AVML: + return op_VML, 0, 0 + case AVMLB: + return op_VML, 0, 0 + case AVMLHW: + return op_VML, 1, 0 + case AVMLF: + return op_VML, 2, 0 + case AVMO: + return op_VMO, 0, 0 + case AVMOB: + return op_VMO, 0, 0 + case AVMOH: + return op_VMO, 1, 0 + case AVMOF: + return op_VMO, 2, 0 + case AVNO: + return op_VNO, 0, 0 + case AVNOT: + return op_VNO, 0, 0 + case AVO: + return op_VO, 0, 0 + case AVPK: + return op_VPK, 0, 0 + case AVPKH: + return op_VPK, 1, 0 + case AVPKF: + return op_VPK, 2, 0 + case AVPKG: + return op_VPK, 3, 0 + case AVPKLS: + return op_VPKLS, 0, 0 + case AVPKLSH: + return op_VPKLS, 1, 0 + case AVPKLSF: + return op_VPKLS, 2, 0 + case AVPKLSG: + return op_VPKLS, 3, 0 + case AVPKLSHS: + return op_VPKLS, 1, 1 + case AVPKLSFS: + return op_VPKLS, 2, 1 + case AVPKLSGS: + return op_VPKLS, 3, 1 + case AVPKS: + return op_VPKS, 0, 0 + case AVPKSH: + return op_VPKS, 1, 0 + case AVPKSF: + return op_VPKS, 2, 0 + case AVPKSG: + return op_VPKS, 3, 0 + case AVPKSHS: + return op_VPKS, 1, 1 + case AVPKSFS: + return op_VPKS, 2, 1 + case AVPKSGS: + return 
op_VPKS, 3, 1 + case AVPERM: + return op_VPERM, 0, 0 + case AVPDI: + return op_VPDI, 0, 0 + case AVPOPCT: + return op_VPOPCT, 0, 0 + case AVREP: + return op_VREP, 0, 0 + case AVREPB: + return op_VREP, 0, 0 + case AVREPH: + return op_VREP, 1, 0 + case AVREPF: + return op_VREP, 2, 0 + case AVREPG: + return op_VREP, 3, 0 + case AVREPI: + return op_VREPI, 0, 0 + case AVREPIB: + return op_VREPI, 0, 0 + case AVREPIH: + return op_VREPI, 1, 0 + case AVREPIF: + return op_VREPI, 2, 0 + case AVREPIG: + return op_VREPI, 3, 0 + case AVSCEF: + return op_VSCEF, 0, 0 + case AVSCEG: + return op_VSCEG, 0, 0 + case AVSEL: + return op_VSEL, 0, 0 + case AVSL: + return op_VSL, 0, 0 + case AVSLB: + return op_VSLB, 0, 0 + case AVSLDB: + return op_VSLDB, 0, 0 + case AVSRA: + return op_VSRA, 0, 0 + case AVSRAB: + return op_VSRAB, 0, 0 + case AVSRL: + return op_VSRL, 0, 0 + case AVSRLB: + return op_VSRLB, 0, 0 + case AVSEG: + return op_VSEG, 0, 0 + case AVSEGB: + return op_VSEG, 0, 0 + case AVSEGH: + return op_VSEG, 1, 0 + case AVSEGF: + return op_VSEG, 2, 0 + case AVST: + return op_VST, 0, 0 + case AVSTEH: + return op_VSTEH, 0, 0 + case AVSTEF: + return op_VSTEF, 0, 0 + case AVSTEG: + return op_VSTEG, 0, 0 + case AVSTEB: + return op_VSTEB, 0, 0 + case AVSTM: + return op_VSTM, 0, 0 + case AVSTL: + return op_VSTL, 0, 0 + case AVSTRC: + return op_VSTRC, 0, 0 + case AVSTRCB: + return op_VSTRC, 0, 0 + case AVSTRCH: + return op_VSTRC, 1, 0 + case AVSTRCF: + return op_VSTRC, 2, 0 + case AVSTRCBS: + return op_VSTRC, 0, 1 + case AVSTRCHS: + return op_VSTRC, 1, 1 + case AVSTRCFS: + return op_VSTRC, 2, 1 + case AVSTRCZB: + return op_VSTRC, 0, 2 + case AVSTRCZH: + return op_VSTRC, 1, 2 + case AVSTRCZF: + return op_VSTRC, 2, 2 + case AVSTRCZBS: + return op_VSTRC, 0, 3 + case AVSTRCZHS: + return op_VSTRC, 1, 3 + case AVSTRCZFS: + return op_VSTRC, 2, 3 + case AVS: + return op_VS, 0, 0 + case AVSB: + return op_VS, 0, 0 + case AVSH: + return op_VS, 1, 0 + case AVSF: + return op_VS, 2, 0 + case AVSG: + 
return op_VS, 3, 0 + case AVSQ: + return op_VS, 4, 0 + case AVSCBI: + return op_VSCBI, 0, 0 + case AVSCBIB: + return op_VSCBI, 0, 0 + case AVSCBIH: + return op_VSCBI, 1, 0 + case AVSCBIF: + return op_VSCBI, 2, 0 + case AVSCBIG: + return op_VSCBI, 3, 0 + case AVSCBIQ: + return op_VSCBI, 4, 0 + case AVSBCBI: + return op_VSBCBI, 0, 0 + case AVSBCBIQ: + return op_VSBCBI, 4, 0 + case AVSBI: + return op_VSBI, 0, 0 + case AVSBIQ: + return op_VSBI, 4, 0 + case AVSUMG: + return op_VSUMG, 0, 0 + case AVSUMGH: + return op_VSUMG, 1, 0 + case AVSUMGF: + return op_VSUMG, 2, 0 + case AVSUMQ: + return op_VSUMQ, 0, 0 + case AVSUMQF: + return op_VSUMQ, 2, 0 + case AVSUMQG: + return op_VSUMQ, 3, 0 + case AVSUM: + return op_VSUM, 0, 0 + case AVSUMB: + return op_VSUM, 0, 0 + case AVSUMH: + return op_VSUM, 1, 0 + case AVTM: + return op_VTM, 0, 0 + case AVUPH: + return op_VUPH, 0, 0 + case AVUPHB: + return op_VUPH, 0, 0 + case AVUPHH: + return op_VUPH, 1, 0 + case AVUPHF: + return op_VUPH, 2, 0 + case AVUPLH: + return op_VUPLH, 0, 0 + case AVUPLHB: + return op_VUPLH, 0, 0 + case AVUPLHH: + return op_VUPLH, 1, 0 + case AVUPLHF: + return op_VUPLH, 2, 0 + case AVUPLL: + return op_VUPLL, 0, 0 + case AVUPLLB: + return op_VUPLL, 0, 0 + case AVUPLLH: + return op_VUPLL, 1, 0 + case AVUPLLF: + return op_VUPLL, 2, 0 + case AVUPL: + return op_VUPL, 0, 0 + case AVUPLB: + return op_VUPL, 0, 0 + case AVUPLHW: + return op_VUPL, 1, 0 + case AVUPLF: + return op_VUPL, 2, 0 + } +} + +// singleElementMask returns the single element mask bits required for the +// given instruction. 
+func singleElementMask(as obj.As) uint32 { + switch as { + case AWFADB, + AWFK, + AWFKDB, + AWFCEDB, + AWFCEDBS, + AWFCHDB, + AWFCHDBS, + AWFCHEDB, + AWFCHEDBS, + AWFC, + AWFCDB, + AWCDGB, + AWCDLGB, + AWCGDB, + AWCLGDB, + AWFDDB, + AWLDEB, + AWLEDB, + AWFMDB, + AWFMADB, + AWFMSDB, + AWFPSODB, + AWFLCDB, + AWFLNDB, + AWFLPDB, + AWFSQDB, + AWFSDB, + AWFTCIDB, + AWFIDB: + return 8 + case AVMSLEG: + return 8 + case AVMSLOG: + return 4 + case AVMSLEOG: + return 12 + } + return 0 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/wasm/a.out.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/wasm/a.out.go new file mode 100644 index 0000000000000000000000000000000000000000..0262630d5a78a0ed4cebc33d466c62e61ae4f55f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/wasm/a.out.go @@ -0,0 +1,341 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package wasm + +import "cmd/internal/obj" + +//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p wasm + +const ( + /* mark flags */ + DONE = 1 << iota + PRESERVEFLAGS // not allowed to clobber flags +) + +/* + * wasm + */ +const ( + AGet = obj.ABaseWasm + obj.A_ARCHSPECIFIC + iota + ASet + ATee + ANot // alias for I32Eqz + + // The following are low-level WebAssembly instructions. + // Their order matters, since it matches the opcode encoding. + // Gaps in the encoding are indicated by comments. + + AUnreachable // opcode 0x00 + ANop + ABlock + ALoop + AIf + AElse + + AEnd // opcode 0x0B + ABr + ABrIf + ABrTable + // ACall and AReturn are WebAssembly instructions. obj.ACALL and obj.ARET are higher level instructions + // with Go semantics, e.g. they manipulate the Go stack on the linear memory. 
+ AReturn + ACall + ACallIndirect + + ADrop // opcode 0x1A + ASelect + + ALocalGet // opcode 0x20 + ALocalSet + ALocalTee + AGlobalGet + AGlobalSet + + AI32Load // opcode 0x28 + AI64Load + AF32Load + AF64Load + AI32Load8S + AI32Load8U + AI32Load16S + AI32Load16U + AI64Load8S + AI64Load8U + AI64Load16S + AI64Load16U + AI64Load32S + AI64Load32U + AI32Store + AI64Store + AF32Store + AF64Store + AI32Store8 + AI32Store16 + AI64Store8 + AI64Store16 + AI64Store32 + ACurrentMemory + AGrowMemory + + AI32Const + AI64Const + AF32Const + AF64Const + + AI32Eqz + AI32Eq + AI32Ne + AI32LtS + AI32LtU + AI32GtS + AI32GtU + AI32LeS + AI32LeU + AI32GeS + AI32GeU + + AI64Eqz + AI64Eq + AI64Ne + AI64LtS + AI64LtU + AI64GtS + AI64GtU + AI64LeS + AI64LeU + AI64GeS + AI64GeU + + AF32Eq + AF32Ne + AF32Lt + AF32Gt + AF32Le + AF32Ge + + AF64Eq + AF64Ne + AF64Lt + AF64Gt + AF64Le + AF64Ge + + AI32Clz + AI32Ctz + AI32Popcnt + AI32Add + AI32Sub + AI32Mul + AI32DivS + AI32DivU + AI32RemS + AI32RemU + AI32And + AI32Or + AI32Xor + AI32Shl + AI32ShrS + AI32ShrU + AI32Rotl + AI32Rotr + + AI64Clz + AI64Ctz + AI64Popcnt + AI64Add + AI64Sub + AI64Mul + AI64DivS + AI64DivU + AI64RemS + AI64RemU + AI64And + AI64Or + AI64Xor + AI64Shl + AI64ShrS + AI64ShrU + AI64Rotl + AI64Rotr + + AF32Abs + AF32Neg + AF32Ceil + AF32Floor + AF32Trunc + AF32Nearest + AF32Sqrt + AF32Add + AF32Sub + AF32Mul + AF32Div + AF32Min + AF32Max + AF32Copysign + + AF64Abs + AF64Neg + AF64Ceil + AF64Floor + AF64Trunc + AF64Nearest + AF64Sqrt + AF64Add + AF64Sub + AF64Mul + AF64Div + AF64Min + AF64Max + AF64Copysign + + AI32WrapI64 + AI32TruncF32S + AI32TruncF32U + AI32TruncF64S + AI32TruncF64U + AI64ExtendI32S + AI64ExtendI32U + AI64TruncF32S + AI64TruncF32U + AI64TruncF64S + AI64TruncF64U + AF32ConvertI32S + AF32ConvertI32U + AF32ConvertI64S + AF32ConvertI64U + AF32DemoteF64 + AF64ConvertI32S + AF64ConvertI32U + AF64ConvertI64S + AF64ConvertI64U + AF64PromoteF32 + AI32ReinterpretF32 + AI64ReinterpretF64 + AF32ReinterpretI32 + 
AF64ReinterpretI64 + AI32Extend8S + AI32Extend16S + AI64Extend8S + AI64Extend16S + AI64Extend32S + + AI32TruncSatF32S // opcode 0xFC 0x00 + AI32TruncSatF32U + AI32TruncSatF64S + AI32TruncSatF64U + AI64TruncSatF32S + AI64TruncSatF32U + AI64TruncSatF64S + AI64TruncSatF64U + + AMemoryInit + ADataDrop + AMemoryCopy + AMemoryFill + ATableInit + AElemDrop + ATableCopy + ATableGrow + ATableSize + ATableFill + + ALast // Sentinel: End of low-level WebAssembly instructions. + + ARESUMEPOINT + // ACALLNORESUME is a call which is not followed by a resume point. + // It is allowed inside of WebAssembly blocks, whereas obj.ACALL is not. + // However, it is not allowed to switch goroutines while inside of an ACALLNORESUME call. + ACALLNORESUME + + ARETUNWIND + + AMOVB + AMOVH + AMOVW + AMOVD + + AWORD + ALAST +) + +const ( + REG_NONE = 0 +) + +const ( + // globals + REG_SP = obj.RBaseWasm + iota // SP is currently 32-bit, until 64-bit memory operations are available + REG_CTXT + REG_g + // RET* are used by runtime.return0 and runtime.reflectcall. These functions pass return values in registers. 
+ REG_RET0 + REG_RET1 + REG_RET2 + REG_RET3 + REG_PAUSE + + // i32 locals + REG_R0 + REG_R1 + REG_R2 + REG_R3 + REG_R4 + REG_R5 + REG_R6 + REG_R7 + REG_R8 + REG_R9 + REG_R10 + REG_R11 + REG_R12 + REG_R13 + REG_R14 + REG_R15 + + // f32 locals + REG_F0 + REG_F1 + REG_F2 + REG_F3 + REG_F4 + REG_F5 + REG_F6 + REG_F7 + REG_F8 + REG_F9 + REG_F10 + REG_F11 + REG_F12 + REG_F13 + REG_F14 + REG_F15 + + // f64 locals + REG_F16 + REG_F17 + REG_F18 + REG_F19 + REG_F20 + REG_F21 + REG_F22 + REG_F23 + REG_F24 + REG_F25 + REG_F26 + REG_F27 + REG_F28 + REG_F29 + REG_F30 + REG_F31 + + REG_PC_B // also first parameter, i32 + + MAXREG + + MINREG = REG_SP + REGSP = REG_SP + REGCTXT = REG_CTXT + REGG = REG_g +) diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/wasm/anames.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/wasm/anames.go new file mode 100644 index 0000000000000000000000000000000000000000..6f1a662960280061a16763fcb6dbf1482e7d4692 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/wasm/anames.go @@ -0,0 +1,217 @@ +// Code generated by stringer -i a.out.go -o anames.go -p wasm; DO NOT EDIT. 
+ +package wasm + +import "cmd/internal/obj" + +var Anames = []string{ + obj.A_ARCHSPECIFIC: "Get", + "Set", + "Tee", + "Not", + "Unreachable", + "Nop", + "Block", + "Loop", + "If", + "Else", + "End", + "Br", + "BrIf", + "BrTable", + "Return", + "Call", + "CallIndirect", + "Drop", + "Select", + "LocalGet", + "LocalSet", + "LocalTee", + "GlobalGet", + "GlobalSet", + "I32Load", + "I64Load", + "F32Load", + "F64Load", + "I32Load8S", + "I32Load8U", + "I32Load16S", + "I32Load16U", + "I64Load8S", + "I64Load8U", + "I64Load16S", + "I64Load16U", + "I64Load32S", + "I64Load32U", + "I32Store", + "I64Store", + "F32Store", + "F64Store", + "I32Store8", + "I32Store16", + "I64Store8", + "I64Store16", + "I64Store32", + "CurrentMemory", + "GrowMemory", + "I32Const", + "I64Const", + "F32Const", + "F64Const", + "I32Eqz", + "I32Eq", + "I32Ne", + "I32LtS", + "I32LtU", + "I32GtS", + "I32GtU", + "I32LeS", + "I32LeU", + "I32GeS", + "I32GeU", + "I64Eqz", + "I64Eq", + "I64Ne", + "I64LtS", + "I64LtU", + "I64GtS", + "I64GtU", + "I64LeS", + "I64LeU", + "I64GeS", + "I64GeU", + "F32Eq", + "F32Ne", + "F32Lt", + "F32Gt", + "F32Le", + "F32Ge", + "F64Eq", + "F64Ne", + "F64Lt", + "F64Gt", + "F64Le", + "F64Ge", + "I32Clz", + "I32Ctz", + "I32Popcnt", + "I32Add", + "I32Sub", + "I32Mul", + "I32DivS", + "I32DivU", + "I32RemS", + "I32RemU", + "I32And", + "I32Or", + "I32Xor", + "I32Shl", + "I32ShrS", + "I32ShrU", + "I32Rotl", + "I32Rotr", + "I64Clz", + "I64Ctz", + "I64Popcnt", + "I64Add", + "I64Sub", + "I64Mul", + "I64DivS", + "I64DivU", + "I64RemS", + "I64RemU", + "I64And", + "I64Or", + "I64Xor", + "I64Shl", + "I64ShrS", + "I64ShrU", + "I64Rotl", + "I64Rotr", + "F32Abs", + "F32Neg", + "F32Ceil", + "F32Floor", + "F32Trunc", + "F32Nearest", + "F32Sqrt", + "F32Add", + "F32Sub", + "F32Mul", + "F32Div", + "F32Min", + "F32Max", + "F32Copysign", + "F64Abs", + "F64Neg", + "F64Ceil", + "F64Floor", + "F64Trunc", + "F64Nearest", + "F64Sqrt", + "F64Add", + "F64Sub", + "F64Mul", + "F64Div", + "F64Min", + "F64Max", + 
"F64Copysign", + "I32WrapI64", + "I32TruncF32S", + "I32TruncF32U", + "I32TruncF64S", + "I32TruncF64U", + "I64ExtendI32S", + "I64ExtendI32U", + "I64TruncF32S", + "I64TruncF32U", + "I64TruncF64S", + "I64TruncF64U", + "F32ConvertI32S", + "F32ConvertI32U", + "F32ConvertI64S", + "F32ConvertI64U", + "F32DemoteF64", + "F64ConvertI32S", + "F64ConvertI32U", + "F64ConvertI64S", + "F64ConvertI64U", + "F64PromoteF32", + "I32ReinterpretF32", + "I64ReinterpretF64", + "F32ReinterpretI32", + "F64ReinterpretI64", + "I32Extend8S", + "I32Extend16S", + "I64Extend8S", + "I64Extend16S", + "I64Extend32S", + "I32TruncSatF32S", + "I32TruncSatF32U", + "I32TruncSatF64S", + "I32TruncSatF64U", + "I64TruncSatF32S", + "I64TruncSatF32U", + "I64TruncSatF64S", + "I64TruncSatF64U", + "MemoryInit", + "DataDrop", + "MemoryCopy", + "MemoryFill", + "TableInit", + "ElemDrop", + "TableCopy", + "TableGrow", + "TableSize", + "TableFill", + "Last", + "RESUMEPOINT", + "CALLNORESUME", + "RETUNWIND", + "MOVB", + "MOVH", + "MOVW", + "MOVD", + "WORD", + "LAST", +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/wasm/wasmobj.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/wasm/wasmobj.go new file mode 100644 index 0000000000000000000000000000000000000000..23f51f8b4238e380915760eb4d9fdf9a236e45a6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/wasm/wasmobj.go @@ -0,0 +1,1331 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package wasm + +import ( + "bytes" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/sys" + "encoding/binary" + "fmt" + "internal/abi" + "io" + "math" +) + +var Register = map[string]int16{ + "SP": REG_SP, + "CTXT": REG_CTXT, + "g": REG_g, + "RET0": REG_RET0, + "RET1": REG_RET1, + "RET2": REG_RET2, + "RET3": REG_RET3, + "PAUSE": REG_PAUSE, + + "R0": REG_R0, + "R1": REG_R1, + "R2": REG_R2, + "R3": REG_R3, + "R4": REG_R4, + "R5": REG_R5, + "R6": REG_R6, + "R7": REG_R7, + "R8": REG_R8, + "R9": REG_R9, + "R10": REG_R10, + "R11": REG_R11, + "R12": REG_R12, + "R13": REG_R13, + "R14": REG_R14, + "R15": REG_R15, + + "F0": REG_F0, + "F1": REG_F1, + "F2": REG_F2, + "F3": REG_F3, + "F4": REG_F4, + "F5": REG_F5, + "F6": REG_F6, + "F7": REG_F7, + "F8": REG_F8, + "F9": REG_F9, + "F10": REG_F10, + "F11": REG_F11, + "F12": REG_F12, + "F13": REG_F13, + "F14": REG_F14, + "F15": REG_F15, + + "F16": REG_F16, + "F17": REG_F17, + "F18": REG_F18, + "F19": REG_F19, + "F20": REG_F20, + "F21": REG_F21, + "F22": REG_F22, + "F23": REG_F23, + "F24": REG_F24, + "F25": REG_F25, + "F26": REG_F26, + "F27": REG_F27, + "F28": REG_F28, + "F29": REG_F29, + "F30": REG_F30, + "F31": REG_F31, + + "PC_B": REG_PC_B, +} + +var registerNames []string + +func init() { + obj.RegisterRegister(MINREG, MAXREG, rconv) + obj.RegisterOpcode(obj.ABaseWasm, Anames) + + registerNames = make([]string, MAXREG-MINREG) + for name, reg := range Register { + registerNames[reg-MINREG] = name + } +} + +func rconv(r int) string { + return registerNames[r-MINREG] +} + +var unaryDst = map[obj.As]bool{ + ASet: true, + ATee: true, + ACall: true, + ACallIndirect: true, + ABr: true, + ABrIf: true, + ABrTable: true, + AI32Store: true, + AI64Store: true, + AF32Store: true, + AF64Store: true, + AI32Store8: true, + AI32Store16: true, + AI64Store8: true, + AI64Store16: true, + AI64Store32: true, + ACALLNORESUME: true, +} + +var Linkwasm = obj.LinkArch{ + Arch: sys.ArchWasm, + Init: instinit, + Preprocess: preprocess, + 
Assemble: assemble, + UnaryDst: unaryDst, +} + +var ( + morestack *obj.LSym + morestackNoCtxt *obj.LSym + sigpanic *obj.LSym +) + +const ( + /* mark flags */ + WasmImport = 1 << 0 +) + +const ( + // This is a special wasm module name that when used as the module name + // in //go:wasmimport will cause the generated code to pass the stack pointer + // directly to the imported function. In other words, any function that + // uses the gojs module understands the internal Go WASM ABI directly. + GojsModule = "gojs" +) + +func instinit(ctxt *obj.Link) { + morestack = ctxt.Lookup("runtime.morestack") + morestackNoCtxt = ctxt.Lookup("runtime.morestack_noctxt") + sigpanic = ctxt.LookupABI("runtime.sigpanic", obj.ABIInternal) +} + +func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { + appendp := func(p *obj.Prog, as obj.As, args ...obj.Addr) *obj.Prog { + if p.As != obj.ANOP { + p2 := obj.Appendp(p, newprog) + p2.Pc = p.Pc + p = p2 + } + p.As = as + switch len(args) { + case 0: + p.From = obj.Addr{} + p.To = obj.Addr{} + case 1: + if unaryDst[as] { + p.From = obj.Addr{} + p.To = args[0] + } else { + p.From = args[0] + p.To = obj.Addr{} + } + case 2: + p.From = args[0] + p.To = args[1] + default: + panic("bad args") + } + return p + } + + framesize := s.Func().Text.To.Offset + if framesize < 0 { + panic("bad framesize") + } + s.Func().Args = s.Func().Text.To.Val.(int32) + s.Func().Locals = int32(framesize) + + // If the function exits just to call out to a wasmimport, then + // generate the code to translate from our internal Go-stack + // based call convention to the native webassembly call convention. 
+ if wi := s.Func().WasmImport; wi != nil { + s.Func().WasmImportSym = wi.CreateSym(ctxt) + p := s.Func().Text + if p.Link != nil { + panic("wrapper functions for WASM imports should not have a body") + } + to := obj.Addr{ + Type: obj.TYPE_MEM, + Name: obj.NAME_EXTERN, + Sym: s, + } + + // If the module that the import is for is our magic "gojs" module, then this + // indicates that the called function understands the Go stack-based call convention + // so we just pass the stack pointer to it, knowing it will read the params directly + // off the stack and push the results into memory based on the stack pointer. + if wi.Module == GojsModule { + // The called function has a signature of 'func(sp int)'. It has access to the memory + // value somewhere to be able to address the memory based on the "sp" value. + + p = appendp(p, AGet, regAddr(REG_SP)) + p = appendp(p, ACall, to) + + p.Mark = WasmImport + } else { + if len(wi.Results) > 1 { + // TODO(evanphx) implement support for the multi-value proposal: + // https://github.com/WebAssembly/multi-value/blob/master/proposals/multi-value/Overview.md + panic("invalid results type") // impossible until multi-value proposal has landed + } + if len(wi.Results) == 1 { + // If we have a result (rather than returning nothing at all), then + // we'll write the result to the Go stack relative to the current stack pointer. + // We cache the current stack pointer value on the wasm stack here and then use + // it after the Call instruction to store the result. + p = appendp(p, AGet, regAddr(REG_SP)) + } + for _, f := range wi.Params { + // Each load instructions will consume the value of sp on the stack, so + // we need to read sp for each param. WASM appears to not have a stack dup instruction + // (a strange omission for a stack-based VM), if it did, we'd be using the dup here. + p = appendp(p, AGet, regAddr(REG_SP)) + + // Offset is the location of the param on the Go stack (ie relative to sp). 
+ // Because of our call convention, the parameters are located an additional 8 bytes + // from sp because we store the return address as an int64 at the bottom of the stack. + // Ie the stack looks like [return_addr, param3, param2, param1, etc] + + // Ergo, we add 8 to the true byte offset of the param to skip the return address. + loadOffset := f.Offset + 8 + + // We're reading the value from the Go stack onto the WASM stack and leaving it there + // for CALL to pick them up. + switch f.Type { + case obj.WasmI32: + p = appendp(p, AI32Load, constAddr(loadOffset)) + case obj.WasmI64: + p = appendp(p, AI64Load, constAddr(loadOffset)) + case obj.WasmF32: + p = appendp(p, AF32Load, constAddr(loadOffset)) + case obj.WasmF64: + p = appendp(p, AF64Load, constAddr(loadOffset)) + case obj.WasmPtr: + p = appendp(p, AI64Load, constAddr(loadOffset)) + p = appendp(p, AI32WrapI64) + default: + panic("bad param type") + } + } + + // The call instruction is marked as being for a wasm import so that a later phase + // will generate relocation information that allows us to patch this with then + // offset of the imported function in the wasm imports. + p = appendp(p, ACall, to) + p.Mark = WasmImport + + if len(wi.Results) == 1 { + f := wi.Results[0] + + // Much like with the params, we need to adjust the offset we store the result value + // to by 8 bytes to account for the return address on the Go stack. + storeOffset := f.Offset + 8 + + // This code is paired the code above that reads the stack pointer onto the wasm + // stack. We've done this so we have a consistent view of the sp value as it might + // be manipulated by the call and we want to ignore that manipulation here. 
+ switch f.Type { + case obj.WasmI32: + p = appendp(p, AI32Store, constAddr(storeOffset)) + case obj.WasmI64: + p = appendp(p, AI64Store, constAddr(storeOffset)) + case obj.WasmF32: + p = appendp(p, AF32Store, constAddr(storeOffset)) + case obj.WasmF64: + p = appendp(p, AF64Store, constAddr(storeOffset)) + case obj.WasmPtr: + p = appendp(p, AI64ExtendI32U) + p = appendp(p, AI64Store, constAddr(storeOffset)) + default: + panic("bad result type") + } + } + } + + p = appendp(p, obj.ARET) + + // It should be 0 already, but we'll set it to 0 anyway just to be sure + // that the code below which adds frame expansion code to the function body + // isn't run. We don't want the frame expansion code because our function + // body is just the code to translate and call the imported function. + framesize = 0 + } else if s.Func().Text.From.Sym.Wrapper() { + // if g._panic != nil && g._panic.argp == FP { + // g._panic.argp = bottom-of-frame + // } + // + // MOVD g_panic(g), R0 + // Get R0 + // I64Eqz + // Not + // If + // Get SP + // I64ExtendI32U + // I64Const $framesize+8 + // I64Add + // I64Load panic_argp(R0) + // I64Eq + // If + // MOVD SP, panic_argp(R0) + // End + // End + + gpanic := obj.Addr{ + Type: obj.TYPE_MEM, + Reg: REGG, + Offset: 4 * 8, // g_panic + } + + panicargp := obj.Addr{ + Type: obj.TYPE_MEM, + Reg: REG_R0, + Offset: 0, // panic.argp + } + + p := s.Func().Text + p = appendp(p, AMOVD, gpanic, regAddr(REG_R0)) + + p = appendp(p, AGet, regAddr(REG_R0)) + p = appendp(p, AI64Eqz) + p = appendp(p, ANot) + p = appendp(p, AIf) + + p = appendp(p, AGet, regAddr(REG_SP)) + p = appendp(p, AI64ExtendI32U) + p = appendp(p, AI64Const, constAddr(framesize+8)) + p = appendp(p, AI64Add) + p = appendp(p, AI64Load, panicargp) + + p = appendp(p, AI64Eq) + p = appendp(p, AIf) + p = appendp(p, AMOVD, regAddr(REG_SP), panicargp) + p = appendp(p, AEnd) + + p = appendp(p, AEnd) + } + + if framesize > 0 { + p := s.Func().Text + p = appendp(p, AGet, regAddr(REG_SP)) + p = appendp(p, 
AI32Const, constAddr(framesize)) + p = appendp(p, AI32Sub) + p = appendp(p, ASet, regAddr(REG_SP)) + p.Spadj = int32(framesize) + } + + // If the framesize is 0, then imply nosplit because it's a specially + // generated function. + needMoreStack := framesize > 0 && !s.Func().Text.From.Sym.NoSplit() + + // If the maymorestack debug option is enabled, insert the + // call to maymorestack *before* processing resume points so + // we can construct a resume point after maymorestack for + // morestack to resume at. + var pMorestack = s.Func().Text + if needMoreStack && ctxt.Flag_maymorestack != "" { + p := pMorestack + + // Save REGCTXT on the stack. + const tempFrame = 8 + p = appendp(p, AGet, regAddr(REG_SP)) + p = appendp(p, AI32Const, constAddr(tempFrame)) + p = appendp(p, AI32Sub) + p = appendp(p, ASet, regAddr(REG_SP)) + p.Spadj = tempFrame + ctxtp := obj.Addr{ + Type: obj.TYPE_MEM, + Reg: REG_SP, + Offset: 0, + } + p = appendp(p, AMOVD, regAddr(REGCTXT), ctxtp) + + // maymorestack must not itself preempt because we + // don't have full stack information, so this can be + // ACALLNORESUME. + p = appendp(p, ACALLNORESUME, constAddr(0)) + // See ../x86/obj6.go + sym := ctxt.LookupABI(ctxt.Flag_maymorestack, s.ABI()) + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: sym} + + // Restore REGCTXT. + p = appendp(p, AMOVD, ctxtp, regAddr(REGCTXT)) + p = appendp(p, AGet, regAddr(REG_SP)) + p = appendp(p, AI32Const, constAddr(tempFrame)) + p = appendp(p, AI32Add) + p = appendp(p, ASet, regAddr(REG_SP)) + p.Spadj = -tempFrame + + // Add an explicit ARESUMEPOINT after maymorestack for + // morestack to resume at. + pMorestack = appendp(p, ARESUMEPOINT) + } + + // Introduce resume points for CALL instructions + // and collect other explicit resume points. 
+ numResumePoints := 0 + explicitBlockDepth := 0 + pc := int64(0) // pc is only incremented when necessary, this avoids bloat of the BrTable instruction + var tableIdxs []uint64 + tablePC := int64(0) + base := ctxt.PosTable.Pos(s.Func().Text.Pos).Base() + for p := s.Func().Text; p != nil; p = p.Link { + prevBase := base + base = ctxt.PosTable.Pos(p.Pos).Base() + switch p.As { + case ABlock, ALoop, AIf: + explicitBlockDepth++ + + case AEnd: + if explicitBlockDepth == 0 { + panic("End without block") + } + explicitBlockDepth-- + + case ARESUMEPOINT: + if explicitBlockDepth != 0 { + panic("RESUME can only be used on toplevel") + } + p.As = AEnd + for tablePC <= pc { + tableIdxs = append(tableIdxs, uint64(numResumePoints)) + tablePC++ + } + numResumePoints++ + pc++ + + case obj.ACALL: + if explicitBlockDepth != 0 { + panic("CALL can only be used on toplevel, try CALLNORESUME instead") + } + appendp(p, ARESUMEPOINT) + } + + p.Pc = pc + + // Increase pc whenever some pc-value table needs a new entry. Don't increase it + // more often to avoid bloat of the BrTable instruction. + // The "base != prevBase" condition detects inlined instructions. They are an + // implicit call, so entering and leaving this section affects the stack trace. + if p.As == ACALLNORESUME || p.As == obj.ANOP || p.As == ANop || p.Spadj != 0 || base != prevBase { + pc++ + if p.To.Sym == sigpanic { + // The panic stack trace expects the PC at the call of sigpanic, + // not the next one. However, runtime.Caller subtracts 1 from the + // PC. To make both PC and PC-1 work (have the same line number), + // we advance the PC by 2 at sigpanic. 
+ pc++ + } + } + } + tableIdxs = append(tableIdxs, uint64(numResumePoints)) + s.Size = pc + 1 + + if needMoreStack { + p := pMorestack + + if framesize <= abi.StackSmall { + // small stack: SP <= stackguard + // Get SP + // Get g + // I32WrapI64 + // I32Load $stackguard0 + // I32GtU + + p = appendp(p, AGet, regAddr(REG_SP)) + p = appendp(p, AGet, regAddr(REGG)) + p = appendp(p, AI32WrapI64) + p = appendp(p, AI32Load, constAddr(2*int64(ctxt.Arch.PtrSize))) // G.stackguard0 + p = appendp(p, AI32LeU) + } else { + // large stack: SP-framesize <= stackguard-StackSmall + // SP <= stackguard+(framesize-StackSmall) + // Get SP + // Get g + // I32WrapI64 + // I32Load $stackguard0 + // I32Const $(framesize-StackSmall) + // I32Add + // I32GtU + + p = appendp(p, AGet, regAddr(REG_SP)) + p = appendp(p, AGet, regAddr(REGG)) + p = appendp(p, AI32WrapI64) + p = appendp(p, AI32Load, constAddr(2*int64(ctxt.Arch.PtrSize))) // G.stackguard0 + p = appendp(p, AI32Const, constAddr(framesize-abi.StackSmall)) + p = appendp(p, AI32Add) + p = appendp(p, AI32LeU) + } + // TODO(neelance): handle wraparound case + + p = appendp(p, AIf) + // This CALL does *not* have a resume point after it + // (we already inserted all of the resume points). As + // a result, morestack will resume at the *previous* + // resume point (typically, the beginning of the + // function) and perform the morestack check again. + // This is why we don't need an explicit loop like + // other architectures. 
+ p = appendp(p, obj.ACALL, constAddr(0)) + if s.Func().Text.From.Sym.NeedCtxt() { + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: morestack} + } else { + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: morestackNoCtxt} + } + p = appendp(p, AEnd) + } + + // record the branches targeting the entry loop and the unwind exit, + // their targets with be filled in later + var entryPointLoopBranches []*obj.Prog + var unwindExitBranches []*obj.Prog + currentDepth := 0 + for p := s.Func().Text; p != nil; p = p.Link { + switch p.As { + case ABlock, ALoop, AIf: + currentDepth++ + case AEnd: + currentDepth-- + } + + switch p.As { + case obj.AJMP: + jmp := *p + p.As = obj.ANOP + + if jmp.To.Type == obj.TYPE_BRANCH { + // jump to basic block + p = appendp(p, AI32Const, constAddr(jmp.To.Val.(*obj.Prog).Pc)) + p = appendp(p, ASet, regAddr(REG_PC_B)) // write next basic block to PC_B + p = appendp(p, ABr) // jump to beginning of entryPointLoop + entryPointLoopBranches = append(entryPointLoopBranches, p) + break + } + + // low-level WebAssembly call to function + switch jmp.To.Type { + case obj.TYPE_MEM: + if !notUsePC_B[jmp.To.Sym.Name] { + // Set PC_B parameter to function entry. + p = appendp(p, AI32Const, constAddr(0)) + } + p = appendp(p, ACall, jmp.To) + + case obj.TYPE_NONE: + // (target PC is on stack) + p = appendp(p, AI32WrapI64) + p = appendp(p, AI32Const, constAddr(16)) // only needs PC_F bits (16-31), PC_B bits (0-15) are zero + p = appendp(p, AI32ShrU) + + // Set PC_B parameter to function entry. + // We need to push this before pushing the target PC_F, + // so temporarily pop PC_F, using our REG_PC_B as a + // scratch register, and push it back after pushing 0. 
+ p = appendp(p, ASet, regAddr(REG_PC_B)) + p = appendp(p, AI32Const, constAddr(0)) + p = appendp(p, AGet, regAddr(REG_PC_B)) + + p = appendp(p, ACallIndirect) + + default: + panic("bad target for JMP") + } + + p = appendp(p, AReturn) + + case obj.ACALL, ACALLNORESUME: + call := *p + p.As = obj.ANOP + + pcAfterCall := call.Link.Pc + if call.To.Sym == sigpanic { + pcAfterCall-- // sigpanic expects to be called without advancing the pc + } + + // SP -= 8 + p = appendp(p, AGet, regAddr(REG_SP)) + p = appendp(p, AI32Const, constAddr(8)) + p = appendp(p, AI32Sub) + p = appendp(p, ASet, regAddr(REG_SP)) + + // write return address to Go stack + p = appendp(p, AGet, regAddr(REG_SP)) + p = appendp(p, AI64Const, obj.Addr{ + Type: obj.TYPE_ADDR, + Name: obj.NAME_EXTERN, + Sym: s, // PC_F + Offset: pcAfterCall, // PC_B + }) + p = appendp(p, AI64Store, constAddr(0)) + + // low-level WebAssembly call to function + switch call.To.Type { + case obj.TYPE_MEM: + if !notUsePC_B[call.To.Sym.Name] { + // Set PC_B parameter to function entry. + p = appendp(p, AI32Const, constAddr(0)) + } + p = appendp(p, ACall, call.To) + + case obj.TYPE_NONE: + // (target PC is on stack) + p = appendp(p, AI32WrapI64) + p = appendp(p, AI32Const, constAddr(16)) // only needs PC_F bits (16-31), PC_B bits (0-15) are zero + p = appendp(p, AI32ShrU) + + // Set PC_B parameter to function entry. + // We need to push this before pushing the target PC_F, + // so temporarily pop PC_F, using our PC_B as a + // scratch register, and push it back after pushing 0. 
+ p = appendp(p, ASet, regAddr(REG_PC_B)) + p = appendp(p, AI32Const, constAddr(0)) + p = appendp(p, AGet, regAddr(REG_PC_B)) + + p = appendp(p, ACallIndirect) + + default: + panic("bad target for CALL") + } + + // return value of call is on the top of the stack, indicating whether to unwind the WebAssembly stack + if call.As == ACALLNORESUME && call.To.Sym != sigpanic { // sigpanic unwinds the stack, but it never resumes + // trying to unwind WebAssembly stack but call has no resume point, terminate with error + p = appendp(p, AIf) + p = appendp(p, obj.AUNDEF) + p = appendp(p, AEnd) + } else { + // unwinding WebAssembly stack to switch goroutine, return 1 + p = appendp(p, ABrIf) + unwindExitBranches = append(unwindExitBranches, p) + } + + case obj.ARET, ARETUNWIND: + ret := *p + p.As = obj.ANOP + + if framesize > 0 { + // SP += framesize + p = appendp(p, AGet, regAddr(REG_SP)) + p = appendp(p, AI32Const, constAddr(framesize)) + p = appendp(p, AI32Add) + p = appendp(p, ASet, regAddr(REG_SP)) + // TODO(neelance): This should theoretically set Spadj, but it only works without. + // p.Spadj = int32(-framesize) + } + + if ret.To.Type == obj.TYPE_MEM { + // Set PC_B parameter to function entry. 
+ p = appendp(p, AI32Const, constAddr(0)) + + // low-level WebAssembly call to function + p = appendp(p, ACall, ret.To) + p = appendp(p, AReturn) + break + } + + // SP += 8 + p = appendp(p, AGet, regAddr(REG_SP)) + p = appendp(p, AI32Const, constAddr(8)) + p = appendp(p, AI32Add) + p = appendp(p, ASet, regAddr(REG_SP)) + + if ret.As == ARETUNWIND { + // function needs to unwind the WebAssembly stack, return 1 + p = appendp(p, AI32Const, constAddr(1)) + p = appendp(p, AReturn) + break + } + + // not unwinding the WebAssembly stack, return 0 + p = appendp(p, AI32Const, constAddr(0)) + p = appendp(p, AReturn) + } + } + + for p := s.Func().Text; p != nil; p = p.Link { + switch p.From.Name { + case obj.NAME_AUTO: + p.From.Offset += framesize + case obj.NAME_PARAM: + p.From.Reg = REG_SP + p.From.Offset += framesize + 8 // parameters are after the frame and the 8-byte return address + } + + switch p.To.Name { + case obj.NAME_AUTO: + p.To.Offset += framesize + case obj.NAME_PARAM: + p.To.Reg = REG_SP + p.To.Offset += framesize + 8 // parameters are after the frame and the 8-byte return address + } + + switch p.As { + case AGet: + if p.From.Type == obj.TYPE_ADDR { + get := *p + p.As = obj.ANOP + + switch get.From.Name { + case obj.NAME_EXTERN: + p = appendp(p, AI64Const, get.From) + case obj.NAME_AUTO, obj.NAME_PARAM: + p = appendp(p, AGet, regAddr(get.From.Reg)) + if get.From.Reg == REG_SP { + p = appendp(p, AI64ExtendI32U) + } + if get.From.Offset != 0 { + p = appendp(p, AI64Const, constAddr(get.From.Offset)) + p = appendp(p, AI64Add) + } + default: + panic("bad Get: invalid name") + } + } + + case AI32Load, AI64Load, AF32Load, AF64Load, AI32Load8S, AI32Load8U, AI32Load16S, AI32Load16U, AI64Load8S, AI64Load8U, AI64Load16S, AI64Load16U, AI64Load32S, AI64Load32U: + if p.From.Type == obj.TYPE_MEM { + as := p.As + from := p.From + + p.As = AGet + p.From = regAddr(from.Reg) + + if from.Reg != REG_SP { + p = appendp(p, AI32WrapI64) + } + + p = appendp(p, as, 
constAddr(from.Offset)) + } + + case AMOVB, AMOVH, AMOVW, AMOVD: + mov := *p + p.As = obj.ANOP + + var loadAs obj.As + var storeAs obj.As + switch mov.As { + case AMOVB: + loadAs = AI64Load8U + storeAs = AI64Store8 + case AMOVH: + loadAs = AI64Load16U + storeAs = AI64Store16 + case AMOVW: + loadAs = AI64Load32U + storeAs = AI64Store32 + case AMOVD: + loadAs = AI64Load + storeAs = AI64Store + } + + appendValue := func() { + switch mov.From.Type { + case obj.TYPE_CONST: + p = appendp(p, AI64Const, constAddr(mov.From.Offset)) + + case obj.TYPE_ADDR: + switch mov.From.Name { + case obj.NAME_NONE, obj.NAME_PARAM, obj.NAME_AUTO: + p = appendp(p, AGet, regAddr(mov.From.Reg)) + if mov.From.Reg == REG_SP { + p = appendp(p, AI64ExtendI32U) + } + p = appendp(p, AI64Const, constAddr(mov.From.Offset)) + p = appendp(p, AI64Add) + case obj.NAME_EXTERN: + p = appendp(p, AI64Const, mov.From) + default: + panic("bad name for MOV") + } + + case obj.TYPE_REG: + p = appendp(p, AGet, mov.From) + if mov.From.Reg == REG_SP { + p = appendp(p, AI64ExtendI32U) + } + + case obj.TYPE_MEM: + p = appendp(p, AGet, regAddr(mov.From.Reg)) + if mov.From.Reg != REG_SP { + p = appendp(p, AI32WrapI64) + } + p = appendp(p, loadAs, constAddr(mov.From.Offset)) + + default: + panic("bad MOV type") + } + } + + switch mov.To.Type { + case obj.TYPE_REG: + appendValue() + if mov.To.Reg == REG_SP { + p = appendp(p, AI32WrapI64) + } + p = appendp(p, ASet, mov.To) + + case obj.TYPE_MEM: + switch mov.To.Name { + case obj.NAME_NONE, obj.NAME_PARAM: + p = appendp(p, AGet, regAddr(mov.To.Reg)) + if mov.To.Reg != REG_SP { + p = appendp(p, AI32WrapI64) + } + case obj.NAME_EXTERN: + p = appendp(p, AI32Const, obj.Addr{Type: obj.TYPE_ADDR, Name: obj.NAME_EXTERN, Sym: mov.To.Sym}) + default: + panic("bad MOV name") + } + appendValue() + p = appendp(p, storeAs, constAddr(mov.To.Offset)) + + default: + panic("bad MOV type") + } + } + } + + { + p := s.Func().Text + if len(unwindExitBranches) > 0 { + p = appendp(p, ABlock) // 
unwindExit, used to return 1 when unwinding the stack + for _, b := range unwindExitBranches { + b.To = obj.Addr{Type: obj.TYPE_BRANCH, Val: p} + } + } + if len(entryPointLoopBranches) > 0 { + p = appendp(p, ALoop) // entryPointLoop, used to jump between basic blocks + for _, b := range entryPointLoopBranches { + b.To = obj.Addr{Type: obj.TYPE_BRANCH, Val: p} + } + } + if numResumePoints > 0 { + // Add Block instructions for resume points and BrTable to jump to selected resume point. + for i := 0; i < numResumePoints+1; i++ { + p = appendp(p, ABlock) + } + p = appendp(p, AGet, regAddr(REG_PC_B)) // read next basic block from PC_B + p = appendp(p, ABrTable, obj.Addr{Val: tableIdxs}) + p = appendp(p, AEnd) // end of Block + } + for p.Link != nil { + p = p.Link // function instructions + } + if len(entryPointLoopBranches) > 0 { + p = appendp(p, AEnd) // end of entryPointLoop + } + p = appendp(p, obj.AUNDEF) + if len(unwindExitBranches) > 0 { + p = appendp(p, AEnd) // end of unwindExit + p = appendp(p, AI32Const, constAddr(1)) + } + } + + currentDepth = 0 + blockDepths := make(map[*obj.Prog]int) + for p := s.Func().Text; p != nil; p = p.Link { + switch p.As { + case ABlock, ALoop, AIf: + currentDepth++ + blockDepths[p] = currentDepth + case AEnd: + currentDepth-- + } + + switch p.As { + case ABr, ABrIf: + if p.To.Type == obj.TYPE_BRANCH { + blockDepth, ok := blockDepths[p.To.Val.(*obj.Prog)] + if !ok { + panic("label not at block") + } + p.To = constAddr(int64(currentDepth - blockDepth)) + } + } + } +} + +func constAddr(value int64) obj.Addr { + return obj.Addr{Type: obj.TYPE_CONST, Offset: value} +} + +func regAddr(reg int16) obj.Addr { + return obj.Addr{Type: obj.TYPE_REG, Reg: reg} +} + +// Most of the Go functions has a single parameter (PC_B) in +// Wasm ABI. This is a list of exceptions. 
+var notUsePC_B = map[string]bool{ + "_rt0_wasm_js": true, + "_rt0_wasm_wasip1": true, + "wasm_export_run": true, + "wasm_export_resume": true, + "wasm_export_getsp": true, + "wasm_pc_f_loop": true, + "gcWriteBarrier": true, + "runtime.gcWriteBarrier1": true, + "runtime.gcWriteBarrier2": true, + "runtime.gcWriteBarrier3": true, + "runtime.gcWriteBarrier4": true, + "runtime.gcWriteBarrier5": true, + "runtime.gcWriteBarrier6": true, + "runtime.gcWriteBarrier7": true, + "runtime.gcWriteBarrier8": true, + "runtime.wasmDiv": true, + "runtime.wasmTruncS": true, + "runtime.wasmTruncU": true, + "cmpbody": true, + "memeqbody": true, + "memcmp": true, + "memchr": true, +} + +func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { + type regVar struct { + global bool + index uint64 + } + + type varDecl struct { + count uint64 + typ valueType + } + + hasLocalSP := false + regVars := [MAXREG - MINREG]*regVar{ + REG_SP - MINREG: {true, 0}, + REG_CTXT - MINREG: {true, 1}, + REG_g - MINREG: {true, 2}, + REG_RET0 - MINREG: {true, 3}, + REG_RET1 - MINREG: {true, 4}, + REG_RET2 - MINREG: {true, 5}, + REG_RET3 - MINREG: {true, 6}, + REG_PAUSE - MINREG: {true, 7}, + } + var varDecls []*varDecl + useAssemblyRegMap := func() { + for i := int16(0); i < 16; i++ { + regVars[REG_R0+i-MINREG] = ®Var{false, uint64(i)} + } + } + + // Function starts with declaration of locals: numbers and types. + // Some functions use a special calling convention. 
+ switch s.Name { + case "_rt0_wasm_js", "_rt0_wasm_wasip1", "wasm_export_run", "wasm_export_resume", "wasm_export_getsp", + "wasm_pc_f_loop", "runtime.wasmDiv", "runtime.wasmTruncS", "runtime.wasmTruncU", "memeqbody": + varDecls = []*varDecl{} + useAssemblyRegMap() + case "memchr", "memcmp": + varDecls = []*varDecl{{count: 2, typ: i32}} + useAssemblyRegMap() + case "cmpbody": + varDecls = []*varDecl{{count: 2, typ: i64}} + useAssemblyRegMap() + case "gcWriteBarrier": + varDecls = []*varDecl{{count: 5, typ: i64}} + useAssemblyRegMap() + case "runtime.gcWriteBarrier1", + "runtime.gcWriteBarrier2", + "runtime.gcWriteBarrier3", + "runtime.gcWriteBarrier4", + "runtime.gcWriteBarrier5", + "runtime.gcWriteBarrier6", + "runtime.gcWriteBarrier7", + "runtime.gcWriteBarrier8": + // no locals + useAssemblyRegMap() + default: + // Normal calling convention: PC_B as WebAssembly parameter. First local variable is local SP cache. + regVars[REG_PC_B-MINREG] = ®Var{false, 0} + hasLocalSP = true + + var regUsed [MAXREG - MINREG]bool + for p := s.Func().Text; p != nil; p = p.Link { + if p.From.Reg != 0 { + regUsed[p.From.Reg-MINREG] = true + } + if p.To.Reg != 0 { + regUsed[p.To.Reg-MINREG] = true + } + } + + regs := []int16{REG_SP} + for reg := int16(REG_R0); reg <= REG_F31; reg++ { + if regUsed[reg-MINREG] { + regs = append(regs, reg) + } + } + + var lastDecl *varDecl + for i, reg := range regs { + t := regType(reg) + if lastDecl == nil || lastDecl.typ != t { + lastDecl = &varDecl{ + count: 0, + typ: t, + } + varDecls = append(varDecls, lastDecl) + } + lastDecl.count++ + if reg != REG_SP { + regVars[reg-MINREG] = ®Var{false, 1 + uint64(i)} + } + } + } + + w := new(bytes.Buffer) + + writeUleb128(w, uint64(len(varDecls))) + for _, decl := range varDecls { + writeUleb128(w, decl.count) + w.WriteByte(byte(decl.typ)) + } + + if hasLocalSP { + // Copy SP from its global variable into a local variable. Accessing a local variable is more efficient. 
+ updateLocalSP(w) + } + + for p := s.Func().Text; p != nil; p = p.Link { + switch p.As { + case AGet: + if p.From.Type != obj.TYPE_REG { + panic("bad Get: argument is not a register") + } + reg := p.From.Reg + v := regVars[reg-MINREG] + if v == nil { + panic("bad Get: invalid register") + } + if reg == REG_SP && hasLocalSP { + writeOpcode(w, ALocalGet) + writeUleb128(w, 1) // local SP + continue + } + if v.global { + writeOpcode(w, AGlobalGet) + } else { + writeOpcode(w, ALocalGet) + } + writeUleb128(w, v.index) + continue + + case ASet: + if p.To.Type != obj.TYPE_REG { + panic("bad Set: argument is not a register") + } + reg := p.To.Reg + v := regVars[reg-MINREG] + if v == nil { + panic("bad Set: invalid register") + } + if reg == REG_SP && hasLocalSP { + writeOpcode(w, ALocalTee) + writeUleb128(w, 1) // local SP + } + if v.global { + writeOpcode(w, AGlobalSet) + } else { + if p.Link.As == AGet && p.Link.From.Reg == reg { + writeOpcode(w, ALocalTee) + p = p.Link + } else { + writeOpcode(w, ALocalSet) + } + } + writeUleb128(w, v.index) + continue + + case ATee: + if p.To.Type != obj.TYPE_REG { + panic("bad Tee: argument is not a register") + } + reg := p.To.Reg + v := regVars[reg-MINREG] + if v == nil { + panic("bad Tee: invalid register") + } + writeOpcode(w, ALocalTee) + writeUleb128(w, v.index) + continue + + case ANot: + writeOpcode(w, AI32Eqz) + continue + + case obj.AUNDEF: + writeOpcode(w, AUnreachable) + continue + + case obj.ANOP, obj.ATEXT, obj.AFUNCDATA, obj.APCDATA: + // ignore + continue + } + + writeOpcode(w, p.As) + + switch p.As { + case ABlock, ALoop, AIf: + if p.From.Offset != 0 { + // block type, rarely used, e.g. 
for code compiled with emscripten + w.WriteByte(0x80 - byte(p.From.Offset)) + continue + } + w.WriteByte(0x40) + + case ABr, ABrIf: + if p.To.Type != obj.TYPE_CONST { + panic("bad Br/BrIf") + } + writeUleb128(w, uint64(p.To.Offset)) + + case ABrTable: + idxs := p.To.Val.([]uint64) + writeUleb128(w, uint64(len(idxs)-1)) + for _, idx := range idxs { + writeUleb128(w, idx) + } + + case ACall: + switch p.To.Type { + case obj.TYPE_CONST: + writeUleb128(w, uint64(p.To.Offset)) + + case obj.TYPE_MEM: + if p.To.Name != obj.NAME_EXTERN && p.To.Name != obj.NAME_STATIC { + fmt.Println(p.To) + panic("bad name for Call") + } + r := obj.Addrel(s) + r.Siz = 1 // actually variable sized + r.Off = int32(w.Len()) + r.Type = objabi.R_CALL + if p.Mark&WasmImport != 0 { + r.Type = objabi.R_WASMIMPORT + } + r.Sym = p.To.Sym + if hasLocalSP { + // The stack may have moved, which changes SP. Update the local SP variable. + updateLocalSP(w) + } + + default: + panic("bad type for Call") + } + + case ACallIndirect: + writeUleb128(w, uint64(p.To.Offset)) + w.WriteByte(0x00) // reserved value + if hasLocalSP { + // The stack may have moved, which changes SP. Update the local SP variable. 
+ updateLocalSP(w) + } + + case AI32Const, AI64Const: + if p.From.Name == obj.NAME_EXTERN { + r := obj.Addrel(s) + r.Siz = 1 // actually variable sized + r.Off = int32(w.Len()) + r.Type = objabi.R_ADDR + r.Sym = p.From.Sym + r.Add = p.From.Offset + break + } + writeSleb128(w, p.From.Offset) + + case AF32Const: + b := make([]byte, 4) + binary.LittleEndian.PutUint32(b, math.Float32bits(float32(p.From.Val.(float64)))) + w.Write(b) + + case AF64Const: + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b, math.Float64bits(p.From.Val.(float64))) + w.Write(b) + + case AI32Load, AI64Load, AF32Load, AF64Load, AI32Load8S, AI32Load8U, AI32Load16S, AI32Load16U, AI64Load8S, AI64Load8U, AI64Load16S, AI64Load16U, AI64Load32S, AI64Load32U: + if p.From.Offset < 0 { + panic("negative offset for *Load") + } + if p.From.Type != obj.TYPE_CONST { + panic("bad type for *Load") + } + if p.From.Offset > math.MaxUint32 { + ctxt.Diag("bad offset in %v", p) + } + writeUleb128(w, align(p.As)) + writeUleb128(w, uint64(p.From.Offset)) + + case AI32Store, AI64Store, AF32Store, AF64Store, AI32Store8, AI32Store16, AI64Store8, AI64Store16, AI64Store32: + if p.To.Offset < 0 { + panic("negative offset") + } + if p.From.Offset > math.MaxUint32 { + ctxt.Diag("bad offset in %v", p) + } + writeUleb128(w, align(p.As)) + writeUleb128(w, uint64(p.To.Offset)) + + case ACurrentMemory, AGrowMemory, AMemoryFill: + w.WriteByte(0x00) + + case AMemoryCopy: + w.WriteByte(0x00) + w.WriteByte(0x00) + + } + } + + w.WriteByte(0x0b) // end + + s.P = w.Bytes() +} + +func updateLocalSP(w *bytes.Buffer) { + writeOpcode(w, AGlobalGet) + writeUleb128(w, 0) // global SP + writeOpcode(w, ALocalSet) + writeUleb128(w, 1) // local SP +} + +func writeOpcode(w *bytes.Buffer, as obj.As) { + switch { + case as < AUnreachable: + panic(fmt.Sprintf("unexpected assembler op: %s", as)) + case as < AEnd: + w.WriteByte(byte(as - AUnreachable + 0x00)) + case as < ADrop: + w.WriteByte(byte(as - AEnd + 0x0B)) + case as < ALocalGet: + 
w.WriteByte(byte(as - ADrop + 0x1A)) + case as < AI32Load: + w.WriteByte(byte(as - ALocalGet + 0x20)) + case as < AI32TruncSatF32S: + w.WriteByte(byte(as - AI32Load + 0x28)) + case as < ALast: + w.WriteByte(0xFC) + w.WriteByte(byte(as - AI32TruncSatF32S + 0x00)) + default: + panic(fmt.Sprintf("unexpected assembler op: %s", as)) + } +} + +type valueType byte + +const ( + i32 valueType = 0x7F + i64 valueType = 0x7E + f32 valueType = 0x7D + f64 valueType = 0x7C +) + +func regType(reg int16) valueType { + switch { + case reg == REG_SP: + return i32 + case reg >= REG_R0 && reg <= REG_R15: + return i64 + case reg >= REG_F0 && reg <= REG_F15: + return f32 + case reg >= REG_F16 && reg <= REG_F31: + return f64 + default: + panic("invalid register") + } +} + +func align(as obj.As) uint64 { + switch as { + case AI32Load8S, AI32Load8U, AI64Load8S, AI64Load8U, AI32Store8, AI64Store8: + return 0 + case AI32Load16S, AI32Load16U, AI64Load16S, AI64Load16U, AI32Store16, AI64Store16: + return 1 + case AI32Load, AF32Load, AI64Load32S, AI64Load32U, AI32Store, AF32Store, AI64Store32: + return 2 + case AI64Load, AF64Load, AI64Store, AF64Store: + return 3 + default: + panic("align: bad op") + } +} + +func writeUleb128(w io.ByteWriter, v uint64) { + if v < 128 { + w.WriteByte(uint8(v)) + return + } + more := true + for more { + c := uint8(v & 0x7f) + v >>= 7 + more = v != 0 + if more { + c |= 0x80 + } + w.WriteByte(c) + } +} + +func writeSleb128(w io.ByteWriter, v int64) { + more := true + for more { + c := uint8(v & 0x7f) + s := uint8(v & 0x40) + v >>= 7 + more = !((v == 0 && s == 0) || (v == -1 && s != 0)) + if more { + c |= 0x80 + } + w.WriteByte(c) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/a.out.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/a.out.go new file mode 100644 index 0000000000000000000000000000000000000000..b121f6df7b2f8e3113dbf8b9cb8a2bc5c466c749 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/a.out.go 
@@ -0,0 +1,426 @@ +// Inferno utils/6c/6.out.h +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/6.out.h +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package x86 + +import "cmd/internal/obj" + +const ( + REG_NONE = 0 +) + +const ( + REG_AL = obj.RBaseAMD64 + iota + REG_CL + REG_DL + REG_BL + REG_SPB + REG_BPB + REG_SIB + REG_DIB + REG_R8B + REG_R9B + REG_R10B + REG_R11B + REG_R12B + REG_R13B + REG_R14B + REG_R15B + + REG_AX + REG_CX + REG_DX + REG_BX + REG_SP + REG_BP + REG_SI + REG_DI + REG_R8 + REG_R9 + REG_R10 + REG_R11 + REG_R12 + REG_R13 + REG_R14 + REG_R15 + + REG_AH + REG_CH + REG_DH + REG_BH + + REG_F0 + REG_F1 + REG_F2 + REG_F3 + REG_F4 + REG_F5 + REG_F6 + REG_F7 + + REG_M0 + REG_M1 + REG_M2 + REG_M3 + REG_M4 + REG_M5 + REG_M6 + REG_M7 + + REG_K0 + REG_K1 + REG_K2 + REG_K3 + REG_K4 + REG_K5 + REG_K6 + REG_K7 + + REG_X0 + REG_X1 + REG_X2 + REG_X3 + REG_X4 + REG_X5 + REG_X6 + REG_X7 + REG_X8 + REG_X9 + REG_X10 + REG_X11 + REG_X12 + REG_X13 + REG_X14 + REG_X15 + REG_X16 + REG_X17 + REG_X18 + REG_X19 + REG_X20 + REG_X21 + REG_X22 + REG_X23 + REG_X24 + REG_X25 + REG_X26 + REG_X27 + REG_X28 + REG_X29 + REG_X30 + REG_X31 + + REG_Y0 + REG_Y1 + REG_Y2 + REG_Y3 + REG_Y4 + REG_Y5 + REG_Y6 + REG_Y7 + REG_Y8 + REG_Y9 + REG_Y10 + REG_Y11 + REG_Y12 + REG_Y13 + REG_Y14 + REG_Y15 + REG_Y16 + REG_Y17 + REG_Y18 + REG_Y19 + REG_Y20 + REG_Y21 + REG_Y22 + REG_Y23 + REG_Y24 + REG_Y25 + REG_Y26 + REG_Y27 + REG_Y28 + REG_Y29 + REG_Y30 + REG_Y31 + + REG_Z0 + REG_Z1 + REG_Z2 + REG_Z3 + REG_Z4 + REG_Z5 + REG_Z6 + REG_Z7 + REG_Z8 + REG_Z9 + REG_Z10 + REG_Z11 + REG_Z12 + REG_Z13 + REG_Z14 + REG_Z15 + REG_Z16 + REG_Z17 + REG_Z18 + REG_Z19 + REG_Z20 + REG_Z21 + REG_Z22 + REG_Z23 + REG_Z24 + REG_Z25 + REG_Z26 + REG_Z27 + REG_Z28 + REG_Z29 + REG_Z30 + REG_Z31 + + REG_CS + REG_SS + REG_DS + REG_ES + REG_FS + REG_GS + + REG_GDTR // global descriptor table register + REG_IDTR // interrupt descriptor table register + REG_LDTR // local descriptor table register + REG_MSW // machine status word + REG_TASK // task register + + REG_CR0 + REG_CR1 + REG_CR2 + REG_CR3 + REG_CR4 + REG_CR5 + REG_CR6 + REG_CR7 + REG_CR8 + REG_CR9 + REG_CR10 + 
REG_CR11 + REG_CR12 + REG_CR13 + REG_CR14 + REG_CR15 + + REG_DR0 + REG_DR1 + REG_DR2 + REG_DR3 + REG_DR4 + REG_DR5 + REG_DR6 + REG_DR7 + + REG_TR0 + REG_TR1 + REG_TR2 + REG_TR3 + REG_TR4 + REG_TR5 + REG_TR6 + REG_TR7 + + REG_TLS + + MAXREG + + REG_CR = REG_CR0 + REG_DR = REG_DR0 + REG_TR = REG_TR0 + + REGARG = -1 + REGRET = REG_AX + FREGRET = REG_X0 + REGSP = REG_SP + REGCTXT = REG_DX + REGENTRYTMP0 = REG_R12 // scratch register available at function entry in ABIInternal + REGENTRYTMP1 = REG_R13 // scratch register available at function entry in ABIInternal + REGG = REG_R14 // g register in ABIInternal + REGEXT = REG_R15 // compiler allocates external registers R15 down + FREGMIN = REG_X0 + 5 // first register variable + FREGEXT = REG_X0 + 15 // first external register + T_TYPE = 1 << 0 + T_INDEX = 1 << 1 + T_OFFSET = 1 << 2 + T_FCONST = 1 << 3 + T_SYM = 1 << 4 + T_SCONST = 1 << 5 + T_64 = 1 << 6 + T_GOTYPE = 1 << 7 +) + +// https://www.uclibc.org/docs/psABI-x86_64.pdf, figure 3.36 +var AMD64DWARFRegisters = map[int16]int16{ + REG_AX: 0, + REG_DX: 1, + REG_CX: 2, + REG_BX: 3, + REG_SI: 4, + REG_DI: 5, + REG_BP: 6, + REG_SP: 7, + REG_R8: 8, + REG_R9: 9, + REG_R10: 10, + REG_R11: 11, + REG_R12: 12, + REG_R13: 13, + REG_R14: 14, + REG_R15: 15, + // 16 is "Return Address RA", whatever that is. + // 17-24 vector registers (X/Y/Z). + REG_X0: 17, + REG_X1: 18, + REG_X2: 19, + REG_X3: 20, + REG_X4: 21, + REG_X5: 22, + REG_X6: 23, + REG_X7: 24, + // 25-32 extended vector registers (X/Y/Z). + REG_X8: 25, + REG_X9: 26, + REG_X10: 27, + REG_X11: 28, + REG_X12: 29, + REG_X13: 30, + REG_X14: 31, + REG_X15: 32, + // ST registers. %stN => FN. + REG_F0: 33, + REG_F1: 34, + REG_F2: 35, + REG_F3: 36, + REG_F4: 37, + REG_F5: 38, + REG_F6: 39, + REG_F7: 40, + // MMX registers. %mmN => MN. + REG_M0: 41, + REG_M1: 42, + REG_M2: 43, + REG_M3: 44, + REG_M4: 45, + REG_M5: 46, + REG_M6: 47, + REG_M7: 48, + // 48 is flags, which doesn't have a name. 
+ REG_ES: 50, + REG_CS: 51, + REG_SS: 52, + REG_DS: 53, + REG_FS: 54, + REG_GS: 55, + // 58 and 59 are {fs,gs}base, which don't have names. + REG_TR: 62, + REG_LDTR: 63, + // 64-66 are mxcsr, fcw, fsw, which don't have names. + + // 67-82 upper vector registers (X/Y/Z). + REG_X16: 67, + REG_X17: 68, + REG_X18: 69, + REG_X19: 70, + REG_X20: 71, + REG_X21: 72, + REG_X22: 73, + REG_X23: 74, + REG_X24: 75, + REG_X25: 76, + REG_X26: 77, + REG_X27: 78, + REG_X28: 79, + REG_X29: 80, + REG_X30: 81, + REG_X31: 82, + + // 118-125 vector mask registers. %kN => KN. + REG_K0: 118, + REG_K1: 119, + REG_K2: 120, + REG_K3: 121, + REG_K4: 122, + REG_K5: 123, + REG_K6: 124, + REG_K7: 125, +} + +// https://www.uclibc.org/docs/psABI-i386.pdf, table 2.14 +var X86DWARFRegisters = map[int16]int16{ + REG_AX: 0, + REG_CX: 1, + REG_DX: 2, + REG_BX: 3, + REG_SP: 4, + REG_BP: 5, + REG_SI: 6, + REG_DI: 7, + // 8 is "Return Address RA", whatever that is. + // 9 is flags, which doesn't have a name. + // ST registers. %stN => FN. + REG_F0: 11, + REG_F1: 12, + REG_F2: 13, + REG_F3: 14, + REG_F4: 15, + REG_F5: 16, + REG_F6: 17, + REG_F7: 18, + // XMM registers. %xmmN => XN. + REG_X0: 21, + REG_X1: 22, + REG_X2: 23, + REG_X3: 24, + REG_X4: 25, + REG_X5: 26, + REG_X6: 27, + REG_X7: 28, + // MMX registers. %mmN => MN. + REG_M0: 29, + REG_M1: 30, + REG_M2: 31, + REG_M3: 32, + REG_M4: 33, + REG_M5: 34, + REG_M6: 35, + REG_M7: 36, + // 39 is mxcsr, which doesn't have a name. + REG_ES: 40, + REG_CS: 41, + REG_SS: 42, + REG_DS: 43, + REG_FS: 44, + REG_GS: 45, + REG_TR: 48, + REG_LDTR: 49, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/aenum.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/aenum.go new file mode 100644 index 0000000000000000000000000000000000000000..79cdd241a236a3986fb831e5ea433a3b66a459bb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/aenum.go @@ -0,0 +1,1610 @@ +// Code generated by x86avxgen. DO NOT EDIT. 
+ +package x86 + +import "cmd/internal/obj" + +//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p x86 + +const ( + AAAA = obj.ABaseAMD64 + obj.A_ARCHSPECIFIC + iota + AAAD + AAAM + AAAS + AADCB + AADCL + AADCQ + AADCW + AADCXL + AADCXQ + AADDB + AADDL + AADDPD + AADDPS + AADDQ + AADDSD + AADDSS + AADDSUBPD + AADDSUBPS + AADDW + AADJSP + AADOXL + AADOXQ + AAESDEC + AAESDECLAST + AAESENC + AAESENCLAST + AAESIMC + AAESKEYGENASSIST + AANDB + AANDL + AANDNL + AANDNPD + AANDNPS + AANDNQ + AANDPD + AANDPS + AANDQ + AANDW + AARPL + ABEXTRL + ABEXTRQ + ABLENDPD + ABLENDPS + ABLENDVPD + ABLENDVPS + ABLSIL + ABLSIQ + ABLSMSKL + ABLSMSKQ + ABLSRL + ABLSRQ + ABOUNDL + ABOUNDW + ABSFL + ABSFQ + ABSFW + ABSRL + ABSRQ + ABSRW + ABSWAPL + ABSWAPQ + ABTCL + ABTCQ + ABTCW + ABTL + ABTQ + ABTRL + ABTRQ + ABTRW + ABTSL + ABTSQ + ABTSW + ABTW + ABYTE + ABZHIL + ABZHIQ + ACBW + ACDQ + ACDQE + ACLAC + ACLC + ACLD + ACLDEMOTE + ACLFLUSH + ACLFLUSHOPT + ACLI + ACLTS + ACLWB + ACMC + ACMOVLCC + ACMOVLCS + ACMOVLEQ + ACMOVLGE + ACMOVLGT + ACMOVLHI + ACMOVLLE + ACMOVLLS + ACMOVLLT + ACMOVLMI + ACMOVLNE + ACMOVLOC + ACMOVLOS + ACMOVLPC + ACMOVLPL + ACMOVLPS + ACMOVQCC + ACMOVQCS + ACMOVQEQ + ACMOVQGE + ACMOVQGT + ACMOVQHI + ACMOVQLE + ACMOVQLS + ACMOVQLT + ACMOVQMI + ACMOVQNE + ACMOVQOC + ACMOVQOS + ACMOVQPC + ACMOVQPL + ACMOVQPS + ACMOVWCC + ACMOVWCS + ACMOVWEQ + ACMOVWGE + ACMOVWGT + ACMOVWHI + ACMOVWLE + ACMOVWLS + ACMOVWLT + ACMOVWMI + ACMOVWNE + ACMOVWOC + ACMOVWOS + ACMOVWPC + ACMOVWPL + ACMOVWPS + ACMPB + ACMPL + ACMPPD + ACMPPS + ACMPQ + ACMPSB + ACMPSD + ACMPSL + ACMPSQ + ACMPSS + ACMPSW + ACMPW + ACMPXCHG16B + ACMPXCHG8B + ACMPXCHGB + ACMPXCHGL + ACMPXCHGQ + ACMPXCHGW + ACOMISD + ACOMISS + ACPUID + ACQO + ACRC32B + ACRC32L + ACRC32Q + ACRC32W + ACVTPD2PL + ACVTPD2PS + ACVTPL2PD + ACVTPL2PS + ACVTPS2PD + ACVTPS2PL + ACVTSD2SL + ACVTSD2SQ + ACVTSD2SS + ACVTSL2SD + ACVTSL2SS + ACVTSQ2SD + ACVTSQ2SS + ACVTSS2SD + ACVTSS2SL + ACVTSS2SQ + ACVTTPD2PL + ACVTTPS2PL + ACVTTSD2SL + 
ACVTTSD2SQ + ACVTTSS2SL + ACVTTSS2SQ + ACWD + ACWDE + ADAA + ADAS + ADECB + ADECL + ADECQ + ADECW + ADIVB + ADIVL + ADIVPD + ADIVPS + ADIVQ + ADIVSD + ADIVSS + ADIVW + ADPPD + ADPPS + AEMMS + AENTER + AEXTRACTPS + AF2XM1 + AFABS + AFADDD + AFADDDP + AFADDF + AFADDL + AFADDW + AFBLD + AFBSTP + AFCHS + AFCLEX + AFCMOVB + AFCMOVBE + AFCMOVCC + AFCMOVCS + AFCMOVE + AFCMOVEQ + AFCMOVHI + AFCMOVLS + AFCMOVNB + AFCMOVNBE + AFCMOVNE + AFCMOVNU + AFCMOVU + AFCMOVUN + AFCOMD + AFCOMDP + AFCOMDPP + AFCOMF + AFCOMFP + AFCOMI + AFCOMIP + AFCOML + AFCOMLP + AFCOMW + AFCOMWP + AFCOS + AFDECSTP + AFDIVD + AFDIVDP + AFDIVF + AFDIVL + AFDIVRD + AFDIVRDP + AFDIVRF + AFDIVRL + AFDIVRW + AFDIVW + AFFREE + AFINCSTP + AFINIT + AFLD1 + AFLDCW + AFLDENV + AFLDL2E + AFLDL2T + AFLDLG2 + AFLDLN2 + AFLDPI + AFLDZ + AFMOVB + AFMOVBP + AFMOVD + AFMOVDP + AFMOVF + AFMOVFP + AFMOVL + AFMOVLP + AFMOVV + AFMOVVP + AFMOVW + AFMOVWP + AFMOVX + AFMOVXP + AFMULD + AFMULDP + AFMULF + AFMULL + AFMULW + AFNOP + AFPATAN + AFPREM + AFPREM1 + AFPTAN + AFRNDINT + AFRSTOR + AFSAVE + AFSCALE + AFSIN + AFSINCOS + AFSQRT + AFSTCW + AFSTENV + AFSTSW + AFSUBD + AFSUBDP + AFSUBF + AFSUBL + AFSUBRD + AFSUBRDP + AFSUBRF + AFSUBRL + AFSUBRW + AFSUBW + AFTST + AFUCOM + AFUCOMI + AFUCOMIP + AFUCOMP + AFUCOMPP + AFXAM + AFXCHD + AFXRSTOR + AFXRSTOR64 + AFXSAVE + AFXSAVE64 + AFXTRACT + AFYL2X + AFYL2XP1 + AHADDPD + AHADDPS + AHLT + AHSUBPD + AHSUBPS + AICEBP + AIDIVB + AIDIVL + AIDIVQ + AIDIVW + AIMUL3L + AIMUL3Q + AIMUL3W + AIMULB + AIMULL + AIMULQ + AIMULW + AINB + AINCB + AINCL + AINCQ + AINCW + AINL + AINSB + AINSERTPS + AINSL + AINSW + AINT + AINTO + AINVD + AINVLPG + AINVPCID + AINW + AIRETL + AIRETQ + AIRETW + AJCC // >= unsigned + AJCS // < unsigned + AJCXZL + AJCXZQ + AJCXZW + AJEQ // == (zero) + AJGE // >= signed + AJGT // > signed + AJHI // > unsigned + AJLE // <= signed + AJLS // <= unsigned + AJLT // < signed + AJMI // sign bit set (negative) + AJNE // != (nonzero) + AJOC // overflow clear + AJOS // overflow 
set + AJPC // parity clear + AJPL // sign bit clear (positive) + AJPS // parity set + AKADDB + AKADDD + AKADDQ + AKADDW + AKANDB + AKANDD + AKANDNB + AKANDND + AKANDNQ + AKANDNW + AKANDQ + AKANDW + AKMOVB + AKMOVD + AKMOVQ + AKMOVW + AKNOTB + AKNOTD + AKNOTQ + AKNOTW + AKORB + AKORD + AKORQ + AKORTESTB + AKORTESTD + AKORTESTQ + AKORTESTW + AKORW + AKSHIFTLB + AKSHIFTLD + AKSHIFTLQ + AKSHIFTLW + AKSHIFTRB + AKSHIFTRD + AKSHIFTRQ + AKSHIFTRW + AKTESTB + AKTESTD + AKTESTQ + AKTESTW + AKUNPCKBW + AKUNPCKDQ + AKUNPCKWD + AKXNORB + AKXNORD + AKXNORQ + AKXNORW + AKXORB + AKXORD + AKXORQ + AKXORW + ALAHF + ALARL + ALARQ + ALARW + ALDDQU + ALDMXCSR + ALEAL + ALEAQ + ALEAVEL + ALEAVEQ + ALEAVEW + ALEAW + ALFENCE + ALFSL + ALFSQ + ALFSW + ALGDT + ALGSL + ALGSQ + ALGSW + ALIDT + ALLDT + ALMSW + ALOCK + ALODSB + ALODSL + ALODSQ + ALODSW + ALONG + ALOOP + ALOOPEQ + ALOOPNE + ALSLL + ALSLQ + ALSLW + ALSSL + ALSSQ + ALSSW + ALTR + ALZCNTL + ALZCNTQ + ALZCNTW + AMASKMOVOU + AMASKMOVQ + AMAXPD + AMAXPS + AMAXSD + AMAXSS + AMFENCE + AMINPD + AMINPS + AMINSD + AMINSS + AMONITOR + AMOVAPD + AMOVAPS + AMOVB + AMOVBEL + AMOVBEQ + AMOVBEW + AMOVBLSX + AMOVBLZX + AMOVBQSX + AMOVBQZX + AMOVBWSX + AMOVBWZX + AMOVDDUP + AMOVHLPS + AMOVHPD + AMOVHPS + AMOVL + AMOVLHPS + AMOVLPD + AMOVLPS + AMOVLQSX + AMOVLQZX + AMOVMSKPD + AMOVMSKPS + AMOVNTDQA + AMOVNTIL + AMOVNTIQ + AMOVNTO + AMOVNTPD + AMOVNTPS + AMOVNTQ + AMOVO + AMOVOU + AMOVQ + AMOVQL + AMOVQOZX + AMOVSB + AMOVSD + AMOVSHDUP + AMOVSL + AMOVSLDUP + AMOVSQ + AMOVSS + AMOVSW + AMOVSWW + AMOVUPD + AMOVUPS + AMOVW + AMOVWLSX + AMOVWLZX + AMOVWQSX + AMOVWQZX + AMOVZWW + AMPSADBW + AMULB + AMULL + AMULPD + AMULPS + AMULQ + AMULSD + AMULSS + AMULW + AMULXL + AMULXQ + AMWAIT + ANEGB + ANEGL + ANEGQ + ANEGW + ANOPL + ANOPW + ANOTB + ANOTL + ANOTQ + ANOTW + AORB + AORL + AORPD + AORPS + AORQ + AORW + AOUTB + AOUTL + AOUTSB + AOUTSL + AOUTSW + AOUTW + APABSB + APABSD + APABSW + APACKSSLW + APACKSSWB + APACKUSDW + APACKUSWB + APADDB + APADDL + APADDQ 
+ APADDSB + APADDSW + APADDUSB + APADDUSW + APADDW + APALIGNR + APAND + APANDN + APAUSE + APAVGB + APAVGW + APBLENDVB + APBLENDW + APCLMULQDQ + APCMPEQB + APCMPEQL + APCMPEQQ + APCMPEQW + APCMPESTRI + APCMPESTRM + APCMPGTB + APCMPGTL + APCMPGTQ + APCMPGTW + APCMPISTRI + APCMPISTRM + APDEPL + APDEPQ + APEXTL + APEXTQ + APEXTRB + APEXTRD + APEXTRQ + APEXTRW + APHADDD + APHADDSW + APHADDW + APHMINPOSUW + APHSUBD + APHSUBSW + APHSUBW + APINSRB + APINSRD + APINSRQ + APINSRW + APMADDUBSW + APMADDWL + APMAXSB + APMAXSD + APMAXSW + APMAXUB + APMAXUD + APMAXUW + APMINSB + APMINSD + APMINSW + APMINUB + APMINUD + APMINUW + APMOVMSKB + APMOVSXBD + APMOVSXBQ + APMOVSXBW + APMOVSXDQ + APMOVSXWD + APMOVSXWQ + APMOVZXBD + APMOVZXBQ + APMOVZXBW + APMOVZXDQ + APMOVZXWD + APMOVZXWQ + APMULDQ + APMULHRSW + APMULHUW + APMULHW + APMULLD + APMULLW + APMULULQ + APOPAL + APOPAW + APOPCNTL + APOPCNTQ + APOPCNTW + APOPFL + APOPFQ + APOPFW + APOPL + APOPQ + APOPW + APOR + APREFETCHNTA + APREFETCHT0 + APREFETCHT1 + APREFETCHT2 + APSADBW + APSHUFB + APSHUFD + APSHUFHW + APSHUFL + APSHUFLW + APSHUFW + APSIGNB + APSIGND + APSIGNW + APSLLL + APSLLO + APSLLQ + APSLLW + APSRAL + APSRAW + APSRLL + APSRLO + APSRLQ + APSRLW + APSUBB + APSUBL + APSUBQ + APSUBSB + APSUBSW + APSUBUSB + APSUBUSW + APSUBW + APTEST + APUNPCKHBW + APUNPCKHLQ + APUNPCKHQDQ + APUNPCKHWL + APUNPCKLBW + APUNPCKLLQ + APUNPCKLQDQ + APUNPCKLWL + APUSHAL + APUSHAW + APUSHFL + APUSHFQ + APUSHFW + APUSHL + APUSHQ + APUSHW + APXOR + AQUAD + ARCLB + ARCLL + ARCLQ + ARCLW + ARCPPS + ARCPSS + ARCRB + ARCRL + ARCRQ + ARCRW + ARDFSBASEL + ARDFSBASEQ + ARDGSBASEL + ARDGSBASEQ + ARDMSR + ARDPID + ARDPKRU + ARDPMC + ARDRANDL + ARDRANDQ + ARDRANDW + ARDSEEDL + ARDSEEDQ + ARDSEEDW + ARDTSC + ARDTSCP + AREP + AREPN + ARETFL + ARETFQ + ARETFW + AROLB + AROLL + AROLQ + AROLW + ARORB + ARORL + ARORQ + ARORW + ARORXL + ARORXQ + AROUNDPD + AROUNDPS + AROUNDSD + AROUNDSS + ARSM + ARSQRTPS + ARSQRTSS + ASAHF + ASALB + ASALL + ASALQ + ASALW + ASARB + 
ASARL + ASARQ + ASARW + ASARXL + ASARXQ + ASBBB + ASBBL + ASBBQ + ASBBW + ASCASB + ASCASL + ASCASQ + ASCASW + ASETCC + ASETCS + ASETEQ + ASETGE + ASETGT + ASETHI + ASETLE + ASETLS + ASETLT + ASETMI + ASETNE + ASETOC + ASETOS + ASETPC + ASETPL + ASETPS + ASFENCE + ASGDT + ASHA1MSG1 + ASHA1MSG2 + ASHA1NEXTE + ASHA1RNDS4 + ASHA256MSG1 + ASHA256MSG2 + ASHA256RNDS2 + ASHLB + ASHLL + ASHLQ + ASHLW + ASHLXL + ASHLXQ + ASHRB + ASHRL + ASHRQ + ASHRW + ASHRXL + ASHRXQ + ASHUFPD + ASHUFPS + ASIDT + ASLDTL + ASLDTQ + ASLDTW + ASMSWL + ASMSWQ + ASMSWW + ASQRTPD + ASQRTPS + ASQRTSD + ASQRTSS + ASTAC + ASTC + ASTD + ASTI + ASTMXCSR + ASTOSB + ASTOSL + ASTOSQ + ASTOSW + ASTRL + ASTRQ + ASTRW + ASUBB + ASUBL + ASUBPD + ASUBPS + ASUBQ + ASUBSD + ASUBSS + ASUBW + ASWAPGS + ASYSCALL + ASYSENTER + ASYSENTER64 + ASYSEXIT + ASYSEXIT64 + ASYSRET + ATESTB + ATESTL + ATESTQ + ATESTW + ATPAUSE + ATZCNTL + ATZCNTQ + ATZCNTW + AUCOMISD + AUCOMISS + AUD1 + AUD2 + AUMWAIT + AUNPCKHPD + AUNPCKHPS + AUNPCKLPD + AUNPCKLPS + AUMONITOR + AV4FMADDPS + AV4FMADDSS + AV4FNMADDPS + AV4FNMADDSS + AVADDPD + AVADDPS + AVADDSD + AVADDSS + AVADDSUBPD + AVADDSUBPS + AVAESDEC + AVAESDECLAST + AVAESENC + AVAESENCLAST + AVAESIMC + AVAESKEYGENASSIST + AVALIGND + AVALIGNQ + AVANDNPD + AVANDNPS + AVANDPD + AVANDPS + AVBLENDMPD + AVBLENDMPS + AVBLENDPD + AVBLENDPS + AVBLENDVPD + AVBLENDVPS + AVBROADCASTF128 + AVBROADCASTF32X2 + AVBROADCASTF32X4 + AVBROADCASTF32X8 + AVBROADCASTF64X2 + AVBROADCASTF64X4 + AVBROADCASTI128 + AVBROADCASTI32X2 + AVBROADCASTI32X4 + AVBROADCASTI32X8 + AVBROADCASTI64X2 + AVBROADCASTI64X4 + AVBROADCASTSD + AVBROADCASTSS + AVCMPPD + AVCMPPS + AVCMPSD + AVCMPSS + AVCOMISD + AVCOMISS + AVCOMPRESSPD + AVCOMPRESSPS + AVCVTDQ2PD + AVCVTDQ2PS + AVCVTPD2DQ + AVCVTPD2DQX + AVCVTPD2DQY + AVCVTPD2PS + AVCVTPD2PSX + AVCVTPD2PSY + AVCVTPD2QQ + AVCVTPD2UDQ + AVCVTPD2UDQX + AVCVTPD2UDQY + AVCVTPD2UQQ + AVCVTPH2PS + AVCVTPS2DQ + AVCVTPS2PD + AVCVTPS2PH + AVCVTPS2QQ + AVCVTPS2UDQ + AVCVTPS2UQQ + AVCVTQQ2PD + 
AVCVTQQ2PS + AVCVTQQ2PSX + AVCVTQQ2PSY + AVCVTSD2SI + AVCVTSD2SIQ + AVCVTSD2SS + AVCVTSD2USI + AVCVTSD2USIL + AVCVTSD2USIQ + AVCVTSI2SDL + AVCVTSI2SDQ + AVCVTSI2SSL + AVCVTSI2SSQ + AVCVTSS2SD + AVCVTSS2SI + AVCVTSS2SIQ + AVCVTSS2USI + AVCVTSS2USIL + AVCVTSS2USIQ + AVCVTTPD2DQ + AVCVTTPD2DQX + AVCVTTPD2DQY + AVCVTTPD2QQ + AVCVTTPD2UDQ + AVCVTTPD2UDQX + AVCVTTPD2UDQY + AVCVTTPD2UQQ + AVCVTTPS2DQ + AVCVTTPS2QQ + AVCVTTPS2UDQ + AVCVTTPS2UQQ + AVCVTTSD2SI + AVCVTTSD2SIQ + AVCVTTSD2USI + AVCVTTSD2USIL + AVCVTTSD2USIQ + AVCVTTSS2SI + AVCVTTSS2SIQ + AVCVTTSS2USI + AVCVTTSS2USIL + AVCVTTSS2USIQ + AVCVTUDQ2PD + AVCVTUDQ2PS + AVCVTUQQ2PD + AVCVTUQQ2PS + AVCVTUQQ2PSX + AVCVTUQQ2PSY + AVCVTUSI2SD + AVCVTUSI2SDL + AVCVTUSI2SDQ + AVCVTUSI2SS + AVCVTUSI2SSL + AVCVTUSI2SSQ + AVDBPSADBW + AVDIVPD + AVDIVPS + AVDIVSD + AVDIVSS + AVDPPD + AVDPPS + AVERR + AVERW + AVEXP2PD + AVEXP2PS + AVEXPANDPD + AVEXPANDPS + AVEXTRACTF128 + AVEXTRACTF32X4 + AVEXTRACTF32X8 + AVEXTRACTF64X2 + AVEXTRACTF64X4 + AVEXTRACTI128 + AVEXTRACTI32X4 + AVEXTRACTI32X8 + AVEXTRACTI64X2 + AVEXTRACTI64X4 + AVEXTRACTPS + AVFIXUPIMMPD + AVFIXUPIMMPS + AVFIXUPIMMSD + AVFIXUPIMMSS + AVFMADD132PD + AVFMADD132PS + AVFMADD132SD + AVFMADD132SS + AVFMADD213PD + AVFMADD213PS + AVFMADD213SD + AVFMADD213SS + AVFMADD231PD + AVFMADD231PS + AVFMADD231SD + AVFMADD231SS + AVFMADDSUB132PD + AVFMADDSUB132PS + AVFMADDSUB213PD + AVFMADDSUB213PS + AVFMADDSUB231PD + AVFMADDSUB231PS + AVFMSUB132PD + AVFMSUB132PS + AVFMSUB132SD + AVFMSUB132SS + AVFMSUB213PD + AVFMSUB213PS + AVFMSUB213SD + AVFMSUB213SS + AVFMSUB231PD + AVFMSUB231PS + AVFMSUB231SD + AVFMSUB231SS + AVFMSUBADD132PD + AVFMSUBADD132PS + AVFMSUBADD213PD + AVFMSUBADD213PS + AVFMSUBADD231PD + AVFMSUBADD231PS + AVFNMADD132PD + AVFNMADD132PS + AVFNMADD132SD + AVFNMADD132SS + AVFNMADD213PD + AVFNMADD213PS + AVFNMADD213SD + AVFNMADD213SS + AVFNMADD231PD + AVFNMADD231PS + AVFNMADD231SD + AVFNMADD231SS + AVFNMSUB132PD + AVFNMSUB132PS + AVFNMSUB132SD + AVFNMSUB132SS + AVFNMSUB213PD + 
AVFNMSUB213PS + AVFNMSUB213SD + AVFNMSUB213SS + AVFNMSUB231PD + AVFNMSUB231PS + AVFNMSUB231SD + AVFNMSUB231SS + AVFPCLASSPD + AVFPCLASSPDX + AVFPCLASSPDY + AVFPCLASSPDZ + AVFPCLASSPS + AVFPCLASSPSX + AVFPCLASSPSY + AVFPCLASSPSZ + AVFPCLASSSD + AVFPCLASSSS + AVGATHERDPD + AVGATHERDPS + AVGATHERPF0DPD + AVGATHERPF0DPS + AVGATHERPF0QPD + AVGATHERPF0QPS + AVGATHERPF1DPD + AVGATHERPF1DPS + AVGATHERPF1QPD + AVGATHERPF1QPS + AVGATHERQPD + AVGATHERQPS + AVGETEXPPD + AVGETEXPPS + AVGETEXPSD + AVGETEXPSS + AVGETMANTPD + AVGETMANTPS + AVGETMANTSD + AVGETMANTSS + AVGF2P8AFFINEINVQB + AVGF2P8AFFINEQB + AVGF2P8MULB + AVHADDPD + AVHADDPS + AVHSUBPD + AVHSUBPS + AVINSERTF128 + AVINSERTF32X4 + AVINSERTF32X8 + AVINSERTF64X2 + AVINSERTF64X4 + AVINSERTI128 + AVINSERTI32X4 + AVINSERTI32X8 + AVINSERTI64X2 + AVINSERTI64X4 + AVINSERTPS + AVLDDQU + AVLDMXCSR + AVMASKMOVDQU + AVMASKMOVPD + AVMASKMOVPS + AVMAXPD + AVMAXPS + AVMAXSD + AVMAXSS + AVMINPD + AVMINPS + AVMINSD + AVMINSS + AVMOVAPD + AVMOVAPS + AVMOVD + AVMOVDDUP + AVMOVDQA + AVMOVDQA32 + AVMOVDQA64 + AVMOVDQU + AVMOVDQU16 + AVMOVDQU32 + AVMOVDQU64 + AVMOVDQU8 + AVMOVHLPS + AVMOVHPD + AVMOVHPS + AVMOVLHPS + AVMOVLPD + AVMOVLPS + AVMOVMSKPD + AVMOVMSKPS + AVMOVNTDQ + AVMOVNTDQA + AVMOVNTPD + AVMOVNTPS + AVMOVQ + AVMOVSD + AVMOVSHDUP + AVMOVSLDUP + AVMOVSS + AVMOVUPD + AVMOVUPS + AVMPSADBW + AVMULPD + AVMULPS + AVMULSD + AVMULSS + AVORPD + AVORPS + AVP4DPWSSD + AVP4DPWSSDS + AVPABSB + AVPABSD + AVPABSQ + AVPABSW + AVPACKSSDW + AVPACKSSWB + AVPACKUSDW + AVPACKUSWB + AVPADDB + AVPADDD + AVPADDQ + AVPADDSB + AVPADDSW + AVPADDUSB + AVPADDUSW + AVPADDW + AVPALIGNR + AVPAND + AVPANDD + AVPANDN + AVPANDND + AVPANDNQ + AVPANDQ + AVPAVGB + AVPAVGW + AVPBLENDD + AVPBLENDMB + AVPBLENDMD + AVPBLENDMQ + AVPBLENDMW + AVPBLENDVB + AVPBLENDW + AVPBROADCASTB + AVPBROADCASTD + AVPBROADCASTMB2Q + AVPBROADCASTMW2D + AVPBROADCASTQ + AVPBROADCASTW + AVPCLMULQDQ + AVPCMPB + AVPCMPD + AVPCMPEQB + AVPCMPEQD + AVPCMPEQQ + AVPCMPEQW + AVPCMPESTRI + AVPCMPESTRM 
+ AVPCMPGTB + AVPCMPGTD + AVPCMPGTQ + AVPCMPGTW + AVPCMPISTRI + AVPCMPISTRM + AVPCMPQ + AVPCMPUB + AVPCMPUD + AVPCMPUQ + AVPCMPUW + AVPCMPW + AVPCOMPRESSB + AVPCOMPRESSD + AVPCOMPRESSQ + AVPCOMPRESSW + AVPCONFLICTD + AVPCONFLICTQ + AVPDPBUSD + AVPDPBUSDS + AVPDPWSSD + AVPDPWSSDS + AVPERM2F128 + AVPERM2I128 + AVPERMB + AVPERMD + AVPERMI2B + AVPERMI2D + AVPERMI2PD + AVPERMI2PS + AVPERMI2Q + AVPERMI2W + AVPERMILPD + AVPERMILPS + AVPERMPD + AVPERMPS + AVPERMQ + AVPERMT2B + AVPERMT2D + AVPERMT2PD + AVPERMT2PS + AVPERMT2Q + AVPERMT2W + AVPERMW + AVPEXPANDB + AVPEXPANDD + AVPEXPANDQ + AVPEXPANDW + AVPEXTRB + AVPEXTRD + AVPEXTRQ + AVPEXTRW + AVPGATHERDD + AVPGATHERDQ + AVPGATHERQD + AVPGATHERQQ + AVPHADDD + AVPHADDSW + AVPHADDW + AVPHMINPOSUW + AVPHSUBD + AVPHSUBSW + AVPHSUBW + AVPINSRB + AVPINSRD + AVPINSRQ + AVPINSRW + AVPLZCNTD + AVPLZCNTQ + AVPMADD52HUQ + AVPMADD52LUQ + AVPMADDUBSW + AVPMADDWD + AVPMASKMOVD + AVPMASKMOVQ + AVPMAXSB + AVPMAXSD + AVPMAXSQ + AVPMAXSW + AVPMAXUB + AVPMAXUD + AVPMAXUQ + AVPMAXUW + AVPMINSB + AVPMINSD + AVPMINSQ + AVPMINSW + AVPMINUB + AVPMINUD + AVPMINUQ + AVPMINUW + AVPMOVB2M + AVPMOVD2M + AVPMOVDB + AVPMOVDW + AVPMOVM2B + AVPMOVM2D + AVPMOVM2Q + AVPMOVM2W + AVPMOVMSKB + AVPMOVQ2M + AVPMOVQB + AVPMOVQD + AVPMOVQW + AVPMOVSDB + AVPMOVSDW + AVPMOVSQB + AVPMOVSQD + AVPMOVSQW + AVPMOVSWB + AVPMOVSXBD + AVPMOVSXBQ + AVPMOVSXBW + AVPMOVSXDQ + AVPMOVSXWD + AVPMOVSXWQ + AVPMOVUSDB + AVPMOVUSDW + AVPMOVUSQB + AVPMOVUSQD + AVPMOVUSQW + AVPMOVUSWB + AVPMOVW2M + AVPMOVWB + AVPMOVZXBD + AVPMOVZXBQ + AVPMOVZXBW + AVPMOVZXDQ + AVPMOVZXWD + AVPMOVZXWQ + AVPMULDQ + AVPMULHRSW + AVPMULHUW + AVPMULHW + AVPMULLD + AVPMULLQ + AVPMULLW + AVPMULTISHIFTQB + AVPMULUDQ + AVPOPCNTB + AVPOPCNTD + AVPOPCNTQ + AVPOPCNTW + AVPOR + AVPORD + AVPORQ + AVPROLD + AVPROLQ + AVPROLVD + AVPROLVQ + AVPRORD + AVPRORQ + AVPRORVD + AVPRORVQ + AVPSADBW + AVPSCATTERDD + AVPSCATTERDQ + AVPSCATTERQD + AVPSCATTERQQ + AVPSHLDD + AVPSHLDQ + AVPSHLDVD + AVPSHLDVQ + AVPSHLDVW + AVPSHLDW + 
AVPSHRDD + AVPSHRDQ + AVPSHRDVD + AVPSHRDVQ + AVPSHRDVW + AVPSHRDW + AVPSHUFB + AVPSHUFBITQMB + AVPSHUFD + AVPSHUFHW + AVPSHUFLW + AVPSIGNB + AVPSIGND + AVPSIGNW + AVPSLLD + AVPSLLDQ + AVPSLLQ + AVPSLLVD + AVPSLLVQ + AVPSLLVW + AVPSLLW + AVPSRAD + AVPSRAQ + AVPSRAVD + AVPSRAVQ + AVPSRAVW + AVPSRAW + AVPSRLD + AVPSRLDQ + AVPSRLQ + AVPSRLVD + AVPSRLVQ + AVPSRLVW + AVPSRLW + AVPSUBB + AVPSUBD + AVPSUBQ + AVPSUBSB + AVPSUBSW + AVPSUBUSB + AVPSUBUSW + AVPSUBW + AVPTERNLOGD + AVPTERNLOGQ + AVPTEST + AVPTESTMB + AVPTESTMD + AVPTESTMQ + AVPTESTMW + AVPTESTNMB + AVPTESTNMD + AVPTESTNMQ + AVPTESTNMW + AVPUNPCKHBW + AVPUNPCKHDQ + AVPUNPCKHQDQ + AVPUNPCKHWD + AVPUNPCKLBW + AVPUNPCKLDQ + AVPUNPCKLQDQ + AVPUNPCKLWD + AVPXOR + AVPXORD + AVPXORQ + AVRANGEPD + AVRANGEPS + AVRANGESD + AVRANGESS + AVRCP14PD + AVRCP14PS + AVRCP14SD + AVRCP14SS + AVRCP28PD + AVRCP28PS + AVRCP28SD + AVRCP28SS + AVRCPPS + AVRCPSS + AVREDUCEPD + AVREDUCEPS + AVREDUCESD + AVREDUCESS + AVRNDSCALEPD + AVRNDSCALEPS + AVRNDSCALESD + AVRNDSCALESS + AVROUNDPD + AVROUNDPS + AVROUNDSD + AVROUNDSS + AVRSQRT14PD + AVRSQRT14PS + AVRSQRT14SD + AVRSQRT14SS + AVRSQRT28PD + AVRSQRT28PS + AVRSQRT28SD + AVRSQRT28SS + AVRSQRTPS + AVRSQRTSS + AVSCALEFPD + AVSCALEFPS + AVSCALEFSD + AVSCALEFSS + AVSCATTERDPD + AVSCATTERDPS + AVSCATTERPF0DPD + AVSCATTERPF0DPS + AVSCATTERPF0QPD + AVSCATTERPF0QPS + AVSCATTERPF1DPD + AVSCATTERPF1DPS + AVSCATTERPF1QPD + AVSCATTERPF1QPS + AVSCATTERQPD + AVSCATTERQPS + AVSHUFF32X4 + AVSHUFF64X2 + AVSHUFI32X4 + AVSHUFI64X2 + AVSHUFPD + AVSHUFPS + AVSQRTPD + AVSQRTPS + AVSQRTSD + AVSQRTSS + AVSTMXCSR + AVSUBPD + AVSUBPS + AVSUBSD + AVSUBSS + AVTESTPD + AVTESTPS + AVUCOMISD + AVUCOMISS + AVUNPCKHPD + AVUNPCKHPS + AVUNPCKLPD + AVUNPCKLPS + AVXORPD + AVXORPS + AVZEROALL + AVZEROUPPER + AWAIT + AWBINVD + AWORD + AWRFSBASEL + AWRFSBASEQ + AWRGSBASEL + AWRGSBASEQ + AWRMSR + AWRPKRU + AXABORT + AXACQUIRE + AXADDB + AXADDL + AXADDQ + AXADDW + AXBEGIN + AXCHGB + AXCHGL + AXCHGQ + AXCHGW + AXEND + AXGETBV + 
AXLAT + AXORB + AXORL + AXORPD + AXORPS + AXORQ + AXORW + AXRELEASE + AXRSTOR + AXRSTOR64 + AXRSTORS + AXRSTORS64 + AXSAVE + AXSAVE64 + AXSAVEC + AXSAVEC64 + AXSAVEOPT + AXSAVEOPT64 + AXSAVES + AXSAVES64 + AXSETBV + AXTEST + ALAST +) diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/anames.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/anames.go new file mode 100644 index 0000000000000000000000000000000000000000..3966381e50d221942a350bb0eb215826e2d8133b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/anames.go @@ -0,0 +1,1608 @@ +// Code generated by stringer -i aenum.go -o anames.go -p x86; DO NOT EDIT. + +package x86 + +import "cmd/internal/obj" + +var Anames = []string{ + obj.A_ARCHSPECIFIC: "AAA", + "AAD", + "AAM", + "AAS", + "ADCB", + "ADCL", + "ADCQ", + "ADCW", + "ADCXL", + "ADCXQ", + "ADDB", + "ADDL", + "ADDPD", + "ADDPS", + "ADDQ", + "ADDSD", + "ADDSS", + "ADDSUBPD", + "ADDSUBPS", + "ADDW", + "ADJSP", + "ADOXL", + "ADOXQ", + "AESDEC", + "AESDECLAST", + "AESENC", + "AESENCLAST", + "AESIMC", + "AESKEYGENASSIST", + "ANDB", + "ANDL", + "ANDNL", + "ANDNPD", + "ANDNPS", + "ANDNQ", + "ANDPD", + "ANDPS", + "ANDQ", + "ANDW", + "ARPL", + "BEXTRL", + "BEXTRQ", + "BLENDPD", + "BLENDPS", + "BLENDVPD", + "BLENDVPS", + "BLSIL", + "BLSIQ", + "BLSMSKL", + "BLSMSKQ", + "BLSRL", + "BLSRQ", + "BOUNDL", + "BOUNDW", + "BSFL", + "BSFQ", + "BSFW", + "BSRL", + "BSRQ", + "BSRW", + "BSWAPL", + "BSWAPQ", + "BTCL", + "BTCQ", + "BTCW", + "BTL", + "BTQ", + "BTRL", + "BTRQ", + "BTRW", + "BTSL", + "BTSQ", + "BTSW", + "BTW", + "BYTE", + "BZHIL", + "BZHIQ", + "CBW", + "CDQ", + "CDQE", + "CLAC", + "CLC", + "CLD", + "CLDEMOTE", + "CLFLUSH", + "CLFLUSHOPT", + "CLI", + "CLTS", + "CLWB", + "CMC", + "CMOVLCC", + "CMOVLCS", + "CMOVLEQ", + "CMOVLGE", + "CMOVLGT", + "CMOVLHI", + "CMOVLLE", + "CMOVLLS", + "CMOVLLT", + "CMOVLMI", + "CMOVLNE", + "CMOVLOC", + "CMOVLOS", + "CMOVLPC", + "CMOVLPL", + "CMOVLPS", + "CMOVQCC", + "CMOVQCS", + "CMOVQEQ", 
+ "CMOVQGE", + "CMOVQGT", + "CMOVQHI", + "CMOVQLE", + "CMOVQLS", + "CMOVQLT", + "CMOVQMI", + "CMOVQNE", + "CMOVQOC", + "CMOVQOS", + "CMOVQPC", + "CMOVQPL", + "CMOVQPS", + "CMOVWCC", + "CMOVWCS", + "CMOVWEQ", + "CMOVWGE", + "CMOVWGT", + "CMOVWHI", + "CMOVWLE", + "CMOVWLS", + "CMOVWLT", + "CMOVWMI", + "CMOVWNE", + "CMOVWOC", + "CMOVWOS", + "CMOVWPC", + "CMOVWPL", + "CMOVWPS", + "CMPB", + "CMPL", + "CMPPD", + "CMPPS", + "CMPQ", + "CMPSB", + "CMPSD", + "CMPSL", + "CMPSQ", + "CMPSS", + "CMPSW", + "CMPW", + "CMPXCHG16B", + "CMPXCHG8B", + "CMPXCHGB", + "CMPXCHGL", + "CMPXCHGQ", + "CMPXCHGW", + "COMISD", + "COMISS", + "CPUID", + "CQO", + "CRC32B", + "CRC32L", + "CRC32Q", + "CRC32W", + "CVTPD2PL", + "CVTPD2PS", + "CVTPL2PD", + "CVTPL2PS", + "CVTPS2PD", + "CVTPS2PL", + "CVTSD2SL", + "CVTSD2SQ", + "CVTSD2SS", + "CVTSL2SD", + "CVTSL2SS", + "CVTSQ2SD", + "CVTSQ2SS", + "CVTSS2SD", + "CVTSS2SL", + "CVTSS2SQ", + "CVTTPD2PL", + "CVTTPS2PL", + "CVTTSD2SL", + "CVTTSD2SQ", + "CVTTSS2SL", + "CVTTSS2SQ", + "CWD", + "CWDE", + "DAA", + "DAS", + "DECB", + "DECL", + "DECQ", + "DECW", + "DIVB", + "DIVL", + "DIVPD", + "DIVPS", + "DIVQ", + "DIVSD", + "DIVSS", + "DIVW", + "DPPD", + "DPPS", + "EMMS", + "ENTER", + "EXTRACTPS", + "F2XM1", + "FABS", + "FADDD", + "FADDDP", + "FADDF", + "FADDL", + "FADDW", + "FBLD", + "FBSTP", + "FCHS", + "FCLEX", + "FCMOVB", + "FCMOVBE", + "FCMOVCC", + "FCMOVCS", + "FCMOVE", + "FCMOVEQ", + "FCMOVHI", + "FCMOVLS", + "FCMOVNB", + "FCMOVNBE", + "FCMOVNE", + "FCMOVNU", + "FCMOVU", + "FCMOVUN", + "FCOMD", + "FCOMDP", + "FCOMDPP", + "FCOMF", + "FCOMFP", + "FCOMI", + "FCOMIP", + "FCOML", + "FCOMLP", + "FCOMW", + "FCOMWP", + "FCOS", + "FDECSTP", + "FDIVD", + "FDIVDP", + "FDIVF", + "FDIVL", + "FDIVRD", + "FDIVRDP", + "FDIVRF", + "FDIVRL", + "FDIVRW", + "FDIVW", + "FFREE", + "FINCSTP", + "FINIT", + "FLD1", + "FLDCW", + "FLDENV", + "FLDL2E", + "FLDL2T", + "FLDLG2", + "FLDLN2", + "FLDPI", + "FLDZ", + "FMOVB", + "FMOVBP", + "FMOVD", + "FMOVDP", + "FMOVF", + "FMOVFP", + "FMOVL", 
+ "FMOVLP", + "FMOVV", + "FMOVVP", + "FMOVW", + "FMOVWP", + "FMOVX", + "FMOVXP", + "FMULD", + "FMULDP", + "FMULF", + "FMULL", + "FMULW", + "FNOP", + "FPATAN", + "FPREM", + "FPREM1", + "FPTAN", + "FRNDINT", + "FRSTOR", + "FSAVE", + "FSCALE", + "FSIN", + "FSINCOS", + "FSQRT", + "FSTCW", + "FSTENV", + "FSTSW", + "FSUBD", + "FSUBDP", + "FSUBF", + "FSUBL", + "FSUBRD", + "FSUBRDP", + "FSUBRF", + "FSUBRL", + "FSUBRW", + "FSUBW", + "FTST", + "FUCOM", + "FUCOMI", + "FUCOMIP", + "FUCOMP", + "FUCOMPP", + "FXAM", + "FXCHD", + "FXRSTOR", + "FXRSTOR64", + "FXSAVE", + "FXSAVE64", + "FXTRACT", + "FYL2X", + "FYL2XP1", + "HADDPD", + "HADDPS", + "HLT", + "HSUBPD", + "HSUBPS", + "ICEBP", + "IDIVB", + "IDIVL", + "IDIVQ", + "IDIVW", + "IMUL3L", + "IMUL3Q", + "IMUL3W", + "IMULB", + "IMULL", + "IMULQ", + "IMULW", + "INB", + "INCB", + "INCL", + "INCQ", + "INCW", + "INL", + "INSB", + "INSERTPS", + "INSL", + "INSW", + "INT", + "INTO", + "INVD", + "INVLPG", + "INVPCID", + "INW", + "IRETL", + "IRETQ", + "IRETW", + "JCC", + "JCS", + "JCXZL", + "JCXZQ", + "JCXZW", + "JEQ", + "JGE", + "JGT", + "JHI", + "JLE", + "JLS", + "JLT", + "JMI", + "JNE", + "JOC", + "JOS", + "JPC", + "JPL", + "JPS", + "KADDB", + "KADDD", + "KADDQ", + "KADDW", + "KANDB", + "KANDD", + "KANDNB", + "KANDND", + "KANDNQ", + "KANDNW", + "KANDQ", + "KANDW", + "KMOVB", + "KMOVD", + "KMOVQ", + "KMOVW", + "KNOTB", + "KNOTD", + "KNOTQ", + "KNOTW", + "KORB", + "KORD", + "KORQ", + "KORTESTB", + "KORTESTD", + "KORTESTQ", + "KORTESTW", + "KORW", + "KSHIFTLB", + "KSHIFTLD", + "KSHIFTLQ", + "KSHIFTLW", + "KSHIFTRB", + "KSHIFTRD", + "KSHIFTRQ", + "KSHIFTRW", + "KTESTB", + "KTESTD", + "KTESTQ", + "KTESTW", + "KUNPCKBW", + "KUNPCKDQ", + "KUNPCKWD", + "KXNORB", + "KXNORD", + "KXNORQ", + "KXNORW", + "KXORB", + "KXORD", + "KXORQ", + "KXORW", + "LAHF", + "LARL", + "LARQ", + "LARW", + "LDDQU", + "LDMXCSR", + "LEAL", + "LEAQ", + "LEAVEL", + "LEAVEQ", + "LEAVEW", + "LEAW", + "LFENCE", + "LFSL", + "LFSQ", + "LFSW", + "LGDT", + "LGSL", + "LGSQ", + 
"LGSW", + "LIDT", + "LLDT", + "LMSW", + "LOCK", + "LODSB", + "LODSL", + "LODSQ", + "LODSW", + "LONG", + "LOOP", + "LOOPEQ", + "LOOPNE", + "LSLL", + "LSLQ", + "LSLW", + "LSSL", + "LSSQ", + "LSSW", + "LTR", + "LZCNTL", + "LZCNTQ", + "LZCNTW", + "MASKMOVOU", + "MASKMOVQ", + "MAXPD", + "MAXPS", + "MAXSD", + "MAXSS", + "MFENCE", + "MINPD", + "MINPS", + "MINSD", + "MINSS", + "MONITOR", + "MOVAPD", + "MOVAPS", + "MOVB", + "MOVBEL", + "MOVBEQ", + "MOVBEW", + "MOVBLSX", + "MOVBLZX", + "MOVBQSX", + "MOVBQZX", + "MOVBWSX", + "MOVBWZX", + "MOVDDUP", + "MOVHLPS", + "MOVHPD", + "MOVHPS", + "MOVL", + "MOVLHPS", + "MOVLPD", + "MOVLPS", + "MOVLQSX", + "MOVLQZX", + "MOVMSKPD", + "MOVMSKPS", + "MOVNTDQA", + "MOVNTIL", + "MOVNTIQ", + "MOVNTO", + "MOVNTPD", + "MOVNTPS", + "MOVNTQ", + "MOVO", + "MOVOU", + "MOVQ", + "MOVQL", + "MOVQOZX", + "MOVSB", + "MOVSD", + "MOVSHDUP", + "MOVSL", + "MOVSLDUP", + "MOVSQ", + "MOVSS", + "MOVSW", + "MOVSWW", + "MOVUPD", + "MOVUPS", + "MOVW", + "MOVWLSX", + "MOVWLZX", + "MOVWQSX", + "MOVWQZX", + "MOVZWW", + "MPSADBW", + "MULB", + "MULL", + "MULPD", + "MULPS", + "MULQ", + "MULSD", + "MULSS", + "MULW", + "MULXL", + "MULXQ", + "MWAIT", + "NEGB", + "NEGL", + "NEGQ", + "NEGW", + "NOPL", + "NOPW", + "NOTB", + "NOTL", + "NOTQ", + "NOTW", + "ORB", + "ORL", + "ORPD", + "ORPS", + "ORQ", + "ORW", + "OUTB", + "OUTL", + "OUTSB", + "OUTSL", + "OUTSW", + "OUTW", + "PABSB", + "PABSD", + "PABSW", + "PACKSSLW", + "PACKSSWB", + "PACKUSDW", + "PACKUSWB", + "PADDB", + "PADDL", + "PADDQ", + "PADDSB", + "PADDSW", + "PADDUSB", + "PADDUSW", + "PADDW", + "PALIGNR", + "PAND", + "PANDN", + "PAUSE", + "PAVGB", + "PAVGW", + "PBLENDVB", + "PBLENDW", + "PCLMULQDQ", + "PCMPEQB", + "PCMPEQL", + "PCMPEQQ", + "PCMPEQW", + "PCMPESTRI", + "PCMPESTRM", + "PCMPGTB", + "PCMPGTL", + "PCMPGTQ", + "PCMPGTW", + "PCMPISTRI", + "PCMPISTRM", + "PDEPL", + "PDEPQ", + "PEXTL", + "PEXTQ", + "PEXTRB", + "PEXTRD", + "PEXTRQ", + "PEXTRW", + "PHADDD", + "PHADDSW", + "PHADDW", + "PHMINPOSUW", + "PHSUBD", + 
"PHSUBSW", + "PHSUBW", + "PINSRB", + "PINSRD", + "PINSRQ", + "PINSRW", + "PMADDUBSW", + "PMADDWL", + "PMAXSB", + "PMAXSD", + "PMAXSW", + "PMAXUB", + "PMAXUD", + "PMAXUW", + "PMINSB", + "PMINSD", + "PMINSW", + "PMINUB", + "PMINUD", + "PMINUW", + "PMOVMSKB", + "PMOVSXBD", + "PMOVSXBQ", + "PMOVSXBW", + "PMOVSXDQ", + "PMOVSXWD", + "PMOVSXWQ", + "PMOVZXBD", + "PMOVZXBQ", + "PMOVZXBW", + "PMOVZXDQ", + "PMOVZXWD", + "PMOVZXWQ", + "PMULDQ", + "PMULHRSW", + "PMULHUW", + "PMULHW", + "PMULLD", + "PMULLW", + "PMULULQ", + "POPAL", + "POPAW", + "POPCNTL", + "POPCNTQ", + "POPCNTW", + "POPFL", + "POPFQ", + "POPFW", + "POPL", + "POPQ", + "POPW", + "POR", + "PREFETCHNTA", + "PREFETCHT0", + "PREFETCHT1", + "PREFETCHT2", + "PSADBW", + "PSHUFB", + "PSHUFD", + "PSHUFHW", + "PSHUFL", + "PSHUFLW", + "PSHUFW", + "PSIGNB", + "PSIGND", + "PSIGNW", + "PSLLL", + "PSLLO", + "PSLLQ", + "PSLLW", + "PSRAL", + "PSRAW", + "PSRLL", + "PSRLO", + "PSRLQ", + "PSRLW", + "PSUBB", + "PSUBL", + "PSUBQ", + "PSUBSB", + "PSUBSW", + "PSUBUSB", + "PSUBUSW", + "PSUBW", + "PTEST", + "PUNPCKHBW", + "PUNPCKHLQ", + "PUNPCKHQDQ", + "PUNPCKHWL", + "PUNPCKLBW", + "PUNPCKLLQ", + "PUNPCKLQDQ", + "PUNPCKLWL", + "PUSHAL", + "PUSHAW", + "PUSHFL", + "PUSHFQ", + "PUSHFW", + "PUSHL", + "PUSHQ", + "PUSHW", + "PXOR", + "QUAD", + "RCLB", + "RCLL", + "RCLQ", + "RCLW", + "RCPPS", + "RCPSS", + "RCRB", + "RCRL", + "RCRQ", + "RCRW", + "RDFSBASEL", + "RDFSBASEQ", + "RDGSBASEL", + "RDGSBASEQ", + "RDMSR", + "RDPID", + "RDPKRU", + "RDPMC", + "RDRANDL", + "RDRANDQ", + "RDRANDW", + "RDSEEDL", + "RDSEEDQ", + "RDSEEDW", + "RDTSC", + "RDTSCP", + "REP", + "REPN", + "RETFL", + "RETFQ", + "RETFW", + "ROLB", + "ROLL", + "ROLQ", + "ROLW", + "RORB", + "RORL", + "RORQ", + "RORW", + "RORXL", + "RORXQ", + "ROUNDPD", + "ROUNDPS", + "ROUNDSD", + "ROUNDSS", + "RSM", + "RSQRTPS", + "RSQRTSS", + "SAHF", + "SALB", + "SALL", + "SALQ", + "SALW", + "SARB", + "SARL", + "SARQ", + "SARW", + "SARXL", + "SARXQ", + "SBBB", + "SBBL", + "SBBQ", + "SBBW", + "SCASB", + 
"SCASL", + "SCASQ", + "SCASW", + "SETCC", + "SETCS", + "SETEQ", + "SETGE", + "SETGT", + "SETHI", + "SETLE", + "SETLS", + "SETLT", + "SETMI", + "SETNE", + "SETOC", + "SETOS", + "SETPC", + "SETPL", + "SETPS", + "SFENCE", + "SGDT", + "SHA1MSG1", + "SHA1MSG2", + "SHA1NEXTE", + "SHA1RNDS4", + "SHA256MSG1", + "SHA256MSG2", + "SHA256RNDS2", + "SHLB", + "SHLL", + "SHLQ", + "SHLW", + "SHLXL", + "SHLXQ", + "SHRB", + "SHRL", + "SHRQ", + "SHRW", + "SHRXL", + "SHRXQ", + "SHUFPD", + "SHUFPS", + "SIDT", + "SLDTL", + "SLDTQ", + "SLDTW", + "SMSWL", + "SMSWQ", + "SMSWW", + "SQRTPD", + "SQRTPS", + "SQRTSD", + "SQRTSS", + "STAC", + "STC", + "STD", + "STI", + "STMXCSR", + "STOSB", + "STOSL", + "STOSQ", + "STOSW", + "STRL", + "STRQ", + "STRW", + "SUBB", + "SUBL", + "SUBPD", + "SUBPS", + "SUBQ", + "SUBSD", + "SUBSS", + "SUBW", + "SWAPGS", + "SYSCALL", + "SYSENTER", + "SYSENTER64", + "SYSEXIT", + "SYSEXIT64", + "SYSRET", + "TESTB", + "TESTL", + "TESTQ", + "TESTW", + "TPAUSE", + "TZCNTL", + "TZCNTQ", + "TZCNTW", + "UCOMISD", + "UCOMISS", + "UD1", + "UD2", + "UMWAIT", + "UNPCKHPD", + "UNPCKHPS", + "UNPCKLPD", + "UNPCKLPS", + "UMONITOR", + "V4FMADDPS", + "V4FMADDSS", + "V4FNMADDPS", + "V4FNMADDSS", + "VADDPD", + "VADDPS", + "VADDSD", + "VADDSS", + "VADDSUBPD", + "VADDSUBPS", + "VAESDEC", + "VAESDECLAST", + "VAESENC", + "VAESENCLAST", + "VAESIMC", + "VAESKEYGENASSIST", + "VALIGND", + "VALIGNQ", + "VANDNPD", + "VANDNPS", + "VANDPD", + "VANDPS", + "VBLENDMPD", + "VBLENDMPS", + "VBLENDPD", + "VBLENDPS", + "VBLENDVPD", + "VBLENDVPS", + "VBROADCASTF128", + "VBROADCASTF32X2", + "VBROADCASTF32X4", + "VBROADCASTF32X8", + "VBROADCASTF64X2", + "VBROADCASTF64X4", + "VBROADCASTI128", + "VBROADCASTI32X2", + "VBROADCASTI32X4", + "VBROADCASTI32X8", + "VBROADCASTI64X2", + "VBROADCASTI64X4", + "VBROADCASTSD", + "VBROADCASTSS", + "VCMPPD", + "VCMPPS", + "VCMPSD", + "VCMPSS", + "VCOMISD", + "VCOMISS", + "VCOMPRESSPD", + "VCOMPRESSPS", + "VCVTDQ2PD", + "VCVTDQ2PS", + "VCVTPD2DQ", + "VCVTPD2DQX", + "VCVTPD2DQY", 
+ "VCVTPD2PS", + "VCVTPD2PSX", + "VCVTPD2PSY", + "VCVTPD2QQ", + "VCVTPD2UDQ", + "VCVTPD2UDQX", + "VCVTPD2UDQY", + "VCVTPD2UQQ", + "VCVTPH2PS", + "VCVTPS2DQ", + "VCVTPS2PD", + "VCVTPS2PH", + "VCVTPS2QQ", + "VCVTPS2UDQ", + "VCVTPS2UQQ", + "VCVTQQ2PD", + "VCVTQQ2PS", + "VCVTQQ2PSX", + "VCVTQQ2PSY", + "VCVTSD2SI", + "VCVTSD2SIQ", + "VCVTSD2SS", + "VCVTSD2USI", + "VCVTSD2USIL", + "VCVTSD2USIQ", + "VCVTSI2SDL", + "VCVTSI2SDQ", + "VCVTSI2SSL", + "VCVTSI2SSQ", + "VCVTSS2SD", + "VCVTSS2SI", + "VCVTSS2SIQ", + "VCVTSS2USI", + "VCVTSS2USIL", + "VCVTSS2USIQ", + "VCVTTPD2DQ", + "VCVTTPD2DQX", + "VCVTTPD2DQY", + "VCVTTPD2QQ", + "VCVTTPD2UDQ", + "VCVTTPD2UDQX", + "VCVTTPD2UDQY", + "VCVTTPD2UQQ", + "VCVTTPS2DQ", + "VCVTTPS2QQ", + "VCVTTPS2UDQ", + "VCVTTPS2UQQ", + "VCVTTSD2SI", + "VCVTTSD2SIQ", + "VCVTTSD2USI", + "VCVTTSD2USIL", + "VCVTTSD2USIQ", + "VCVTTSS2SI", + "VCVTTSS2SIQ", + "VCVTTSS2USI", + "VCVTTSS2USIL", + "VCVTTSS2USIQ", + "VCVTUDQ2PD", + "VCVTUDQ2PS", + "VCVTUQQ2PD", + "VCVTUQQ2PS", + "VCVTUQQ2PSX", + "VCVTUQQ2PSY", + "VCVTUSI2SD", + "VCVTUSI2SDL", + "VCVTUSI2SDQ", + "VCVTUSI2SS", + "VCVTUSI2SSL", + "VCVTUSI2SSQ", + "VDBPSADBW", + "VDIVPD", + "VDIVPS", + "VDIVSD", + "VDIVSS", + "VDPPD", + "VDPPS", + "VERR", + "VERW", + "VEXP2PD", + "VEXP2PS", + "VEXPANDPD", + "VEXPANDPS", + "VEXTRACTF128", + "VEXTRACTF32X4", + "VEXTRACTF32X8", + "VEXTRACTF64X2", + "VEXTRACTF64X4", + "VEXTRACTI128", + "VEXTRACTI32X4", + "VEXTRACTI32X8", + "VEXTRACTI64X2", + "VEXTRACTI64X4", + "VEXTRACTPS", + "VFIXUPIMMPD", + "VFIXUPIMMPS", + "VFIXUPIMMSD", + "VFIXUPIMMSS", + "VFMADD132PD", + "VFMADD132PS", + "VFMADD132SD", + "VFMADD132SS", + "VFMADD213PD", + "VFMADD213PS", + "VFMADD213SD", + "VFMADD213SS", + "VFMADD231PD", + "VFMADD231PS", + "VFMADD231SD", + "VFMADD231SS", + "VFMADDSUB132PD", + "VFMADDSUB132PS", + "VFMADDSUB213PD", + "VFMADDSUB213PS", + "VFMADDSUB231PD", + "VFMADDSUB231PS", + "VFMSUB132PD", + "VFMSUB132PS", + "VFMSUB132SD", + "VFMSUB132SS", + "VFMSUB213PD", + "VFMSUB213PS", + 
"VFMSUB213SD", + "VFMSUB213SS", + "VFMSUB231PD", + "VFMSUB231PS", + "VFMSUB231SD", + "VFMSUB231SS", + "VFMSUBADD132PD", + "VFMSUBADD132PS", + "VFMSUBADD213PD", + "VFMSUBADD213PS", + "VFMSUBADD231PD", + "VFMSUBADD231PS", + "VFNMADD132PD", + "VFNMADD132PS", + "VFNMADD132SD", + "VFNMADD132SS", + "VFNMADD213PD", + "VFNMADD213PS", + "VFNMADD213SD", + "VFNMADD213SS", + "VFNMADD231PD", + "VFNMADD231PS", + "VFNMADD231SD", + "VFNMADD231SS", + "VFNMSUB132PD", + "VFNMSUB132PS", + "VFNMSUB132SD", + "VFNMSUB132SS", + "VFNMSUB213PD", + "VFNMSUB213PS", + "VFNMSUB213SD", + "VFNMSUB213SS", + "VFNMSUB231PD", + "VFNMSUB231PS", + "VFNMSUB231SD", + "VFNMSUB231SS", + "VFPCLASSPD", + "VFPCLASSPDX", + "VFPCLASSPDY", + "VFPCLASSPDZ", + "VFPCLASSPS", + "VFPCLASSPSX", + "VFPCLASSPSY", + "VFPCLASSPSZ", + "VFPCLASSSD", + "VFPCLASSSS", + "VGATHERDPD", + "VGATHERDPS", + "VGATHERPF0DPD", + "VGATHERPF0DPS", + "VGATHERPF0QPD", + "VGATHERPF0QPS", + "VGATHERPF1DPD", + "VGATHERPF1DPS", + "VGATHERPF1QPD", + "VGATHERPF1QPS", + "VGATHERQPD", + "VGATHERQPS", + "VGETEXPPD", + "VGETEXPPS", + "VGETEXPSD", + "VGETEXPSS", + "VGETMANTPD", + "VGETMANTPS", + "VGETMANTSD", + "VGETMANTSS", + "VGF2P8AFFINEINVQB", + "VGF2P8AFFINEQB", + "VGF2P8MULB", + "VHADDPD", + "VHADDPS", + "VHSUBPD", + "VHSUBPS", + "VINSERTF128", + "VINSERTF32X4", + "VINSERTF32X8", + "VINSERTF64X2", + "VINSERTF64X4", + "VINSERTI128", + "VINSERTI32X4", + "VINSERTI32X8", + "VINSERTI64X2", + "VINSERTI64X4", + "VINSERTPS", + "VLDDQU", + "VLDMXCSR", + "VMASKMOVDQU", + "VMASKMOVPD", + "VMASKMOVPS", + "VMAXPD", + "VMAXPS", + "VMAXSD", + "VMAXSS", + "VMINPD", + "VMINPS", + "VMINSD", + "VMINSS", + "VMOVAPD", + "VMOVAPS", + "VMOVD", + "VMOVDDUP", + "VMOVDQA", + "VMOVDQA32", + "VMOVDQA64", + "VMOVDQU", + "VMOVDQU16", + "VMOVDQU32", + "VMOVDQU64", + "VMOVDQU8", + "VMOVHLPS", + "VMOVHPD", + "VMOVHPS", + "VMOVLHPS", + "VMOVLPD", + "VMOVLPS", + "VMOVMSKPD", + "VMOVMSKPS", + "VMOVNTDQ", + "VMOVNTDQA", + "VMOVNTPD", + "VMOVNTPS", + "VMOVQ", + "VMOVSD", + 
"VMOVSHDUP", + "VMOVSLDUP", + "VMOVSS", + "VMOVUPD", + "VMOVUPS", + "VMPSADBW", + "VMULPD", + "VMULPS", + "VMULSD", + "VMULSS", + "VORPD", + "VORPS", + "VP4DPWSSD", + "VP4DPWSSDS", + "VPABSB", + "VPABSD", + "VPABSQ", + "VPABSW", + "VPACKSSDW", + "VPACKSSWB", + "VPACKUSDW", + "VPACKUSWB", + "VPADDB", + "VPADDD", + "VPADDQ", + "VPADDSB", + "VPADDSW", + "VPADDUSB", + "VPADDUSW", + "VPADDW", + "VPALIGNR", + "VPAND", + "VPANDD", + "VPANDN", + "VPANDND", + "VPANDNQ", + "VPANDQ", + "VPAVGB", + "VPAVGW", + "VPBLENDD", + "VPBLENDMB", + "VPBLENDMD", + "VPBLENDMQ", + "VPBLENDMW", + "VPBLENDVB", + "VPBLENDW", + "VPBROADCASTB", + "VPBROADCASTD", + "VPBROADCASTMB2Q", + "VPBROADCASTMW2D", + "VPBROADCASTQ", + "VPBROADCASTW", + "VPCLMULQDQ", + "VPCMPB", + "VPCMPD", + "VPCMPEQB", + "VPCMPEQD", + "VPCMPEQQ", + "VPCMPEQW", + "VPCMPESTRI", + "VPCMPESTRM", + "VPCMPGTB", + "VPCMPGTD", + "VPCMPGTQ", + "VPCMPGTW", + "VPCMPISTRI", + "VPCMPISTRM", + "VPCMPQ", + "VPCMPUB", + "VPCMPUD", + "VPCMPUQ", + "VPCMPUW", + "VPCMPW", + "VPCOMPRESSB", + "VPCOMPRESSD", + "VPCOMPRESSQ", + "VPCOMPRESSW", + "VPCONFLICTD", + "VPCONFLICTQ", + "VPDPBUSD", + "VPDPBUSDS", + "VPDPWSSD", + "VPDPWSSDS", + "VPERM2F128", + "VPERM2I128", + "VPERMB", + "VPERMD", + "VPERMI2B", + "VPERMI2D", + "VPERMI2PD", + "VPERMI2PS", + "VPERMI2Q", + "VPERMI2W", + "VPERMILPD", + "VPERMILPS", + "VPERMPD", + "VPERMPS", + "VPERMQ", + "VPERMT2B", + "VPERMT2D", + "VPERMT2PD", + "VPERMT2PS", + "VPERMT2Q", + "VPERMT2W", + "VPERMW", + "VPEXPANDB", + "VPEXPANDD", + "VPEXPANDQ", + "VPEXPANDW", + "VPEXTRB", + "VPEXTRD", + "VPEXTRQ", + "VPEXTRW", + "VPGATHERDD", + "VPGATHERDQ", + "VPGATHERQD", + "VPGATHERQQ", + "VPHADDD", + "VPHADDSW", + "VPHADDW", + "VPHMINPOSUW", + "VPHSUBD", + "VPHSUBSW", + "VPHSUBW", + "VPINSRB", + "VPINSRD", + "VPINSRQ", + "VPINSRW", + "VPLZCNTD", + "VPLZCNTQ", + "VPMADD52HUQ", + "VPMADD52LUQ", + "VPMADDUBSW", + "VPMADDWD", + "VPMASKMOVD", + "VPMASKMOVQ", + "VPMAXSB", + "VPMAXSD", + "VPMAXSQ", + "VPMAXSW", + "VPMAXUB", + 
"VPMAXUD", + "VPMAXUQ", + "VPMAXUW", + "VPMINSB", + "VPMINSD", + "VPMINSQ", + "VPMINSW", + "VPMINUB", + "VPMINUD", + "VPMINUQ", + "VPMINUW", + "VPMOVB2M", + "VPMOVD2M", + "VPMOVDB", + "VPMOVDW", + "VPMOVM2B", + "VPMOVM2D", + "VPMOVM2Q", + "VPMOVM2W", + "VPMOVMSKB", + "VPMOVQ2M", + "VPMOVQB", + "VPMOVQD", + "VPMOVQW", + "VPMOVSDB", + "VPMOVSDW", + "VPMOVSQB", + "VPMOVSQD", + "VPMOVSQW", + "VPMOVSWB", + "VPMOVSXBD", + "VPMOVSXBQ", + "VPMOVSXBW", + "VPMOVSXDQ", + "VPMOVSXWD", + "VPMOVSXWQ", + "VPMOVUSDB", + "VPMOVUSDW", + "VPMOVUSQB", + "VPMOVUSQD", + "VPMOVUSQW", + "VPMOVUSWB", + "VPMOVW2M", + "VPMOVWB", + "VPMOVZXBD", + "VPMOVZXBQ", + "VPMOVZXBW", + "VPMOVZXDQ", + "VPMOVZXWD", + "VPMOVZXWQ", + "VPMULDQ", + "VPMULHRSW", + "VPMULHUW", + "VPMULHW", + "VPMULLD", + "VPMULLQ", + "VPMULLW", + "VPMULTISHIFTQB", + "VPMULUDQ", + "VPOPCNTB", + "VPOPCNTD", + "VPOPCNTQ", + "VPOPCNTW", + "VPOR", + "VPORD", + "VPORQ", + "VPROLD", + "VPROLQ", + "VPROLVD", + "VPROLVQ", + "VPRORD", + "VPRORQ", + "VPRORVD", + "VPRORVQ", + "VPSADBW", + "VPSCATTERDD", + "VPSCATTERDQ", + "VPSCATTERQD", + "VPSCATTERQQ", + "VPSHLDD", + "VPSHLDQ", + "VPSHLDVD", + "VPSHLDVQ", + "VPSHLDVW", + "VPSHLDW", + "VPSHRDD", + "VPSHRDQ", + "VPSHRDVD", + "VPSHRDVQ", + "VPSHRDVW", + "VPSHRDW", + "VPSHUFB", + "VPSHUFBITQMB", + "VPSHUFD", + "VPSHUFHW", + "VPSHUFLW", + "VPSIGNB", + "VPSIGND", + "VPSIGNW", + "VPSLLD", + "VPSLLDQ", + "VPSLLQ", + "VPSLLVD", + "VPSLLVQ", + "VPSLLVW", + "VPSLLW", + "VPSRAD", + "VPSRAQ", + "VPSRAVD", + "VPSRAVQ", + "VPSRAVW", + "VPSRAW", + "VPSRLD", + "VPSRLDQ", + "VPSRLQ", + "VPSRLVD", + "VPSRLVQ", + "VPSRLVW", + "VPSRLW", + "VPSUBB", + "VPSUBD", + "VPSUBQ", + "VPSUBSB", + "VPSUBSW", + "VPSUBUSB", + "VPSUBUSW", + "VPSUBW", + "VPTERNLOGD", + "VPTERNLOGQ", + "VPTEST", + "VPTESTMB", + "VPTESTMD", + "VPTESTMQ", + "VPTESTMW", + "VPTESTNMB", + "VPTESTNMD", + "VPTESTNMQ", + "VPTESTNMW", + "VPUNPCKHBW", + "VPUNPCKHDQ", + "VPUNPCKHQDQ", + "VPUNPCKHWD", + "VPUNPCKLBW", + "VPUNPCKLDQ", + "VPUNPCKLQDQ", + 
"VPUNPCKLWD", + "VPXOR", + "VPXORD", + "VPXORQ", + "VRANGEPD", + "VRANGEPS", + "VRANGESD", + "VRANGESS", + "VRCP14PD", + "VRCP14PS", + "VRCP14SD", + "VRCP14SS", + "VRCP28PD", + "VRCP28PS", + "VRCP28SD", + "VRCP28SS", + "VRCPPS", + "VRCPSS", + "VREDUCEPD", + "VREDUCEPS", + "VREDUCESD", + "VREDUCESS", + "VRNDSCALEPD", + "VRNDSCALEPS", + "VRNDSCALESD", + "VRNDSCALESS", + "VROUNDPD", + "VROUNDPS", + "VROUNDSD", + "VROUNDSS", + "VRSQRT14PD", + "VRSQRT14PS", + "VRSQRT14SD", + "VRSQRT14SS", + "VRSQRT28PD", + "VRSQRT28PS", + "VRSQRT28SD", + "VRSQRT28SS", + "VRSQRTPS", + "VRSQRTSS", + "VSCALEFPD", + "VSCALEFPS", + "VSCALEFSD", + "VSCALEFSS", + "VSCATTERDPD", + "VSCATTERDPS", + "VSCATTERPF0DPD", + "VSCATTERPF0DPS", + "VSCATTERPF0QPD", + "VSCATTERPF0QPS", + "VSCATTERPF1DPD", + "VSCATTERPF1DPS", + "VSCATTERPF1QPD", + "VSCATTERPF1QPS", + "VSCATTERQPD", + "VSCATTERQPS", + "VSHUFF32X4", + "VSHUFF64X2", + "VSHUFI32X4", + "VSHUFI64X2", + "VSHUFPD", + "VSHUFPS", + "VSQRTPD", + "VSQRTPS", + "VSQRTSD", + "VSQRTSS", + "VSTMXCSR", + "VSUBPD", + "VSUBPS", + "VSUBSD", + "VSUBSS", + "VTESTPD", + "VTESTPS", + "VUCOMISD", + "VUCOMISS", + "VUNPCKHPD", + "VUNPCKHPS", + "VUNPCKLPD", + "VUNPCKLPS", + "VXORPD", + "VXORPS", + "VZEROALL", + "VZEROUPPER", + "WAIT", + "WBINVD", + "WORD", + "WRFSBASEL", + "WRFSBASEQ", + "WRGSBASEL", + "WRGSBASEQ", + "WRMSR", + "WRPKRU", + "XABORT", + "XACQUIRE", + "XADDB", + "XADDL", + "XADDQ", + "XADDW", + "XBEGIN", + "XCHGB", + "XCHGL", + "XCHGQ", + "XCHGW", + "XEND", + "XGETBV", + "XLAT", + "XORB", + "XORL", + "XORPD", + "XORPS", + "XORQ", + "XORW", + "XRELEASE", + "XRSTOR", + "XRSTOR64", + "XRSTORS", + "XRSTORS64", + "XSAVE", + "XSAVE64", + "XSAVEC", + "XSAVEC64", + "XSAVEOPT", + "XSAVEOPT64", + "XSAVES", + "XSAVES64", + "XSETBV", + "XTEST", + "LAST", +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/asm6.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/asm6.go new file mode 100644 index 
0000000000000000000000000000000000000000..bdd75b4ef884c5cbc41eeb4b239ceda3531cabeb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/asm6.go @@ -0,0 +1,5473 @@ +// Inferno utils/6l/span.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/span.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package x86 + +import ( + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/sys" + "encoding/binary" + "fmt" + "internal/buildcfg" + "log" + "strings" +) + +var ( + plan9privates *obj.LSym +) + +// Instruction layout. + +// Loop alignment constants: +// want to align loop entry to loopAlign-byte boundary, +// and willing to insert at most maxLoopPad bytes of NOP to do so. +// We define a loop entry as the target of a backward jump. +// +// gcc uses maxLoopPad = 10 for its 'generic x86-64' config, +// and it aligns all jump targets, not just backward jump targets. +// +// As of 6/1/2012, the effect of setting maxLoopPad = 10 here +// is very slight but negative, so the alignment is disabled by +// setting MaxLoopPad = 0. The code is here for reference and +// for future experiments. +const ( + loopAlign = 16 + maxLoopPad = 0 +) + +// Bit flags that are used to express jump target properties. +const ( + // branchBackwards marks targets that are located behind. + // Used to express jumps to loop headers. + branchBackwards = (1 << iota) + // branchShort marks branches those target is close, + // with offset is in -128..127 range. + branchShort + // branchLoopHead marks loop entry. + // Used to insert padding for misaligned loops. + branchLoopHead +) + +// opBytes holds optab encoding bytes. +// Each ytab reserves fixed amount of bytes in this array. +// +// The size should be the minimal number of bytes that +// are enough to hold biggest optab op lines. 
+type opBytes [31]uint8 + +type Optab struct { + as obj.As + ytab []ytab + prefix uint8 + op opBytes +} + +type movtab struct { + as obj.As + ft uint8 + f3t uint8 + tt uint8 + code uint8 + op [4]uint8 +} + +const ( + Yxxx = iota + Ynone + Yi0 // $0 + Yi1 // $1 + Yu2 // $x, x fits in uint2 + Yi8 // $x, x fits in int8 + Yu8 // $x, x fits in uint8 + Yu7 // $x, x in 0..127 (fits in both int8 and uint8) + Ys32 + Yi32 + Yi64 + Yiauto + Yal + Ycl + Yax + Ycx + Yrb + Yrl + Yrl32 // Yrl on 32-bit system + Yrf + Yf0 + Yrx + Ymb + Yml + Ym + Ybr + Ycs + Yss + Yds + Yes + Yfs + Ygs + Ygdtr + Yidtr + Yldtr + Ymsw + Ytask + Ycr0 + Ycr1 + Ycr2 + Ycr3 + Ycr4 + Ycr5 + Ycr6 + Ycr7 + Ycr8 + Ydr0 + Ydr1 + Ydr2 + Ydr3 + Ydr4 + Ydr5 + Ydr6 + Ydr7 + Ytr0 + Ytr1 + Ytr2 + Ytr3 + Ytr4 + Ytr5 + Ytr6 + Ytr7 + Ymr + Ymm + Yxr0 // X0 only. "" notation in Intel manual. + YxrEvexMulti4 // [ X - X ]; multisource YxrEvex + Yxr // X0..X15 + YxrEvex // X0..X31 + Yxm + YxmEvex // YxrEvex+Ym + Yxvm // VSIB vector array; vm32x/vm64x + YxvmEvex // Yxvm which permits High-16 X register as index. + YyrEvexMulti4 // [ Y - Y ]; multisource YyrEvex + Yyr // Y0..Y15 + YyrEvex // Y0..Y31 + Yym + YymEvex // YyrEvex+Ym + Yyvm // VSIB vector array; vm32y/vm64y + YyvmEvex // Yyvm which permits High-16 Y register as index. 
+ YzrMulti4 // [ Z - Z ]; multisource YzrEvex + Yzr // Z0..Z31 + Yzm // Yzr+Ym + Yzvm // VSIB vector array; vm32z/vm64z + Yk0 // K0 + Yknot0 // K1..K7; write mask + Yk // K0..K7; used for KOP + Ykm // Yk+Ym; used for KOP + Ytls + Ytextsize + Yindir + Ymax +) + +const ( + Zxxx = iota + Zlit + Zlitm_r + Zlitr_m + Zlit_m_r + Z_rp + Zbr + Zcall + Zcallcon + Zcallduff + Zcallind + Zcallindreg + Zib_ + Zib_rp + Zibo_m + Zibo_m_xm + Zil_ + Zil_rp + Ziq_rp + Zilo_m + Zjmp + Zjmpcon + Zloop + Zo_iw + Zm_o + Zm_r + Z_m_r + Zm2_r + Zm_r_xm + Zm_r_i_xm + Zm_r_xm_nr + Zr_m_xm_nr + Zibm_r // mmx1,mmx2/mem64,imm8 + Zibr_m + Zmb_r + Zaut_r + Zo_m + Zo_m64 + Zpseudo + Zr_m + Zr_m_xm + Zrp_ + Z_ib + Z_il + Zm_ibo + Zm_ilo + Zib_rr + Zil_rr + Zbyte + + Zvex_rm_v_r + Zvex_rm_v_ro + Zvex_r_v_rm + Zvex_i_rm_vo + Zvex_v_rm_r + Zvex_i_rm_r + Zvex_i_r_v + Zvex_i_rm_v_r + Zvex + Zvex_rm_r_vo + Zvex_i_r_rm + Zvex_hr_rm_v_r + + Zevex_first + Zevex_i_r_k_rm + Zevex_i_r_rm + Zevex_i_rm_k_r + Zevex_i_rm_k_vo + Zevex_i_rm_r + Zevex_i_rm_v_k_r + Zevex_i_rm_v_r + Zevex_i_rm_vo + Zevex_k_rmo + Zevex_r_k_rm + Zevex_r_v_k_rm + Zevex_r_v_rm + Zevex_rm_k_r + Zevex_rm_v_k_r + Zevex_rm_v_r + Zevex_last + + Zmax +) + +const ( + Px = 0 + Px1 = 1 // symbolic; exact value doesn't matter + P32 = 0x32 // 32-bit only + Pe = 0x66 // operand escape + Pm = 0x0f // 2byte opcode escape + Pq = 0xff // both escapes: 66 0f + Pb = 0xfe // byte operands + Pf2 = 0xf2 // xmm escape 1: f2 0f + Pf3 = 0xf3 // xmm escape 2: f3 0f + Pef3 = 0xf5 // xmm escape 2 with 16-bit prefix: 66 f3 0f + Pq3 = 0x67 // xmm escape 3: 66 48 0f + Pq4 = 0x68 // xmm escape 4: 66 0F 38 + Pq4w = 0x69 // Pq4 with Rex.w 66 0F 38 + Pq5 = 0x6a // xmm escape 5: F3 0F 38 + Pq5w = 0x6b // Pq5 with Rex.w F3 0F 38 + Pfw = 0xf4 // Pf3 with Rex.w: f3 48 0f + Pw = 0x48 // Rex.w + Pw8 = 0x90 // symbolic; exact value doesn't matter + Py = 0x80 // defaults to 64-bit mode + Py1 = 0x81 // symbolic; exact value doesn't matter + Py3 = 0x83 // symbolic; exact value 
doesn't matter + Pavx = 0x84 // symbolic; exact value doesn't matter + + RxrEvex = 1 << 4 // AVX512 extension to REX.R/VEX.R + Rxw = 1 << 3 // =1, 64-bit operand size + Rxr = 1 << 2 // extend modrm reg + Rxx = 1 << 1 // extend sib index + Rxb = 1 << 0 // extend modrm r/m, sib base, or opcode reg +) + +const ( + // Encoding for VEX prefix in tables. + // The P, L, and W fields are chosen to match + // their eventual locations in the VEX prefix bytes. + + // Encoding for VEX prefix in tables. + // The P, L, and W fields are chosen to match + // their eventual locations in the VEX prefix bytes. + + // Using spare bit to make leading [E]VEX encoding byte different from + // 0x0f even if all other VEX fields are 0. + avxEscape = 1 << 6 + + // P field - 2 bits + vex66 = 1 << 0 + vexF3 = 2 << 0 + vexF2 = 3 << 0 + // L field - 1 bit + vexLZ = 0 << 2 + vexLIG = 0 << 2 + vex128 = 0 << 2 + vex256 = 1 << 2 + // W field - 1 bit + vexWIG = 0 << 7 + vexW0 = 0 << 7 + vexW1 = 1 << 7 + // M field - 5 bits, but mostly reserved; we can store up to 3 + vex0F = 1 << 3 + vex0F38 = 2 << 3 + vex0F3A = 3 << 3 +) + +var ycover [Ymax * Ymax]uint8 + +var reg [MAXREG]int + +var regrex [MAXREG + 1]int + +var ynone = []ytab{ + {Zlit, 1, argList{}}, +} + +var ytext = []ytab{ + {Zpseudo, 0, argList{Ymb, Ytextsize}}, + {Zpseudo, 1, argList{Ymb, Yi32, Ytextsize}}, +} + +var ynop = []ytab{ + {Zpseudo, 0, argList{}}, + {Zpseudo, 0, argList{Yiauto}}, + {Zpseudo, 0, argList{Yml}}, + {Zpseudo, 0, argList{Yrf}}, + {Zpseudo, 0, argList{Yxr}}, + {Zpseudo, 0, argList{Yiauto}}, + {Zpseudo, 0, argList{Yml}}, + {Zpseudo, 0, argList{Yrf}}, + {Zpseudo, 1, argList{Yxr}}, +} + +var yfuncdata = []ytab{ + {Zpseudo, 0, argList{Yi32, Ym}}, +} + +var ypcdata = []ytab{ + {Zpseudo, 0, argList{Yi32, Yi32}}, +} + +var yxorb = []ytab{ + {Zib_, 1, argList{Yi32, Yal}}, + {Zibo_m, 2, argList{Yi32, Ymb}}, + {Zr_m, 1, argList{Yrb, Ymb}}, + {Zm_r, 1, argList{Ymb, Yrb}}, +} + +var yaddl = []ytab{ + {Zibo_m, 2, argList{Yi8, Yml}}, + 
{Zil_, 1, argList{Yi32, Yax}}, + {Zilo_m, 2, argList{Yi32, Yml}}, + {Zr_m, 1, argList{Yrl, Yml}}, + {Zm_r, 1, argList{Yml, Yrl}}, +} + +var yincl = []ytab{ + {Z_rp, 1, argList{Yrl}}, + {Zo_m, 2, argList{Yml}}, +} + +var yincq = []ytab{ + {Zo_m, 2, argList{Yml}}, +} + +var ycmpb = []ytab{ + {Z_ib, 1, argList{Yal, Yi32}}, + {Zm_ibo, 2, argList{Ymb, Yi32}}, + {Zm_r, 1, argList{Ymb, Yrb}}, + {Zr_m, 1, argList{Yrb, Ymb}}, +} + +var ycmpl = []ytab{ + {Zm_ibo, 2, argList{Yml, Yi8}}, + {Z_il, 1, argList{Yax, Yi32}}, + {Zm_ilo, 2, argList{Yml, Yi32}}, + {Zm_r, 1, argList{Yml, Yrl}}, + {Zr_m, 1, argList{Yrl, Yml}}, +} + +var yshb = []ytab{ + {Zo_m, 2, argList{Yi1, Ymb}}, + {Zibo_m, 2, argList{Yu8, Ymb}}, + {Zo_m, 2, argList{Ycx, Ymb}}, +} + +var yshl = []ytab{ + {Zo_m, 2, argList{Yi1, Yml}}, + {Zibo_m, 2, argList{Yu8, Yml}}, + {Zo_m, 2, argList{Ycl, Yml}}, + {Zo_m, 2, argList{Ycx, Yml}}, +} + +var ytestl = []ytab{ + {Zil_, 1, argList{Yi32, Yax}}, + {Zilo_m, 2, argList{Yi32, Yml}}, + {Zr_m, 1, argList{Yrl, Yml}}, + {Zm_r, 1, argList{Yml, Yrl}}, +} + +var ymovb = []ytab{ + {Zr_m, 1, argList{Yrb, Ymb}}, + {Zm_r, 1, argList{Ymb, Yrb}}, + {Zib_rp, 1, argList{Yi32, Yrb}}, + {Zibo_m, 2, argList{Yi32, Ymb}}, +} + +var ybtl = []ytab{ + {Zibo_m, 2, argList{Yi8, Yml}}, + {Zr_m, 1, argList{Yrl, Yml}}, +} + +var ymovw = []ytab{ + {Zr_m, 1, argList{Yrl, Yml}}, + {Zm_r, 1, argList{Yml, Yrl}}, + {Zil_rp, 1, argList{Yi32, Yrl}}, + {Zilo_m, 2, argList{Yi32, Yml}}, + {Zaut_r, 2, argList{Yiauto, Yrl}}, +} + +var ymovl = []ytab{ + {Zr_m, 1, argList{Yrl, Yml}}, + {Zm_r, 1, argList{Yml, Yrl}}, + {Zil_rp, 1, argList{Yi32, Yrl}}, + {Zilo_m, 2, argList{Yi32, Yml}}, + {Zm_r_xm, 1, argList{Yml, Ymr}}, // MMX MOVD + {Zr_m_xm, 1, argList{Ymr, Yml}}, // MMX MOVD + {Zm_r_xm, 2, argList{Yml, Yxr}}, // XMM MOVD (32 bit) + {Zr_m_xm, 2, argList{Yxr, Yml}}, // XMM MOVD (32 bit) + {Zaut_r, 2, argList{Yiauto, Yrl}}, +} + +var yret = []ytab{ + {Zo_iw, 1, argList{}}, + {Zo_iw, 1, argList{Yi32}}, +} + +var ymovq = 
[]ytab{ + // valid in 32-bit mode + {Zm_r_xm_nr, 1, argList{Ym, Ymr}}, // 0x6f MMX MOVQ (shorter encoding) + {Zr_m_xm_nr, 1, argList{Ymr, Ym}}, // 0x7f MMX MOVQ + {Zm_r_xm_nr, 2, argList{Yxr, Ymr}}, // Pf2, 0xd6 MOVDQ2Q + {Zm_r_xm_nr, 2, argList{Yxm, Yxr}}, // Pf3, 0x7e MOVQ xmm1/m64 -> xmm2 + {Zr_m_xm_nr, 2, argList{Yxr, Yxm}}, // Pe, 0xd6 MOVQ xmm1 -> xmm2/m64 + + // valid only in 64-bit mode, usually with 64-bit prefix + {Zr_m, 1, argList{Yrl, Yml}}, // 0x89 + {Zm_r, 1, argList{Yml, Yrl}}, // 0x8b + {Zilo_m, 2, argList{Ys32, Yrl}}, // 32 bit signed 0xc7,(0) + {Ziq_rp, 1, argList{Yi64, Yrl}}, // 0xb8 -- 32/64 bit immediate + {Zilo_m, 2, argList{Yi32, Yml}}, // 0xc7,(0) + {Zm_r_xm, 1, argList{Ymm, Ymr}}, // 0x6e MMX MOVD + {Zr_m_xm, 1, argList{Ymr, Ymm}}, // 0x7e MMX MOVD + {Zm_r_xm, 2, argList{Yml, Yxr}}, // Pe, 0x6e MOVD xmm load + {Zr_m_xm, 2, argList{Yxr, Yml}}, // Pe, 0x7e MOVD xmm store + {Zaut_r, 1, argList{Yiauto, Yrl}}, // 0 built-in LEAQ +} + +var ymovbe = []ytab{ + {Zlitm_r, 3, argList{Ym, Yrl}}, + {Zlitr_m, 3, argList{Yrl, Ym}}, +} + +var ym_rl = []ytab{ + {Zm_r, 1, argList{Ym, Yrl}}, +} + +var yrl_m = []ytab{ + {Zr_m, 1, argList{Yrl, Ym}}, +} + +var ymb_rl = []ytab{ + {Zmb_r, 1, argList{Ymb, Yrl}}, +} + +var yml_rl = []ytab{ + {Zm_r, 1, argList{Yml, Yrl}}, +} + +var yrl_ml = []ytab{ + {Zr_m, 1, argList{Yrl, Yml}}, +} + +var yml_mb = []ytab{ + {Zr_m, 1, argList{Yrb, Ymb}}, + {Zm_r, 1, argList{Ymb, Yrb}}, +} + +var yrb_mb = []ytab{ + {Zr_m, 1, argList{Yrb, Ymb}}, +} + +var yxchg = []ytab{ + {Z_rp, 1, argList{Yax, Yrl}}, + {Zrp_, 1, argList{Yrl, Yax}}, + {Zr_m, 1, argList{Yrl, Yml}}, + {Zm_r, 1, argList{Yml, Yrl}}, +} + +var ydivl = []ytab{ + {Zm_o, 2, argList{Yml}}, +} + +var ydivb = []ytab{ + {Zm_o, 2, argList{Ymb}}, +} + +var yimul = []ytab{ + {Zm_o, 2, argList{Yml}}, + {Zib_rr, 1, argList{Yi8, Yrl}}, + {Zil_rr, 1, argList{Yi32, Yrl}}, + {Zm_r, 2, argList{Yml, Yrl}}, +} + +var yimul3 = []ytab{ + {Zibm_r, 2, argList{Yi8, Yml, Yrl}}, + {Zibm_r, 2, 
argList{Yi32, Yml, Yrl}}, +} + +var ybyte = []ytab{ + {Zbyte, 1, argList{Yi64}}, +} + +var yin = []ytab{ + {Zib_, 1, argList{Yi32}}, + {Zlit, 1, argList{}}, +} + +var yint = []ytab{ + {Zib_, 1, argList{Yi32}}, +} + +var ypushl = []ytab{ + {Zrp_, 1, argList{Yrl}}, + {Zm_o, 2, argList{Ym}}, + {Zib_, 1, argList{Yi8}}, + {Zil_, 1, argList{Yi32}}, +} + +var ypopl = []ytab{ + {Z_rp, 1, argList{Yrl}}, + {Zo_m, 2, argList{Ym}}, +} + +var ywrfsbase = []ytab{ + {Zm_o, 2, argList{Yrl}}, +} + +var yrdrand = []ytab{ + {Zo_m, 2, argList{Yrl}}, +} + +var yclflush = []ytab{ + {Zo_m, 2, argList{Ym}}, +} + +var ybswap = []ytab{ + {Z_rp, 2, argList{Yrl}}, +} + +var yscond = []ytab{ + {Zo_m, 2, argList{Ymb}}, +} + +var yjcond = []ytab{ + {Zbr, 0, argList{Ybr}}, + {Zbr, 0, argList{Yi0, Ybr}}, + {Zbr, 1, argList{Yi1, Ybr}}, +} + +var yloop = []ytab{ + {Zloop, 1, argList{Ybr}}, +} + +var ycall = []ytab{ + {Zcallindreg, 0, argList{Yml}}, + {Zcallindreg, 2, argList{Yrx, Yrx}}, + {Zcallind, 2, argList{Yindir}}, + {Zcall, 0, argList{Ybr}}, + {Zcallcon, 1, argList{Yi32}}, +} + +var yduff = []ytab{ + {Zcallduff, 1, argList{Yi32}}, +} + +var yjmp = []ytab{ + {Zo_m64, 2, argList{Yml}}, + {Zjmp, 0, argList{Ybr}}, + {Zjmpcon, 1, argList{Yi32}}, +} + +var yfmvd = []ytab{ + {Zm_o, 2, argList{Ym, Yf0}}, + {Zo_m, 2, argList{Yf0, Ym}}, + {Zm_o, 2, argList{Yrf, Yf0}}, + {Zo_m, 2, argList{Yf0, Yrf}}, +} + +var yfmvdp = []ytab{ + {Zo_m, 2, argList{Yf0, Ym}}, + {Zo_m, 2, argList{Yf0, Yrf}}, +} + +var yfmvf = []ytab{ + {Zm_o, 2, argList{Ym, Yf0}}, + {Zo_m, 2, argList{Yf0, Ym}}, +} + +var yfmvx = []ytab{ + {Zm_o, 2, argList{Ym, Yf0}}, +} + +var yfmvp = []ytab{ + {Zo_m, 2, argList{Yf0, Ym}}, +} + +var yfcmv = []ytab{ + {Zm_o, 2, argList{Yrf, Yf0}}, +} + +var yfadd = []ytab{ + {Zm_o, 2, argList{Ym, Yf0}}, + {Zm_o, 2, argList{Yrf, Yf0}}, + {Zo_m, 2, argList{Yf0, Yrf}}, +} + +var yfxch = []ytab{ + {Zo_m, 2, argList{Yf0, Yrf}}, + {Zm_o, 2, argList{Yrf, Yf0}}, +} + +var ycompp = []ytab{ + {Zo_m, 2, argList{Yf0, 
Yrf}}, // botch is really f0,f1 +} + +var ystsw = []ytab{ + {Zo_m, 2, argList{Ym}}, + {Zlit, 1, argList{Yax}}, +} + +var ysvrs_mo = []ytab{ + {Zm_o, 2, argList{Ym}}, +} + +// unaryDst version of "ysvrs_mo". +var ysvrs_om = []ytab{ + {Zo_m, 2, argList{Ym}}, +} + +var ymm = []ytab{ + {Zm_r_xm, 1, argList{Ymm, Ymr}}, + {Zm_r_xm, 2, argList{Yxm, Yxr}}, +} + +var yxm = []ytab{ + {Zm_r_xm, 1, argList{Yxm, Yxr}}, +} + +var yxm_q4 = []ytab{ + {Zm_r, 1, argList{Yxm, Yxr}}, +} + +var yxcvm1 = []ytab{ + {Zm_r_xm, 2, argList{Yxm, Yxr}}, + {Zm_r_xm, 2, argList{Yxm, Ymr}}, +} + +var yxcvm2 = []ytab{ + {Zm_r_xm, 2, argList{Yxm, Yxr}}, + {Zm_r_xm, 2, argList{Ymm, Yxr}}, +} + +var yxr = []ytab{ + {Zm_r_xm, 1, argList{Yxr, Yxr}}, +} + +var yxr_ml = []ytab{ + {Zr_m_xm, 1, argList{Yxr, Yml}}, +} + +var ymr = []ytab{ + {Zm_r, 1, argList{Ymr, Ymr}}, +} + +var ymr_ml = []ytab{ + {Zr_m_xm, 1, argList{Ymr, Yml}}, +} + +var yxcmpi = []ytab{ + {Zm_r_i_xm, 2, argList{Yxm, Yxr, Yi8}}, +} + +var yxmov = []ytab{ + {Zm_r_xm, 1, argList{Yxm, Yxr}}, + {Zr_m_xm, 1, argList{Yxr, Yxm}}, +} + +var yxcvfl = []ytab{ + {Zm_r_xm, 1, argList{Yxm, Yrl}}, +} + +var yxcvlf = []ytab{ + {Zm_r_xm, 1, argList{Yml, Yxr}}, +} + +var yxcvfq = []ytab{ + {Zm_r_xm, 2, argList{Yxm, Yrl}}, +} + +var yxcvqf = []ytab{ + {Zm_r_xm, 2, argList{Yml, Yxr}}, +} + +var yps = []ytab{ + {Zm_r_xm, 1, argList{Ymm, Ymr}}, + {Zibo_m_xm, 2, argList{Yi8, Ymr}}, + {Zm_r_xm, 2, argList{Yxm, Yxr}}, + {Zibo_m_xm, 3, argList{Yi8, Yxr}}, +} + +var yxrrl = []ytab{ + {Zm_r, 1, argList{Yxr, Yrl}}, +} + +var ymrxr = []ytab{ + {Zm_r, 1, argList{Ymr, Yxr}}, + {Zm_r_xm, 1, argList{Yxm, Yxr}}, +} + +var ymshuf = []ytab{ + {Zibm_r, 2, argList{Yi8, Ymm, Ymr}}, +} + +var ymshufb = []ytab{ + {Zm2_r, 2, argList{Yxm, Yxr}}, +} + +// It should never have more than 1 entry, +// because some optab entries have opcode sequences that +// are longer than 2 bytes (zoffset=2 here), +// ROUNDPD and ROUNDPS and recently added BLENDPD, +// to name a few. 
+var yxshuf = []ytab{ + {Zibm_r, 2, argList{Yu8, Yxm, Yxr}}, +} + +var yextrw = []ytab{ + {Zibm_r, 2, argList{Yu8, Yxr, Yrl}}, + {Zibr_m, 2, argList{Yu8, Yxr, Yml}}, +} + +var yextr = []ytab{ + {Zibr_m, 3, argList{Yu8, Yxr, Ymm}}, +} + +var yinsrw = []ytab{ + {Zibm_r, 2, argList{Yu8, Yml, Yxr}}, +} + +var yinsr = []ytab{ + {Zibm_r, 3, argList{Yu8, Ymm, Yxr}}, +} + +var ypsdq = []ytab{ + {Zibo_m, 2, argList{Yi8, Yxr}}, +} + +var ymskb = []ytab{ + {Zm_r_xm, 2, argList{Yxr, Yrl}}, + {Zm_r_xm, 1, argList{Ymr, Yrl}}, +} + +var ycrc32l = []ytab{ + {Zlitm_r, 0, argList{Yml, Yrl}}, +} + +var ycrc32b = []ytab{ + {Zlitm_r, 0, argList{Ymb, Yrl}}, +} + +var yprefetch = []ytab{ + {Zm_o, 2, argList{Ym}}, +} + +var yaes = []ytab{ + {Zlitm_r, 2, argList{Yxm, Yxr}}, +} + +var yxbegin = []ytab{ + {Zjmp, 1, argList{Ybr}}, +} + +var yxabort = []ytab{ + {Zib_, 1, argList{Yu8}}, +} + +var ylddqu = []ytab{ + {Zm_r, 1, argList{Ym, Yxr}}, +} + +var ypalignr = []ytab{ + {Zibm_r, 2, argList{Yu8, Yxm, Yxr}}, +} + +var ysha256rnds2 = []ytab{ + {Zlit_m_r, 0, argList{Yxr0, Yxm, Yxr}}, +} + +var yblendvpd = []ytab{ + {Z_m_r, 1, argList{Yxr0, Yxm, Yxr}}, +} + +var ymmxmm0f38 = []ytab{ + {Zlitm_r, 3, argList{Ymm, Ymr}}, + {Zlitm_r, 5, argList{Yxm, Yxr}}, +} + +var yextractps = []ytab{ + {Zibr_m, 2, argList{Yu2, Yxr, Yml}}, +} + +var ysha1rnds4 = []ytab{ + {Zibm_r, 2, argList{Yu2, Yxm, Yxr}}, +} + +// You are doasm, holding in your hand a *obj.Prog with p.As set to, say, +// ACRC32, and p.From and p.To as operands (obj.Addr). The linker scans optab +// to find the entry with the given p.As and then looks through the ytable for +// that instruction (the second field in the optab struct) for a line whose +// first two values match the Ytypes of the p.From and p.To operands. The +// function oclass computes the specific Ytype of an operand and then the set +// of more general Ytypes that it satisfies is implied by the ycover table, set +// up in instinit. 
For example, oclass distinguishes the constants 0 and 1 +// from the more general 8-bit constants, but instinit says +// +// ycover[Yi0*Ymax+Ys32] = 1 +// ycover[Yi1*Ymax+Ys32] = 1 +// ycover[Yi8*Ymax+Ys32] = 1 +// +// which means that Yi0, Yi1, and Yi8 all count as Ys32 (signed 32) +// if that's what an instruction can handle. +// +// In parallel with the scan through the ytable for the appropriate line, there +// is a z pointer that starts out pointing at the strange magic byte list in +// the Optab struct. With each step past a non-matching ytable line, z +// advances by the 4th entry in the line. When a matching line is found, that +// z pointer has the extra data to use in laying down the instruction bytes. +// The actual bytes laid down are a function of the 3rd entry in the line (that +// is, the Ztype) and the z bytes. +// +// For example, let's look at AADDL. The optab line says: +// +// {AADDL, yaddl, Px, opBytes{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}}, +// +// and yaddl says +// +// var yaddl = []ytab{ +// {Yi8, Ynone, Yml, Zibo_m, 2}, +// {Yi32, Ynone, Yax, Zil_, 1}, +// {Yi32, Ynone, Yml, Zilo_m, 2}, +// {Yrl, Ynone, Yml, Zr_m, 1}, +// {Yml, Ynone, Yrl, Zm_r, 1}, +// } +// +// so there are 5 possible types of ADDL instruction that can be laid down, and +// possible states used to lay them down (Ztype and z pointer, assuming z +// points at opBytes{0x83, 00, 0x05,0x81, 00, 0x01, 0x03}) are: +// +// Yi8, Yml -> Zibo_m, z (0x83, 00) +// Yi32, Yax -> Zil_, z+2 (0x05) +// Yi32, Yml -> Zilo_m, z+2+1 (0x81, 0x00) +// Yrl, Yml -> Zr_m, z+2+1+2 (0x01) +// Yml, Yrl -> Zm_r, z+2+1+2+1 (0x03) +// +// The Pconstant in the optab line controls the prefix bytes to emit. That's +// relatively straightforward as this program goes. +// +// The switch on yt.zcase in doasm implements the various Z cases. Zibo_m, for +// example, is an opcode byte (z[0]) then an asmando (which is some kind of +// encoded addressing mode for the Yml arg), and then a single immediate byte. 
+// Zilo_m is the same but a long (32-bit) immediate. +var optab = +// as, ytab, andproto, opcode +[...]Optab{ + {obj.AXXX, nil, 0, opBytes{}}, + {AAAA, ynone, P32, opBytes{0x37}}, + {AAAD, ynone, P32, opBytes{0xd5, 0x0a}}, + {AAAM, ynone, P32, opBytes{0xd4, 0x0a}}, + {AAAS, ynone, P32, opBytes{0x3f}}, + {AADCB, yxorb, Pb, opBytes{0x14, 0x80, 02, 0x10, 0x12}}, + {AADCL, yaddl, Px, opBytes{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}}, + {AADCQ, yaddl, Pw, opBytes{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}}, + {AADCW, yaddl, Pe, opBytes{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}}, + {AADCXL, yml_rl, Pq4, opBytes{0xf6}}, + {AADCXQ, yml_rl, Pq4w, opBytes{0xf6}}, + {AADDB, yxorb, Pb, opBytes{0x04, 0x80, 00, 0x00, 0x02}}, + {AADDL, yaddl, Px, opBytes{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}}, + {AADDPD, yxm, Pq, opBytes{0x58}}, + {AADDPS, yxm, Pm, opBytes{0x58}}, + {AADDQ, yaddl, Pw, opBytes{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}}, + {AADDSD, yxm, Pf2, opBytes{0x58}}, + {AADDSS, yxm, Pf3, opBytes{0x58}}, + {AADDSUBPD, yxm, Pq, opBytes{0xd0}}, + {AADDSUBPS, yxm, Pf2, opBytes{0xd0}}, + {AADDW, yaddl, Pe, opBytes{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}}, + {AADOXL, yml_rl, Pq5, opBytes{0xf6}}, + {AADOXQ, yml_rl, Pq5w, opBytes{0xf6}}, + {AADJSP, nil, 0, opBytes{}}, + {AANDB, yxorb, Pb, opBytes{0x24, 0x80, 04, 0x20, 0x22}}, + {AANDL, yaddl, Px, opBytes{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}}, + {AANDNPD, yxm, Pq, opBytes{0x55}}, + {AANDNPS, yxm, Pm, opBytes{0x55}}, + {AANDPD, yxm, Pq, opBytes{0x54}}, + {AANDPS, yxm, Pm, opBytes{0x54}}, + {AANDQ, yaddl, Pw, opBytes{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}}, + {AANDW, yaddl, Pe, opBytes{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}}, + {AARPL, yrl_ml, P32, opBytes{0x63}}, + {ABOUNDL, yrl_m, P32, opBytes{0x62}}, + {ABOUNDW, yrl_m, Pe, opBytes{0x62}}, + {ABSFL, yml_rl, Pm, opBytes{0xbc}}, + {ABSFQ, yml_rl, Pw, opBytes{0x0f, 0xbc}}, + {ABSFW, yml_rl, Pq, opBytes{0xbc}}, + {ABSRL, yml_rl, Pm, opBytes{0xbd}}, + {ABSRQ, yml_rl, Pw, opBytes{0x0f, 0xbd}}, + {ABSRW, 
yml_rl, Pq, opBytes{0xbd}}, + {ABSWAPL, ybswap, Px, opBytes{0x0f, 0xc8}}, + {ABSWAPQ, ybswap, Pw, opBytes{0x0f, 0xc8}}, + {ABTCL, ybtl, Pm, opBytes{0xba, 07, 0xbb}}, + {ABTCQ, ybtl, Pw, opBytes{0x0f, 0xba, 07, 0x0f, 0xbb}}, + {ABTCW, ybtl, Pq, opBytes{0xba, 07, 0xbb}}, + {ABTL, ybtl, Pm, opBytes{0xba, 04, 0xa3}}, + {ABTQ, ybtl, Pw, opBytes{0x0f, 0xba, 04, 0x0f, 0xa3}}, + {ABTRL, ybtl, Pm, opBytes{0xba, 06, 0xb3}}, + {ABTRQ, ybtl, Pw, opBytes{0x0f, 0xba, 06, 0x0f, 0xb3}}, + {ABTRW, ybtl, Pq, opBytes{0xba, 06, 0xb3}}, + {ABTSL, ybtl, Pm, opBytes{0xba, 05, 0xab}}, + {ABTSQ, ybtl, Pw, opBytes{0x0f, 0xba, 05, 0x0f, 0xab}}, + {ABTSW, ybtl, Pq, opBytes{0xba, 05, 0xab}}, + {ABTW, ybtl, Pq, opBytes{0xba, 04, 0xa3}}, + {ABYTE, ybyte, Px, opBytes{1}}, + {obj.ACALL, ycall, Px, opBytes{0xff, 02, 0xff, 0x15, 0xe8}}, + {ACBW, ynone, Pe, opBytes{0x98}}, + {ACDQ, ynone, Px, opBytes{0x99}}, + {ACDQE, ynone, Pw, opBytes{0x98}}, + {ACLAC, ynone, Pm, opBytes{01, 0xca}}, + {ACLC, ynone, Px, opBytes{0xf8}}, + {ACLD, ynone, Px, opBytes{0xfc}}, + {ACLDEMOTE, yclflush, Pm, opBytes{0x1c, 00}}, + {ACLFLUSH, yclflush, Pm, opBytes{0xae, 07}}, + {ACLFLUSHOPT, yclflush, Pq, opBytes{0xae, 07}}, + {ACLI, ynone, Px, opBytes{0xfa}}, + {ACLTS, ynone, Pm, opBytes{0x06}}, + {ACLWB, yclflush, Pq, opBytes{0xae, 06}}, + {ACMC, ynone, Px, opBytes{0xf5}}, + {ACMOVLCC, yml_rl, Pm, opBytes{0x43}}, + {ACMOVLCS, yml_rl, Pm, opBytes{0x42}}, + {ACMOVLEQ, yml_rl, Pm, opBytes{0x44}}, + {ACMOVLGE, yml_rl, Pm, opBytes{0x4d}}, + {ACMOVLGT, yml_rl, Pm, opBytes{0x4f}}, + {ACMOVLHI, yml_rl, Pm, opBytes{0x47}}, + {ACMOVLLE, yml_rl, Pm, opBytes{0x4e}}, + {ACMOVLLS, yml_rl, Pm, opBytes{0x46}}, + {ACMOVLLT, yml_rl, Pm, opBytes{0x4c}}, + {ACMOVLMI, yml_rl, Pm, opBytes{0x48}}, + {ACMOVLNE, yml_rl, Pm, opBytes{0x45}}, + {ACMOVLOC, yml_rl, Pm, opBytes{0x41}}, + {ACMOVLOS, yml_rl, Pm, opBytes{0x40}}, + {ACMOVLPC, yml_rl, Pm, opBytes{0x4b}}, + {ACMOVLPL, yml_rl, Pm, opBytes{0x49}}, + {ACMOVLPS, yml_rl, Pm, opBytes{0x4a}}, + 
{ACMOVQCC, yml_rl, Pw, opBytes{0x0f, 0x43}}, + {ACMOVQCS, yml_rl, Pw, opBytes{0x0f, 0x42}}, + {ACMOVQEQ, yml_rl, Pw, opBytes{0x0f, 0x44}}, + {ACMOVQGE, yml_rl, Pw, opBytes{0x0f, 0x4d}}, + {ACMOVQGT, yml_rl, Pw, opBytes{0x0f, 0x4f}}, + {ACMOVQHI, yml_rl, Pw, opBytes{0x0f, 0x47}}, + {ACMOVQLE, yml_rl, Pw, opBytes{0x0f, 0x4e}}, + {ACMOVQLS, yml_rl, Pw, opBytes{0x0f, 0x46}}, + {ACMOVQLT, yml_rl, Pw, opBytes{0x0f, 0x4c}}, + {ACMOVQMI, yml_rl, Pw, opBytes{0x0f, 0x48}}, + {ACMOVQNE, yml_rl, Pw, opBytes{0x0f, 0x45}}, + {ACMOVQOC, yml_rl, Pw, opBytes{0x0f, 0x41}}, + {ACMOVQOS, yml_rl, Pw, opBytes{0x0f, 0x40}}, + {ACMOVQPC, yml_rl, Pw, opBytes{0x0f, 0x4b}}, + {ACMOVQPL, yml_rl, Pw, opBytes{0x0f, 0x49}}, + {ACMOVQPS, yml_rl, Pw, opBytes{0x0f, 0x4a}}, + {ACMOVWCC, yml_rl, Pq, opBytes{0x43}}, + {ACMOVWCS, yml_rl, Pq, opBytes{0x42}}, + {ACMOVWEQ, yml_rl, Pq, opBytes{0x44}}, + {ACMOVWGE, yml_rl, Pq, opBytes{0x4d}}, + {ACMOVWGT, yml_rl, Pq, opBytes{0x4f}}, + {ACMOVWHI, yml_rl, Pq, opBytes{0x47}}, + {ACMOVWLE, yml_rl, Pq, opBytes{0x4e}}, + {ACMOVWLS, yml_rl, Pq, opBytes{0x46}}, + {ACMOVWLT, yml_rl, Pq, opBytes{0x4c}}, + {ACMOVWMI, yml_rl, Pq, opBytes{0x48}}, + {ACMOVWNE, yml_rl, Pq, opBytes{0x45}}, + {ACMOVWOC, yml_rl, Pq, opBytes{0x41}}, + {ACMOVWOS, yml_rl, Pq, opBytes{0x40}}, + {ACMOVWPC, yml_rl, Pq, opBytes{0x4b}}, + {ACMOVWPL, yml_rl, Pq, opBytes{0x49}}, + {ACMOVWPS, yml_rl, Pq, opBytes{0x4a}}, + {ACMPB, ycmpb, Pb, opBytes{0x3c, 0x80, 07, 0x38, 0x3a}}, + {ACMPL, ycmpl, Px, opBytes{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}}, + {ACMPPD, yxcmpi, Px, opBytes{Pe, 0xc2}}, + {ACMPPS, yxcmpi, Pm, opBytes{0xc2, 0}}, + {ACMPQ, ycmpl, Pw, opBytes{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}}, + {ACMPSB, ynone, Pb, opBytes{0xa6}}, + {ACMPSD, yxcmpi, Px, opBytes{Pf2, 0xc2}}, + {ACMPSL, ynone, Px, opBytes{0xa7}}, + {ACMPSQ, ynone, Pw, opBytes{0xa7}}, + {ACMPSS, yxcmpi, Px, opBytes{Pf3, 0xc2}}, + {ACMPSW, ynone, Pe, opBytes{0xa7}}, + {ACMPW, ycmpl, Pe, opBytes{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}}, + 
{ACOMISD, yxm, Pe, opBytes{0x2f}}, + {ACOMISS, yxm, Pm, opBytes{0x2f}}, + {ACPUID, ynone, Pm, opBytes{0xa2}}, + {ACVTPL2PD, yxcvm2, Px, opBytes{Pf3, 0xe6, Pe, 0x2a}}, + {ACVTPL2PS, yxcvm2, Pm, opBytes{0x5b, 0, 0x2a, 0}}, + {ACVTPD2PL, yxcvm1, Px, opBytes{Pf2, 0xe6, Pe, 0x2d}}, + {ACVTPD2PS, yxm, Pe, opBytes{0x5a}}, + {ACVTPS2PL, yxcvm1, Px, opBytes{Pe, 0x5b, Pm, 0x2d}}, + {ACVTPS2PD, yxm, Pm, opBytes{0x5a}}, + {ACVTSD2SL, yxcvfl, Pf2, opBytes{0x2d}}, + {ACVTSD2SQ, yxcvfq, Pw, opBytes{Pf2, 0x2d}}, + {ACVTSD2SS, yxm, Pf2, opBytes{0x5a}}, + {ACVTSL2SD, yxcvlf, Pf2, opBytes{0x2a}}, + {ACVTSQ2SD, yxcvqf, Pw, opBytes{Pf2, 0x2a}}, + {ACVTSL2SS, yxcvlf, Pf3, opBytes{0x2a}}, + {ACVTSQ2SS, yxcvqf, Pw, opBytes{Pf3, 0x2a}}, + {ACVTSS2SD, yxm, Pf3, opBytes{0x5a}}, + {ACVTSS2SL, yxcvfl, Pf3, opBytes{0x2d}}, + {ACVTSS2SQ, yxcvfq, Pw, opBytes{Pf3, 0x2d}}, + {ACVTTPD2PL, yxcvm1, Px, opBytes{Pe, 0xe6, Pe, 0x2c}}, + {ACVTTPS2PL, yxcvm1, Px, opBytes{Pf3, 0x5b, Pm, 0x2c}}, + {ACVTTSD2SL, yxcvfl, Pf2, opBytes{0x2c}}, + {ACVTTSD2SQ, yxcvfq, Pw, opBytes{Pf2, 0x2c}}, + {ACVTTSS2SL, yxcvfl, Pf3, opBytes{0x2c}}, + {ACVTTSS2SQ, yxcvfq, Pw, opBytes{Pf3, 0x2c}}, + {ACWD, ynone, Pe, opBytes{0x99}}, + {ACWDE, ynone, Px, opBytes{0x98}}, + {ACQO, ynone, Pw, opBytes{0x99}}, + {ADAA, ynone, P32, opBytes{0x27}}, + {ADAS, ynone, P32, opBytes{0x2f}}, + {ADECB, yscond, Pb, opBytes{0xfe, 01}}, + {ADECL, yincl, Px1, opBytes{0x48, 0xff, 01}}, + {ADECQ, yincq, Pw, opBytes{0xff, 01}}, + {ADECW, yincq, Pe, opBytes{0xff, 01}}, + {ADIVB, ydivb, Pb, opBytes{0xf6, 06}}, + {ADIVL, ydivl, Px, opBytes{0xf7, 06}}, + {ADIVPD, yxm, Pe, opBytes{0x5e}}, + {ADIVPS, yxm, Pm, opBytes{0x5e}}, + {ADIVQ, ydivl, Pw, opBytes{0xf7, 06}}, + {ADIVSD, yxm, Pf2, opBytes{0x5e}}, + {ADIVSS, yxm, Pf3, opBytes{0x5e}}, + {ADIVW, ydivl, Pe, opBytes{0xf7, 06}}, + {ADPPD, yxshuf, Pq, opBytes{0x3a, 0x41, 0}}, + {ADPPS, yxshuf, Pq, opBytes{0x3a, 0x40, 0}}, + {AEMMS, ynone, Pm, opBytes{0x77}}, + {AEXTRACTPS, yextractps, Pq, opBytes{0x3a, 0x17, 
0}}, + {AENTER, nil, 0, opBytes{}}, // botch + {AFXRSTOR, ysvrs_mo, Pm, opBytes{0xae, 01, 0xae, 01}}, + {AFXSAVE, ysvrs_om, Pm, opBytes{0xae, 00, 0xae, 00}}, + {AFXRSTOR64, ysvrs_mo, Pw, opBytes{0x0f, 0xae, 01, 0x0f, 0xae, 01}}, + {AFXSAVE64, ysvrs_om, Pw, opBytes{0x0f, 0xae, 00, 0x0f, 0xae, 00}}, + {AHLT, ynone, Px, opBytes{0xf4}}, + {AIDIVB, ydivb, Pb, opBytes{0xf6, 07}}, + {AIDIVL, ydivl, Px, opBytes{0xf7, 07}}, + {AIDIVQ, ydivl, Pw, opBytes{0xf7, 07}}, + {AIDIVW, ydivl, Pe, opBytes{0xf7, 07}}, + {AIMULB, ydivb, Pb, opBytes{0xf6, 05}}, + {AIMULL, yimul, Px, opBytes{0xf7, 05, 0x6b, 0x69, Pm, 0xaf}}, + {AIMULQ, yimul, Pw, opBytes{0xf7, 05, 0x6b, 0x69, Pm, 0xaf}}, + {AIMULW, yimul, Pe, opBytes{0xf7, 05, 0x6b, 0x69, Pm, 0xaf}}, + {AIMUL3W, yimul3, Pe, opBytes{0x6b, 00, 0x69, 00}}, + {AIMUL3L, yimul3, Px, opBytes{0x6b, 00, 0x69, 00}}, + {AIMUL3Q, yimul3, Pw, opBytes{0x6b, 00, 0x69, 00}}, + {AINB, yin, Pb, opBytes{0xe4, 0xec}}, + {AINW, yin, Pe, opBytes{0xe5, 0xed}}, + {AINL, yin, Px, opBytes{0xe5, 0xed}}, + {AINCB, yscond, Pb, opBytes{0xfe, 00}}, + {AINCL, yincl, Px1, opBytes{0x40, 0xff, 00}}, + {AINCQ, yincq, Pw, opBytes{0xff, 00}}, + {AINCW, yincq, Pe, opBytes{0xff, 00}}, + {AINSB, ynone, Pb, opBytes{0x6c}}, + {AINSL, ynone, Px, opBytes{0x6d}}, + {AINSERTPS, yxshuf, Pq, opBytes{0x3a, 0x21, 0}}, + {AINSW, ynone, Pe, opBytes{0x6d}}, + {AICEBP, ynone, Px, opBytes{0xf1}}, + {AINT, yint, Px, opBytes{0xcd}}, + {AINTO, ynone, P32, opBytes{0xce}}, + {AIRETL, ynone, Px, opBytes{0xcf}}, + {AIRETQ, ynone, Pw, opBytes{0xcf}}, + {AIRETW, ynone, Pe, opBytes{0xcf}}, + {AJCC, yjcond, Px, opBytes{0x73, 0x83, 00}}, + {AJCS, yjcond, Px, opBytes{0x72, 0x82}}, + {AJCXZL, yloop, Px, opBytes{0xe3}}, + {AJCXZW, yloop, Px, opBytes{0xe3}}, + {AJCXZQ, yloop, Px, opBytes{0xe3}}, + {AJEQ, yjcond, Px, opBytes{0x74, 0x84}}, + {AJGE, yjcond, Px, opBytes{0x7d, 0x8d}}, + {AJGT, yjcond, Px, opBytes{0x7f, 0x8f}}, + {AJHI, yjcond, Px, opBytes{0x77, 0x87}}, + {AJLE, yjcond, Px, opBytes{0x7e, 0x8e}}, + 
{AJLS, yjcond, Px, opBytes{0x76, 0x86}}, + {AJLT, yjcond, Px, opBytes{0x7c, 0x8c}}, + {AJMI, yjcond, Px, opBytes{0x78, 0x88}}, + {obj.AJMP, yjmp, Px, opBytes{0xff, 04, 0xeb, 0xe9}}, + {AJNE, yjcond, Px, opBytes{0x75, 0x85}}, + {AJOC, yjcond, Px, opBytes{0x71, 0x81, 00}}, + {AJOS, yjcond, Px, opBytes{0x70, 0x80, 00}}, + {AJPC, yjcond, Px, opBytes{0x7b, 0x8b}}, + {AJPL, yjcond, Px, opBytes{0x79, 0x89}}, + {AJPS, yjcond, Px, opBytes{0x7a, 0x8a}}, + {AHADDPD, yxm, Pq, opBytes{0x7c}}, + {AHADDPS, yxm, Pf2, opBytes{0x7c}}, + {AHSUBPD, yxm, Pq, opBytes{0x7d}}, + {AHSUBPS, yxm, Pf2, opBytes{0x7d}}, + {ALAHF, ynone, Px, opBytes{0x9f}}, + {ALARL, yml_rl, Pm, opBytes{0x02}}, + {ALARQ, yml_rl, Pw, opBytes{0x0f, 0x02}}, + {ALARW, yml_rl, Pq, opBytes{0x02}}, + {ALDDQU, ylddqu, Pf2, opBytes{0xf0}}, + {ALDMXCSR, ysvrs_mo, Pm, opBytes{0xae, 02, 0xae, 02}}, + {ALEAL, ym_rl, Px, opBytes{0x8d}}, + {ALEAQ, ym_rl, Pw, opBytes{0x8d}}, + {ALEAVEL, ynone, P32, opBytes{0xc9}}, + {ALEAVEQ, ynone, Py, opBytes{0xc9}}, + {ALEAVEW, ynone, Pe, opBytes{0xc9}}, + {ALEAW, ym_rl, Pe, opBytes{0x8d}}, + {ALOCK, ynone, Px, opBytes{0xf0}}, + {ALODSB, ynone, Pb, opBytes{0xac}}, + {ALODSL, ynone, Px, opBytes{0xad}}, + {ALODSQ, ynone, Pw, opBytes{0xad}}, + {ALODSW, ynone, Pe, opBytes{0xad}}, + {ALONG, ybyte, Px, opBytes{4}}, + {ALOOP, yloop, Px, opBytes{0xe2}}, + {ALOOPEQ, yloop, Px, opBytes{0xe1}}, + {ALOOPNE, yloop, Px, opBytes{0xe0}}, + {ALTR, ydivl, Pm, opBytes{0x00, 03}}, + {ALZCNTL, yml_rl, Pf3, opBytes{0xbd}}, + {ALZCNTQ, yml_rl, Pfw, opBytes{0xbd}}, + {ALZCNTW, yml_rl, Pef3, opBytes{0xbd}}, + {ALSLL, yml_rl, Pm, opBytes{0x03}}, + {ALSLW, yml_rl, Pq, opBytes{0x03}}, + {ALSLQ, yml_rl, Pw, opBytes{0x0f, 0x03}}, + {AMASKMOVOU, yxr, Pe, opBytes{0xf7}}, + {AMASKMOVQ, ymr, Pm, opBytes{0xf7}}, + {AMAXPD, yxm, Pe, opBytes{0x5f}}, + {AMAXPS, yxm, Pm, opBytes{0x5f}}, + {AMAXSD, yxm, Pf2, opBytes{0x5f}}, + {AMAXSS, yxm, Pf3, opBytes{0x5f}}, + {AMINPD, yxm, Pe, opBytes{0x5d}}, + {AMINPS, yxm, Pm, opBytes{0x5d}}, 
+ {AMINSD, yxm, Pf2, opBytes{0x5d}}, + {AMINSS, yxm, Pf3, opBytes{0x5d}}, + {AMONITOR, ynone, Px, opBytes{0x0f, 0x01, 0xc8, 0}}, + {AMWAIT, ynone, Px, opBytes{0x0f, 0x01, 0xc9, 0}}, + {AMOVAPD, yxmov, Pe, opBytes{0x28, 0x29}}, + {AMOVAPS, yxmov, Pm, opBytes{0x28, 0x29}}, + {AMOVB, ymovb, Pb, opBytes{0x88, 0x8a, 0xb0, 0xc6, 00}}, + {AMOVBLSX, ymb_rl, Pm, opBytes{0xbe}}, + {AMOVBLZX, ymb_rl, Pm, opBytes{0xb6}}, + {AMOVBQSX, ymb_rl, Pw, opBytes{0x0f, 0xbe}}, + {AMOVBQZX, ymb_rl, Pw, opBytes{0x0f, 0xb6}}, + {AMOVBWSX, ymb_rl, Pq, opBytes{0xbe}}, + {AMOVSWW, ymb_rl, Pe, opBytes{0x0f, 0xbf}}, + {AMOVBWZX, ymb_rl, Pq, opBytes{0xb6}}, + {AMOVZWW, ymb_rl, Pe, opBytes{0x0f, 0xb7}}, + {AMOVO, yxmov, Pe, opBytes{0x6f, 0x7f}}, + {AMOVOU, yxmov, Pf3, opBytes{0x6f, 0x7f}}, + {AMOVHLPS, yxr, Pm, opBytes{0x12}}, + {AMOVHPD, yxmov, Pe, opBytes{0x16, 0x17}}, + {AMOVHPS, yxmov, Pm, opBytes{0x16, 0x17}}, + {AMOVL, ymovl, Px, opBytes{0x89, 0x8b, 0xb8, 0xc7, 00, 0x6e, 0x7e, Pe, 0x6e, Pe, 0x7e, 0}}, + {AMOVLHPS, yxr, Pm, opBytes{0x16}}, + {AMOVLPD, yxmov, Pe, opBytes{0x12, 0x13}}, + {AMOVLPS, yxmov, Pm, opBytes{0x12, 0x13}}, + {AMOVLQSX, yml_rl, Pw, opBytes{0x63}}, + {AMOVLQZX, yml_rl, Px, opBytes{0x8b}}, + {AMOVMSKPD, yxrrl, Pq, opBytes{0x50}}, + {AMOVMSKPS, yxrrl, Pm, opBytes{0x50}}, + {AMOVNTO, yxr_ml, Pe, opBytes{0xe7}}, + {AMOVNTDQA, ylddqu, Pq4, opBytes{0x2a}}, + {AMOVNTPD, yxr_ml, Pe, opBytes{0x2b}}, + {AMOVNTPS, yxr_ml, Pm, opBytes{0x2b}}, + {AMOVNTQ, ymr_ml, Pm, opBytes{0xe7}}, + {AMOVQ, ymovq, Pw8, opBytes{0x6f, 0x7f, Pf2, 0xd6, Pf3, 0x7e, Pe, 0xd6, 0x89, 0x8b, 0xc7, 00, 0xb8, 0xc7, 00, 0x6e, 0x7e, Pe, 0x6e, Pe, 0x7e, 0}}, + {AMOVQOZX, ymrxr, Pf3, opBytes{0xd6, 0x7e}}, + {AMOVSB, ynone, Pb, opBytes{0xa4}}, + {AMOVSD, yxmov, Pf2, opBytes{0x10, 0x11}}, + {AMOVSL, ynone, Px, opBytes{0xa5}}, + {AMOVSQ, ynone, Pw, opBytes{0xa5}}, + {AMOVSS, yxmov, Pf3, opBytes{0x10, 0x11}}, + {AMOVSW, ynone, Pe, opBytes{0xa5}}, + {AMOVUPD, yxmov, Pe, opBytes{0x10, 0x11}}, + {AMOVUPS, yxmov, Pm, 
opBytes{0x10, 0x11}}, + {AMOVW, ymovw, Pe, opBytes{0x89, 0x8b, 0xb8, 0xc7, 00, 0}}, + {AMOVWLSX, yml_rl, Pm, opBytes{0xbf}}, + {AMOVWLZX, yml_rl, Pm, opBytes{0xb7}}, + {AMOVWQSX, yml_rl, Pw, opBytes{0x0f, 0xbf}}, + {AMOVWQZX, yml_rl, Pw, opBytes{0x0f, 0xb7}}, + {AMPSADBW, yxshuf, Pq, opBytes{0x3a, 0x42, 0}}, + {AMULB, ydivb, Pb, opBytes{0xf6, 04}}, + {AMULL, ydivl, Px, opBytes{0xf7, 04}}, + {AMULPD, yxm, Pe, opBytes{0x59}}, + {AMULPS, yxm, Ym, opBytes{0x59}}, + {AMULQ, ydivl, Pw, opBytes{0xf7, 04}}, + {AMULSD, yxm, Pf2, opBytes{0x59}}, + {AMULSS, yxm, Pf3, opBytes{0x59}}, + {AMULW, ydivl, Pe, opBytes{0xf7, 04}}, + {ANEGB, yscond, Pb, opBytes{0xf6, 03}}, + {ANEGL, yscond, Px, opBytes{0xf7, 03}}, + {ANEGQ, yscond, Pw, opBytes{0xf7, 03}}, + {ANEGW, yscond, Pe, opBytes{0xf7, 03}}, + {obj.ANOP, ynop, Px, opBytes{0, 0}}, + {ANOTB, yscond, Pb, opBytes{0xf6, 02}}, + {ANOTL, yscond, Px, opBytes{0xf7, 02}}, // TODO(rsc): yscond is wrong here. + {ANOTQ, yscond, Pw, opBytes{0xf7, 02}}, + {ANOTW, yscond, Pe, opBytes{0xf7, 02}}, + {AORB, yxorb, Pb, opBytes{0x0c, 0x80, 01, 0x08, 0x0a}}, + {AORL, yaddl, Px, opBytes{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}}, + {AORPD, yxm, Pq, opBytes{0x56}}, + {AORPS, yxm, Pm, opBytes{0x56}}, + {AORQ, yaddl, Pw, opBytes{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}}, + {AORW, yaddl, Pe, opBytes{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}}, + {AOUTB, yin, Pb, opBytes{0xe6, 0xee}}, + {AOUTL, yin, Px, opBytes{0xe7, 0xef}}, + {AOUTW, yin, Pe, opBytes{0xe7, 0xef}}, + {AOUTSB, ynone, Pb, opBytes{0x6e}}, + {AOUTSL, ynone, Px, opBytes{0x6f}}, + {AOUTSW, ynone, Pe, opBytes{0x6f}}, + {APABSB, yxm_q4, Pq4, opBytes{0x1c}}, + {APABSD, yxm_q4, Pq4, opBytes{0x1e}}, + {APABSW, yxm_q4, Pq4, opBytes{0x1d}}, + {APACKSSLW, ymm, Py1, opBytes{0x6b, Pe, 0x6b}}, + {APACKSSWB, ymm, Py1, opBytes{0x63, Pe, 0x63}}, + {APACKUSDW, yxm_q4, Pq4, opBytes{0x2b}}, + {APACKUSWB, ymm, Py1, opBytes{0x67, Pe, 0x67}}, + {APADDB, ymm, Py1, opBytes{0xfc, Pe, 0xfc}}, + {APADDL, ymm, Py1, opBytes{0xfe, Pe, 
0xfe}}, + {APADDQ, yxm, Pe, opBytes{0xd4}}, + {APADDSB, ymm, Py1, opBytes{0xec, Pe, 0xec}}, + {APADDSW, ymm, Py1, opBytes{0xed, Pe, 0xed}}, + {APADDUSB, ymm, Py1, opBytes{0xdc, Pe, 0xdc}}, + {APADDUSW, ymm, Py1, opBytes{0xdd, Pe, 0xdd}}, + {APADDW, ymm, Py1, opBytes{0xfd, Pe, 0xfd}}, + {APALIGNR, ypalignr, Pq, opBytes{0x3a, 0x0f}}, + {APAND, ymm, Py1, opBytes{0xdb, Pe, 0xdb}}, + {APANDN, ymm, Py1, opBytes{0xdf, Pe, 0xdf}}, + {APAUSE, ynone, Px, opBytes{0xf3, 0x90}}, + {APAVGB, ymm, Py1, opBytes{0xe0, Pe, 0xe0}}, + {APAVGW, ymm, Py1, opBytes{0xe3, Pe, 0xe3}}, + {APBLENDW, yxshuf, Pq, opBytes{0x3a, 0x0e, 0}}, + {APCMPEQB, ymm, Py1, opBytes{0x74, Pe, 0x74}}, + {APCMPEQL, ymm, Py1, opBytes{0x76, Pe, 0x76}}, + {APCMPEQQ, yxm_q4, Pq4, opBytes{0x29}}, + {APCMPEQW, ymm, Py1, opBytes{0x75, Pe, 0x75}}, + {APCMPGTB, ymm, Py1, opBytes{0x64, Pe, 0x64}}, + {APCMPGTL, ymm, Py1, opBytes{0x66, Pe, 0x66}}, + {APCMPGTQ, yxm_q4, Pq4, opBytes{0x37}}, + {APCMPGTW, ymm, Py1, opBytes{0x65, Pe, 0x65}}, + {APCMPISTRI, yxshuf, Pq, opBytes{0x3a, 0x63, 0}}, + {APCMPISTRM, yxshuf, Pq, opBytes{0x3a, 0x62, 0}}, + {APEXTRW, yextrw, Pq, opBytes{0xc5, 0, 0x3a, 0x15, 0}}, + {APEXTRB, yextr, Pq, opBytes{0x3a, 0x14, 00}}, + {APEXTRD, yextr, Pq, opBytes{0x3a, 0x16, 00}}, + {APEXTRQ, yextr, Pq3, opBytes{0x3a, 0x16, 00}}, + {APHADDD, ymmxmm0f38, Px, opBytes{0x0F, 0x38, 0x02, 0, 0x66, 0x0F, 0x38, 0x02, 0}}, + {APHADDSW, yxm_q4, Pq4, opBytes{0x03}}, + {APHADDW, yxm_q4, Pq4, opBytes{0x01}}, + {APHMINPOSUW, yxm_q4, Pq4, opBytes{0x41}}, + {APHSUBD, yxm_q4, Pq4, opBytes{0x06}}, + {APHSUBSW, yxm_q4, Pq4, opBytes{0x07}}, + {APHSUBW, yxm_q4, Pq4, opBytes{0x05}}, + {APINSRW, yinsrw, Pq, opBytes{0xc4, 00}}, + {APINSRB, yinsr, Pq, opBytes{0x3a, 0x20, 00}}, + {APINSRD, yinsr, Pq, opBytes{0x3a, 0x22, 00}}, + {APINSRQ, yinsr, Pq3, opBytes{0x3a, 0x22, 00}}, + {APMADDUBSW, yxm_q4, Pq4, opBytes{0x04}}, + {APMADDWL, ymm, Py1, opBytes{0xf5, Pe, 0xf5}}, + {APMAXSB, yxm_q4, Pq4, opBytes{0x3c}}, + {APMAXSD, yxm_q4, Pq4, 
opBytes{0x3d}}, + {APMAXSW, yxm, Pe, opBytes{0xee}}, + {APMAXUB, yxm, Pe, opBytes{0xde}}, + {APMAXUD, yxm_q4, Pq4, opBytes{0x3f}}, + {APMAXUW, yxm_q4, Pq4, opBytes{0x3e}}, + {APMINSB, yxm_q4, Pq4, opBytes{0x38}}, + {APMINSD, yxm_q4, Pq4, opBytes{0x39}}, + {APMINSW, yxm, Pe, opBytes{0xea}}, + {APMINUB, yxm, Pe, opBytes{0xda}}, + {APMINUD, yxm_q4, Pq4, opBytes{0x3b}}, + {APMINUW, yxm_q4, Pq4, opBytes{0x3a}}, + {APMOVMSKB, ymskb, Px, opBytes{Pe, 0xd7, 0xd7}}, + {APMOVSXBD, yxm_q4, Pq4, opBytes{0x21}}, + {APMOVSXBQ, yxm_q4, Pq4, opBytes{0x22}}, + {APMOVSXBW, yxm_q4, Pq4, opBytes{0x20}}, + {APMOVSXDQ, yxm_q4, Pq4, opBytes{0x25}}, + {APMOVSXWD, yxm_q4, Pq4, opBytes{0x23}}, + {APMOVSXWQ, yxm_q4, Pq4, opBytes{0x24}}, + {APMOVZXBD, yxm_q4, Pq4, opBytes{0x31}}, + {APMOVZXBQ, yxm_q4, Pq4, opBytes{0x32}}, + {APMOVZXBW, yxm_q4, Pq4, opBytes{0x30}}, + {APMOVZXDQ, yxm_q4, Pq4, opBytes{0x35}}, + {APMOVZXWD, yxm_q4, Pq4, opBytes{0x33}}, + {APMOVZXWQ, yxm_q4, Pq4, opBytes{0x34}}, + {APMULDQ, yxm_q4, Pq4, opBytes{0x28}}, + {APMULHRSW, yxm_q4, Pq4, opBytes{0x0b}}, + {APMULHUW, ymm, Py1, opBytes{0xe4, Pe, 0xe4}}, + {APMULHW, ymm, Py1, opBytes{0xe5, Pe, 0xe5}}, + {APMULLD, yxm_q4, Pq4, opBytes{0x40}}, + {APMULLW, ymm, Py1, opBytes{0xd5, Pe, 0xd5}}, + {APMULULQ, ymm, Py1, opBytes{0xf4, Pe, 0xf4}}, + {APOPAL, ynone, P32, opBytes{0x61}}, + {APOPAW, ynone, Pe, opBytes{0x61}}, + {APOPCNTW, yml_rl, Pef3, opBytes{0xb8}}, + {APOPCNTL, yml_rl, Pf3, opBytes{0xb8}}, + {APOPCNTQ, yml_rl, Pfw, opBytes{0xb8}}, + {APOPFL, ynone, P32, opBytes{0x9d}}, + {APOPFQ, ynone, Py, opBytes{0x9d}}, + {APOPFW, ynone, Pe, opBytes{0x9d}}, + {APOPL, ypopl, P32, opBytes{0x58, 0x8f, 00}}, + {APOPQ, ypopl, Py, opBytes{0x58, 0x8f, 00}}, + {APOPW, ypopl, Pe, opBytes{0x58, 0x8f, 00}}, + {APOR, ymm, Py1, opBytes{0xeb, Pe, 0xeb}}, + {APSADBW, yxm, Pq, opBytes{0xf6}}, + {APSHUFHW, yxshuf, Pf3, opBytes{0x70, 00}}, + {APSHUFL, yxshuf, Pq, opBytes{0x70, 00}}, + {APSHUFLW, yxshuf, Pf2, opBytes{0x70, 00}}, + {APSHUFW, ymshuf, Pm, 
opBytes{0x70, 00}}, + {APSHUFB, ymshufb, Pq, opBytes{0x38, 0x00}}, + {APSIGNB, yxm_q4, Pq4, opBytes{0x08}}, + {APSIGND, yxm_q4, Pq4, opBytes{0x0a}}, + {APSIGNW, yxm_q4, Pq4, opBytes{0x09}}, + {APSLLO, ypsdq, Pq, opBytes{0x73, 07}}, + {APSLLL, yps, Py3, opBytes{0xf2, 0x72, 06, Pe, 0xf2, Pe, 0x72, 06}}, + {APSLLQ, yps, Py3, opBytes{0xf3, 0x73, 06, Pe, 0xf3, Pe, 0x73, 06}}, + {APSLLW, yps, Py3, opBytes{0xf1, 0x71, 06, Pe, 0xf1, Pe, 0x71, 06}}, + {APSRAL, yps, Py3, opBytes{0xe2, 0x72, 04, Pe, 0xe2, Pe, 0x72, 04}}, + {APSRAW, yps, Py3, opBytes{0xe1, 0x71, 04, Pe, 0xe1, Pe, 0x71, 04}}, + {APSRLO, ypsdq, Pq, opBytes{0x73, 03}}, + {APSRLL, yps, Py3, opBytes{0xd2, 0x72, 02, Pe, 0xd2, Pe, 0x72, 02}}, + {APSRLQ, yps, Py3, opBytes{0xd3, 0x73, 02, Pe, 0xd3, Pe, 0x73, 02}}, + {APSRLW, yps, Py3, opBytes{0xd1, 0x71, 02, Pe, 0xd1, Pe, 0x71, 02}}, + {APSUBB, yxm, Pe, opBytes{0xf8}}, + {APSUBL, yxm, Pe, opBytes{0xfa}}, + {APSUBQ, yxm, Pe, opBytes{0xfb}}, + {APSUBSB, yxm, Pe, opBytes{0xe8}}, + {APSUBSW, yxm, Pe, opBytes{0xe9}}, + {APSUBUSB, yxm, Pe, opBytes{0xd8}}, + {APSUBUSW, yxm, Pe, opBytes{0xd9}}, + {APSUBW, yxm, Pe, opBytes{0xf9}}, + {APTEST, yxm_q4, Pq4, opBytes{0x17}}, + {APUNPCKHBW, ymm, Py1, opBytes{0x68, Pe, 0x68}}, + {APUNPCKHLQ, ymm, Py1, opBytes{0x6a, Pe, 0x6a}}, + {APUNPCKHQDQ, yxm, Pe, opBytes{0x6d}}, + {APUNPCKHWL, ymm, Py1, opBytes{0x69, Pe, 0x69}}, + {APUNPCKLBW, ymm, Py1, opBytes{0x60, Pe, 0x60}}, + {APUNPCKLLQ, ymm, Py1, opBytes{0x62, Pe, 0x62}}, + {APUNPCKLQDQ, yxm, Pe, opBytes{0x6c}}, + {APUNPCKLWL, ymm, Py1, opBytes{0x61, Pe, 0x61}}, + {APUSHAL, ynone, P32, opBytes{0x60}}, + {APUSHAW, ynone, Pe, opBytes{0x60}}, + {APUSHFL, ynone, P32, opBytes{0x9c}}, + {APUSHFQ, ynone, Py, opBytes{0x9c}}, + {APUSHFW, ynone, Pe, opBytes{0x9c}}, + {APUSHL, ypushl, P32, opBytes{0x50, 0xff, 06, 0x6a, 0x68}}, + {APUSHQ, ypushl, Py, opBytes{0x50, 0xff, 06, 0x6a, 0x68}}, + {APUSHW, ypushl, Pe, opBytes{0x50, 0xff, 06, 0x6a, 0x68}}, + {APXOR, ymm, Py1, opBytes{0xef, Pe, 0xef}}, + 
{AQUAD, ybyte, Px, opBytes{8}}, + {ARCLB, yshb, Pb, opBytes{0xd0, 02, 0xc0, 02, 0xd2, 02}}, + {ARCLL, yshl, Px, opBytes{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}}, + {ARCLQ, yshl, Pw, opBytes{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}}, + {ARCLW, yshl, Pe, opBytes{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}}, + {ARCPPS, yxm, Pm, opBytes{0x53}}, + {ARCPSS, yxm, Pf3, opBytes{0x53}}, + {ARCRB, yshb, Pb, opBytes{0xd0, 03, 0xc0, 03, 0xd2, 03}}, + {ARCRL, yshl, Px, opBytes{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}}, + {ARCRQ, yshl, Pw, opBytes{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}}, + {ARCRW, yshl, Pe, opBytes{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}}, + {AREP, ynone, Px, opBytes{0xf3}}, + {AREPN, ynone, Px, opBytes{0xf2}}, + {obj.ARET, ynone, Px, opBytes{0xc3}}, + {ARETFW, yret, Pe, opBytes{0xcb, 0xca}}, + {ARETFL, yret, Px, opBytes{0xcb, 0xca}}, + {ARETFQ, yret, Pw, opBytes{0xcb, 0xca}}, + {AROLB, yshb, Pb, opBytes{0xd0, 00, 0xc0, 00, 0xd2, 00}}, + {AROLL, yshl, Px, opBytes{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}}, + {AROLQ, yshl, Pw, opBytes{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}}, + {AROLW, yshl, Pe, opBytes{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}}, + {ARORB, yshb, Pb, opBytes{0xd0, 01, 0xc0, 01, 0xd2, 01}}, + {ARORL, yshl, Px, opBytes{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}}, + {ARORQ, yshl, Pw, opBytes{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}}, + {ARORW, yshl, Pe, opBytes{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}}, + {ARSQRTPS, yxm, Pm, opBytes{0x52}}, + {ARSQRTSS, yxm, Pf3, opBytes{0x52}}, + {ASAHF, ynone, Px, opBytes{0x9e, 00, 0x86, 0xe0, 0x50, 0x9d}}, // XCHGB AH,AL; PUSH AX; POPFL + {ASALB, yshb, Pb, opBytes{0xd0, 04, 0xc0, 04, 0xd2, 04}}, + {ASALL, yshl, Px, opBytes{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}}, + {ASALQ, yshl, Pw, opBytes{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}}, + {ASALW, yshl, Pe, opBytes{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}}, + {ASARB, yshb, Pb, opBytes{0xd0, 07, 0xc0, 07, 0xd2, 07}}, + {ASARL, yshl, Px, opBytes{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}}, 
+ {ASARQ, yshl, Pw, opBytes{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}}, + {ASARW, yshl, Pe, opBytes{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}}, + {ASBBB, yxorb, Pb, opBytes{0x1c, 0x80, 03, 0x18, 0x1a}}, + {ASBBL, yaddl, Px, opBytes{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}}, + {ASBBQ, yaddl, Pw, opBytes{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}}, + {ASBBW, yaddl, Pe, opBytes{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}}, + {ASCASB, ynone, Pb, opBytes{0xae}}, + {ASCASL, ynone, Px, opBytes{0xaf}}, + {ASCASQ, ynone, Pw, opBytes{0xaf}}, + {ASCASW, ynone, Pe, opBytes{0xaf}}, + {ASETCC, yscond, Pb, opBytes{0x0f, 0x93, 00}}, + {ASETCS, yscond, Pb, opBytes{0x0f, 0x92, 00}}, + {ASETEQ, yscond, Pb, opBytes{0x0f, 0x94, 00}}, + {ASETGE, yscond, Pb, opBytes{0x0f, 0x9d, 00}}, + {ASETGT, yscond, Pb, opBytes{0x0f, 0x9f, 00}}, + {ASETHI, yscond, Pb, opBytes{0x0f, 0x97, 00}}, + {ASETLE, yscond, Pb, opBytes{0x0f, 0x9e, 00}}, + {ASETLS, yscond, Pb, opBytes{0x0f, 0x96, 00}}, + {ASETLT, yscond, Pb, opBytes{0x0f, 0x9c, 00}}, + {ASETMI, yscond, Pb, opBytes{0x0f, 0x98, 00}}, + {ASETNE, yscond, Pb, opBytes{0x0f, 0x95, 00}}, + {ASETOC, yscond, Pb, opBytes{0x0f, 0x91, 00}}, + {ASETOS, yscond, Pb, opBytes{0x0f, 0x90, 00}}, + {ASETPC, yscond, Pb, opBytes{0x0f, 0x9b, 00}}, + {ASETPL, yscond, Pb, opBytes{0x0f, 0x99, 00}}, + {ASETPS, yscond, Pb, opBytes{0x0f, 0x9a, 00}}, + {ASHLB, yshb, Pb, opBytes{0xd0, 04, 0xc0, 04, 0xd2, 04}}, + {ASHLL, yshl, Px, opBytes{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}}, + {ASHLQ, yshl, Pw, opBytes{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}}, + {ASHLW, yshl, Pe, opBytes{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}}, + {ASHRB, yshb, Pb, opBytes{0xd0, 05, 0xc0, 05, 0xd2, 05}}, + {ASHRL, yshl, Px, opBytes{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}}, + {ASHRQ, yshl, Pw, opBytes{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}}, + {ASHRW, yshl, Pe, opBytes{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}}, + {ASHUFPD, yxshuf, Pq, opBytes{0xc6, 00}}, + {ASHUFPS, yxshuf, Pm, opBytes{0xc6, 00}}, + {ASQRTPD, yxm, Pe, 
opBytes{0x51}}, + {ASQRTPS, yxm, Pm, opBytes{0x51}}, + {ASQRTSD, yxm, Pf2, opBytes{0x51}}, + {ASQRTSS, yxm, Pf3, opBytes{0x51}}, + {ASTC, ynone, Px, opBytes{0xf9}}, + {ASTD, ynone, Px, opBytes{0xfd}}, + {ASTI, ynone, Px, opBytes{0xfb}}, + {ASTMXCSR, ysvrs_om, Pm, opBytes{0xae, 03, 0xae, 03}}, + {ASTOSB, ynone, Pb, opBytes{0xaa}}, + {ASTOSL, ynone, Px, opBytes{0xab}}, + {ASTOSQ, ynone, Pw, opBytes{0xab}}, + {ASTOSW, ynone, Pe, opBytes{0xab}}, + {ASUBB, yxorb, Pb, opBytes{0x2c, 0x80, 05, 0x28, 0x2a}}, + {ASUBL, yaddl, Px, opBytes{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}}, + {ASUBPD, yxm, Pe, opBytes{0x5c}}, + {ASUBPS, yxm, Pm, opBytes{0x5c}}, + {ASUBQ, yaddl, Pw, opBytes{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}}, + {ASUBSD, yxm, Pf2, opBytes{0x5c}}, + {ASUBSS, yxm, Pf3, opBytes{0x5c}}, + {ASUBW, yaddl, Pe, opBytes{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}}, + {ASWAPGS, ynone, Pm, opBytes{0x01, 0xf8}}, + {ASYSCALL, ynone, Px, opBytes{0x0f, 0x05}}, // fast syscall + {ATESTB, yxorb, Pb, opBytes{0xa8, 0xf6, 00, 0x84, 0x84}}, + {ATESTL, ytestl, Px, opBytes{0xa9, 0xf7, 00, 0x85, 0x85}}, + {ATESTQ, ytestl, Pw, opBytes{0xa9, 0xf7, 00, 0x85, 0x85}}, + {ATESTW, ytestl, Pe, opBytes{0xa9, 0xf7, 00, 0x85, 0x85}}, + {ATPAUSE, ywrfsbase, Pq, opBytes{0xae, 06}}, + {obj.ATEXT, ytext, Px, opBytes{}}, + {AUCOMISD, yxm, Pe, opBytes{0x2e}}, + {AUCOMISS, yxm, Pm, opBytes{0x2e}}, + {AUNPCKHPD, yxm, Pe, opBytes{0x15}}, + {AUNPCKHPS, yxm, Pm, opBytes{0x15}}, + {AUNPCKLPD, yxm, Pe, opBytes{0x14}}, + {AUNPCKLPS, yxm, Pm, opBytes{0x14}}, + {AUMONITOR, ywrfsbase, Pf3, opBytes{0xae, 06}}, + {AVERR, ydivl, Pm, opBytes{0x00, 04}}, + {AVERW, ydivl, Pm, opBytes{0x00, 05}}, + {AWAIT, ynone, Px, opBytes{0x9b}}, + {AWORD, ybyte, Px, opBytes{2}}, + {AXCHGB, yml_mb, Pb, opBytes{0x86, 0x86}}, + {AXCHGL, yxchg, Px, opBytes{0x90, 0x90, 0x87, 0x87}}, + {AXCHGQ, yxchg, Pw, opBytes{0x90, 0x90, 0x87, 0x87}}, + {AXCHGW, yxchg, Pe, opBytes{0x90, 0x90, 0x87, 0x87}}, + {AXLAT, ynone, Px, opBytes{0xd7}}, + {AXORB, yxorb, Pb, 
opBytes{0x34, 0x80, 06, 0x30, 0x32}}, + {AXORL, yaddl, Px, opBytes{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}}, + {AXORPD, yxm, Pe, opBytes{0x57}}, + {AXORPS, yxm, Pm, opBytes{0x57}}, + {AXORQ, yaddl, Pw, opBytes{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}}, + {AXORW, yaddl, Pe, opBytes{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}}, + {AFMOVB, yfmvx, Px, opBytes{0xdf, 04}}, + {AFMOVBP, yfmvp, Px, opBytes{0xdf, 06}}, + {AFMOVD, yfmvd, Px, opBytes{0xdd, 00, 0xdd, 02, 0xd9, 00, 0xdd, 02}}, + {AFMOVDP, yfmvdp, Px, opBytes{0xdd, 03, 0xdd, 03}}, + {AFMOVF, yfmvf, Px, opBytes{0xd9, 00, 0xd9, 02}}, + {AFMOVFP, yfmvp, Px, opBytes{0xd9, 03}}, + {AFMOVL, yfmvf, Px, opBytes{0xdb, 00, 0xdb, 02}}, + {AFMOVLP, yfmvp, Px, opBytes{0xdb, 03}}, + {AFMOVV, yfmvx, Px, opBytes{0xdf, 05}}, + {AFMOVVP, yfmvp, Px, opBytes{0xdf, 07}}, + {AFMOVW, yfmvf, Px, opBytes{0xdf, 00, 0xdf, 02}}, + {AFMOVWP, yfmvp, Px, opBytes{0xdf, 03}}, + {AFMOVX, yfmvx, Px, opBytes{0xdb, 05}}, + {AFMOVXP, yfmvp, Px, opBytes{0xdb, 07}}, + {AFCMOVCC, yfcmv, Px, opBytes{0xdb, 00}}, + {AFCMOVCS, yfcmv, Px, opBytes{0xda, 00}}, + {AFCMOVEQ, yfcmv, Px, opBytes{0xda, 01}}, + {AFCMOVHI, yfcmv, Px, opBytes{0xdb, 02}}, + {AFCMOVLS, yfcmv, Px, opBytes{0xda, 02}}, + {AFCMOVB, yfcmv, Px, opBytes{0xda, 00}}, + {AFCMOVBE, yfcmv, Px, opBytes{0xda, 02}}, + {AFCMOVNB, yfcmv, Px, opBytes{0xdb, 00}}, + {AFCMOVNBE, yfcmv, Px, opBytes{0xdb, 02}}, + {AFCMOVE, yfcmv, Px, opBytes{0xda, 01}}, + {AFCMOVNE, yfcmv, Px, opBytes{0xdb, 01}}, + {AFCMOVNU, yfcmv, Px, opBytes{0xdb, 03}}, + {AFCMOVU, yfcmv, Px, opBytes{0xda, 03}}, + {AFCMOVUN, yfcmv, Px, opBytes{0xda, 03}}, + {AFCOMD, yfadd, Px, opBytes{0xdc, 02, 0xd8, 02, 0xdc, 02}}, // botch + {AFCOMDP, yfadd, Px, opBytes{0xdc, 03, 0xd8, 03, 0xdc, 03}}, // botch + {AFCOMDPP, ycompp, Px, opBytes{0xde, 03}}, + {AFCOMF, yfmvx, Px, opBytes{0xd8, 02}}, + {AFCOMFP, yfmvx, Px, opBytes{0xd8, 03}}, + {AFCOMI, yfcmv, Px, opBytes{0xdb, 06}}, + {AFCOMIP, yfcmv, Px, opBytes{0xdf, 06}}, + {AFCOML, yfmvx, Px, opBytes{0xda, 02}}, + 
{AFCOMLP, yfmvx, Px, opBytes{0xda, 03}}, + {AFCOMW, yfmvx, Px, opBytes{0xde, 02}}, + {AFCOMWP, yfmvx, Px, opBytes{0xde, 03}}, + {AFUCOM, ycompp, Px, opBytes{0xdd, 04}}, + {AFUCOMI, ycompp, Px, opBytes{0xdb, 05}}, + {AFUCOMIP, ycompp, Px, opBytes{0xdf, 05}}, + {AFUCOMP, ycompp, Px, opBytes{0xdd, 05}}, + {AFUCOMPP, ycompp, Px, opBytes{0xda, 13}}, + {AFADDDP, ycompp, Px, opBytes{0xde, 00}}, + {AFADDW, yfmvx, Px, opBytes{0xde, 00}}, + {AFADDL, yfmvx, Px, opBytes{0xda, 00}}, + {AFADDF, yfmvx, Px, opBytes{0xd8, 00}}, + {AFADDD, yfadd, Px, opBytes{0xdc, 00, 0xd8, 00, 0xdc, 00}}, + {AFMULDP, ycompp, Px, opBytes{0xde, 01}}, + {AFMULW, yfmvx, Px, opBytes{0xde, 01}}, + {AFMULL, yfmvx, Px, opBytes{0xda, 01}}, + {AFMULF, yfmvx, Px, opBytes{0xd8, 01}}, + {AFMULD, yfadd, Px, opBytes{0xdc, 01, 0xd8, 01, 0xdc, 01}}, + {AFSUBDP, ycompp, Px, opBytes{0xde, 05}}, + {AFSUBW, yfmvx, Px, opBytes{0xde, 04}}, + {AFSUBL, yfmvx, Px, opBytes{0xda, 04}}, + {AFSUBF, yfmvx, Px, opBytes{0xd8, 04}}, + {AFSUBD, yfadd, Px, opBytes{0xdc, 04, 0xd8, 04, 0xdc, 05}}, + {AFSUBRDP, ycompp, Px, opBytes{0xde, 04}}, + {AFSUBRW, yfmvx, Px, opBytes{0xde, 05}}, + {AFSUBRL, yfmvx, Px, opBytes{0xda, 05}}, + {AFSUBRF, yfmvx, Px, opBytes{0xd8, 05}}, + {AFSUBRD, yfadd, Px, opBytes{0xdc, 05, 0xd8, 05, 0xdc, 04}}, + {AFDIVDP, ycompp, Px, opBytes{0xde, 07}}, + {AFDIVW, yfmvx, Px, opBytes{0xde, 06}}, + {AFDIVL, yfmvx, Px, opBytes{0xda, 06}}, + {AFDIVF, yfmvx, Px, opBytes{0xd8, 06}}, + {AFDIVD, yfadd, Px, opBytes{0xdc, 06, 0xd8, 06, 0xdc, 07}}, + {AFDIVRDP, ycompp, Px, opBytes{0xde, 06}}, + {AFDIVRW, yfmvx, Px, opBytes{0xde, 07}}, + {AFDIVRL, yfmvx, Px, opBytes{0xda, 07}}, + {AFDIVRF, yfmvx, Px, opBytes{0xd8, 07}}, + {AFDIVRD, yfadd, Px, opBytes{0xdc, 07, 0xd8, 07, 0xdc, 06}}, + {AFXCHD, yfxch, Px, opBytes{0xd9, 01, 0xd9, 01}}, + {AFFREE, nil, 0, opBytes{}}, + {AFLDCW, ysvrs_mo, Px, opBytes{0xd9, 05, 0xd9, 05}}, + {AFLDENV, ysvrs_mo, Px, opBytes{0xd9, 04, 0xd9, 04}}, + {AFRSTOR, ysvrs_mo, Px, opBytes{0xdd, 04, 0xdd, 04}}, 
+ {AFSAVE, ysvrs_om, Px, opBytes{0xdd, 06, 0xdd, 06}}, + {AFSTCW, ysvrs_om, Px, opBytes{0xd9, 07, 0xd9, 07}}, + {AFSTENV, ysvrs_om, Px, opBytes{0xd9, 06, 0xd9, 06}}, + {AFSTSW, ystsw, Px, opBytes{0xdd, 07, 0xdf, 0xe0}}, + {AF2XM1, ynone, Px, opBytes{0xd9, 0xf0}}, + {AFABS, ynone, Px, opBytes{0xd9, 0xe1}}, + {AFBLD, ysvrs_mo, Px, opBytes{0xdf, 04}}, + {AFBSTP, yclflush, Px, opBytes{0xdf, 06}}, + {AFCHS, ynone, Px, opBytes{0xd9, 0xe0}}, + {AFCLEX, ynone, Px, opBytes{0xdb, 0xe2}}, + {AFCOS, ynone, Px, opBytes{0xd9, 0xff}}, + {AFDECSTP, ynone, Px, opBytes{0xd9, 0xf6}}, + {AFINCSTP, ynone, Px, opBytes{0xd9, 0xf7}}, + {AFINIT, ynone, Px, opBytes{0xdb, 0xe3}}, + {AFLD1, ynone, Px, opBytes{0xd9, 0xe8}}, + {AFLDL2E, ynone, Px, opBytes{0xd9, 0xea}}, + {AFLDL2T, ynone, Px, opBytes{0xd9, 0xe9}}, + {AFLDLG2, ynone, Px, opBytes{0xd9, 0xec}}, + {AFLDLN2, ynone, Px, opBytes{0xd9, 0xed}}, + {AFLDPI, ynone, Px, opBytes{0xd9, 0xeb}}, + {AFLDZ, ynone, Px, opBytes{0xd9, 0xee}}, + {AFNOP, ynone, Px, opBytes{0xd9, 0xd0}}, + {AFPATAN, ynone, Px, opBytes{0xd9, 0xf3}}, + {AFPREM, ynone, Px, opBytes{0xd9, 0xf8}}, + {AFPREM1, ynone, Px, opBytes{0xd9, 0xf5}}, + {AFPTAN, ynone, Px, opBytes{0xd9, 0xf2}}, + {AFRNDINT, ynone, Px, opBytes{0xd9, 0xfc}}, + {AFSCALE, ynone, Px, opBytes{0xd9, 0xfd}}, + {AFSIN, ynone, Px, opBytes{0xd9, 0xfe}}, + {AFSINCOS, ynone, Px, opBytes{0xd9, 0xfb}}, + {AFSQRT, ynone, Px, opBytes{0xd9, 0xfa}}, + {AFTST, ynone, Px, opBytes{0xd9, 0xe4}}, + {AFXAM, ynone, Px, opBytes{0xd9, 0xe5}}, + {AFXTRACT, ynone, Px, opBytes{0xd9, 0xf4}}, + {AFYL2X, ynone, Px, opBytes{0xd9, 0xf1}}, + {AFYL2XP1, ynone, Px, opBytes{0xd9, 0xf9}}, + {ACMPXCHGB, yrb_mb, Pb, opBytes{0x0f, 0xb0}}, + {ACMPXCHGL, yrl_ml, Px, opBytes{0x0f, 0xb1}}, + {ACMPXCHGW, yrl_ml, Pe, opBytes{0x0f, 0xb1}}, + {ACMPXCHGQ, yrl_ml, Pw, opBytes{0x0f, 0xb1}}, + {ACMPXCHG8B, yscond, Pm, opBytes{0xc7, 01}}, + {ACMPXCHG16B, yscond, Pw, opBytes{0x0f, 0xc7, 01}}, + {AINVD, ynone, Pm, opBytes{0x08}}, + {AINVLPG, ydivb, Pm, 
opBytes{0x01, 07}}, + {AINVPCID, ycrc32l, Pe, opBytes{0x0f, 0x38, 0x82, 0}}, + {ALFENCE, ynone, Pm, opBytes{0xae, 0xe8}}, + {AMFENCE, ynone, Pm, opBytes{0xae, 0xf0}}, + {AMOVNTIL, yrl_ml, Pm, opBytes{0xc3}}, + {AMOVNTIQ, yrl_ml, Pw, opBytes{0x0f, 0xc3}}, + {ARDPKRU, ynone, Pm, opBytes{0x01, 0xee, 0}}, + {ARDMSR, ynone, Pm, opBytes{0x32}}, + {ARDPMC, ynone, Pm, opBytes{0x33}}, + {ARDTSC, ynone, Pm, opBytes{0x31}}, + {ARSM, ynone, Pm, opBytes{0xaa}}, + {ASFENCE, ynone, Pm, opBytes{0xae, 0xf8}}, + {ASYSRET, ynone, Pm, opBytes{0x07}}, + {AWBINVD, ynone, Pm, opBytes{0x09}}, + {AWRMSR, ynone, Pm, opBytes{0x30}}, + {AWRPKRU, ynone, Pm, opBytes{0x01, 0xef, 0}}, + {AXADDB, yrb_mb, Pb, opBytes{0x0f, 0xc0}}, + {AXADDL, yrl_ml, Px, opBytes{0x0f, 0xc1}}, + {AXADDQ, yrl_ml, Pw, opBytes{0x0f, 0xc1}}, + {AXADDW, yrl_ml, Pe, opBytes{0x0f, 0xc1}}, + {ACRC32B, ycrc32b, Px, opBytes{0xf2, 0x0f, 0x38, 0xf0, 0}}, + {ACRC32L, ycrc32l, Px, opBytes{0xf2, 0x0f, 0x38, 0xf1, 0}}, + {ACRC32Q, ycrc32l, Pw, opBytes{0xf2, 0x0f, 0x38, 0xf1, 0}}, + {ACRC32W, ycrc32l, Pe, opBytes{0xf2, 0x0f, 0x38, 0xf1, 0}}, + {APREFETCHT0, yprefetch, Pm, opBytes{0x18, 01}}, + {APREFETCHT1, yprefetch, Pm, opBytes{0x18, 02}}, + {APREFETCHT2, yprefetch, Pm, opBytes{0x18, 03}}, + {APREFETCHNTA, yprefetch, Pm, opBytes{0x18, 00}}, + {AMOVQL, yrl_ml, Px, opBytes{0x89}}, + {obj.AUNDEF, ynone, Px, opBytes{0x0f, 0x0b}}, + {AAESENC, yaes, Pq, opBytes{0x38, 0xdc, 0}}, + {AAESENCLAST, yaes, Pq, opBytes{0x38, 0xdd, 0}}, + {AAESDEC, yaes, Pq, opBytes{0x38, 0xde, 0}}, + {AAESDECLAST, yaes, Pq, opBytes{0x38, 0xdf, 0}}, + {AAESIMC, yaes, Pq, opBytes{0x38, 0xdb, 0}}, + {AAESKEYGENASSIST, yxshuf, Pq, opBytes{0x3a, 0xdf, 0}}, + {AROUNDPD, yxshuf, Pq, opBytes{0x3a, 0x09, 0}}, + {AROUNDPS, yxshuf, Pq, opBytes{0x3a, 0x08, 0}}, + {AROUNDSD, yxshuf, Pq, opBytes{0x3a, 0x0b, 0}}, + {AROUNDSS, yxshuf, Pq, opBytes{0x3a, 0x0a, 0}}, + {APSHUFD, yxshuf, Pq, opBytes{0x70, 0}}, + {APCLMULQDQ, yxshuf, Pq, opBytes{0x3a, 0x44, 0}}, + {APCMPESTRI, 
yxshuf, Pq, opBytes{0x3a, 0x61, 0}}, + {APCMPESTRM, yxshuf, Pq, opBytes{0x3a, 0x60, 0}}, + {AMOVDDUP, yxm, Pf2, opBytes{0x12}}, + {AMOVSHDUP, yxm, Pf3, opBytes{0x16}}, + {AMOVSLDUP, yxm, Pf3, opBytes{0x12}}, + {ARDTSCP, ynone, Pm, opBytes{0x01, 0xf9, 0}}, + {ASTAC, ynone, Pm, opBytes{0x01, 0xcb, 0}}, + {AUD1, ynone, Pm, opBytes{0xb9, 0}}, + {AUD2, ynone, Pm, opBytes{0x0b, 0}}, + {AUMWAIT, ywrfsbase, Pf2, opBytes{0xae, 06}}, + {ASYSENTER, ynone, Px, opBytes{0x0f, 0x34, 0}}, + {ASYSENTER64, ynone, Pw, opBytes{0x0f, 0x34, 0}}, + {ASYSEXIT, ynone, Px, opBytes{0x0f, 0x35, 0}}, + {ASYSEXIT64, ynone, Pw, opBytes{0x0f, 0x35, 0}}, + {ALMSW, ydivl, Pm, opBytes{0x01, 06}}, + {ALLDT, ydivl, Pm, opBytes{0x00, 02}}, + {ALIDT, ysvrs_mo, Pm, opBytes{0x01, 03}}, + {ALGDT, ysvrs_mo, Pm, opBytes{0x01, 02}}, + {ATZCNTW, ycrc32l, Pe, opBytes{0xf3, 0x0f, 0xbc, 0}}, + {ATZCNTL, ycrc32l, Px, opBytes{0xf3, 0x0f, 0xbc, 0}}, + {ATZCNTQ, ycrc32l, Pw, opBytes{0xf3, 0x0f, 0xbc, 0}}, + {AXRSTOR, ydivl, Px, opBytes{0x0f, 0xae, 05}}, + {AXRSTOR64, ydivl, Pw, opBytes{0x0f, 0xae, 05}}, + {AXRSTORS, ydivl, Px, opBytes{0x0f, 0xc7, 03}}, + {AXRSTORS64, ydivl, Pw, opBytes{0x0f, 0xc7, 03}}, + {AXSAVE, yclflush, Px, opBytes{0x0f, 0xae, 04}}, + {AXSAVE64, yclflush, Pw, opBytes{0x0f, 0xae, 04}}, + {AXSAVEOPT, yclflush, Px, opBytes{0x0f, 0xae, 06}}, + {AXSAVEOPT64, yclflush, Pw, opBytes{0x0f, 0xae, 06}}, + {AXSAVEC, yclflush, Px, opBytes{0x0f, 0xc7, 04}}, + {AXSAVEC64, yclflush, Pw, opBytes{0x0f, 0xc7, 04}}, + {AXSAVES, yclflush, Px, opBytes{0x0f, 0xc7, 05}}, + {AXSAVES64, yclflush, Pw, opBytes{0x0f, 0xc7, 05}}, + {ASGDT, yclflush, Pm, opBytes{0x01, 00}}, + {ASIDT, yclflush, Pm, opBytes{0x01, 01}}, + {ARDRANDW, yrdrand, Pe, opBytes{0x0f, 0xc7, 06}}, + {ARDRANDL, yrdrand, Px, opBytes{0x0f, 0xc7, 06}}, + {ARDRANDQ, yrdrand, Pw, opBytes{0x0f, 0xc7, 06}}, + {ARDSEEDW, yrdrand, Pe, opBytes{0x0f, 0xc7, 07}}, + {ARDSEEDL, yrdrand, Px, opBytes{0x0f, 0xc7, 07}}, + {ARDSEEDQ, yrdrand, Pw, opBytes{0x0f, 0xc7, 07}}, + 
{ASTRW, yincq, Pe, opBytes{0x0f, 0x00, 01}}, + {ASTRL, yincq, Px, opBytes{0x0f, 0x00, 01}}, + {ASTRQ, yincq, Pw, opBytes{0x0f, 0x00, 01}}, + {AXSETBV, ynone, Pm, opBytes{0x01, 0xd1, 0}}, + {AMOVBEW, ymovbe, Pq, opBytes{0x38, 0xf0, 0, 0x38, 0xf1, 0}}, + {AMOVBEL, ymovbe, Pm, opBytes{0x38, 0xf0, 0, 0x38, 0xf1, 0}}, + {AMOVBEQ, ymovbe, Pw, opBytes{0x0f, 0x38, 0xf0, 0, 0x0f, 0x38, 0xf1, 0}}, + {ANOPW, ydivl, Pe, opBytes{0x0f, 0x1f, 00}}, + {ANOPL, ydivl, Px, opBytes{0x0f, 0x1f, 00}}, + {ASLDTW, yincq, Pe, opBytes{0x0f, 0x00, 00}}, + {ASLDTL, yincq, Px, opBytes{0x0f, 0x00, 00}}, + {ASLDTQ, yincq, Pw, opBytes{0x0f, 0x00, 00}}, + {ASMSWW, yincq, Pe, opBytes{0x0f, 0x01, 04}}, + {ASMSWL, yincq, Px, opBytes{0x0f, 0x01, 04}}, + {ASMSWQ, yincq, Pw, opBytes{0x0f, 0x01, 04}}, + {ABLENDVPS, yblendvpd, Pq4, opBytes{0x14}}, + {ABLENDVPD, yblendvpd, Pq4, opBytes{0x15}}, + {APBLENDVB, yblendvpd, Pq4, opBytes{0x10}}, + {ASHA1MSG1, yaes, Px, opBytes{0x0f, 0x38, 0xc9, 0}}, + {ASHA1MSG2, yaes, Px, opBytes{0x0f, 0x38, 0xca, 0}}, + {ASHA1NEXTE, yaes, Px, opBytes{0x0f, 0x38, 0xc8, 0}}, + {ASHA256MSG1, yaes, Px, opBytes{0x0f, 0x38, 0xcc, 0}}, + {ASHA256MSG2, yaes, Px, opBytes{0x0f, 0x38, 0xcd, 0}}, + {ASHA1RNDS4, ysha1rnds4, Pm, opBytes{0x3a, 0xcc, 0}}, + {ASHA256RNDS2, ysha256rnds2, Px, opBytes{0x0f, 0x38, 0xcb, 0}}, + {ARDFSBASEL, yrdrand, Pf3, opBytes{0xae, 00}}, + {ARDFSBASEQ, yrdrand, Pfw, opBytes{0xae, 00}}, + {ARDGSBASEL, yrdrand, Pf3, opBytes{0xae, 01}}, + {ARDGSBASEQ, yrdrand, Pfw, opBytes{0xae, 01}}, + {AWRFSBASEL, ywrfsbase, Pf3, opBytes{0xae, 02}}, + {AWRFSBASEQ, ywrfsbase, Pfw, opBytes{0xae, 02}}, + {AWRGSBASEL, ywrfsbase, Pf3, opBytes{0xae, 03}}, + {AWRGSBASEQ, ywrfsbase, Pfw, opBytes{0xae, 03}}, + {ALFSW, ym_rl, Pe, opBytes{0x0f, 0xb4}}, + {ALFSL, ym_rl, Px, opBytes{0x0f, 0xb4}}, + {ALFSQ, ym_rl, Pw, opBytes{0x0f, 0xb4}}, + {ALGSW, ym_rl, Pe, opBytes{0x0f, 0xb5}}, + {ALGSL, ym_rl, Px, opBytes{0x0f, 0xb5}}, + {ALGSQ, ym_rl, Pw, opBytes{0x0f, 0xb5}}, + {ALSSW, ym_rl, Pe, 
opBytes{0x0f, 0xb2}},
	{ALSSL, ym_rl, Px, opBytes{0x0f, 0xb2}},
	{ALSSQ, ym_rl, Pw, opBytes{0x0f, 0xb2}},
	{ARDPID, yrdrand, Pf3, opBytes{0xc7, 07}},

	{ABLENDPD, yxshuf, Pq, opBytes{0x3a, 0x0d, 0}},
	{ABLENDPS, yxshuf, Pq, opBytes{0x3a, 0x0c, 0}},
	{AXACQUIRE, ynone, Px, opBytes{0xf2}},
	{AXRELEASE, ynone, Px, opBytes{0xf3}},
	{AXBEGIN, yxbegin, Px, opBytes{0xc7, 0xf8}},
	{AXABORT, yxabort, Px, opBytes{0xc6, 0xf8}},
	{AXEND, ynone, Px, opBytes{0x0f, 01, 0xd5}},
	{AXTEST, ynone, Px, opBytes{0x0f, 01, 0xd6}},
	{AXGETBV, ynone, Pm, opBytes{01, 0xd0}},
	{obj.AFUNCDATA, yfuncdata, Px, opBytes{0, 0}},
	{obj.APCDATA, ypcdata, Px, opBytes{0, 0}},
	{obj.ADUFFCOPY, yduff, Px, opBytes{0xe8}},
	{obj.ADUFFZERO, yduff, Px, opBytes{0xe8}},

	// Sentinel entries: the zero As terminates the table scan in instinit.
	{obj.AEND, nil, 0, opBytes{}},
	{0, nil, 0, opBytes{}},
}

// opindex maps an As opcode (masked by obj.AMask) to its Optab entry.
var opindex [(ALAST + 1) & obj.AMask]*Optab

// useAbs reports whether s describes a symbol that must avoid pc-relative addressing.
// This happens on systems like Solaris that call .so functions instead of system calls.
// It does not seem to be necessary for any other systems. This is probably working
// around a Solaris-specific bug that should be fixed differently, but we don't know
// what that bug is. And this does fix it.
func useAbs(ctxt *obj.Link, s *obj.LSym) bool {
	if ctxt.Headtype == objabi.Hsolaris {
		// All the Solaris dynamic imports from libc.so begin with "libc_".
		return strings.HasPrefix(s.Name, "libc_")
	}
	return ctxt.Arch.Family == sys.I386 && !ctxt.Flag_shared
}

// single-instruction no-ops of various lengths.
// constructed by hand and disassembled with gdb to verify.
// see http://www.agner.org/optimize/optimizing_assembly.pdf for discussion.
+var nop = [][16]uint8{ + {0x90}, + {0x66, 0x90}, + {0x0F, 0x1F, 0x00}, + {0x0F, 0x1F, 0x40, 0x00}, + {0x0F, 0x1F, 0x44, 0x00, 0x00}, + {0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00}, + {0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00}, + {0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, +} + +// Native Client rejects the repeated 0x66 prefix. +// {0x66, 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, +func fillnop(p []byte, n int) { + var m int + + for n > 0 { + m = n + if m > len(nop) { + m = len(nop) + } + copy(p[:m], nop[m-1][:m]) + p = p[m:] + n -= m + } +} + +func noppad(ctxt *obj.Link, s *obj.LSym, c int32, pad int32) int32 { + s.Grow(int64(c) + int64(pad)) + fillnop(s.P[c:], int(pad)) + return c + pad +} + +func spadjop(ctxt *obj.Link, l, q obj.As) obj.As { + if ctxt.Arch.Family != sys.AMD64 || ctxt.Arch.PtrSize == 4 { + return l + } + return q +} + +// isJump returns whether p is a jump instruction. +// It is used to ensure that no standalone or macro-fused jump will straddle +// or end on a 32 byte boundary by inserting NOPs before the jumps. +func isJump(p *obj.Prog) bool { + return p.To.Target() != nil || p.As == obj.AJMP || p.As == obj.ACALL || + p.As == obj.ARET || p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO +} + +// lookForJCC returns the first real instruction starting from p, if that instruction is a conditional +// jump. Otherwise, nil is returned. 
func lookForJCC(p *obj.Prog) *obj.Prog {
	// Skip any PCDATA, FUNCDATA or NOP instructions
	var q *obj.Prog
	for q = p.Link; q != nil && (q.As == obj.APCDATA || q.As == obj.AFUNCDATA || q.As == obj.ANOP); q = q.Link {
	}

	// No candidate, or the preceding instruction p is itself an unconditional
	// transfer, so there is nothing to fuse with.
	if q == nil || q.To.Target() == nil || p.As == obj.AJMP || p.As == obj.ACALL {
		return nil
	}

	// Only the conditional-jump opcodes qualify.
	switch q.As {
	case AJOS, AJOC, AJCS, AJCC, AJEQ, AJNE, AJLS, AJHI,
		AJMI, AJPL, AJPS, AJPC, AJLT, AJGE, AJLE, AJGT:
	default:
		return nil
	}

	return q
}

// fusedJump determines whether p can be fused with a subsequent conditional jump instruction.
// If it can, we return true followed by the total size of the fused jump. If it can't, we return false.
// Macro fusion rules are derived from the Intel Optimization Manual (April 2019) section 3.4.2.2.
func fusedJump(p *obj.Prog) (bool, uint8) {
	var fusedSize uint8

	// The first instruction in a macro fused pair may be preceded by the LOCK prefix,
	// or possibly an XACQUIRE/XRELEASE prefix followed by a LOCK prefix. If it is, we
	// need to be careful to insert any padding before the locks rather than directly after them.

	if p.As == AXRELEASE || p.As == AXACQUIRE {
		fusedSize += p.Isize
		for p = p.Link; p != nil && (p.As == obj.APCDATA || p.As == obj.AFUNCDATA); p = p.Link {
		}
		if p == nil {
			return false, 0
		}
	}
	if p.As == ALOCK {
		fusedSize += p.Isize
		for p = p.Link; p != nil && (p.As == obj.APCDATA || p.As == obj.AFUNCDATA); p = p.Link {
		}
		if p == nil {
			return false, 0
		}
	}

	// Classify p into the three fusible instruction families; each family
	// fuses with a different subset of conditional jumps (checked below).
	cmp := p.As == ACMPB || p.As == ACMPL || p.As == ACMPQ || p.As == ACMPW

	cmpAddSub := p.As == AADDB || p.As == AADDL || p.As == AADDW || p.As == AADDQ ||
		p.As == ASUBB || p.As == ASUBL || p.As == ASUBW || p.As == ASUBQ || cmp

	testAnd := p.As == ATESTB || p.As == ATESTL || p.As == ATESTQ || p.As == ATESTW ||
		p.As == AANDB || p.As == AANDL || p.As == AANDQ || p.As == AANDW

	incDec := p.As == AINCB || p.As == AINCL || p.As == AINCQ || p.As == AINCW ||
		p.As == ADECB || p.As == ADECL || p.As == ADECQ || p.As == ADECW

	if !cmpAddSub && !testAnd && !incDec {
		return false, 0
	}

	// Operand-form restrictions: a register first operand may pair with a
	// register, immediate, or memory second operand; a memory first operand
	// may only pair with a register. (CMP reads its operands in the opposite
	// order from ADD/SUB/TEST/AND, hence the swap.)
	if !incDec {
		var argOne obj.AddrType
		var argTwo obj.AddrType
		if cmp {
			argOne = p.From.Type
			argTwo = p.To.Type
		} else {
			argOne = p.To.Type
			argTwo = p.From.Type
		}
		if argOne == obj.TYPE_REG {
			if argTwo != obj.TYPE_REG && argTwo != obj.TYPE_CONST && argTwo != obj.TYPE_MEM {
				return false, 0
			}
		} else if argOne == obj.TYPE_MEM {
			if argTwo != obj.TYPE_REG {
				return false, 0
			}
		} else {
			return false, 0
		}
	}

	fusedSize += p.Isize
	jmp := lookForJCC(p)
	if jmp == nil {
		return false, 0
	}

	fusedSize += jmp.Isize

	// TEST/AND fuse with every conditional jump.
	if testAnd {
		return true, fusedSize
	}

	// Jumps on overflow, sign, and parity never fuse with CMP/ADD/SUB/INC/DEC.
	if jmp.As == AJOC || jmp.As == AJOS || jmp.As == AJMI ||
		jmp.As == AJPL || jmp.As == AJPS || jmp.As == AJPC {
		return false, 0
	}

	// CMP/ADD/SUB fuse with the remaining jumps.
	if cmpAddSub {
		return true, fusedSize
	}

	// INC/DEC do not fuse with carry/above/below jumps (they leave CF unchanged).
	if jmp.As == AJCS || jmp.As == AJCC || jmp.As == AJHI || jmp.As == AJLS {
		return false, 0
	}

	return true, fusedSize
}

// padJumpsCtx is the jump-padding alignment boundary in bytes;
// zero means jump padding is disabled.
type padJumpsCtx int32

// makePjcCtx returns the jump-padding context for ctxt: padding is disabled
// (0) on 386 and for hand-written assembly, and uses a 32-byte boundary
// otherwise.
func makePjcCtx(ctxt *obj.Link) padJumpsCtx {
	// Disable jump padding on 32 bit builds by setting
	// padJumps to 0.
	if ctxt.Arch.Family == sys.I386 {
		return padJumpsCtx(0)
	}

	// Disable jump padding for hand written assembly code.
	if ctxt.IsAsm {
		return padJumpsCtx(0)
	}

	return padJumpsCtx(32)
}

// padJump detects whether the instruction being assembled is a standalone or a macro-fused
// jump that needs to be padded. If it is, NOPs are inserted to ensure that the jump does
// not cross or end on a 32 byte boundary.
func (pjc padJumpsCtx) padJump(ctxt *obj.Link, s *obj.LSym, p *obj.Prog, c int32) int32 {
	if pjc == 0 {
		return c
	}

	var toPad int32
	fj, fjSize := fusedJump(p)
	mask := int32(pjc - 1)
	if fj {
		// Pad if the fused pair would cross or end on the pjc-byte boundary.
		if (c&mask)+int32(fjSize) >= int32(pjc) {
			toPad = int32(pjc) - (c & mask)
		}
	} else if isJump(p) {
		if (c&mask)+int32(p.Isize) >= int32(pjc) {
			toPad = int32(pjc) - (c & mask)
		}
	}
	if toPad <= 0 {
		return c
	}

	return noppad(ctxt, s, c, toPad)
}

// reAssemble is called if an instruction's size changes during assembly. If
// it does and the instruction is a standalone or a macro-fused jump we need to
// reassemble.
func (pjc padJumpsCtx) reAssemble(p *obj.Prog) bool {
	if pjc == 0 {
		return false
	}

	fj, _ := fusedJump(p)
	return fj || isJump(p)
}

// nopPad records a run of padding NOPs inserted during assembly so that they
// can later be spliced into the Prog list as explicit ANOP instructions.
type nopPad struct {
	p *obj.Prog // Instruction before the pad
	n int32     // Size of the pad
}

// Padding bytes to add to align code as requested.
// Alignment is restricted to powers of 2 between 8 and 2048 inclusive.
//
// pc: current offset in function, in bytes
// a: requested alignment, in bytes
// cursym: current function being assembled
// returns number of bytes of padding needed
func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
	// Validate: a must be a power of two in [8, 2048].
	if !((a&(a-1) == 0) && 8 <= a && a <= 2048) {
		ctxt.Diag("alignment value of an instruction must be a power of two and in the range [8, 2048], got %d\n", a)
		return 0
	}

	// By default function alignment is 32 bytes for amd64
	if cursym.Func().Align < int32(a) {
		cursym.Func().Align = int32(a)
	}

	if pc&(a-1) != 0 {
		return int(a - (pc & (a - 1)))
	}

	return 0
}

// span6 assembles the function s: it assigns a Pc to every Prog, encodes each
// instruction into s.P, resolves short vs. long branch forms, and inserts
// alignment padding and (on amd64) jump padding. It iterates until all
// instruction sizes stabilize, since widening a branch can move later code.
func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
	if ctxt.Retpoline && ctxt.Arch.Family == sys.I386 {
		ctxt.Diag("-spectre=ret not supported on 386")
		ctxt.Retpoline = false // don't keep printing
	}

	pjc := makePjcCtx(ctxt)

	// Already assembled; nothing to do.
	if s.P != nil {
		return
	}

	if ycover[0] == 0 {
		ctxt.Diag("x86 tables not initialized, call x86.instinit first")
	}

	// First pass: normalize branches with no target to self-branches, lower
	// AADJSP into ADD/SUB of SP, and rewrite indirect CALL/JMP for retpoline.
	for p := s.Func().Text; p != nil; p = p.Link {
		if p.To.Type == obj.TYPE_BRANCH && p.To.Target() == nil {
			p.To.SetTarget(p)
		}
		if p.As == AADJSP {
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REG_SP
			// Generate 'ADDQ $x, SP' or 'SUBQ $x, SP', with x positive.
			// One exception: It is smaller to encode $-0x80 than $0x80.
			// For that case, flip the sign and the op:
			// Instead of 'ADDQ $0x80, SP', generate 'SUBQ $-0x80, SP'.
			switch v := p.From.Offset; {
			case v == 0:
				p.As = obj.ANOP
			case v == 0x80 || (v < 0 && v != -0x80):
				p.As = spadjop(ctxt, AADDL, AADDQ)
				p.From.Offset *= -1
			default:
				p.As = spadjop(ctxt, ASUBL, ASUBQ)
			}
		}
		if ctxt.Retpoline && (p.As == obj.ACALL || p.As == obj.AJMP) && (p.To.Type == obj.TYPE_REG || p.To.Type == obj.TYPE_MEM) {
			if p.To.Type != obj.TYPE_REG {
				ctxt.Diag("non-retpoline-compatible: %v", p)
				continue
			}
			p.To.Type = obj.TYPE_BRANCH
			p.To.Name = obj.NAME_EXTERN
			p.To.Sym = ctxt.Lookup("runtime.retpoline" + obj.Rconv(int(p.To.Reg)))
			p.To.Reg = 0
			p.To.Offset = 0
		}
	}

	// Second pass: mark every instruction as a short-branch candidate and
	// flag backward branches / loop heads for alignment.
	var count int64 // rough count of number of instructions
	for p := s.Func().Text; p != nil; p = p.Link {
		count++
		p.Back = branchShort // use short branches first time through
		if q := p.To.Target(); q != nil && (q.Back&branchShort != 0) {
			p.Back |= branchBackwards
			q.Back |= branchLoopHead
		}
	}
	s.GrowCap(count * 5) // preallocate roughly 5 bytes per instruction

	var ab AsmBuf
	var n int
	var c int32
	errors := ctxt.Errors
	var nops []nopPad // Padding for a particular assembly (reuse slice storage if multiple assemblies)
	nrelocs0 := len(s.R)
	for {
		// This loop continues while there are reasons to re-assemble
		// whole block, like the presence of long forward jumps.
		reAssemble := false
		for i := range s.R[nrelocs0:] {
			s.R[nrelocs0+i] = obj.Reloc{}
		}
		s.R = s.R[:nrelocs0] // preserve marker relocations generated by the compiler
		s.P = s.P[:0]
		c = 0
		var pPrev *obj.Prog
		nops = nops[:0]
		for p := s.Func().Text; p != nil; p = p.Link {
			c0 := c
			c = pjc.padJump(ctxt, s, p, c)

			if p.As == obj.APCALIGN {
				aln := p.From.Offset
				v := addpad(int64(c), aln, ctxt, s)
				if v > 0 {
					s.Grow(int64(c) + int64(v))
					fillnop(s.P[c:], int(v))
				}

				c += int32(v)
				pPrev = p
				continue
			}

			if maxLoopPad > 0 && p.Back&branchLoopHead != 0 && c&(loopAlign-1) != 0 {
				// pad with NOPs
				v := -c & (loopAlign - 1)

				if v <= maxLoopPad {
					s.Grow(int64(c) + int64(v))
					fillnop(s.P[c:], int(v))
					c += v
				}
			}

			p.Pc = int64(c)

			// process forward jumps to p
			for q := p.Rel; q != nil; q = q.Forwd {
				v := int32(p.Pc - (q.Pc + int64(q.Isize)))
				if q.Back&branchShort != 0 {
					// Displacement no longer fits in a signed byte:
					// demote to a long branch and re-assemble.
					if v > 127 {
						reAssemble = true
						q.Back ^= branchShort
					}

					// JCXZL/XBEGIN place the displacement one byte later
					// than other short branches.
					if q.As == AJCXZL || q.As == AXBEGIN {
						s.P[q.Pc+2] = byte(v)
					} else {
						s.P[q.Pc+1] = byte(v)
					}
				} else {
					binary.LittleEndian.PutUint32(s.P[q.Pc+int64(q.Isize)-4:], uint32(v))
				}
			}

			p.Rel = nil

			p.Pc = int64(c)
			ab.asmins(ctxt, s, p)
			m := ab.Len()
			if int(p.Isize) != m {
				p.Isize = uint8(m)
				if pjc.reAssemble(p) {
					// We need to re-assemble here to check for jumps and fused jumps
					// that span or end on 32 byte boundaries.
					reAssemble = true
				}
			}

			s.Grow(p.Pc + int64(m))
			copy(s.P[p.Pc:], ab.Bytes())
			// If there was padding, remember it.
			if pPrev != nil && !ctxt.IsAsm && c > c0 {
				nops = append(nops, nopPad{p: pPrev, n: c - c0})
			}
			c += int32(m)
			pPrev = p
		}

		n++
		if n > 1000 {
			ctxt.Diag("span must be looping")
			log.Fatalf("loop")
		}
		if !reAssemble {
			break
		}
		if ctxt.Errors > errors {
			return
		}
	}
	// splice padding nops into Progs
	for _, n := range nops {
		pp := n.p
		np := &obj.Prog{Link: pp.Link, Ctxt: pp.Ctxt, As: obj.ANOP, Pos: pp.Pos.WithNotStmt(), Pc: pp.Pc + int64(pp.Isize), Isize: uint8(n.n)}
		pp.Link = np
	}

	s.Size = int64(c)

	if false { /* debug['a'] > 1 */
		fmt.Printf("span1 %s %d (%d tries)\n %.6x", s.Name, s.Size, n, 0)
		var i int
		for i = 0; i < len(s.P); i++ {
			fmt.Printf(" %.2x", s.P[i])
			if i%16 == 15 {
				fmt.Printf("\n %.6x", uint(i+1))
			}
		}

		if i%16 != 0 {
			fmt.Printf("\n")
		}

		for i := 0; i < len(s.R); i++ {
			r := &s.R[i]
			fmt.Printf(" rel %#.4x/%d %s%+d\n", uint32(r.Off), r.Siz, r.Sym.Name, r.Add)
		}
	}

	// Mark nonpreemptible instruction sequences.
	// The 2-instruction TLS access sequence
	// MOVQ TLS, BX
	// MOVQ 0(BX)(TLS*1), BX
	// is not async preemptible, as if it is preempted and resumed on
	// a different thread, the TLS address may become invalid.
	if !CanUse1InsnTLS(ctxt) {
		useTLS := func(p *obj.Prog) bool {
			// Only need to mark the second instruction, which has
			// REG_TLS as Index. (It is okay to interrupt and restart
			// the first instruction.)
			return p.From.Index == REG_TLS
		}
		obj.MarkUnsafePoints(ctxt, s.Func().Text, newprog, useTLS, nil)
	}

	// Now that we know byte offsets, we can generate jump table entries.
	// TODO: could this live in obj instead of obj/$ARCH?
	for _, jt := range s.Func().JumpTables {
		for i, p := range jt.Targets {
			// The ith jumptable entry points to the p.Pc'th
			// byte in the function symbol s.
			jt.Sym.WriteAddr(ctxt, int64(i)*8, 8, s, p.Pc)
		}
	}
}

func instinit(ctxt *obj.Link) {
	if ycover[0] != 0 {
		// Already initialized; stop now.
+ // This happens in the cmd/asm tests, + // each of which re-initializes the arch. + return + } + + switch ctxt.Headtype { + case objabi.Hplan9: + plan9privates = ctxt.Lookup("_privates") + } + + for i := range avxOptab { + c := avxOptab[i].as + if opindex[c&obj.AMask] != nil { + ctxt.Diag("phase error in avxOptab: %d (%v)", i, c) + } + opindex[c&obj.AMask] = &avxOptab[i] + } + for i := 1; optab[i].as != 0; i++ { + c := optab[i].as + if opindex[c&obj.AMask] != nil { + ctxt.Diag("phase error in optab: %d (%v)", i, c) + } + opindex[c&obj.AMask] = &optab[i] + } + + for i := 0; i < Ymax; i++ { + ycover[i*Ymax+i] = 1 + } + + ycover[Yi0*Ymax+Yu2] = 1 + ycover[Yi1*Ymax+Yu2] = 1 + + ycover[Yi0*Ymax+Yi8] = 1 + ycover[Yi1*Ymax+Yi8] = 1 + ycover[Yu2*Ymax+Yi8] = 1 + ycover[Yu7*Ymax+Yi8] = 1 + + ycover[Yi0*Ymax+Yu7] = 1 + ycover[Yi1*Ymax+Yu7] = 1 + ycover[Yu2*Ymax+Yu7] = 1 + + ycover[Yi0*Ymax+Yu8] = 1 + ycover[Yi1*Ymax+Yu8] = 1 + ycover[Yu2*Ymax+Yu8] = 1 + ycover[Yu7*Ymax+Yu8] = 1 + + ycover[Yi0*Ymax+Ys32] = 1 + ycover[Yi1*Ymax+Ys32] = 1 + ycover[Yu2*Ymax+Ys32] = 1 + ycover[Yu7*Ymax+Ys32] = 1 + ycover[Yu8*Ymax+Ys32] = 1 + ycover[Yi8*Ymax+Ys32] = 1 + + ycover[Yi0*Ymax+Yi32] = 1 + ycover[Yi1*Ymax+Yi32] = 1 + ycover[Yu2*Ymax+Yi32] = 1 + ycover[Yu7*Ymax+Yi32] = 1 + ycover[Yu8*Ymax+Yi32] = 1 + ycover[Yi8*Ymax+Yi32] = 1 + ycover[Ys32*Ymax+Yi32] = 1 + + ycover[Yi0*Ymax+Yi64] = 1 + ycover[Yi1*Ymax+Yi64] = 1 + ycover[Yu7*Ymax+Yi64] = 1 + ycover[Yu2*Ymax+Yi64] = 1 + ycover[Yu8*Ymax+Yi64] = 1 + ycover[Yi8*Ymax+Yi64] = 1 + ycover[Ys32*Ymax+Yi64] = 1 + ycover[Yi32*Ymax+Yi64] = 1 + + ycover[Yal*Ymax+Yrb] = 1 + ycover[Ycl*Ymax+Yrb] = 1 + ycover[Yax*Ymax+Yrb] = 1 + ycover[Ycx*Ymax+Yrb] = 1 + ycover[Yrx*Ymax+Yrb] = 1 + ycover[Yrl*Ymax+Yrb] = 1 // but not Yrl32 + + ycover[Ycl*Ymax+Ycx] = 1 + + ycover[Yax*Ymax+Yrx] = 1 + ycover[Ycx*Ymax+Yrx] = 1 + + ycover[Yax*Ymax+Yrl] = 1 + ycover[Ycx*Ymax+Yrl] = 1 + ycover[Yrx*Ymax+Yrl] = 1 + ycover[Yrl32*Ymax+Yrl] = 1 + + ycover[Yf0*Ymax+Yrf] = 1 + + 
ycover[Yal*Ymax+Ymb] = 1 + ycover[Ycl*Ymax+Ymb] = 1 + ycover[Yax*Ymax+Ymb] = 1 + ycover[Ycx*Ymax+Ymb] = 1 + ycover[Yrx*Ymax+Ymb] = 1 + ycover[Yrb*Ymax+Ymb] = 1 + ycover[Yrl*Ymax+Ymb] = 1 // but not Yrl32 + ycover[Ym*Ymax+Ymb] = 1 + + ycover[Yax*Ymax+Yml] = 1 + ycover[Ycx*Ymax+Yml] = 1 + ycover[Yrx*Ymax+Yml] = 1 + ycover[Yrl*Ymax+Yml] = 1 + ycover[Yrl32*Ymax+Yml] = 1 + ycover[Ym*Ymax+Yml] = 1 + + ycover[Yax*Ymax+Ymm] = 1 + ycover[Ycx*Ymax+Ymm] = 1 + ycover[Yrx*Ymax+Ymm] = 1 + ycover[Yrl*Ymax+Ymm] = 1 + ycover[Yrl32*Ymax+Ymm] = 1 + ycover[Ym*Ymax+Ymm] = 1 + ycover[Ymr*Ymax+Ymm] = 1 + + ycover[Yxr0*Ymax+Yxr] = 1 + + ycover[Ym*Ymax+Yxm] = 1 + ycover[Yxr0*Ymax+Yxm] = 1 + ycover[Yxr*Ymax+Yxm] = 1 + + ycover[Ym*Ymax+Yym] = 1 + ycover[Yyr*Ymax+Yym] = 1 + + ycover[Yxr0*Ymax+YxrEvex] = 1 + ycover[Yxr*Ymax+YxrEvex] = 1 + + ycover[Ym*Ymax+YxmEvex] = 1 + ycover[Yxr0*Ymax+YxmEvex] = 1 + ycover[Yxr*Ymax+YxmEvex] = 1 + ycover[YxrEvex*Ymax+YxmEvex] = 1 + + ycover[Yyr*Ymax+YyrEvex] = 1 + + ycover[Ym*Ymax+YymEvex] = 1 + ycover[Yyr*Ymax+YymEvex] = 1 + ycover[YyrEvex*Ymax+YymEvex] = 1 + + ycover[Ym*Ymax+Yzm] = 1 + ycover[Yzr*Ymax+Yzm] = 1 + + ycover[Yk0*Ymax+Yk] = 1 + ycover[Yknot0*Ymax+Yk] = 1 + + ycover[Yk0*Ymax+Ykm] = 1 + ycover[Yknot0*Ymax+Ykm] = 1 + ycover[Yk*Ymax+Ykm] = 1 + ycover[Ym*Ymax+Ykm] = 1 + + ycover[Yxvm*Ymax+YxvmEvex] = 1 + + ycover[Yyvm*Ymax+YyvmEvex] = 1 + + for i := 0; i < MAXREG; i++ { + reg[i] = -1 + if i >= REG_AL && i <= REG_R15B { + reg[i] = (i - REG_AL) & 7 + if i >= REG_SPB && i <= REG_DIB { + regrex[i] = 0x40 + } + if i >= REG_R8B && i <= REG_R15B { + regrex[i] = Rxr | Rxx | Rxb + } + } + + if i >= REG_AH && i <= REG_BH { + reg[i] = 4 + ((i - REG_AH) & 7) + } + if i >= REG_AX && i <= REG_R15 { + reg[i] = (i - REG_AX) & 7 + if i >= REG_R8 { + regrex[i] = Rxr | Rxx | Rxb + } + } + + if i >= REG_F0 && i <= REG_F0+7 { + reg[i] = (i - REG_F0) & 7 + } + if i >= REG_M0 && i <= REG_M0+7 { + reg[i] = (i - REG_M0) & 7 + } + if i >= REG_K0 && i <= REG_K0+7 { + reg[i] = 
(i - REG_K0) & 7 + } + if i >= REG_X0 && i <= REG_X0+15 { + reg[i] = (i - REG_X0) & 7 + if i >= REG_X0+8 { + regrex[i] = Rxr | Rxx | Rxb + } + } + if i >= REG_X16 && i <= REG_X16+15 { + reg[i] = (i - REG_X16) & 7 + if i >= REG_X16+8 { + regrex[i] = Rxr | Rxx | Rxb | RxrEvex + } else { + regrex[i] = RxrEvex + } + } + if i >= REG_Y0 && i <= REG_Y0+15 { + reg[i] = (i - REG_Y0) & 7 + if i >= REG_Y0+8 { + regrex[i] = Rxr | Rxx | Rxb + } + } + if i >= REG_Y16 && i <= REG_Y16+15 { + reg[i] = (i - REG_Y16) & 7 + if i >= REG_Y16+8 { + regrex[i] = Rxr | Rxx | Rxb | RxrEvex + } else { + regrex[i] = RxrEvex + } + } + if i >= REG_Z0 && i <= REG_Z0+15 { + reg[i] = (i - REG_Z0) & 7 + if i > REG_Z0+7 { + regrex[i] = Rxr | Rxx | Rxb + } + } + if i >= REG_Z16 && i <= REG_Z16+15 { + reg[i] = (i - REG_Z16) & 7 + if i >= REG_Z16+8 { + regrex[i] = Rxr | Rxx | Rxb | RxrEvex + } else { + regrex[i] = RxrEvex + } + } + + if i >= REG_CR+8 && i <= REG_CR+15 { + regrex[i] = Rxr + } + } +} + +var isAndroid = buildcfg.GOOS == "android" + +func prefixof(ctxt *obj.Link, a *obj.Addr) int { + if a.Reg < REG_CS && a.Index < REG_CS { // fast path + return 0 + } + if a.Type == obj.TYPE_MEM && a.Name == obj.NAME_NONE { + switch a.Reg { + case REG_CS: + return 0x2e + + case REG_DS: + return 0x3e + + case REG_ES: + return 0x26 + + case REG_FS: + return 0x64 + + case REG_GS: + return 0x65 + + case REG_TLS: + // NOTE: Systems listed here should be only systems that + // support direct TLS references like 8(TLS) implemented as + // direct references from FS or GS. Systems that require + // the initial-exec model, where you load the TLS base into + // a register and then index from that register, do not reach + // this code and should not be listed. 
			// 386: the TLS base lives in a segment register; pick the
			// conventional one per OS. (0x64 = FS override, 0x65 = GS override.)
			if ctxt.Arch.Family == sys.I386 {
				switch ctxt.Headtype {
				default:
					if isAndroid {
						return 0x65 // GS
					}
					log.Fatalf("unknown TLS base register for %v", ctxt.Headtype)

				case objabi.Hdarwin,
					objabi.Hdragonfly,
					objabi.Hfreebsd,
					objabi.Hnetbsd,
					objabi.Hopenbsd:
					return 0x65 // GS
				}
			}

			// amd64 TLS base register, per OS.
			switch ctxt.Headtype {
			default:
				log.Fatalf("unknown TLS base register for %v", ctxt.Headtype)

			case objabi.Hlinux:
				if isAndroid {
					return 0x64 // FS
				}

				if ctxt.Flag_shared {
					// -shared on linux uses the initial-exec model and
					// should not reach this direct-reference path.
					log.Fatalf("unknown TLS base register for linux with -shared")
				} else {
					return 0x64 // FS
				}

			case objabi.Hdragonfly,
				objabi.Hfreebsd,
				objabi.Hnetbsd,
				objabi.Hopenbsd,
				objabi.Hsolaris:
				return 0x64 // FS

			case objabi.Hdarwin:
				return 0x65 // GS
			}
		}
	}

	// A segment register used as the *index* also forces a segment-override
	// prefix byte on the instruction.
	switch a.Index {
	case REG_CS:
		return 0x2e

	case REG_DS:
		return 0x3e

	case REG_ES:
		return 0x26

	case REG_TLS:
		if ctxt.Flag_shared && ctxt.Headtype != objabi.Hwindows {
			// When building for inclusion into a shared library, an instruction of the form
			//     MOV off(CX)(TLS*1), AX
			// becomes
			//     mov %gs:off(%ecx), %eax // on i386
			//     mov %fs:off(%rcx), %rax // on amd64
			// which assumes that the correct TLS offset has been loaded into CX (today
			// there is only one TLS variable -- g -- so this is OK). When not building for
			// a shared library the instruction it becomes
			//     mov 0x0(%ecx), %eax // on i386
			//     mov 0x0(%rcx), %rax // on amd64
			// and a R_TLS_LE relocation, and so does not require a prefix.
			if ctxt.Arch.Family == sys.I386 {
				return 0x65 // GS
			}
			return 0x64 // FS
		}

	case REG_FS:
		return 0x64

	case REG_GS:
		return 0x65
	}

	// No override prefix needed.
	return 0
}

// oclassRegList returns multisource operand class for addr.
func oclassRegList(ctxt *obj.Link, addr *obj.Addr) int {
	// TODO(quasilyte): when oclass register case is refactored into
	// lookup table, use it here to get register kind more easily.
+ // Helper functions like regIsXmm should go away too (they will become redundant). + + regIsXmm := func(r int) bool { return r >= REG_X0 && r <= REG_X31 } + regIsYmm := func(r int) bool { return r >= REG_Y0 && r <= REG_Y31 } + regIsZmm := func(r int) bool { return r >= REG_Z0 && r <= REG_Z31 } + + reg0, reg1 := decodeRegisterRange(addr.Offset) + low := regIndex(int16(reg0)) + high := regIndex(int16(reg1)) + + if ctxt.Arch.Family == sys.I386 { + if low >= 8 || high >= 8 { + return Yxxx + } + } + + switch high - low { + case 3: + switch { + case regIsXmm(reg0) && regIsXmm(reg1): + return YxrEvexMulti4 + case regIsYmm(reg0) && regIsYmm(reg1): + return YyrEvexMulti4 + case regIsZmm(reg0) && regIsZmm(reg1): + return YzrMulti4 + default: + return Yxxx + } + default: + return Yxxx + } +} + +// oclassVMem returns V-mem (vector memory with VSIB) operand class. +// For addr that is not V-mem returns (Yxxx, false). +func oclassVMem(ctxt *obj.Link, addr *obj.Addr) (int, bool) { + switch addr.Index { + case REG_X0 + 0, + REG_X0 + 1, + REG_X0 + 2, + REG_X0 + 3, + REG_X0 + 4, + REG_X0 + 5, + REG_X0 + 6, + REG_X0 + 7: + return Yxvm, true + case REG_X8 + 0, + REG_X8 + 1, + REG_X8 + 2, + REG_X8 + 3, + REG_X8 + 4, + REG_X8 + 5, + REG_X8 + 6, + REG_X8 + 7: + if ctxt.Arch.Family == sys.I386 { + return Yxxx, true + } + return Yxvm, true + case REG_X16 + 0, + REG_X16 + 1, + REG_X16 + 2, + REG_X16 + 3, + REG_X16 + 4, + REG_X16 + 5, + REG_X16 + 6, + REG_X16 + 7, + REG_X16 + 8, + REG_X16 + 9, + REG_X16 + 10, + REG_X16 + 11, + REG_X16 + 12, + REG_X16 + 13, + REG_X16 + 14, + REG_X16 + 15: + if ctxt.Arch.Family == sys.I386 { + return Yxxx, true + } + return YxvmEvex, true + + case REG_Y0 + 0, + REG_Y0 + 1, + REG_Y0 + 2, + REG_Y0 + 3, + REG_Y0 + 4, + REG_Y0 + 5, + REG_Y0 + 6, + REG_Y0 + 7: + return Yyvm, true + case REG_Y8 + 0, + REG_Y8 + 1, + REG_Y8 + 2, + REG_Y8 + 3, + REG_Y8 + 4, + REG_Y8 + 5, + REG_Y8 + 6, + REG_Y8 + 7: + if ctxt.Arch.Family == sys.I386 { + return Yxxx, true + } + return 
Yyvm, true + case REG_Y16 + 0, + REG_Y16 + 1, + REG_Y16 + 2, + REG_Y16 + 3, + REG_Y16 + 4, + REG_Y16 + 5, + REG_Y16 + 6, + REG_Y16 + 7, + REG_Y16 + 8, + REG_Y16 + 9, + REG_Y16 + 10, + REG_Y16 + 11, + REG_Y16 + 12, + REG_Y16 + 13, + REG_Y16 + 14, + REG_Y16 + 15: + if ctxt.Arch.Family == sys.I386 { + return Yxxx, true + } + return YyvmEvex, true + + case REG_Z0 + 0, + REG_Z0 + 1, + REG_Z0 + 2, + REG_Z0 + 3, + REG_Z0 + 4, + REG_Z0 + 5, + REG_Z0 + 6, + REG_Z0 + 7: + return Yzvm, true + case REG_Z8 + 0, + REG_Z8 + 1, + REG_Z8 + 2, + REG_Z8 + 3, + REG_Z8 + 4, + REG_Z8 + 5, + REG_Z8 + 6, + REG_Z8 + 7, + REG_Z8 + 8, + REG_Z8 + 9, + REG_Z8 + 10, + REG_Z8 + 11, + REG_Z8 + 12, + REG_Z8 + 13, + REG_Z8 + 14, + REG_Z8 + 15, + REG_Z8 + 16, + REG_Z8 + 17, + REG_Z8 + 18, + REG_Z8 + 19, + REG_Z8 + 20, + REG_Z8 + 21, + REG_Z8 + 22, + REG_Z8 + 23: + if ctxt.Arch.Family == sys.I386 { + return Yxxx, true + } + return Yzvm, true + } + + return Yxxx, false +} + +func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int { + switch a.Type { + case obj.TYPE_REGLIST: + return oclassRegList(ctxt, a) + + case obj.TYPE_NONE: + return Ynone + + case obj.TYPE_BRANCH: + return Ybr + + case obj.TYPE_INDIR: + if a.Name != obj.NAME_NONE && a.Reg == REG_NONE && a.Index == REG_NONE && a.Scale == 0 { + return Yindir + } + return Yxxx + + case obj.TYPE_MEM: + // Pseudo registers have negative index, but SP is + // not pseudo on x86, hence REG_SP check is not redundant. + if a.Index == REG_SP || a.Index < 0 { + // Can't use FP/SB/PC/SP as the index register. + return Yxxx + } + + if vmem, ok := oclassVMem(ctxt, a); ok { + return vmem + } + + if ctxt.Arch.Family == sys.AMD64 { + switch a.Name { + case obj.NAME_EXTERN, obj.NAME_STATIC, obj.NAME_GOTREF: + // Global variables can't use index registers and their + // base register is %rip (%rip is encoded as REG_NONE). 
+ if a.Reg != REG_NONE || a.Index != REG_NONE || a.Scale != 0 { + return Yxxx + } + case obj.NAME_AUTO, obj.NAME_PARAM: + // These names must have a base of SP. The old compiler + // uses 0 for the base register. SSA uses REG_SP. + if a.Reg != REG_SP && a.Reg != 0 { + return Yxxx + } + case obj.NAME_NONE: + // everything is ok + default: + // unknown name + return Yxxx + } + } + return Ym + + case obj.TYPE_ADDR: + switch a.Name { + case obj.NAME_GOTREF: + ctxt.Diag("unexpected TYPE_ADDR with NAME_GOTREF") + return Yxxx + + case obj.NAME_EXTERN, + obj.NAME_STATIC: + if a.Sym != nil && useAbs(ctxt, a.Sym) { + return Yi32 + } + return Yiauto // use pc-relative addressing + + case obj.NAME_AUTO, + obj.NAME_PARAM: + return Yiauto + } + + // TODO(rsc): DUFFZERO/DUFFCOPY encoding forgot to set a->index + // and got Yi32 in an earlier version of this code. + // Keep doing that until we fix yduff etc. + if a.Sym != nil && strings.HasPrefix(a.Sym.Name, "runtime.duff") { + return Yi32 + } + + if a.Sym != nil || a.Name != obj.NAME_NONE { + ctxt.Diag("unexpected addr: %v", obj.Dconv(p, a)) + } + fallthrough + + case obj.TYPE_CONST: + if a.Sym != nil { + ctxt.Diag("TYPE_CONST with symbol: %v", obj.Dconv(p, a)) + } + + v := a.Offset + if ctxt.Arch.Family == sys.I386 { + v = int64(int32(v)) + } + switch { + case v == 0: + return Yi0 + case v == 1: + return Yi1 + case v >= 0 && v <= 3: + return Yu2 + case v >= 0 && v <= 127: + return Yu7 + case v >= 0 && v <= 255: + return Yu8 + case v >= -128 && v <= 127: + return Yi8 + } + if ctxt.Arch.Family == sys.I386 { + return Yi32 + } + l := int32(v) + if int64(l) == v { + return Ys32 // can sign extend + } + if v>>32 == 0 { + return Yi32 // unsigned + } + return Yi64 + + case obj.TYPE_TEXTSIZE: + return Ytextsize + } + + if a.Type != obj.TYPE_REG { + ctxt.Diag("unexpected addr1: type=%d %v", a.Type, obj.Dconv(p, a)) + return Yxxx + } + + switch a.Reg { + case REG_AL: + return Yal + + case REG_AX: + return Yax + + /* + case REG_SPB: + */ + 
	case REG_BPB,
		REG_SIB,
		REG_DIB,
		REG_R8B,
		REG_R9B,
		REG_R10B,
		REG_R11B,
		REG_R12B,
		REG_R13B,
		REG_R14B,
		REG_R15B:
		// amd64-only byte registers; not encodable on 386.
		if ctxt.Arch.Family == sys.I386 {
			return Yxxx
		}
		fallthrough

	case REG_DL,
		REG_BL,
		REG_AH,
		REG_CH,
		REG_DH,
		REG_BH:
		return Yrb

	case REG_CL:
		return Ycl

	case REG_CX:
		return Ycx

	case REG_DX, REG_BX:
		return Yrx

	case REG_R8, // not really Yrl
		REG_R9,
		REG_R10,
		REG_R11,
		REG_R12,
		REG_R13,
		REG_R14,
		REG_R15:
		// amd64-only word registers; reject on 386, otherwise fall
		// through to the general register classification.
		if ctxt.Arch.Family == sys.I386 {
			return Yxxx
		}
		fallthrough

	case REG_SP, REG_BP, REG_SI, REG_DI:
		// On 386 these are 32-bit-only general registers (Yrl32),
		// on amd64 they classify as general registers (Yrl).
		if ctxt.Arch.Family == sys.I386 {
			return Yrl32
		}
		return Yrl

	case REG_F0 + 0:
		// x87 stack top gets its own class.
		return Yf0

	case REG_F0 + 1,
		REG_F0 + 2,
		REG_F0 + 3,
		REG_F0 + 4,
		REG_F0 + 5,
		REG_F0 + 6,
		REG_F0 + 7:
		return Yrf

	case REG_M0 + 0,
		REG_M0 + 1,
		REG_M0 + 2,
		REG_M0 + 3,
		REG_M0 + 4,
		REG_M0 + 5,
		REG_M0 + 6,
		REG_M0 + 7:
		return Ymr

	case REG_X0:
		// X0 gets a distinct class (some encodings special-case it).
		return Yxr0

	case REG_X0 + 1,
		REG_X0 + 2,
		REG_X0 + 3,
		REG_X0 + 4,
		REG_X0 + 5,
		REG_X0 + 6,
		REG_X0 + 7,
		REG_X0 + 8,
		REG_X0 + 9,
		REG_X0 + 10,
		REG_X0 + 11,
		REG_X0 + 12,
		REG_X0 + 13,
		REG_X0 + 14,
		REG_X0 + 15:
		return Yxr

	case REG_X0 + 16,
		REG_X0 + 17,
		REG_X0 + 18,
		REG_X0 + 19,
		REG_X0 + 20,
		REG_X0 + 21,
		REG_X0 + 22,
		REG_X0 + 23,
		REG_X0 + 24,
		REG_X0 + 25,
		REG_X0 + 26,
		REG_X0 + 27,
		REG_X0 + 28,
		REG_X0 + 29,
		REG_X0 + 30,
		REG_X0 + 31:
		// X16-X31 are only reachable via EVEX encodings.
		return YxrEvex

	case REG_Y0 + 0,
		REG_Y0 + 1,
		REG_Y0 + 2,
		REG_Y0 + 3,
		REG_Y0 + 4,
		REG_Y0 + 5,
		REG_Y0 + 6,
		REG_Y0 + 7,
		REG_Y0 + 8,
		REG_Y0 + 9,
		REG_Y0 + 10,
		REG_Y0 + 11,
		REG_Y0 + 12,
		REG_Y0 + 13,
		REG_Y0 + 14,
		REG_Y0 + 15:
		return Yyr

	case REG_Y0 + 16,
		REG_Y0 + 17,
		REG_Y0 + 18,
		REG_Y0 + 19,
		REG_Y0 + 20,
		REG_Y0 + 21,
		REG_Y0 + 22,
		REG_Y0 + 23,
		REG_Y0 + 24,
		REG_Y0 + 25,
		REG_Y0 + 26,
		REG_Y0 + 27,
		REG_Y0 + 28,
		REG_Y0 + 29,
		REG_Y0 + 30,
		REG_Y0 + 31:
		// Y16-Y31 are only reachable via EVEX encodings.
		return YyrEvex

case REG_Z0 + 0, + REG_Z0 + 1, + REG_Z0 + 2, + REG_Z0 + 3, + REG_Z0 + 4, + REG_Z0 + 5, + REG_Z0 + 6, + REG_Z0 + 7: + return Yzr + + case REG_Z0 + 8, + REG_Z0 + 9, + REG_Z0 + 10, + REG_Z0 + 11, + REG_Z0 + 12, + REG_Z0 + 13, + REG_Z0 + 14, + REG_Z0 + 15, + REG_Z0 + 16, + REG_Z0 + 17, + REG_Z0 + 18, + REG_Z0 + 19, + REG_Z0 + 20, + REG_Z0 + 21, + REG_Z0 + 22, + REG_Z0 + 23, + REG_Z0 + 24, + REG_Z0 + 25, + REG_Z0 + 26, + REG_Z0 + 27, + REG_Z0 + 28, + REG_Z0 + 29, + REG_Z0 + 30, + REG_Z0 + 31: + if ctxt.Arch.Family == sys.I386 { + return Yxxx + } + return Yzr + + case REG_K0: + return Yk0 + + case REG_K0 + 1, + REG_K0 + 2, + REG_K0 + 3, + REG_K0 + 4, + REG_K0 + 5, + REG_K0 + 6, + REG_K0 + 7: + return Yknot0 + + case REG_CS: + return Ycs + case REG_SS: + return Yss + case REG_DS: + return Yds + case REG_ES: + return Yes + case REG_FS: + return Yfs + case REG_GS: + return Ygs + case REG_TLS: + return Ytls + + case REG_GDTR: + return Ygdtr + case REG_IDTR: + return Yidtr + case REG_LDTR: + return Yldtr + case REG_MSW: + return Ymsw + case REG_TASK: + return Ytask + + case REG_CR + 0: + return Ycr0 + case REG_CR + 1: + return Ycr1 + case REG_CR + 2: + return Ycr2 + case REG_CR + 3: + return Ycr3 + case REG_CR + 4: + return Ycr4 + case REG_CR + 5: + return Ycr5 + case REG_CR + 6: + return Ycr6 + case REG_CR + 7: + return Ycr7 + case REG_CR + 8: + return Ycr8 + + case REG_DR + 0: + return Ydr0 + case REG_DR + 1: + return Ydr1 + case REG_DR + 2: + return Ydr2 + case REG_DR + 3: + return Ydr3 + case REG_DR + 4: + return Ydr4 + case REG_DR + 5: + return Ydr5 + case REG_DR + 6: + return Ydr6 + case REG_DR + 7: + return Ydr7 + + case REG_TR + 0: + return Ytr0 + case REG_TR + 1: + return Ytr1 + case REG_TR + 2: + return Ytr2 + case REG_TR + 3: + return Ytr3 + case REG_TR + 4: + return Ytr4 + case REG_TR + 5: + return Ytr5 + case REG_TR + 6: + return Ytr6 + case REG_TR + 7: + return Ytr7 + } + + return Yxxx +} + +// AsmBuf is a simple buffer to assemble variable-length x86 
instructions into
// and hold assembly state.
type AsmBuf struct {
	buf      [100]byte // encoded bytes of the current instruction
	off      int       // number of valid bytes in buf
	rexflag  int
	vexflag  bool // Per inst: true for VEX-encoded
	evexflag bool // Per inst: true for EVEX-encoded
	rep      bool
	repn     bool
	lock     bool

	evex evexBits // Initialized when evexflag is true
}

// Put1 appends one byte to the end of the buffer.
// NOTE(review): no bounds check — relies on buf being large enough for
// any single instruction encoding; confirm against the longest encoder path.
func (ab *AsmBuf) Put1(x byte) {
	ab.buf[ab.off] = x
	ab.off++
}

// Put2 appends two bytes to the end of the buffer.
func (ab *AsmBuf) Put2(x, y byte) {
	ab.buf[ab.off+0] = x
	ab.buf[ab.off+1] = y
	ab.off += 2
}

// Put3 appends three bytes to the end of the buffer.
func (ab *AsmBuf) Put3(x, y, z byte) {
	ab.buf[ab.off+0] = x
	ab.buf[ab.off+1] = y
	ab.buf[ab.off+2] = z
	ab.off += 3
}

// Put4 appends four bytes to the end of the buffer.
func (ab *AsmBuf) Put4(x, y, z, w byte) {
	ab.buf[ab.off+0] = x
	ab.buf[ab.off+1] = y
	ab.buf[ab.off+2] = z
	ab.buf[ab.off+3] = w
	ab.off += 4
}

// PutInt16 writes v into the buffer using little-endian encoding.
func (ab *AsmBuf) PutInt16(v int16) {
	ab.buf[ab.off+0] = byte(v)
	ab.buf[ab.off+1] = byte(v >> 8)
	ab.off += 2
}

// PutInt32 writes v into the buffer using little-endian encoding.
func (ab *AsmBuf) PutInt32(v int32) {
	ab.buf[ab.off+0] = byte(v)
	ab.buf[ab.off+1] = byte(v >> 8)
	ab.buf[ab.off+2] = byte(v >> 16)
	ab.buf[ab.off+3] = byte(v >> 24)
	ab.off += 4
}

// PutInt64 writes v into the buffer using little-endian encoding.
func (ab *AsmBuf) PutInt64(v int64) {
	ab.buf[ab.off+0] = byte(v)
	ab.buf[ab.off+1] = byte(v >> 8)
	ab.buf[ab.off+2] = byte(v >> 16)
	ab.buf[ab.off+3] = byte(v >> 24)
	ab.buf[ab.off+4] = byte(v >> 32)
	ab.buf[ab.off+5] = byte(v >> 40)
	ab.buf[ab.off+6] = byte(v >> 48)
	ab.buf[ab.off+7] = byte(v >> 56)
	ab.off += 8
}

// Put copies b into the buffer.
func (ab *AsmBuf) Put(b []byte) {
	copy(ab.buf[ab.off:], b)
	ab.off += len(b)
}

// PutOpBytesLit writes zero terminated sequence of bytes from op,
// starting at specified offset (e.g. z counter value).
// Trailing 0 is not written.
//
// Intended to be used for literal Z cases.
// Literal Z cases usually have "Zlit" in their name (Zlit, Zlitr_m, Zlitm_r).
func (ab *AsmBuf) PutOpBytesLit(offset int, op *opBytes) {
	for int(op[offset]) != 0 {
		ab.Put1(byte(op[offset]))
		offset++
	}
}

// Insert inserts b at offset i.
// Shifts buf[i:] up by one byte to make room.
func (ab *AsmBuf) Insert(i int, b byte) {
	ab.off++
	copy(ab.buf[i+1:ab.off], ab.buf[i:ab.off-1])
	ab.buf[i] = b
}

// Last returns the byte at the end of the buffer.
func (ab *AsmBuf) Last() byte { return ab.buf[ab.off-1] }

// Len returns the length of the buffer.
func (ab *AsmBuf) Len() int { return ab.off }

// Bytes returns the contents of the buffer.
// Note: this aliases the internal array; a later Reset/Put reuses it.
func (ab *AsmBuf) Bytes() []byte { return ab.buf[:ab.off] }

// Reset empties the buffer.
func (ab *AsmBuf) Reset() { ab.off = 0 }

// At returns the byte at offset i.
func (ab *AsmBuf) At(i int) byte { return ab.buf[i] }

// asmidx emits SIB byte.
func (ab *AsmBuf) asmidx(ctxt *obj.Link, scale int, index int, base int) {
	var i int

	// X/Y index register is used in VSIB.
+ switch index { + default: + goto bad + + case REG_NONE: + i = 4 << 3 + goto bas + + case REG_R8, + REG_R9, + REG_R10, + REG_R11, + REG_R12, + REG_R13, + REG_R14, + REG_R15, + REG_X8, + REG_X9, + REG_X10, + REG_X11, + REG_X12, + REG_X13, + REG_X14, + REG_X15, + REG_X16, + REG_X17, + REG_X18, + REG_X19, + REG_X20, + REG_X21, + REG_X22, + REG_X23, + REG_X24, + REG_X25, + REG_X26, + REG_X27, + REG_X28, + REG_X29, + REG_X30, + REG_X31, + REG_Y8, + REG_Y9, + REG_Y10, + REG_Y11, + REG_Y12, + REG_Y13, + REG_Y14, + REG_Y15, + REG_Y16, + REG_Y17, + REG_Y18, + REG_Y19, + REG_Y20, + REG_Y21, + REG_Y22, + REG_Y23, + REG_Y24, + REG_Y25, + REG_Y26, + REG_Y27, + REG_Y28, + REG_Y29, + REG_Y30, + REG_Y31, + REG_Z8, + REG_Z9, + REG_Z10, + REG_Z11, + REG_Z12, + REG_Z13, + REG_Z14, + REG_Z15, + REG_Z16, + REG_Z17, + REG_Z18, + REG_Z19, + REG_Z20, + REG_Z21, + REG_Z22, + REG_Z23, + REG_Z24, + REG_Z25, + REG_Z26, + REG_Z27, + REG_Z28, + REG_Z29, + REG_Z30, + REG_Z31: + if ctxt.Arch.Family == sys.I386 { + goto bad + } + fallthrough + + case REG_AX, + REG_CX, + REG_DX, + REG_BX, + REG_BP, + REG_SI, + REG_DI, + REG_X0, + REG_X1, + REG_X2, + REG_X3, + REG_X4, + REG_X5, + REG_X6, + REG_X7, + REG_Y0, + REG_Y1, + REG_Y2, + REG_Y3, + REG_Y4, + REG_Y5, + REG_Y6, + REG_Y7, + REG_Z0, + REG_Z1, + REG_Z2, + REG_Z3, + REG_Z4, + REG_Z5, + REG_Z6, + REG_Z7: + i = reg[index] << 3 + } + + switch scale { + default: + goto bad + + case 1: + break + + case 2: + i |= 1 << 6 + + case 4: + i |= 2 << 6 + + case 8: + i |= 3 << 6 + } + +bas: + switch base { + default: + goto bad + + case REG_NONE: // must be mod=00 + i |= 5 + + case REG_R8, + REG_R9, + REG_R10, + REG_R11, + REG_R12, + REG_R13, + REG_R14, + REG_R15: + if ctxt.Arch.Family == sys.I386 { + goto bad + } + fallthrough + + case REG_AX, + REG_CX, + REG_DX, + REG_BX, + REG_SP, + REG_BP, + REG_SI, + REG_DI: + i |= reg[base] + } + + ab.Put1(byte(i)) + return + +bad: + ctxt.Diag("asmidx: bad address %d/%d/%d", scale, index, base) + ab.Put1(0) +} + +func (ab 
*AsmBuf) relput4(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, a *obj.Addr) { + var rel obj.Reloc + + v := vaddr(ctxt, p, a, &rel) + if rel.Siz != 0 { + if rel.Siz != 4 { + ctxt.Diag("bad reloc") + } + r := obj.Addrel(cursym) + *r = rel + r.Off = int32(p.Pc + int64(ab.Len())) + } + + ab.PutInt32(int32(v)) +} + +func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int64 { + if r != nil { + *r = obj.Reloc{} + } + + switch a.Name { + case obj.NAME_STATIC, + obj.NAME_GOTREF, + obj.NAME_EXTERN: + s := a.Sym + if r == nil { + ctxt.Diag("need reloc for %v", obj.Dconv(p, a)) + log.Fatalf("reloc") + } + + if a.Name == obj.NAME_GOTREF { + r.Siz = 4 + r.Type = objabi.R_GOTPCREL + } else if useAbs(ctxt, s) { + r.Siz = 4 + r.Type = objabi.R_ADDR + } else { + r.Siz = 4 + r.Type = objabi.R_PCREL + } + + r.Off = -1 // caller must fill in + r.Sym = s + r.Add = a.Offset + + return 0 + } + + if (a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && a.Reg == REG_TLS { + if r == nil { + ctxt.Diag("need reloc for %v", obj.Dconv(p, a)) + log.Fatalf("reloc") + } + + if !ctxt.Flag_shared || isAndroid || ctxt.Headtype == objabi.Hdarwin { + r.Type = objabi.R_TLS_LE + r.Siz = 4 + r.Off = -1 // caller must fill in + r.Add = a.Offset + } + return 0 + } + + return a.Offset +} + +func (ab *AsmBuf) asmandsz(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int) { + var base int + var rel obj.Reloc + + rex &= 0x40 | Rxr + if a.Offset != int64(int32(a.Offset)) { + // The rules are slightly different for 386 and AMD64, + // mostly for historical reasons. We may unify them later, + // but it must be discussed beforehand. + // + // For 64bit mode only LEAL is allowed to overflow. + // It's how https://golang.org/cl/59630 made it. + // crypto/sha1/sha1block_amd64.s depends on this feature. + // + // For 32bit mode rules are more permissive. + // If offset fits uint32, it's permitted. 
+ // This is allowed for assembly that wants to use 32-bit hex + // constants, e.g. LEAL 0x99999999(AX), AX. + overflowOK := (ctxt.Arch.Family == sys.AMD64 && p.As == ALEAL) || + (ctxt.Arch.Family != sys.AMD64 && + int64(uint32(a.Offset)) == a.Offset && + ab.rexflag&Rxw == 0) + if !overflowOK { + ctxt.Diag("offset too large in %s", p) + } + } + v := int32(a.Offset) + rel.Siz = 0 + + switch a.Type { + case obj.TYPE_ADDR: + if a.Name == obj.NAME_NONE { + ctxt.Diag("unexpected TYPE_ADDR with NAME_NONE") + } + if a.Index == REG_TLS { + ctxt.Diag("unexpected TYPE_ADDR with index==REG_TLS") + } + goto bad + + case obj.TYPE_REG: + const regFirst = REG_AL + const regLast = REG_Z31 + if a.Reg < regFirst || regLast < a.Reg { + goto bad + } + if v != 0 { + goto bad + } + ab.Put1(byte(3<<6 | reg[a.Reg]<<0 | r<<3)) + ab.rexflag |= regrex[a.Reg]&(0x40|Rxb) | rex + return + } + + if a.Type != obj.TYPE_MEM { + goto bad + } + + if a.Index != REG_NONE && a.Index != REG_TLS && !(REG_CS <= a.Index && a.Index <= REG_GS) { + base := int(a.Reg) + switch a.Name { + case obj.NAME_EXTERN, + obj.NAME_GOTREF, + obj.NAME_STATIC: + if !useAbs(ctxt, a.Sym) && ctxt.Arch.Family == sys.AMD64 { + goto bad + } + if ctxt.Arch.Family == sys.I386 && ctxt.Flag_shared { + // The base register has already been set. It holds the PC + // of this instruction returned by a PC-reading thunk. + // See obj6.go:rewriteToPcrel. 
+ } else { + base = REG_NONE + } + v = int32(vaddr(ctxt, p, a, &rel)) + + case obj.NAME_AUTO, + obj.NAME_PARAM: + base = REG_SP + } + + ab.rexflag |= regrex[int(a.Index)]&Rxx | regrex[base]&Rxb | rex + if base == REG_NONE { + ab.Put1(byte(0<<6 | 4<<0 | r<<3)) + ab.asmidx(ctxt, int(a.Scale), int(a.Index), base) + goto putrelv + } + + if v == 0 && rel.Siz == 0 && base != REG_BP && base != REG_R13 { + ab.Put1(byte(0<<6 | 4<<0 | r<<3)) + ab.asmidx(ctxt, int(a.Scale), int(a.Index), base) + return + } + + if disp8, ok := toDisp8(v, p, ab); ok && rel.Siz == 0 { + ab.Put1(byte(1<<6 | 4<<0 | r<<3)) + ab.asmidx(ctxt, int(a.Scale), int(a.Index), base) + ab.Put1(disp8) + return + } + + ab.Put1(byte(2<<6 | 4<<0 | r<<3)) + ab.asmidx(ctxt, int(a.Scale), int(a.Index), base) + goto putrelv + } + + base = int(a.Reg) + switch a.Name { + case obj.NAME_STATIC, + obj.NAME_GOTREF, + obj.NAME_EXTERN: + if a.Sym == nil { + ctxt.Diag("bad addr: %v", p) + } + if ctxt.Arch.Family == sys.I386 && ctxt.Flag_shared { + // The base register has already been set. It holds the PC + // of this instruction returned by a PC-reading thunk. + // See obj6.go:rewriteToPcrel. 
+ } else { + base = REG_NONE + } + v = int32(vaddr(ctxt, p, a, &rel)) + + case obj.NAME_AUTO, + obj.NAME_PARAM: + base = REG_SP + } + + if base == REG_TLS { + v = int32(vaddr(ctxt, p, a, &rel)) + } + + ab.rexflag |= regrex[base]&Rxb | rex + if base == REG_NONE || (REG_CS <= base && base <= REG_GS) || base == REG_TLS { + if (a.Sym == nil || !useAbs(ctxt, a.Sym)) && base == REG_NONE && (a.Name == obj.NAME_STATIC || a.Name == obj.NAME_EXTERN || a.Name == obj.NAME_GOTREF) || ctxt.Arch.Family != sys.AMD64 { + if a.Name == obj.NAME_GOTREF && (a.Offset != 0 || a.Index != 0 || a.Scale != 0) { + ctxt.Diag("%v has offset against gotref", p) + } + ab.Put1(byte(0<<6 | 5<<0 | r<<3)) + goto putrelv + } + + // temporary + ab.Put2( + byte(0<<6|4<<0|r<<3), // sib present + 0<<6|4<<3|5<<0, // DS:d32 + ) + goto putrelv + } + + if base == REG_SP || base == REG_R12 { + if v == 0 { + ab.Put1(byte(0<<6 | reg[base]<<0 | r<<3)) + ab.asmidx(ctxt, int(a.Scale), REG_NONE, base) + return + } + + if disp8, ok := toDisp8(v, p, ab); ok { + ab.Put1(byte(1<<6 | reg[base]<<0 | r<<3)) + ab.asmidx(ctxt, int(a.Scale), REG_NONE, base) + ab.Put1(disp8) + return + } + + ab.Put1(byte(2<<6 | reg[base]<<0 | r<<3)) + ab.asmidx(ctxt, int(a.Scale), REG_NONE, base) + goto putrelv + } + + if REG_AX <= base && base <= REG_R15 { + if a.Index == REG_TLS && !ctxt.Flag_shared && !isAndroid && + ctxt.Headtype != objabi.Hwindows { + rel = obj.Reloc{} + rel.Type = objabi.R_TLS_LE + rel.Siz = 4 + rel.Sym = nil + rel.Add = int64(v) + v = 0 + } + + if v == 0 && rel.Siz == 0 && base != REG_BP && base != REG_R13 { + ab.Put1(byte(0<<6 | reg[base]<<0 | r<<3)) + return + } + + if disp8, ok := toDisp8(v, p, ab); ok && rel.Siz == 0 { + ab.Put2(byte(1<<6|reg[base]<<0|r<<3), disp8) + return + } + + ab.Put1(byte(2<<6 | reg[base]<<0 | r<<3)) + goto putrelv + } + + goto bad + +putrelv: + if rel.Siz != 0 { + if rel.Siz != 4 { + ctxt.Diag("bad rel") + goto bad + } + + r := obj.Addrel(cursym) + *r = rel + r.Off = int32(p.Pc + 
int64(ab.Len())) + } + + ab.PutInt32(v) + return + +bad: + ctxt.Diag("asmand: bad address %v", obj.Dconv(p, a)) +} + +func (ab *AsmBuf) asmand(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, a *obj.Addr, ra *obj.Addr) { + ab.asmandsz(ctxt, cursym, p, a, reg[ra.Reg], regrex[ra.Reg], 0) +} + +func (ab *AsmBuf) asmando(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, a *obj.Addr, o int) { + ab.asmandsz(ctxt, cursym, p, a, o, 0, 0) +} + +func bytereg(a *obj.Addr, t *uint8) { + if a.Type == obj.TYPE_REG && a.Index == REG_NONE && (REG_AX <= a.Reg && a.Reg <= REG_R15) { + a.Reg += REG_AL - REG_AX + *t = 0 + } +} + +func unbytereg(a *obj.Addr, t *uint8) { + if a.Type == obj.TYPE_REG && a.Index == REG_NONE && (REG_AL <= a.Reg && a.Reg <= REG_R15B) { + a.Reg += REG_AX - REG_AL + *t = 0 + } +} + +const ( + movLit uint8 = iota // Like Zlit + movRegMem + movMemReg + movRegMem2op + movMemReg2op + movFullPtr // Load full pointer, trash heap (unsupported) + movDoubleShift + movTLSReg +) + +var ymovtab = []movtab{ + // push + {APUSHL, Ycs, Ynone, Ynone, movLit, [4]uint8{0x0e, 0}}, + {APUSHL, Yss, Ynone, Ynone, movLit, [4]uint8{0x16, 0}}, + {APUSHL, Yds, Ynone, Ynone, movLit, [4]uint8{0x1e, 0}}, + {APUSHL, Yes, Ynone, Ynone, movLit, [4]uint8{0x06, 0}}, + {APUSHL, Yfs, Ynone, Ynone, movLit, [4]uint8{0x0f, 0xa0, 0}}, + {APUSHL, Ygs, Ynone, Ynone, movLit, [4]uint8{0x0f, 0xa8, 0}}, + {APUSHQ, Yfs, Ynone, Ynone, movLit, [4]uint8{0x0f, 0xa0, 0}}, + {APUSHQ, Ygs, Ynone, Ynone, movLit, [4]uint8{0x0f, 0xa8, 0}}, + {APUSHW, Ycs, Ynone, Ynone, movLit, [4]uint8{Pe, 0x0e, 0}}, + {APUSHW, Yss, Ynone, Ynone, movLit, [4]uint8{Pe, 0x16, 0}}, + {APUSHW, Yds, Ynone, Ynone, movLit, [4]uint8{Pe, 0x1e, 0}}, + {APUSHW, Yes, Ynone, Ynone, movLit, [4]uint8{Pe, 0x06, 0}}, + {APUSHW, Yfs, Ynone, Ynone, movLit, [4]uint8{Pe, 0x0f, 0xa0, 0}}, + {APUSHW, Ygs, Ynone, Ynone, movLit, [4]uint8{Pe, 0x0f, 0xa8, 0}}, + + // pop + {APOPL, Ynone, Ynone, Yds, movLit, [4]uint8{0x1f, 0}}, + {APOPL, Ynone, Ynone, Yes, movLit, 
[4]uint8{0x07, 0}}, + {APOPL, Ynone, Ynone, Yss, movLit, [4]uint8{0x17, 0}}, + {APOPL, Ynone, Ynone, Yfs, movLit, [4]uint8{0x0f, 0xa1, 0}}, + {APOPL, Ynone, Ynone, Ygs, movLit, [4]uint8{0x0f, 0xa9, 0}}, + {APOPQ, Ynone, Ynone, Yfs, movLit, [4]uint8{0x0f, 0xa1, 0}}, + {APOPQ, Ynone, Ynone, Ygs, movLit, [4]uint8{0x0f, 0xa9, 0}}, + {APOPW, Ynone, Ynone, Yds, movLit, [4]uint8{Pe, 0x1f, 0}}, + {APOPW, Ynone, Ynone, Yes, movLit, [4]uint8{Pe, 0x07, 0}}, + {APOPW, Ynone, Ynone, Yss, movLit, [4]uint8{Pe, 0x17, 0}}, + {APOPW, Ynone, Ynone, Yfs, movLit, [4]uint8{Pe, 0x0f, 0xa1, 0}}, + {APOPW, Ynone, Ynone, Ygs, movLit, [4]uint8{Pe, 0x0f, 0xa9, 0}}, + + // mov seg + {AMOVW, Yes, Ynone, Yml, movRegMem, [4]uint8{0x8c, 0, 0, 0}}, + {AMOVW, Ycs, Ynone, Yml, movRegMem, [4]uint8{0x8c, 1, 0, 0}}, + {AMOVW, Yss, Ynone, Yml, movRegMem, [4]uint8{0x8c, 2, 0, 0}}, + {AMOVW, Yds, Ynone, Yml, movRegMem, [4]uint8{0x8c, 3, 0, 0}}, + {AMOVW, Yfs, Ynone, Yml, movRegMem, [4]uint8{0x8c, 4, 0, 0}}, + {AMOVW, Ygs, Ynone, Yml, movRegMem, [4]uint8{0x8c, 5, 0, 0}}, + {AMOVW, Yml, Ynone, Yes, movMemReg, [4]uint8{0x8e, 0, 0, 0}}, + {AMOVW, Yml, Ynone, Ycs, movMemReg, [4]uint8{0x8e, 1, 0, 0}}, + {AMOVW, Yml, Ynone, Yss, movMemReg, [4]uint8{0x8e, 2, 0, 0}}, + {AMOVW, Yml, Ynone, Yds, movMemReg, [4]uint8{0x8e, 3, 0, 0}}, + {AMOVW, Yml, Ynone, Yfs, movMemReg, [4]uint8{0x8e, 4, 0, 0}}, + {AMOVW, Yml, Ynone, Ygs, movMemReg, [4]uint8{0x8e, 5, 0, 0}}, + + // mov cr + {AMOVL, Ycr0, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x20, 0, 0}}, + {AMOVL, Ycr2, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x20, 2, 0}}, + {AMOVL, Ycr3, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x20, 3, 0}}, + {AMOVL, Ycr4, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x20, 4, 0}}, + {AMOVL, Ycr8, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x20, 8, 0}}, + {AMOVQ, Ycr0, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x20, 0, 0}}, + {AMOVQ, Ycr2, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x20, 2, 0}}, + {AMOVQ, Ycr3, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 
0x20, 3, 0}}, + {AMOVQ, Ycr4, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x20, 4, 0}}, + {AMOVQ, Ycr8, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x20, 8, 0}}, + {AMOVL, Yrl, Ynone, Ycr0, movMemReg2op, [4]uint8{0x0f, 0x22, 0, 0}}, + {AMOVL, Yrl, Ynone, Ycr2, movMemReg2op, [4]uint8{0x0f, 0x22, 2, 0}}, + {AMOVL, Yrl, Ynone, Ycr3, movMemReg2op, [4]uint8{0x0f, 0x22, 3, 0}}, + {AMOVL, Yrl, Ynone, Ycr4, movMemReg2op, [4]uint8{0x0f, 0x22, 4, 0}}, + {AMOVL, Yrl, Ynone, Ycr8, movMemReg2op, [4]uint8{0x0f, 0x22, 8, 0}}, + {AMOVQ, Yrl, Ynone, Ycr0, movMemReg2op, [4]uint8{0x0f, 0x22, 0, 0}}, + {AMOVQ, Yrl, Ynone, Ycr2, movMemReg2op, [4]uint8{0x0f, 0x22, 2, 0}}, + {AMOVQ, Yrl, Ynone, Ycr3, movMemReg2op, [4]uint8{0x0f, 0x22, 3, 0}}, + {AMOVQ, Yrl, Ynone, Ycr4, movMemReg2op, [4]uint8{0x0f, 0x22, 4, 0}}, + {AMOVQ, Yrl, Ynone, Ycr8, movMemReg2op, [4]uint8{0x0f, 0x22, 8, 0}}, + + // mov dr + {AMOVL, Ydr0, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x21, 0, 0}}, + {AMOVL, Ydr6, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x21, 6, 0}}, + {AMOVL, Ydr7, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x21, 7, 0}}, + {AMOVQ, Ydr0, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x21, 0, 0}}, + {AMOVQ, Ydr2, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x21, 2, 0}}, + {AMOVQ, Ydr3, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x21, 3, 0}}, + {AMOVQ, Ydr6, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x21, 6, 0}}, + {AMOVQ, Ydr7, Ynone, Yrl, movRegMem2op, [4]uint8{0x0f, 0x21, 7, 0}}, + {AMOVL, Yrl, Ynone, Ydr0, movMemReg2op, [4]uint8{0x0f, 0x23, 0, 0}}, + {AMOVL, Yrl, Ynone, Ydr6, movMemReg2op, [4]uint8{0x0f, 0x23, 6, 0}}, + {AMOVL, Yrl, Ynone, Ydr7, movMemReg2op, [4]uint8{0x0f, 0x23, 7, 0}}, + {AMOVQ, Yrl, Ynone, Ydr0, movMemReg2op, [4]uint8{0x0f, 0x23, 0, 0}}, + {AMOVQ, Yrl, Ynone, Ydr2, movMemReg2op, [4]uint8{0x0f, 0x23, 2, 0}}, + {AMOVQ, Yrl, Ynone, Ydr3, movMemReg2op, [4]uint8{0x0f, 0x23, 3, 0}}, + {AMOVQ, Yrl, Ynone, Ydr6, movMemReg2op, [4]uint8{0x0f, 0x23, 6, 0}}, + {AMOVQ, Yrl, Ynone, Ydr7, movMemReg2op, 
[4]uint8{0x0f, 0x23, 7, 0}}, + + // mov tr + {AMOVL, Ytr6, Ynone, Yml, movRegMem2op, [4]uint8{0x0f, 0x24, 6, 0}}, + {AMOVL, Ytr7, Ynone, Yml, movRegMem2op, [4]uint8{0x0f, 0x24, 7, 0}}, + {AMOVL, Yml, Ynone, Ytr6, movMemReg2op, [4]uint8{0x0f, 0x26, 6, 0xff}}, + {AMOVL, Yml, Ynone, Ytr7, movMemReg2op, [4]uint8{0x0f, 0x26, 7, 0xff}}, + + // lgdt, sgdt, lidt, sidt + {AMOVL, Ym, Ynone, Ygdtr, movMemReg2op, [4]uint8{0x0f, 0x01, 2, 0}}, + {AMOVL, Ygdtr, Ynone, Ym, movRegMem2op, [4]uint8{0x0f, 0x01, 0, 0}}, + {AMOVL, Ym, Ynone, Yidtr, movMemReg2op, [4]uint8{0x0f, 0x01, 3, 0}}, + {AMOVL, Yidtr, Ynone, Ym, movRegMem2op, [4]uint8{0x0f, 0x01, 1, 0}}, + {AMOVQ, Ym, Ynone, Ygdtr, movMemReg2op, [4]uint8{0x0f, 0x01, 2, 0}}, + {AMOVQ, Ygdtr, Ynone, Ym, movRegMem2op, [4]uint8{0x0f, 0x01, 0, 0}}, + {AMOVQ, Ym, Ynone, Yidtr, movMemReg2op, [4]uint8{0x0f, 0x01, 3, 0}}, + {AMOVQ, Yidtr, Ynone, Ym, movRegMem2op, [4]uint8{0x0f, 0x01, 1, 0}}, + + // lldt, sldt + {AMOVW, Yml, Ynone, Yldtr, movMemReg2op, [4]uint8{0x0f, 0x00, 2, 0}}, + {AMOVW, Yldtr, Ynone, Yml, movRegMem2op, [4]uint8{0x0f, 0x00, 0, 0}}, + + // lmsw, smsw + {AMOVW, Yml, Ynone, Ymsw, movMemReg2op, [4]uint8{0x0f, 0x01, 6, 0}}, + {AMOVW, Ymsw, Ynone, Yml, movRegMem2op, [4]uint8{0x0f, 0x01, 4, 0}}, + + // ltr, str + {AMOVW, Yml, Ynone, Ytask, movMemReg2op, [4]uint8{0x0f, 0x00, 3, 0}}, + {AMOVW, Ytask, Ynone, Yml, movRegMem2op, [4]uint8{0x0f, 0x00, 1, 0}}, + + /* load full pointer - unsupported + {AMOVL, Yml, Ycol, movFullPtr, [4]uint8{0, 0, 0, 0}}, + {AMOVW, Yml, Ycol, movFullPtr, [4]uint8{Pe, 0, 0, 0}}, + */ + + // double shift + {ASHLL, Yi8, Yrl, Yml, movDoubleShift, [4]uint8{0xa4, 0xa5, 0, 0}}, + {ASHLL, Ycl, Yrl, Yml, movDoubleShift, [4]uint8{0xa4, 0xa5, 0, 0}}, + {ASHLL, Ycx, Yrl, Yml, movDoubleShift, [4]uint8{0xa4, 0xa5, 0, 0}}, + {ASHRL, Yi8, Yrl, Yml, movDoubleShift, [4]uint8{0xac, 0xad, 0, 0}}, + {ASHRL, Ycl, Yrl, Yml, movDoubleShift, [4]uint8{0xac, 0xad, 0, 0}}, + {ASHRL, Ycx, Yrl, Yml, movDoubleShift, [4]uint8{0xac, 
0xad, 0, 0}},
	{ASHLQ, Yi8, Yrl, Yml, movDoubleShift, [4]uint8{Pw, 0xa4, 0xa5, 0}},
	{ASHLQ, Ycl, Yrl, Yml, movDoubleShift, [4]uint8{Pw, 0xa4, 0xa5, 0}},
	{ASHLQ, Ycx, Yrl, Yml, movDoubleShift, [4]uint8{Pw, 0xa4, 0xa5, 0}},
	{ASHRQ, Yi8, Yrl, Yml, movDoubleShift, [4]uint8{Pw, 0xac, 0xad, 0}},
	{ASHRQ, Ycl, Yrl, Yml, movDoubleShift, [4]uint8{Pw, 0xac, 0xad, 0}},
	{ASHRQ, Ycx, Yrl, Yml, movDoubleShift, [4]uint8{Pw, 0xac, 0xad, 0}},
	{ASHLW, Yi8, Yrl, Yml, movDoubleShift, [4]uint8{Pe, 0xa4, 0xa5, 0}},
	{ASHLW, Ycl, Yrl, Yml, movDoubleShift, [4]uint8{Pe, 0xa4, 0xa5, 0}},
	{ASHLW, Ycx, Yrl, Yml, movDoubleShift, [4]uint8{Pe, 0xa4, 0xa5, 0}},
	{ASHRW, Yi8, Yrl, Yml, movDoubleShift, [4]uint8{Pe, 0xac, 0xad, 0}},
	{ASHRW, Ycl, Yrl, Yml, movDoubleShift, [4]uint8{Pe, 0xac, 0xad, 0}},
	{ASHRW, Ycx, Yrl, Yml, movDoubleShift, [4]uint8{Pe, 0xac, 0xad, 0}},

	// load TLS base
	{AMOVL, Ytls, Ynone, Yrl, movTLSReg, [4]uint8{0, 0, 0, 0}},
	{AMOVQ, Ytls, Ynone, Yrl, movTLSReg, [4]uint8{0, 0, 0, 0}},
	{0, 0, 0, 0, 0, [4]uint8{}}, // zero sentinel terminates the table
}

// isax reports whether operand a involves AX (or its byte halves AL/AH),
// either as the register itself or as the index register.
func isax(a *obj.Addr) bool {
	switch a.Reg {
	case REG_AX, REG_AL, REG_AH:
		return true
	}

	return a.Index == REG_AX
}

// subreg rewrites every use of register from in p — source and destination
// register and index fields — to register to, clearing the cached operand
// classes (Ft/Tt) so they are recomputed.
func subreg(p *obj.Prog, from int, to int) {
	if false { /* debug['Q'] */
		fmt.Printf("\n%v\ts/%v/%v/\n", p, rconv(from), rconv(to))
	}

	if int(p.From.Reg) == from {
		p.From.Reg = int16(to)
		p.Ft = 0
	}

	if int(p.To.Reg) == from {
		p.To.Reg = int16(to)
		p.Tt = 0
	}

	if int(p.From.Index) == from {
		p.From.Index = int16(to)
		p.Ft = 0
	}

	if int(p.To.Index) == from {
		p.To.Index = int16(to)
		p.Tt = 0
	}

	if false { /* debug['Q'] */
		fmt.Printf("%v\n", p)
	}
}

// mediaop emits the prefix/escape bytes for a media (SSE-style) opcode,
// advancing z past any extra opcode byte it consumes from o.op, and
// returns the updated index.
func (ab *AsmBuf) mediaop(ctxt *obj.Link, o *Optab, op int, osize int, z int) int {
	switch op {
	case Pm, Pe, Pf2, Pf3:
		if osize != 1 {
			if op != Pm {
				ab.Put1(byte(op))
			}
			ab.Put1(Pm)
			z++
			op = int(o.op[z])
			break
		}
		fallthrough

	default:
		// Emit the escape byte only if it is not already the last byte.
		if ab.Len() == 0 || ab.Last() != Pm {
			ab.Put1(Pm)
		}
	}
	ab.Put1(byte(op))
	return z
}

// bpduff1 is raw machine code that saves BP and points it at a spill
// slot below SP; emitted around duffcopy/duffzero calls (Zcallduff).
var bpduff1 = []byte{
	0x48, 0x89, 0x6c, 0x24, 0xf0, // MOVQ BP, -16(SP)
	0x48, 0x8d, 0x6c, 0x24, 0xf0, // LEAQ -16(SP), BP
}

// bpduff2 is raw machine code that restores BP afterwards.
var bpduff2 = []byte{
	0x48, 0x8b, 0x6d, 0x00, // MOVQ 0(BP), BP
}

// asmevex emits the EVEX prefix and opcode byte.
// In addition to asmvex r/m, vvvv and reg fields also requires optional
// K-masking register.
//
// Expects asmbuf.evex to be properly initialized.
func (ab *AsmBuf) asmevex(ctxt *obj.Link, p *obj.Prog, rm, v, r, k *obj.Addr) {
	ab.evexflag = true
	evex := ab.evex

	// EVEX stores the R/X/B extension bits inverted: 1 means "not extended".
	rexR := byte(1)
	evexR := byte(1)
	rexX := byte(1)
	rexB := byte(1)
	if r != nil {
		if regrex[r.Reg]&Rxr != 0 {
			rexR = 0 // "ModR/M.reg" selector 4th bit.
		}
		if regrex[r.Reg]&RxrEvex != 0 {
			evexR = 0 // "ModR/M.reg" selector 5th bit.
		}
	}
	if rm != nil {
		if rm.Index == REG_NONE && regrex[rm.Reg]&RxrEvex != 0 {
			rexX = 0
		} else if regrex[rm.Index]&Rxx != 0 {
			rexX = 0
		}
		if regrex[rm.Reg]&Rxb != 0 {
			rexB = 0
		}
	}
	// P0 = [R][X][B][R'][00][mm]
	p0 := (rexR << 7) |
		(rexX << 6) |
		(rexB << 5) |
		(evexR << 4) |
		(0 << 2) |
		(evex.M() << 0)

	vexV := byte(0)
	if v != nil {
		// 4bit-wide reg index.
		vexV = byte(reg[v.Reg]|(regrex[v.Reg]&Rxr)<<1) & 0xF
	}
	vexV ^= 0x0F // vvvv is stored inverted
	// P1 = [W][vvvv][1][pp]
	p1 := (evex.W() << 7) |
		(vexV << 3) |
		(1 << 2) |
		(evex.P() << 0)

	// Validate and apply the instruction's EVEX suffix bits (.Z etc.).
	suffix := evexSuffixMap[p.Scond]
	evexZ := byte(0)
	evexLL := evex.L()
	evexB := byte(0)
	evexV := byte(1)
	evexA := byte(0)
	if suffix.zeroing {
		if !evex.ZeroingEnabled() {
			ctxt.Diag("unsupported zeroing: %v", p)
		}
		if k == nil {
			// When you request zeroing you must specify a mask register.
			// See issue 57952.
			ctxt.Diag("mask register must be specified for .Z instructions: %v", p)
		} else if k.Reg == REG_K0 {
			// The mask register must not be K0. That restriction is already
			// handled by the Yknot0 restriction in the opcode tables, so we
			// won't ever reach here.
			// But put something sensible here just in case.
			ctxt.Diag("mask register must not be K0 for .Z instructions: %v", p)
		}
		evexZ = 1
	}
	// Fold the rounding/broadcast/SAE suffix into the b and L'L bits,
	// diagnosing combinations the instruction does not support.
	switch {
	case suffix.rounding != rcUnset:
		if rm != nil && rm.Type == obj.TYPE_MEM {
			ctxt.Diag("illegal rounding with memory argument: %v", p)
		} else if !evex.RoundingEnabled() {
			ctxt.Diag("unsupported rounding: %v", p)
		}
		evexB = 1
		evexLL = suffix.rounding
	case suffix.broadcast:
		if rm == nil || rm.Type != obj.TYPE_MEM {
			ctxt.Diag("illegal broadcast without memory argument: %v", p)
		} else if !evex.BroadcastEnabled() {
			ctxt.Diag("unsupported broadcast: %v", p)
		}
		evexB = 1
	case suffix.sae:
		if rm != nil && rm.Type == obj.TYPE_MEM {
			ctxt.Diag("illegal SAE with memory argument: %v", p)
		} else if !evex.SaeEnabled() {
			ctxt.Diag("unsupported SAE: %v", p)
		}
		evexB = 1
	}
	if rm != nil && regrex[rm.Index]&RxrEvex != 0 {
		evexV = 0
	} else if v != nil && regrex[v.Reg]&RxrEvex != 0 {
		evexV = 0 // VSR selector 5th bit.
	}
	if k != nil {
		evexA = byte(reg[k.Reg])
	}
	// P2 = [z][L'L][b][V'][aaa]
	p2 := (evexZ << 7) |
		(evexLL << 5) |
		(evexB << 4) |
		(evexV << 3) |
		(evexA << 0)

	const evexEscapeByte = 0x62
	ab.Put4(evexEscapeByte, p0, p1, p2)
	ab.Put1(evex.opcode)
}

// Emit VEX prefix and opcode byte.
// The three addresses are the r/m, vvvv, and reg fields.
// The reg and rm arguments appear in the same order as the
// arguments to asmand, which typically follows the call to asmvex.
// The final two arguments are the VEX prefix (see encoding above)
// and the opcode byte.
// For details about vex prefix see:
// https://en.wikipedia.org/wiki/VEX_prefix#Technical_description
func (ab *AsmBuf) asmvex(ctxt *obj.Link, rm, v, r *obj.Addr, vex, opcode uint8) {
	ab.vexflag = true
	rexR := 0
	if r != nil {
		rexR = regrex[r.Reg] & Rxr
	}
	rexB := 0
	rexX := 0
	if rm != nil {
		rexB = regrex[rm.Reg] & Rxb
		rexX = regrex[rm.Index] & Rxx
	}
	vexM := (vex >> 3) & 0x7
	vexWLP := vex & 0x87
	vexV := byte(0)
	if v != nil {
		// 4-bit register index including the REX extension bit.
		vexV = byte(reg[v.Reg]|(regrex[v.Reg]&Rxr)<<1) & 0xF
	}
	vexV ^= 0xF // vvvv is stored inverted
	if vexM == 1 && (rexX|rexB) == 0 && vex&vexW1 == 0 {
		// Can use 2-byte encoding.
		ab.Put2(0xc5, byte(rexR<<5)^0x80|vexV<<3|vexWLP)
	} else {
		// Must use 3-byte encoding.
		ab.Put3(0xc4,
			(byte(rexR|rexX|rexB)<<5)^0xE0|vexM,
			vexV<<3|vexWLP,
		)
	}
	ab.Put1(opcode)
}

// regIndex returns register index that fits in 5 bits.
//
// R : 3 bit | legacy instructions | N/A
// [R/V]EX.R : 1 bit | REX / VEX extension bit | Rxr
// EVEX.R : 1 bit | EVEX extension bit | RxrEvex
//
// Examples:
//
// REG_Z30 => 30
// REG_X15 => 15
// REG_R9 => 9
// REG_AX => 0
func regIndex(r int16) int {
	lower3bits := reg[r]
	high4bit := regrex[r] & Rxr << 1
	high5bit := regrex[r] & RxrEvex << 0
	return lower3bits | high4bit | high5bit
}

// avx2gatherValid reports whether p satisfies AVX2 gather constraints.
// Reports errors via ctxt.
func avx2gatherValid(ctxt *obj.Link, p *obj.Prog) bool {
	// If any pair of the index, mask, or destination registers
	// are the same, illegal instruction trap (#UD) is triggered.
	index := regIndex(p.GetFrom3().Index)
	mask := regIndex(p.From.Reg)
	dest := regIndex(p.To.Reg)
	if dest == mask || dest == index || mask == index {
		ctxt.Diag("mask, index, and destination registers should be distinct: %v", p)
		return false
	}

	return true
}

// avx512gatherValid reports whether p satisfies AVX512 gather constraints.
// Reports errors via ctxt.
func avx512gatherValid(ctxt *obj.Link, p *obj.Prog) bool {
	// Illegal instruction trap (#UD) is triggered if the destination vector
	// register is the same as index vector in VSIB.
	index := regIndex(p.From.Index)
	dest := regIndex(p.To.Reg)
	if dest == index {
		ctxt.Diag("index and destination registers should be distinct: %v", p)
		return false
	}

	return true
}

// doasm assembles the single instruction p into ab: it looks up the optab
// entry, emits any operand prefixes, matches an encoding row, and encodes
// the opcode and operands.
func (ab *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) {
	o := opindex[p.As&obj.AMask]

	if o == nil {
		ctxt.Diag("asmins: missing op %v", p)
		return
	}

	// Emit any per-operand prefixes (see prefixof).
	if pre := prefixof(ctxt, &p.From); pre != 0 {
		ab.Put1(byte(pre))
	}
	if pre := prefixof(ctxt, &p.To); pre != 0 {
		ab.Put1(byte(pre))
	}

	// Checks to warn about instruction/arguments combinations that
	// will unconditionally trigger illegal instruction trap (#UD).
	switch p.As {
	case AVGATHERDPD,
		AVGATHERQPD,
		AVGATHERDPS,
		AVGATHERQPS,
		AVPGATHERDD,
		AVPGATHERQD,
		AVPGATHERDQ,
		AVPGATHERQQ:
		if p.GetFrom3() == nil {
			// gathers need a 3rd arg. See issue 58822.
			ctxt.Diag("need a third arg for gather instruction: %v", p)
			return
		}
		// AVX512 gather requires explicit K mask.
+ if p.GetFrom3().Reg >= REG_K0 && p.GetFrom3().Reg <= REG_K7 { + if !avx512gatherValid(ctxt, p) { + return + } + } else { + if !avx2gatherValid(ctxt, p) { + return + } + } + } + + if p.Ft == 0 { + p.Ft = uint8(oclass(ctxt, p, &p.From)) + } + if p.Tt == 0 { + p.Tt = uint8(oclass(ctxt, p, &p.To)) + } + + ft := int(p.Ft) * Ymax + var f3t int + tt := int(p.Tt) * Ymax + + xo := obj.Bool2int(o.op[0] == 0x0f) + z := 0 + var a *obj.Addr + var l int + var op int + var q *obj.Prog + var r *obj.Reloc + var rel obj.Reloc + var v int64 + + args := make([]int, 0, argListMax) + if ft != Ynone*Ymax { + args = append(args, ft) + } + for i := range p.RestArgs { + args = append(args, oclass(ctxt, p, &p.RestArgs[i].Addr)*Ymax) + } + if tt != Ynone*Ymax { + args = append(args, tt) + } + + for _, yt := range o.ytab { + // ytab matching is purely args-based, + // but AVX512 suffixes like "Z" or "RU_SAE" will + // add EVEX-only filter that will reject non-EVEX matches. + // + // Consider "VADDPD.BCST 2032(DX), X0, X0". + // Without this rule, operands will lead to VEX-encoded form + // and produce "c5b15813" encoding. + if !yt.match(args) { + // "xo" is always zero for VEX/EVEX encoded insts. + z += int(yt.zoffset) + xo + } else { + if p.Scond != 0 && !evexZcase(yt.zcase) { + // Do not signal error and continue to search + // for matching EVEX-encoded form. 
+ z += int(yt.zoffset) + continue + } + + switch o.prefix { + case Px1: // first option valid only in 32-bit mode + if ctxt.Arch.Family == sys.AMD64 && z == 0 { + z += int(yt.zoffset) + xo + continue + } + case Pq: // 16 bit escape and opcode escape + ab.Put2(Pe, Pm) + + case Pq3: // 16 bit escape and opcode escape + REX.W + ab.rexflag |= Pw + ab.Put2(Pe, Pm) + + case Pq4: // 66 0F 38 + ab.Put3(0x66, 0x0F, 0x38) + + case Pq4w: // 66 0F 38 + REX.W + ab.rexflag |= Pw + ab.Put3(0x66, 0x0F, 0x38) + + case Pq5: // F3 0F 38 + ab.Put3(0xF3, 0x0F, 0x38) + + case Pq5w: // F3 0F 38 + REX.W + ab.rexflag |= Pw + ab.Put3(0xF3, 0x0F, 0x38) + + case Pf2, // xmm opcode escape + Pf3: + ab.Put2(o.prefix, Pm) + + case Pef3: + ab.Put3(Pe, Pf3, Pm) + + case Pfw: // xmm opcode escape + REX.W + ab.rexflag |= Pw + ab.Put2(Pf3, Pm) + + case Pm: // opcode escape + ab.Put1(Pm) + + case Pe: // 16 bit escape + ab.Put1(Pe) + + case Pw: // 64-bit escape + if ctxt.Arch.Family != sys.AMD64 { + ctxt.Diag("asmins: illegal 64: %v", p) + } + ab.rexflag |= Pw + + case Pw8: // 64-bit escape if z >= 8 + if z >= 8 { + if ctxt.Arch.Family != sys.AMD64 { + ctxt.Diag("asmins: illegal 64: %v", p) + } + ab.rexflag |= Pw + } + + case Pb: // botch + if ctxt.Arch.Family != sys.AMD64 && (isbadbyte(&p.From) || isbadbyte(&p.To)) { + goto bad + } + // NOTE(rsc): This is probably safe to do always, + // but when enabled it chooses different encodings + // than the old cmd/internal/obj/i386 code did, + // which breaks our "same bits out" checks. + // In particular, CMPB AX, $0 encodes as 80 f8 00 + // in the original obj/i386, and it would encode + // (using a valid, shorter form) as 3c 00 if we enabled + // the call to bytereg here. 
+ if ctxt.Arch.Family == sys.AMD64 { + bytereg(&p.From, &p.Ft) + bytereg(&p.To, &p.Tt) + } + + case P32: // 32 bit but illegal if 64-bit mode + if ctxt.Arch.Family == sys.AMD64 { + ctxt.Diag("asmins: illegal in 64-bit mode: %v", p) + } + + case Py: // 64-bit only, no prefix + if ctxt.Arch.Family != sys.AMD64 { + ctxt.Diag("asmins: illegal in %d-bit mode: %v", ctxt.Arch.RegSize*8, p) + } + + case Py1: // 64-bit only if z < 1, no prefix + if z < 1 && ctxt.Arch.Family != sys.AMD64 { + ctxt.Diag("asmins: illegal in %d-bit mode: %v", ctxt.Arch.RegSize*8, p) + } + + case Py3: // 64-bit only if z < 3, no prefix + if z < 3 && ctxt.Arch.Family != sys.AMD64 { + ctxt.Diag("asmins: illegal in %d-bit mode: %v", ctxt.Arch.RegSize*8, p) + } + } + + if z >= len(o.op) { + log.Fatalf("asmins bad table %v", p) + } + op = int(o.op[z]) + if op == 0x0f { + ab.Put1(byte(op)) + z++ + op = int(o.op[z]) + } + + switch yt.zcase { + default: + ctxt.Diag("asmins: unknown z %d %v", yt.zcase, p) + return + + case Zpseudo: + break + + case Zlit: + ab.PutOpBytesLit(z, &o.op) + + case Zlitr_m: + ab.PutOpBytesLit(z, &o.op) + ab.asmand(ctxt, cursym, p, &p.To, &p.From) + + case Zlitm_r: + ab.PutOpBytesLit(z, &o.op) + ab.asmand(ctxt, cursym, p, &p.From, &p.To) + + case Zlit_m_r: + ab.PutOpBytesLit(z, &o.op) + ab.asmand(ctxt, cursym, p, p.GetFrom3(), &p.To) + + case Zmb_r: + bytereg(&p.From, &p.Ft) + fallthrough + + case Zm_r: + ab.Put1(byte(op)) + ab.asmand(ctxt, cursym, p, &p.From, &p.To) + + case Z_m_r: + ab.Put1(byte(op)) + ab.asmand(ctxt, cursym, p, p.GetFrom3(), &p.To) + + case Zm2_r: + ab.Put2(byte(op), o.op[z+1]) + ab.asmand(ctxt, cursym, p, &p.From, &p.To) + + case Zm_r_xm: + ab.mediaop(ctxt, o, op, int(yt.zoffset), z) + ab.asmand(ctxt, cursym, p, &p.From, &p.To) + + case Zm_r_xm_nr: + ab.rexflag = 0 + ab.mediaop(ctxt, o, op, int(yt.zoffset), z) + ab.asmand(ctxt, cursym, p, &p.From, &p.To) + + case Zm_r_i_xm: + ab.mediaop(ctxt, o, op, int(yt.zoffset), z) + ab.asmand(ctxt, cursym, p, &p.From, 
p.GetFrom3()) + ab.Put1(byte(p.To.Offset)) + + case Zibm_r, Zibr_m: + ab.PutOpBytesLit(z, &o.op) + if yt.zcase == Zibr_m { + ab.asmand(ctxt, cursym, p, &p.To, p.GetFrom3()) + } else { + ab.asmand(ctxt, cursym, p, p.GetFrom3(), &p.To) + } + switch { + default: + ab.Put1(byte(p.From.Offset)) + case yt.args[0] == Yi32 && o.prefix == Pe: + ab.PutInt16(int16(p.From.Offset)) + case yt.args[0] == Yi32: + ab.PutInt32(int32(p.From.Offset)) + } + + case Zaut_r: + ab.Put1(0x8d) // leal + if p.From.Type != obj.TYPE_ADDR { + ctxt.Diag("asmins: Zaut sb type ADDR") + } + p.From.Type = obj.TYPE_MEM + ab.asmand(ctxt, cursym, p, &p.From, &p.To) + p.From.Type = obj.TYPE_ADDR + + case Zm_o: + ab.Put1(byte(op)) + ab.asmando(ctxt, cursym, p, &p.From, int(o.op[z+1])) + + case Zr_m: + ab.Put1(byte(op)) + ab.asmand(ctxt, cursym, p, &p.To, &p.From) + + case Zvex: + ab.asmvex(ctxt, &p.From, p.GetFrom3(), &p.To, o.op[z], o.op[z+1]) + + case Zvex_rm_v_r: + ab.asmvex(ctxt, &p.From, p.GetFrom3(), &p.To, o.op[z], o.op[z+1]) + ab.asmand(ctxt, cursym, p, &p.From, &p.To) + + case Zvex_rm_v_ro: + ab.asmvex(ctxt, &p.From, p.GetFrom3(), &p.To, o.op[z], o.op[z+1]) + ab.asmando(ctxt, cursym, p, &p.From, int(o.op[z+2])) + + case Zvex_i_rm_vo: + ab.asmvex(ctxt, p.GetFrom3(), &p.To, nil, o.op[z], o.op[z+1]) + ab.asmando(ctxt, cursym, p, p.GetFrom3(), int(o.op[z+2])) + ab.Put1(byte(p.From.Offset)) + + case Zvex_i_r_v: + ab.asmvex(ctxt, p.GetFrom3(), &p.To, nil, o.op[z], o.op[z+1]) + regnum := byte(0x7) + if p.GetFrom3().Reg >= REG_X0 && p.GetFrom3().Reg <= REG_X15 { + regnum &= byte(p.GetFrom3().Reg - REG_X0) + } else { + regnum &= byte(p.GetFrom3().Reg - REG_Y0) + } + ab.Put1(o.op[z+2] | regnum) + ab.Put1(byte(p.From.Offset)) + + case Zvex_i_rm_v_r: + imm, from, from3, to := unpackOps4(p) + ab.asmvex(ctxt, from, from3, to, o.op[z], o.op[z+1]) + ab.asmand(ctxt, cursym, p, from, to) + ab.Put1(byte(imm.Offset)) + + case Zvex_i_rm_r: + ab.asmvex(ctxt, p.GetFrom3(), nil, &p.To, o.op[z], o.op[z+1]) + 
ab.asmand(ctxt, cursym, p, p.GetFrom3(), &p.To) + ab.Put1(byte(p.From.Offset)) + + case Zvex_v_rm_r: + ab.asmvex(ctxt, p.GetFrom3(), &p.From, &p.To, o.op[z], o.op[z+1]) + ab.asmand(ctxt, cursym, p, p.GetFrom3(), &p.To) + + case Zvex_r_v_rm: + ab.asmvex(ctxt, &p.To, p.GetFrom3(), &p.From, o.op[z], o.op[z+1]) + ab.asmand(ctxt, cursym, p, &p.To, &p.From) + + case Zvex_rm_r_vo: + ab.asmvex(ctxt, &p.From, &p.To, p.GetFrom3(), o.op[z], o.op[z+1]) + ab.asmando(ctxt, cursym, p, &p.From, int(o.op[z+2])) + + case Zvex_i_r_rm: + ab.asmvex(ctxt, &p.To, nil, p.GetFrom3(), o.op[z], o.op[z+1]) + ab.asmand(ctxt, cursym, p, &p.To, p.GetFrom3()) + ab.Put1(byte(p.From.Offset)) + + case Zvex_hr_rm_v_r: + hr, from, from3, to := unpackOps4(p) + ab.asmvex(ctxt, from, from3, to, o.op[z], o.op[z+1]) + ab.asmand(ctxt, cursym, p, from, to) + ab.Put1(byte(regIndex(hr.Reg) << 4)) + + case Zevex_k_rmo: + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, &p.To, nil, nil, &p.From) + ab.asmando(ctxt, cursym, p, &p.To, int(o.op[z+3])) + + case Zevex_i_rm_vo: + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, p.GetFrom3(), &p.To, nil, nil) + ab.asmando(ctxt, cursym, p, p.GetFrom3(), int(o.op[z+3])) + ab.Put1(byte(p.From.Offset)) + + case Zevex_i_rm_k_vo: + imm, from, kmask, to := unpackOps4(p) + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, from, to, nil, kmask) + ab.asmando(ctxt, cursym, p, from, int(o.op[z+3])) + ab.Put1(byte(imm.Offset)) + + case Zevex_i_r_rm: + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, &p.To, nil, p.GetFrom3(), nil) + ab.asmand(ctxt, cursym, p, &p.To, p.GetFrom3()) + ab.Put1(byte(p.From.Offset)) + + case Zevex_i_r_k_rm: + imm, from, kmask, to := unpackOps4(p) + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, to, nil, from, kmask) + ab.asmand(ctxt, cursym, p, to, from) + ab.Put1(byte(imm.Offset)) + + case Zevex_i_rm_r: + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, p.GetFrom3(), nil, &p.To, nil) + ab.asmand(ctxt, cursym, p, 
p.GetFrom3(), &p.To) + ab.Put1(byte(p.From.Offset)) + + case Zevex_i_rm_k_r: + imm, from, kmask, to := unpackOps4(p) + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, from, nil, to, kmask) + ab.asmand(ctxt, cursym, p, from, to) + ab.Put1(byte(imm.Offset)) + + case Zevex_i_rm_v_r: + imm, from, from3, to := unpackOps4(p) + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, from, from3, to, nil) + ab.asmand(ctxt, cursym, p, from, to) + ab.Put1(byte(imm.Offset)) + + case Zevex_i_rm_v_k_r: + imm, from, from3, kmask, to := unpackOps5(p) + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, from, from3, to, kmask) + ab.asmand(ctxt, cursym, p, from, to) + ab.Put1(byte(imm.Offset)) + + case Zevex_r_v_rm: + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, &p.To, p.GetFrom3(), &p.From, nil) + ab.asmand(ctxt, cursym, p, &p.To, &p.From) + + case Zevex_rm_v_r: + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, &p.From, p.GetFrom3(), &p.To, nil) + ab.asmand(ctxt, cursym, p, &p.From, &p.To) + + case Zevex_rm_k_r: + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, &p.From, nil, &p.To, p.GetFrom3()) + ab.asmand(ctxt, cursym, p, &p.From, &p.To) + + case Zevex_r_k_rm: + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, &p.To, nil, &p.From, p.GetFrom3()) + ab.asmand(ctxt, cursym, p, &p.To, &p.From) + + case Zevex_rm_v_k_r: + from, from3, kmask, to := unpackOps4(p) + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, from, from3, to, kmask) + ab.asmand(ctxt, cursym, p, from, to) + + case Zevex_r_v_k_rm: + from, from3, kmask, to := unpackOps4(p) + ab.evex = newEVEXBits(z, &o.op) + ab.asmevex(ctxt, p, to, from3, from, kmask) + ab.asmand(ctxt, cursym, p, to, from) + + case Zr_m_xm: + ab.mediaop(ctxt, o, op, int(yt.zoffset), z) + ab.asmand(ctxt, cursym, p, &p.To, &p.From) + + case Zr_m_xm_nr: + ab.rexflag = 0 + ab.mediaop(ctxt, o, op, int(yt.zoffset), z) + ab.asmand(ctxt, cursym, p, &p.To, &p.From) + + case Zo_m: + ab.Put1(byte(op)) + ab.asmando(ctxt, 
cursym, p, &p.To, int(o.op[z+1])) + + case Zcallindreg: + r = obj.Addrel(cursym) + r.Off = int32(p.Pc) + r.Type = objabi.R_CALLIND + r.Siz = 0 + fallthrough + + case Zo_m64: + ab.Put1(byte(op)) + ab.asmandsz(ctxt, cursym, p, &p.To, int(o.op[z+1]), 0, 1) + + case Zm_ibo: + ab.Put1(byte(op)) + ab.asmando(ctxt, cursym, p, &p.From, int(o.op[z+1])) + ab.Put1(byte(vaddr(ctxt, p, &p.To, nil))) + + case Zibo_m: + ab.Put1(byte(op)) + ab.asmando(ctxt, cursym, p, &p.To, int(o.op[z+1])) + ab.Put1(byte(vaddr(ctxt, p, &p.From, nil))) + + case Zibo_m_xm: + z = ab.mediaop(ctxt, o, op, int(yt.zoffset), z) + ab.asmando(ctxt, cursym, p, &p.To, int(o.op[z+1])) + ab.Put1(byte(vaddr(ctxt, p, &p.From, nil))) + + case Z_ib, Zib_: + if yt.zcase == Zib_ { + a = &p.From + } else { + a = &p.To + } + ab.Put1(byte(op)) + if p.As == AXABORT { + ab.Put1(o.op[z+1]) + } + ab.Put1(byte(vaddr(ctxt, p, a, nil))) + + case Zib_rp: + ab.rexflag |= regrex[p.To.Reg] & (Rxb | 0x40) + ab.Put2(byte(op+reg[p.To.Reg]), byte(vaddr(ctxt, p, &p.From, nil))) + + case Zil_rp: + ab.rexflag |= regrex[p.To.Reg] & Rxb + ab.Put1(byte(op + reg[p.To.Reg])) + if o.prefix == Pe { + v = vaddr(ctxt, p, &p.From, nil) + ab.PutInt16(int16(v)) + } else { + ab.relput4(ctxt, cursym, p, &p.From) + } + + case Zo_iw: + ab.Put1(byte(op)) + if p.From.Type != obj.TYPE_NONE { + v = vaddr(ctxt, p, &p.From, nil) + ab.PutInt16(int16(v)) + } + + case Ziq_rp: + v = vaddr(ctxt, p, &p.From, &rel) + l = int(v >> 32) + if l == 0 && rel.Siz != 8 { + ab.rexflag &^= (0x40 | Rxw) + + ab.rexflag |= regrex[p.To.Reg] & Rxb + ab.Put1(byte(0xb8 + reg[p.To.Reg])) + if rel.Type != 0 { + r = obj.Addrel(cursym) + *r = rel + r.Off = int32(p.Pc + int64(ab.Len())) + } + + ab.PutInt32(int32(v)) + } else if l == -1 && uint64(v)&(uint64(1)<<31) != 0 { // sign extend + ab.Put1(0xc7) + ab.asmando(ctxt, cursym, p, &p.To, 0) + + ab.PutInt32(int32(v)) // need all 8 + } else { + ab.rexflag |= regrex[p.To.Reg] & Rxb + ab.Put1(byte(op + reg[p.To.Reg])) + if rel.Type != 0 { + 
r = obj.Addrel(cursym) + *r = rel + r.Off = int32(p.Pc + int64(ab.Len())) + } + + ab.PutInt64(v) + } + + case Zib_rr: + ab.Put1(byte(op)) + ab.asmand(ctxt, cursym, p, &p.To, &p.To) + ab.Put1(byte(vaddr(ctxt, p, &p.From, nil))) + + case Z_il, Zil_: + if yt.zcase == Zil_ { + a = &p.From + } else { + a = &p.To + } + ab.Put1(byte(op)) + if o.prefix == Pe { + v = vaddr(ctxt, p, a, nil) + ab.PutInt16(int16(v)) + } else { + ab.relput4(ctxt, cursym, p, a) + } + + case Zm_ilo, Zilo_m: + ab.Put1(byte(op)) + if yt.zcase == Zilo_m { + a = &p.From + ab.asmando(ctxt, cursym, p, &p.To, int(o.op[z+1])) + } else { + a = &p.To + ab.asmando(ctxt, cursym, p, &p.From, int(o.op[z+1])) + } + + if o.prefix == Pe { + v = vaddr(ctxt, p, a, nil) + ab.PutInt16(int16(v)) + } else { + ab.relput4(ctxt, cursym, p, a) + } + + case Zil_rr: + ab.Put1(byte(op)) + ab.asmand(ctxt, cursym, p, &p.To, &p.To) + if o.prefix == Pe { + v = vaddr(ctxt, p, &p.From, nil) + ab.PutInt16(int16(v)) + } else { + ab.relput4(ctxt, cursym, p, &p.From) + } + + case Z_rp: + ab.rexflag |= regrex[p.To.Reg] & (Rxb | 0x40) + ab.Put1(byte(op + reg[p.To.Reg])) + + case Zrp_: + ab.rexflag |= regrex[p.From.Reg] & (Rxb | 0x40) + ab.Put1(byte(op + reg[p.From.Reg])) + + case Zcallcon, Zjmpcon: + if yt.zcase == Zcallcon { + ab.Put1(byte(op)) + } else { + ab.Put1(o.op[z+1]) + } + r = obj.Addrel(cursym) + r.Off = int32(p.Pc + int64(ab.Len())) + r.Type = objabi.R_PCREL + r.Siz = 4 + r.Add = p.To.Offset + ab.PutInt32(0) + + case Zcallind: + ab.Put2(byte(op), o.op[z+1]) + r = obj.Addrel(cursym) + r.Off = int32(p.Pc + int64(ab.Len())) + if ctxt.Arch.Family == sys.AMD64 { + r.Type = objabi.R_PCREL + } else { + r.Type = objabi.R_ADDR + } + r.Siz = 4 + r.Add = p.To.Offset + r.Sym = p.To.Sym + ab.PutInt32(0) + + case Zcall, Zcallduff: + if p.To.Sym == nil { + ctxt.Diag("call without target") + ctxt.DiagFlush() + log.Fatalf("bad code") + } + + if yt.zcase == Zcallduff && ctxt.Flag_dynlink { + ctxt.Diag("directly calling duff when dynamically 
linking Go") + } + + if yt.zcase == Zcallduff && ctxt.Arch.Family == sys.AMD64 { + // Maintain BP around call, since duffcopy/duffzero can't do it + // (the call jumps into the middle of the function). + // This makes it possible to see call sites for duffcopy/duffzero in + // BP-based profiling tools like Linux perf (which is the + // whole point of maintaining frame pointers in Go). + // MOVQ BP, -16(SP) + // LEAQ -16(SP), BP + ab.Put(bpduff1) + } + ab.Put1(byte(op)) + r = obj.Addrel(cursym) + r.Off = int32(p.Pc + int64(ab.Len())) + r.Sym = p.To.Sym + r.Add = p.To.Offset + r.Type = objabi.R_CALL + r.Siz = 4 + ab.PutInt32(0) + + if yt.zcase == Zcallduff && ctxt.Arch.Family == sys.AMD64 { + // Pop BP pushed above. + // MOVQ 0(BP), BP + ab.Put(bpduff2) + } + + // TODO: jump across functions needs reloc + case Zbr, Zjmp, Zloop: + if p.As == AXBEGIN { + ab.Put1(byte(op)) + } + if p.To.Sym != nil { + if yt.zcase != Zjmp { + ctxt.Diag("branch to ATEXT") + ctxt.DiagFlush() + log.Fatalf("bad code") + } + + ab.Put1(o.op[z+1]) + r = obj.Addrel(cursym) + r.Off = int32(p.Pc + int64(ab.Len())) + r.Sym = p.To.Sym + // Note: R_CALL instead of R_PCREL. R_CALL is more permissive in that + // it can point to a trampoline instead of the destination itself. + r.Type = objabi.R_CALL + r.Siz = 4 + ab.PutInt32(0) + break + } + + // Assumes q is in this function. + // TODO: Check in input, preserve in brchain. + + // Fill in backward jump now. 
+ q = p.To.Target() + + if q == nil { + ctxt.Diag("jmp/branch/loop without target") + ctxt.DiagFlush() + log.Fatalf("bad code") + } + + if p.Back&branchBackwards != 0 { + v = q.Pc - (p.Pc + 2) + if v >= -128 && p.As != AXBEGIN { + if p.As == AJCXZL { + ab.Put1(0x67) + } + ab.Put2(byte(op), byte(v)) + } else if yt.zcase == Zloop { + ctxt.Diag("loop too far: %v", p) + } else { + v -= 5 - 2 + if p.As == AXBEGIN { + v-- + } + if yt.zcase == Zbr { + ab.Put1(0x0f) + v-- + } + + ab.Put1(o.op[z+1]) + ab.PutInt32(int32(v)) + } + + break + } + + // Annotate target; will fill in later. + p.Forwd = q.Rel + + q.Rel = p + if p.Back&branchShort != 0 && p.As != AXBEGIN { + if p.As == AJCXZL { + ab.Put1(0x67) + } + ab.Put2(byte(op), 0) + } else if yt.zcase == Zloop { + ctxt.Diag("loop too far: %v", p) + } else { + if yt.zcase == Zbr { + ab.Put1(0x0f) + } + ab.Put1(o.op[z+1]) + ab.PutInt32(0) + } + + case Zbyte: + v = vaddr(ctxt, p, &p.From, &rel) + if rel.Siz != 0 { + rel.Siz = uint8(op) + r = obj.Addrel(cursym) + *r = rel + r.Off = int32(p.Pc + int64(ab.Len())) + } + + ab.Put1(byte(v)) + if op > 1 { + ab.Put1(byte(v >> 8)) + if op > 2 { + ab.PutInt16(int16(v >> 16)) + if op > 4 { + ab.PutInt32(int32(v >> 32)) + } + } + } + } + + return + } + } + f3t = Ynone * Ymax + if p.GetFrom3() != nil { + f3t = oclass(ctxt, p, p.GetFrom3()) * Ymax + } + for mo := ymovtab; mo[0].as != 0; mo = mo[1:] { + var pp obj.Prog + var t []byte + if p.As == mo[0].as { + if ycover[ft+int(mo[0].ft)] != 0 && ycover[f3t+int(mo[0].f3t)] != 0 && ycover[tt+int(mo[0].tt)] != 0 { + t = mo[0].op[:] + switch mo[0].code { + default: + ctxt.Diag("asmins: unknown mov %d %v", mo[0].code, p) + + case movLit: + for z = 0; t[z] != 0; z++ { + ab.Put1(t[z]) + } + + case movRegMem: + ab.Put1(t[0]) + ab.asmando(ctxt, cursym, p, &p.To, int(t[1])) + + case movMemReg: + ab.Put1(t[0]) + ab.asmando(ctxt, cursym, p, &p.From, int(t[1])) + + case movRegMem2op: // r,m - 2op + ab.Put2(t[0], t[1]) + ab.asmando(ctxt, cursym, p, &p.To, 
int(t[2])) + ab.rexflag |= regrex[p.From.Reg] & (Rxr | 0x40) + + case movMemReg2op: + ab.Put2(t[0], t[1]) + ab.asmando(ctxt, cursym, p, &p.From, int(t[2])) + ab.rexflag |= regrex[p.To.Reg] & (Rxr | 0x40) + + case movFullPtr: + if t[0] != 0 { + ab.Put1(t[0]) + } + switch p.To.Index { + default: + goto bad + + case REG_DS: + ab.Put1(0xc5) + + case REG_SS: + ab.Put2(0x0f, 0xb2) + + case REG_ES: + ab.Put1(0xc4) + + case REG_FS: + ab.Put2(0x0f, 0xb4) + + case REG_GS: + ab.Put2(0x0f, 0xb5) + } + + ab.asmand(ctxt, cursym, p, &p.From, &p.To) + + case movDoubleShift: + if t[0] == Pw { + if ctxt.Arch.Family != sys.AMD64 { + ctxt.Diag("asmins: illegal 64: %v", p) + } + ab.rexflag |= Pw + t = t[1:] + } else if t[0] == Pe { + ab.Put1(Pe) + t = t[1:] + } + + switch p.From.Type { + default: + goto bad + + case obj.TYPE_CONST: + ab.Put2(0x0f, t[0]) + ab.asmandsz(ctxt, cursym, p, &p.To, reg[p.GetFrom3().Reg], regrex[p.GetFrom3().Reg], 0) + ab.Put1(byte(p.From.Offset)) + + case obj.TYPE_REG: + switch p.From.Reg { + default: + goto bad + + case REG_CL, REG_CX: + ab.Put2(0x0f, t[1]) + ab.asmandsz(ctxt, cursym, p, &p.To, reg[p.GetFrom3().Reg], regrex[p.GetFrom3().Reg], 0) + } + } + + // NOTE: The systems listed here are the ones that use the "TLS initial exec" model, + // where you load the TLS base register into a register and then index off that + // register to access the actual TLS variables. Systems that allow direct TLS access + // are handled in prefixof above and should not be listed here. + case movTLSReg: + if ctxt.Arch.Family == sys.AMD64 && p.As != AMOVQ || ctxt.Arch.Family == sys.I386 && p.As != AMOVL { + ctxt.Diag("invalid load of TLS: %v", p) + } + + if ctxt.Arch.Family == sys.I386 { + // NOTE: The systems listed here are the ones that use the "TLS initial exec" model, + // where you load the TLS base register into a register and then index off that + // register to access the actual TLS variables. 
Systems that allow direct TLS access + // are handled in prefixof above and should not be listed here. + switch ctxt.Headtype { + default: + log.Fatalf("unknown TLS base location for %v", ctxt.Headtype) + + case objabi.Hlinux, objabi.Hfreebsd: + if ctxt.Flag_shared { + // Note that this is not generating the same insns as the other cases. + // MOV TLS, dst + // becomes + // call __x86.get_pc_thunk.dst + // movl (gotpc + g@gotntpoff)(dst), dst + // which is encoded as + // call __x86.get_pc_thunk.dst + // movq 0(dst), dst + // and R_CALL & R_TLS_IE relocs. This all assumes the only tls variable we access + // is g, which we can't check here, but will when we assemble the second + // instruction. + dst := p.To.Reg + ab.Put1(0xe8) + r = obj.Addrel(cursym) + r.Off = int32(p.Pc + int64(ab.Len())) + r.Type = objabi.R_CALL + r.Siz = 4 + r.Sym = ctxt.Lookup("__x86.get_pc_thunk." + strings.ToLower(rconv(int(dst)))) + ab.PutInt32(0) + + ab.Put2(0x8B, byte(2<<6|reg[dst]|(reg[dst]<<3))) + r = obj.Addrel(cursym) + r.Off = int32(p.Pc + int64(ab.Len())) + r.Type = objabi.R_TLS_IE + r.Siz = 4 + r.Add = 2 + ab.PutInt32(0) + } else { + // ELF TLS base is 0(GS). + pp.From = p.From + + pp.From.Type = obj.TYPE_MEM + pp.From.Reg = REG_GS + pp.From.Offset = 0 + pp.From.Index = REG_NONE + pp.From.Scale = 0 + ab.Put2(0x65, // GS + 0x8B) + ab.asmand(ctxt, cursym, p, &pp.From, &p.To) + } + case objabi.Hplan9: + pp.From = obj.Addr{} + pp.From.Type = obj.TYPE_MEM + pp.From.Name = obj.NAME_EXTERN + pp.From.Sym = plan9privates + pp.From.Offset = 0 + pp.From.Index = REG_NONE + ab.Put1(0x8B) + ab.asmand(ctxt, cursym, p, &pp.From, &p.To) + } + break + } + + switch ctxt.Headtype { + default: + log.Fatalf("unknown TLS base location for %v", ctxt.Headtype) + + case objabi.Hlinux, objabi.Hfreebsd: + if !ctxt.Flag_shared { + log.Fatalf("unknown TLS base location for linux/freebsd without -shared") + } + // Note that this is not generating the same insn as the other cases. 
+ // MOV TLS, R_to + // becomes + // movq g@gottpoff(%rip), R_to + // which is encoded as + // movq 0(%rip), R_to + // and a R_TLS_IE reloc. This all assumes the only tls variable we access + // is g, which we can't check here, but will when we assemble the second + // instruction. + ab.rexflag = Pw | (regrex[p.To.Reg] & Rxr) + + ab.Put2(0x8B, byte(0x05|(reg[p.To.Reg]<<3))) + r = obj.Addrel(cursym) + r.Off = int32(p.Pc + int64(ab.Len())) + r.Type = objabi.R_TLS_IE + r.Siz = 4 + r.Add = -4 + ab.PutInt32(0) + + case objabi.Hplan9: + pp.From = obj.Addr{} + pp.From.Type = obj.TYPE_MEM + pp.From.Name = obj.NAME_EXTERN + pp.From.Sym = plan9privates + pp.From.Offset = 0 + pp.From.Index = REG_NONE + ab.rexflag |= Pw + ab.Put1(0x8B) + ab.asmand(ctxt, cursym, p, &pp.From, &p.To) + + case objabi.Hsolaris: // TODO(rsc): Delete Hsolaris from list. Should not use this code. See progedit in obj6.c. + // TLS base is 0(FS). + pp.From = p.From + + pp.From.Type = obj.TYPE_MEM + pp.From.Name = obj.NAME_NONE + pp.From.Reg = REG_NONE + pp.From.Offset = 0 + pp.From.Index = REG_NONE + pp.From.Scale = 0 + ab.rexflag |= Pw + ab.Put2(0x64, // FS + 0x8B) + ab.asmand(ctxt, cursym, p, &pp.From, &p.To) + } + } + return + } + } + } + goto bad + +bad: + if ctxt.Arch.Family != sys.AMD64 { + // here, the assembly has failed. + // if it's a byte instruction that has + // unaddressable registers, try to + // exchange registers and reissue the + // instruction with the operands renamed. + pp := *p + + unbytereg(&pp.From, &pp.Ft) + unbytereg(&pp.To, &pp.Tt) + + z := int(p.From.Reg) + if p.From.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI { + // TODO(rsc): Use this code for x86-64 too. It has bug fixes not present in the amd64 code base. + // For now, different to keep bit-for-bit compatibility. 
+ if ctxt.Arch.Family == sys.I386 { + breg := byteswapreg(ctxt, &p.To) + if breg != REG_AX { + ab.Put1(0x87) // xchg lhs,bx + ab.asmando(ctxt, cursym, p, &p.From, reg[breg]) + subreg(&pp, z, breg) + ab.doasm(ctxt, cursym, &pp) + ab.Put1(0x87) // xchg lhs,bx + ab.asmando(ctxt, cursym, p, &p.From, reg[breg]) + } else { + ab.Put1(byte(0x90 + reg[z])) // xchg lsh,ax + subreg(&pp, z, REG_AX) + ab.doasm(ctxt, cursym, &pp) + ab.Put1(byte(0x90 + reg[z])) // xchg lsh,ax + } + return + } + + if isax(&p.To) || p.To.Type == obj.TYPE_NONE { + // We certainly don't want to exchange + // with AX if the op is MUL or DIV. + ab.Put1(0x87) // xchg lhs,bx + ab.asmando(ctxt, cursym, p, &p.From, reg[REG_BX]) + subreg(&pp, z, REG_BX) + ab.doasm(ctxt, cursym, &pp) + ab.Put1(0x87) // xchg lhs,bx + ab.asmando(ctxt, cursym, p, &p.From, reg[REG_BX]) + } else { + ab.Put1(byte(0x90 + reg[z])) // xchg lsh,ax + subreg(&pp, z, REG_AX) + ab.doasm(ctxt, cursym, &pp) + ab.Put1(byte(0x90 + reg[z])) // xchg lsh,ax + } + return + } + + z = int(p.To.Reg) + if p.To.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI { + // TODO(rsc): Use this code for x86-64 too. It has bug fixes not present in the amd64 code base. + // For now, different to keep bit-for-bit compatibility. 
+ if ctxt.Arch.Family == sys.I386 { + breg := byteswapreg(ctxt, &p.From) + if breg != REG_AX { + ab.Put1(0x87) //xchg rhs,bx + ab.asmando(ctxt, cursym, p, &p.To, reg[breg]) + subreg(&pp, z, breg) + ab.doasm(ctxt, cursym, &pp) + ab.Put1(0x87) // xchg rhs,bx + ab.asmando(ctxt, cursym, p, &p.To, reg[breg]) + } else { + ab.Put1(byte(0x90 + reg[z])) // xchg rsh,ax + subreg(&pp, z, REG_AX) + ab.doasm(ctxt, cursym, &pp) + ab.Put1(byte(0x90 + reg[z])) // xchg rsh,ax + } + return + } + + if isax(&p.From) { + ab.Put1(0x87) // xchg rhs,bx + ab.asmando(ctxt, cursym, p, &p.To, reg[REG_BX]) + subreg(&pp, z, REG_BX) + ab.doasm(ctxt, cursym, &pp) + ab.Put1(0x87) // xchg rhs,bx + ab.asmando(ctxt, cursym, p, &p.To, reg[REG_BX]) + } else { + ab.Put1(byte(0x90 + reg[z])) // xchg rsh,ax + subreg(&pp, z, REG_AX) + ab.doasm(ctxt, cursym, &pp) + ab.Put1(byte(0x90 + reg[z])) // xchg rsh,ax + } + return + } + } + + ctxt.Diag("%s: invalid instruction: %v", cursym.Name, p) +} + +// byteswapreg returns a byte-addressable register (AX, BX, CX, DX) +// which is not referenced in a. +// If a is empty, it returns BX to account for MULB-like instructions +// that might use DX and AX. 
+func byteswapreg(ctxt *obj.Link, a *obj.Addr) int { + cana, canb, canc, cand := true, true, true, true + if a.Type == obj.TYPE_NONE { + cana, cand = false, false + } + + if a.Type == obj.TYPE_REG || ((a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && a.Name == obj.NAME_NONE) { + switch a.Reg { + case REG_NONE: + cana, cand = false, false + case REG_AX, REG_AL, REG_AH: + cana = false + case REG_BX, REG_BL, REG_BH: + canb = false + case REG_CX, REG_CL, REG_CH: + canc = false + case REG_DX, REG_DL, REG_DH: + cand = false + } + } + + if a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR { + switch a.Index { + case REG_AX: + cana = false + case REG_BX: + canb = false + case REG_CX: + canc = false + case REG_DX: + cand = false + } + } + + switch { + case cana: + return REG_AX + case canb: + return REG_BX + case canc: + return REG_CX + case cand: + return REG_DX + default: + ctxt.Diag("impossible byte register") + ctxt.DiagFlush() + log.Fatalf("bad code") + return 0 + } +} + +func isbadbyte(a *obj.Addr) bool { + return a.Type == obj.TYPE_REG && (REG_BP <= a.Reg && a.Reg <= REG_DI || REG_BPB <= a.Reg && a.Reg <= REG_DIB) +} + +func (ab *AsmBuf) asmins(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { + ab.Reset() + + ab.rexflag = 0 + ab.vexflag = false + ab.evexflag = false + mark := ab.Len() + ab.doasm(ctxt, cursym, p) + if ab.rexflag != 0 && !ab.vexflag && !ab.evexflag { + // as befits the whole approach of the architecture, + // the rex prefix must appear before the first opcode byte + // (and thus after any 66/67/f2/f3/26/2e/3e prefix bytes, but + // before the 0f opcode escape!), or it might be ignored. + // note that the handbook often misleadingly shows 66/f2/f3 in `opcode'. 
+ if ctxt.Arch.Family != sys.AMD64 { + ctxt.Diag("asmins: illegal in mode %d: %v (%d %d)", ctxt.Arch.RegSize*8, p, p.Ft, p.Tt) + } + n := ab.Len() + var np int + for np = mark; np < n; np++ { + c := ab.At(np) + if c != 0xf2 && c != 0xf3 && (c < 0x64 || c > 0x67) && c != 0x2e && c != 0x3e && c != 0x26 { + break + } + } + ab.Insert(np, byte(0x40|ab.rexflag)) + } + + n := ab.Len() + for i := len(cursym.R) - 1; i >= 0; i-- { + r := &cursym.R[i] + if int64(r.Off) < p.Pc { + break + } + if ab.rexflag != 0 && !ab.vexflag && !ab.evexflag { + r.Off++ + } + if r.Type == objabi.R_PCREL { + if ctxt.Arch.Family == sys.AMD64 || p.As == obj.AJMP || p.As == obj.ACALL { + // PC-relative addressing is relative to the end of the instruction, + // but the relocations applied by the linker are relative to the end + // of the relocation. Because immediate instruction + // arguments can follow the PC-relative memory reference in the + // instruction encoding, the two may not coincide. In this case, + // adjust addend so that linker can keep relocating relative to the + // end of the relocation. + r.Add -= p.Pc + int64(n) - (int64(r.Off) + int64(r.Siz)) + } else if ctxt.Arch.Family == sys.I386 { + // On 386 PC-relative addressing (for non-call/jmp instructions) + // assumes that the previous instruction loaded the PC of the end + // of that instruction into CX, so the adjustment is relative to + // that. + r.Add += int64(r.Off) - p.Pc + int64(r.Siz) + } + } + if r.Type == objabi.R_GOTPCREL && ctxt.Arch.Family == sys.I386 { + // On 386, R_GOTPCREL makes the same assumptions as R_PCREL. + r.Add += int64(r.Off) - p.Pc + int64(r.Siz) + } + + } +} + +// unpackOps4 extracts 4 operands from p. +func unpackOps4(p *obj.Prog) (arg0, arg1, arg2, dst *obj.Addr) { + return &p.From, &p.RestArgs[0].Addr, &p.RestArgs[1].Addr, &p.To +} + +// unpackOps5 extracts 5 operands from p. 
+func unpackOps5(p *obj.Prog) (arg0, arg1, arg2, arg3, dst *obj.Addr) { + return &p.From, &p.RestArgs[0].Addr, &p.RestArgs[1].Addr, &p.RestArgs[2].Addr, &p.To +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/asm_test.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/asm_test.go new file mode 100644 index 0000000000000000000000000000000000000000..458a91258a35b002b946da61d9709286e735a549 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/asm_test.go @@ -0,0 +1,342 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x86 + +import ( + "cmd/internal/obj" + "cmd/internal/objabi" + "internal/testenv" + "os" + "path/filepath" + "regexp" + "testing" +) + +type oclassTest struct { + arg *obj.Addr + want int // Expected oclass return value for a given arg +} + +// Filled inside init, because it's easier to do with helper functions. +var ( + oclassTestsAMD64 []*oclassTest + oclassTests386 []*oclassTest +) + +func init() { + // Required for tests that access any of + // opindex/ycover/reg/regrex global tables. + var ctxt obj.Link + instinit(&ctxt) + + regAddr := func(reg int16) *obj.Addr { + return &obj.Addr{Type: obj.TYPE_REG, Reg: reg} + } + immAddr := func(v int64) *obj.Addr { + return &obj.Addr{Type: obj.TYPE_CONST, Offset: v} + } + regListAddr := func(regFrom, regTo int16) *obj.Addr { + return &obj.Addr{Type: obj.TYPE_REGLIST, Offset: EncodeRegisterRange(regFrom, regTo)} + } + memAddr := func(base, index int16) *obj.Addr { + return &obj.Addr{Type: obj.TYPE_MEM, Reg: base, Index: index} + } + + // TODO(quasilyte): oclass doesn't return Yxxx for X/Y regs with + // ID higher than 7. We don't encode such instructions, but this + // behavior seems inconsistent. It should probably either + // never check for arch or do it in all cases. 
+ + oclassTestsCommon := []*oclassTest{ + {&obj.Addr{Type: obj.TYPE_NONE}, Ynone}, + {&obj.Addr{Type: obj.TYPE_BRANCH}, Ybr}, + {&obj.Addr{Type: obj.TYPE_TEXTSIZE}, Ytextsize}, + + {&obj.Addr{Type: obj.TYPE_INDIR, Name: obj.NAME_EXTERN}, Yindir}, + {&obj.Addr{Type: obj.TYPE_INDIR, Name: obj.NAME_GOTREF}, Yindir}, + + {&obj.Addr{Type: obj.TYPE_ADDR, Name: obj.NAME_AUTO}, Yiauto}, + {&obj.Addr{Type: obj.TYPE_ADDR, Name: obj.NAME_PARAM}, Yiauto}, + {&obj.Addr{Type: obj.TYPE_ADDR, Name: obj.NAME_EXTERN}, Yiauto}, + {&obj.Addr{Type: obj.TYPE_ADDR, Sym: &obj.LSym{Name: "runtime.duff"}}, Yi32}, + {&obj.Addr{Type: obj.TYPE_ADDR, Offset: 4}, Yu7}, + {&obj.Addr{Type: obj.TYPE_ADDR, Offset: 255}, Yu8}, + + {immAddr(0), Yi0}, + {immAddr(1), Yi1}, + {immAddr(2), Yu2}, + {immAddr(3), Yu2}, + {immAddr(4), Yu7}, + {immAddr(86), Yu7}, + {immAddr(127), Yu7}, + {immAddr(128), Yu8}, + {immAddr(200), Yu8}, + {immAddr(255), Yu8}, + {immAddr(-1), Yi8}, + {immAddr(-100), Yi8}, + {immAddr(-128), Yi8}, + + {regAddr(REG_AL), Yal}, + {regAddr(REG_AX), Yax}, + {regAddr(REG_DL), Yrb}, + {regAddr(REG_DH), Yrb}, + {regAddr(REG_BH), Yrb}, + {regAddr(REG_CL), Ycl}, + {regAddr(REG_CX), Ycx}, + {regAddr(REG_DX), Yrx}, + {regAddr(REG_BX), Yrx}, + {regAddr(REG_F0), Yf0}, + {regAddr(REG_F3), Yrf}, + {regAddr(REG_F7), Yrf}, + {regAddr(REG_M0), Ymr}, + {regAddr(REG_M3), Ymr}, + {regAddr(REG_M7), Ymr}, + {regAddr(REG_X0), Yxr0}, + {regAddr(REG_X6), Yxr}, + {regAddr(REG_X13), Yxr}, + {regAddr(REG_X20), YxrEvex}, + {regAddr(REG_X31), YxrEvex}, + {regAddr(REG_Y0), Yyr}, + {regAddr(REG_Y6), Yyr}, + {regAddr(REG_Y13), Yyr}, + {regAddr(REG_Y20), YyrEvex}, + {regAddr(REG_Y31), YyrEvex}, + {regAddr(REG_Z0), Yzr}, + {regAddr(REG_Z6), Yzr}, + {regAddr(REG_K0), Yk0}, + {regAddr(REG_K5), Yknot0}, + {regAddr(REG_K7), Yknot0}, + {regAddr(REG_CS), Ycs}, + {regAddr(REG_SS), Yss}, + {regAddr(REG_DS), Yds}, + {regAddr(REG_ES), Yes}, + {regAddr(REG_FS), Yfs}, + {regAddr(REG_GS), Ygs}, + {regAddr(REG_TLS), Ytls}, + 
{regAddr(REG_GDTR), Ygdtr}, + {regAddr(REG_IDTR), Yidtr}, + {regAddr(REG_LDTR), Yldtr}, + {regAddr(REG_MSW), Ymsw}, + {regAddr(REG_TASK), Ytask}, + {regAddr(REG_CR0), Ycr0}, + {regAddr(REG_CR5), Ycr5}, + {regAddr(REG_CR8), Ycr8}, + {regAddr(REG_DR0), Ydr0}, + {regAddr(REG_DR5), Ydr5}, + {regAddr(REG_DR7), Ydr7}, + {regAddr(REG_TR0), Ytr0}, + {regAddr(REG_TR5), Ytr5}, + {regAddr(REG_TR7), Ytr7}, + + {regListAddr(REG_X0, REG_X3), YxrEvexMulti4}, + {regListAddr(REG_X4, REG_X7), YxrEvexMulti4}, + {regListAddr(REG_Y0, REG_Y3), YyrEvexMulti4}, + {regListAddr(REG_Y4, REG_Y7), YyrEvexMulti4}, + {regListAddr(REG_Z0, REG_Z3), YzrMulti4}, + {regListAddr(REG_Z4, REG_Z7), YzrMulti4}, + + {memAddr(REG_AL, REG_NONE), Ym}, + {memAddr(REG_AL, REG_SI), Ym}, + {memAddr(REG_SI, REG_CX), Ym}, + {memAddr(REG_DI, REG_X0), Yxvm}, + {memAddr(REG_DI, REG_X7), Yxvm}, + {memAddr(REG_DI, REG_Y0), Yyvm}, + {memAddr(REG_DI, REG_Y7), Yyvm}, + {memAddr(REG_DI, REG_Z0), Yzvm}, + {memAddr(REG_DI, REG_Z7), Yzvm}, + } + + oclassTestsAMD64 = []*oclassTest{ + {immAddr(-200), Ys32}, + {immAddr(500), Ys32}, + {immAddr(0x7FFFFFFF), Ys32}, + {immAddr(0x7FFFFFFF + 1), Yi32}, + {immAddr(0xFFFFFFFF), Yi32}, + {immAddr(0xFFFFFFFF + 1), Yi64}, + + {regAddr(REG_BPB), Yrb}, + {regAddr(REG_SIB), Yrb}, + {regAddr(REG_DIB), Yrb}, + {regAddr(REG_R8B), Yrb}, + {regAddr(REG_R12B), Yrb}, + {regAddr(REG_R8), Yrl}, + {regAddr(REG_R13), Yrl}, + {regAddr(REG_R15), Yrl}, + {regAddr(REG_SP), Yrl}, + {regAddr(REG_SI), Yrl}, + {regAddr(REG_DI), Yrl}, + {regAddr(REG_Z13), Yzr}, + {regAddr(REG_Z20), Yzr}, + {regAddr(REG_Z31), Yzr}, + + {regListAddr(REG_X10, REG_X13), YxrEvexMulti4}, + {regListAddr(REG_X24, REG_X27), YxrEvexMulti4}, + {regListAddr(REG_Y10, REG_Y13), YyrEvexMulti4}, + {regListAddr(REG_Y24, REG_Y27), YyrEvexMulti4}, + {regListAddr(REG_Z10, REG_Z13), YzrMulti4}, + {regListAddr(REG_Z24, REG_Z27), YzrMulti4}, + + {memAddr(REG_DI, REG_X20), YxvmEvex}, + {memAddr(REG_DI, REG_X27), YxvmEvex}, + {memAddr(REG_DI, REG_Y20), 
YyvmEvex}, + {memAddr(REG_DI, REG_Y27), YyvmEvex}, + {memAddr(REG_DI, REG_Z20), Yzvm}, + {memAddr(REG_DI, REG_Z27), Yzvm}, + } + + oclassTests386 = []*oclassTest{ + {&obj.Addr{Type: obj.TYPE_ADDR, Name: obj.NAME_EXTERN, Sym: &obj.LSym{}}, Yi32}, + + {immAddr(-200), Yi32}, + + {regAddr(REG_SP), Yrl32}, + {regAddr(REG_SI), Yrl32}, + {regAddr(REG_DI), Yrl32}, + } + + // Add tests that are arch-independent for all sets. + oclassTestsAMD64 = append(oclassTestsAMD64, oclassTestsCommon...) + oclassTests386 = append(oclassTests386, oclassTestsCommon...) +} + +func TestOclass(t *testing.T) { + runTest := func(t *testing.T, ctxt *obj.Link, tests []*oclassTest) { + var p obj.Prog + for _, test := range tests { + have := oclass(ctxt, &p, test.arg) + if have != test.want { + t.Errorf("oclass(%q):\nhave: %d\nwant: %d", + obj.Dconv(&p, test.arg), have, test.want) + } + } + } + + // TODO(quasilyte): test edge cases for Hsolaris, etc? + + t.Run("linux/AMD64", func(t *testing.T) { + ctxtAMD64 := obj.Linknew(&Linkamd64) + ctxtAMD64.Headtype = objabi.Hlinux // See #32028 + runTest(t, ctxtAMD64, oclassTestsAMD64) + }) + + t.Run("linux/386", func(t *testing.T) { + ctxt386 := obj.Linknew(&Link386) + ctxt386.Headtype = objabi.Hlinux // See #32028 + runTest(t, ctxt386, oclassTests386) + }) +} + +func TestRegisterListEncDec(t *testing.T) { + tests := []struct { + printed string + reg0 int16 + reg1 int16 + }{ + {"[R10-R13]", REG_R10, REG_R13}, + {"[X0-AX]", REG_X0, REG_AX}, + + {"[X0-X3]", REG_X0, REG_X3}, + {"[X21-X24]", REG_X21, REG_X24}, + + {"[Y0-Y3]", REG_Y0, REG_Y3}, + {"[Y21-Y24]", REG_Y21, REG_Y24}, + + {"[Z0-Z3]", REG_Z0, REG_Z3}, + {"[Z21-Z24]", REG_Z21, REG_Z24}, + } + + for _, test := range tests { + enc := EncodeRegisterRange(test.reg0, test.reg1) + reg0, reg1 := decodeRegisterRange(enc) + + if int16(reg0) != test.reg0 { + t.Errorf("%s reg0 mismatch: have %d, want %d", + test.printed, reg0, test.reg0) + } + if int16(reg1) != test.reg1 { + t.Errorf("%s reg1 mismatch: have %d, 
want %d", + test.printed, reg1, test.reg1) + } + wantPrinted := test.printed + if rlconv(enc) != wantPrinted { + t.Errorf("%s string mismatch: have %s, want %s", + test.printed, rlconv(enc), wantPrinted) + } + } +} + +func TestRegIndex(t *testing.T) { + tests := []struct { + regFrom int + regTo int + }{ + {REG_AL, REG_R15B}, + {REG_AX, REG_R15}, + {REG_M0, REG_M7}, + {REG_K0, REG_K7}, + {REG_X0, REG_X31}, + {REG_Y0, REG_Y31}, + {REG_Z0, REG_Z31}, + } + + for _, test := range tests { + for index, reg := 0, test.regFrom; reg <= test.regTo; index, reg = index+1, reg+1 { + have := regIndex(int16(reg)) + want := index + if have != want { + regName := rconv(int(reg)) + t.Errorf("regIndex(%s):\nhave: %d\nwant: %d", + regName, have, want) + } + } + } +} + +// TestPCALIGN verifies the correctness of the PCALIGN by checking if the +// code can be aligned to the alignment value. +func TestPCALIGN(t *testing.T) { + testenv.MustHaveGoBuild(t) + dir := t.TempDir() + tmpfile := filepath.Join(dir, "test.s") + tmpout := filepath.Join(dir, "test.o") + + var testCases = []struct { + name string + code string + out string + }{ + { + name: "8-byte alignment", + code: "TEXT ·foo(SB),$0-0\nMOVQ $0, AX\nPCALIGN $8\nMOVQ $1, BX\nRET\n", + out: `0x0008\s00008\s\(.*\)\tMOVQ\t\$1,\sBX`, + }, + { + name: "16-byte alignment", + code: "TEXT ·foo(SB),$0-0\nMOVQ $0, AX\nPCALIGN $16\nMOVQ $2, CX\nRET\n", + out: `0x0010\s00016\s\(.*\)\tMOVQ\t\$2,\sCX`, + }, + } + + for _, test := range testCases { + if err := os.WriteFile(tmpfile, []byte(test.code), 0644); err != nil { + t.Fatal(err) + } + cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "asm", "-S", "-o", tmpout, tmpfile) + cmd.Env = append(os.Environ(), "GOARCH=amd64", "GOOS=linux") + out, err := cmd.CombinedOutput() + if err != nil { + t.Errorf("The %s build failed: %v, output: %s", test.name, err, out) + continue + } + + matched, err := regexp.MatchString(test.out, string(out)) + if err != nil { + t.Fatal(err) + } + if !matched { + 
t.Errorf("The %s testing failed!\ninput: %s\noutput: %s\n", test.name, test.code, out) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/avx_optabs.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/avx_optabs.go new file mode 100644 index 0000000000000000000000000000000000000000..b8ff4699d1548cc544ea32e4f878a2525fecbfb6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/avx_optabs.go @@ -0,0 +1,4628 @@ +// Code generated by x86avxgen. DO NOT EDIT. + +package x86 + +// VEX instructions that come in two forms: +// VTHING xmm2/m128, xmmV, xmm1 +// VTHING ymm2/m256, ymmV, ymm1 +// +// The opcode array in the corresponding Optab entry +// should contain the (VEX prefixes, opcode byte) pair +// for each of the two forms. +// For example, the entries for VPXOR are: +// +// VPXOR xmm2/m128, xmmV, xmm1 +// VEX.NDS.128.66.0F.WIG EF /r +// +// VPXOR ymm2/m256, ymmV, ymm1 +// VEX.NDS.256.66.0F.WIG EF /r +// +// Produce this optab entry: +// +// {AVPXOR, yvex_xy3, Pavx, opBytes{vex128|vex66|vex0F|vexWIG, 0xEF, vex256|vex66|vex0F|vexWIG, 0xEF}} +// +// VEX requires at least 2 bytes inside opBytes: +// - VEX prefixes (vex-prefixed constants) +// - Opcode byte +// +// EVEX instructions extend VEX form variety: +// VTHING zmm2/m512, zmmV, zmm1 -- implicit K0 (merging) +// VTHING zmm2/m512, zmmV, K, zmm1 -- explicit K mask (can't use K0) +// +// EVEX requires at least 3 bytes inside opBytes: +// - EVEX prefixes (evex-prefixed constants); similar to VEX +// - Displacement multiplier info (scale / broadcast scale) +// - Opcode byte; similar to VEX +// +// Both VEX and EVEX instructions may have opdigit (opcode extension) byte +// which follows the primary opcode byte. +// Because it can only have value of 0-7, it is written in octal notation. +// +// x86.csv can be very useful for figuring out proper [E]VEX parts. 
+ +var _yandnl = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yml, Yrl, Yrl}}, +} + +var _ybextrl = []ytab{ + {zcase: Zvex_v_rm_r, zoffset: 2, args: argList{Yrl, Yml, Yrl}}, +} + +var _yblsil = []ytab{ + {zcase: Zvex_rm_r_vo, zoffset: 3, args: argList{Yml, Yrl}}, +} + +var _ykaddb = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yk, Yk, Yk}}, +} + +var _ykmovb = []ytab{ + {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yk, Ym}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yk, Yrl}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ykm, Yk}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yrl, Yk}}, +} + +var _yknotb = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yk, Yk}}, +} + +var _ykshiftlb = []ytab{ + {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yu8, Yk, Yk}}, +} + +var _yrorxl = []ytab{ + {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yml, Yrl}}, + {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yml, Yrl}}, +} + +var _yv4fmaddps = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Ym, YzrMulti4, Yzr}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Ym, YzrMulti4, Yknot0, Yzr}}, +} + +var _yv4fmaddss = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Ym, YxrEvexMulti4, YxrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Ym, YxrEvexMulti4, Yknot0, YxrEvex}}, +} + +var _yvaddpd = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, 
YyrEvex}}, +} + +var _yvaddsd = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, +} + +var _yvaddsubpd = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}}, +} + +var _yvaesdec = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YymEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yzm, Yzr, Yzr}}, +} + +var _yvaesimc = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, +} + +var _yvaeskeygenassist = []ytab{ + {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yxm, Yxr}}, + {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yxm, Yxr}}, +} + +var _yvalignd = []ytab{ + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr, Yzr}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yknot0, Yzr}}, +} + +var _yvandnpd = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, 
zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, +} + +var _yvblendmpd = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, +} + +var _yvblendpd = []ytab{ + {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}}, + {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yym, Yyr, Yyr}}, +} + +var _yvblendvpd = []ytab{ + {zcase: Zvex_hr_rm_v_r, zoffset: 2, args: argList{Yxr, Yxm, Yxr, Yxr}}, + {zcase: Zvex_hr_rm_v_r, zoffset: 2, args: argList{Yyr, Yym, Yyr, Yyr}}, +} + +var _yvbroadcastf128 = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yyr}}, +} + +var _yvbroadcastf32x2 = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, Yzr}}, +} + +var _yvbroadcastf32x4 = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Ym, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Ym, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Ym, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Ym, Yknot0, Yzr}}, +} + +var _yvbroadcastf32x8 = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Ym, Yzr}}, + {zcase: 
Zevex_rm_k_r, zoffset: 3, args: argList{Ym, Yknot0, Yzr}}, +} + +var _yvbroadcasti32x2 = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, Yzr}}, +} + +var _yvbroadcastsd = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yyr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, Yzr}}, +} + +var _yvbroadcastss = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yyr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, Yzr}}, +} + +var _yvcmppd = []ytab{ + {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}}, + {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yym, Yyr, Yyr}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr, Yk}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yknot0, Yk}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, Yk}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, 
Yknot0, Yk}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex, Yk}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, Yknot0, Yk}}, +} + +var _yvcmpsd = []ytab{ + {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, Yk}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, Yknot0, Yk}}, +} + +var _yvcomisd = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxmEvex, YxrEvex}}, +} + +var _yvcompresspd = []ytab{ + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, YxmEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxmEvex}}, + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YyrEvex, YymEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, YymEvex}}, + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{Yzr, Yzm}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, Yzm}}, +} + +var _yvcvtdq2pd = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yyr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, Yzr}}, +} + +var _yvcvtdq2ps = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, 
args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YyrEvex}}, +} + +var _yvcvtpd2dq = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, YyrEvex}}, +} + +var _yvcvtpd2dqx = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, +} + +var _yvcvtpd2dqy = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yxr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YxrEvex}}, +} + +var _yvcvtpd2qq = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YyrEvex}}, +} + +var _yvcvtpd2udqx = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, +} + +var _yvcvtpd2udqy = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YxrEvex}}, +} + +var _yvcvtph2ps = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yyr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, 
args: argList{YymEvex, Yknot0, Yzr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, +} + +var _yvcvtps2ph = []ytab{ + {zcase: Zvex_i_r_rm, zoffset: 0, args: argList{Yu8, Yxr, Yxm}}, + {zcase: Zvex_i_r_rm, zoffset: 2, args: argList{Yi8, Yxr, Yxm}}, + {zcase: Zvex_i_r_rm, zoffset: 0, args: argList{Yu8, Yyr, Yxm}}, + {zcase: Zvex_i_r_rm, zoffset: 2, args: argList{Yi8, Yyr, Yxm}}, + {zcase: Zevex_i_r_rm, zoffset: 0, args: argList{Yu8, Yzr, YymEvex}}, + {zcase: Zevex_i_r_k_rm, zoffset: 3, args: argList{Yu8, Yzr, Yknot0, YymEvex}}, + {zcase: Zevex_i_r_rm, zoffset: 0, args: argList{Yu8, YxrEvex, YxmEvex}}, + {zcase: Zevex_i_r_k_rm, zoffset: 3, args: argList{Yu8, YxrEvex, Yknot0, YxmEvex}}, + {zcase: Zevex_i_r_rm, zoffset: 0, args: argList{Yu8, YyrEvex, YxmEvex}}, + {zcase: Zevex_i_r_k_rm, zoffset: 3, args: argList{Yu8, YyrEvex, Yknot0, YxmEvex}}, +} + +var _yvcvtps2qq = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, Yzr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, +} + +var _yvcvtsd2si = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yrl}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxmEvex, Yrl}}, +} + +var _yvcvtsd2usil = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxmEvex, Yrl}}, +} + +var _yvcvtsi2sdl = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yml, Yxr, Yxr}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yml, YxrEvex, 
YxrEvex}}, +} + +var _yvcvtudq2pd = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, Yzr}}, +} + +var _yvcvtusi2sdl = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yml, YxrEvex, YxrEvex}}, +} + +var _yvdppd = []ytab{ + {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}}, +} + +var _yvexp2pd = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}}, +} + +var _yvexpandpd = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}}, +} + +var _yvextractf128 = []ytab{ + {zcase: Zvex_i_r_rm, zoffset: 0, args: argList{Yu8, Yyr, Yxm}}, + {zcase: Zvex_i_r_rm, zoffset: 2, args: argList{Yi8, Yyr, Yxm}}, +} + +var _yvextractf32x4 = []ytab{ + {zcase: Zevex_i_r_rm, zoffset: 0, args: argList{Yu8, YyrEvex, YxmEvex}}, + {zcase: Zevex_i_r_k_rm, zoffset: 3, args: argList{Yu8, YyrEvex, Yknot0, YxmEvex}}, + {zcase: Zevex_i_r_rm, zoffset: 0, args: argList{Yu8, Yzr, YxmEvex}}, + {zcase: Zevex_i_r_k_rm, zoffset: 3, args: argList{Yu8, Yzr, Yknot0, YxmEvex}}, +} + +var _yvextractf32x8 = []ytab{ + {zcase: Zevex_i_r_rm, zoffset: 0, args: argList{Yu8, Yzr, YymEvex}}, + {zcase: Zevex_i_r_k_rm, zoffset: 3, args: argList{Yu8, Yzr, 
Yknot0, YymEvex}}, +} + +var _yvextractps = []ytab{ + {zcase: Zvex_i_r_rm, zoffset: 0, args: argList{Yu8, Yxr, Yml}}, + {zcase: Zvex_i_r_rm, zoffset: 2, args: argList{Yi8, Yxr, Yml}}, + {zcase: Zevex_i_r_rm, zoffset: 3, args: argList{Yu8, YxrEvex, Yml}}, +} + +var _yvfixupimmpd = []ytab{ + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr, Yzr}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yknot0, Yzr}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, Yknot0, YyrEvex}}, +} + +var _yvfixupimmsd = []ytab{ + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, Yknot0, YxrEvex}}, +} + +var _yvfpclasspdx = []ytab{ + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YxmEvex, Yk}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, Yknot0, Yk}}, +} + +var _yvfpclasspdy = []ytab{ + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YymEvex, Yk}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YymEvex, Yknot0, Yk}}, +} + +var _yvfpclasspdz = []ytab{ + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, Yzm, Yk}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yknot0, Yk}}, +} + +var _yvgatherdpd = []ytab{ + {zcase: Zvex_v_rm_r, zoffset: 2, args: argList{Yxr, Yxvm, Yxr}}, + {zcase: Zvex_v_rm_r, zoffset: 2, args: argList{Yyr, Yxvm, Yyr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxvmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxvmEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YyvmEvex, Yknot0, Yzr}}, +} + +var _yvgatherdps = 
[]ytab{ + {zcase: Zvex_v_rm_r, zoffset: 2, args: argList{Yxr, Yxvm, Yxr}}, + {zcase: Zvex_v_rm_r, zoffset: 2, args: argList{Yyr, Yyvm, Yyr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxvmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YyvmEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzvm, Yknot0, Yzr}}, +} + +var _yvgatherpf0dpd = []ytab{ + {zcase: Zevex_k_rmo, zoffset: 4, args: argList{Yknot0, YyvmEvex}}, +} + +var _yvgatherpf0dps = []ytab{ + {zcase: Zevex_k_rmo, zoffset: 4, args: argList{Yknot0, Yzvm}}, +} + +var _yvgatherqps = []ytab{ + {zcase: Zvex_v_rm_r, zoffset: 2, args: argList{Yxr, Yxvm, Yxr}}, + {zcase: Zvex_v_rm_r, zoffset: 2, args: argList{Yxr, Yyvm, Yxr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxvmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YyvmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzvm, Yknot0, YyrEvex}}, +} + +var _yvgetexpsd = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, +} + +var _yvgetmantpd = []ytab{ + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yknot0, Yzr}}, + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, +} + +var _yvgf2p8affineinvqb = []ytab{ + {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}}, + {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yym, Yyr, Yyr}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: 
argList{Yu8, YxmEvex, YxrEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr, Yzr}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yknot0, Yzr}}, +} + +var _yvinsertf128 = []ytab{ + {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yyr, Yyr}}, +} + +var _yvinsertf32x4 = []ytab{ + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YyrEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, Yzr, Yzr}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, Yzr, Yknot0, Yzr}}, +} + +var _yvinsertf32x8 = []ytab{ + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, Yzr, Yzr}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, Yzr, Yknot0, Yzr}}, +} + +var _yvinsertps = []ytab{ + {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}}, + {zcase: Zevex_i_rm_v_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, YxrEvex}}, +} + +var _yvlddqu = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yyr}}, +} + +var _yvldmxcsr = []ytab{ + {zcase: Zvex_rm_v_ro, zoffset: 3, args: argList{Ym}}, +} + +var _yvmaskmovdqu = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxr, Yxr}}, +} + +var _yvmaskmovpd = []ytab{ + {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yxr, Ym}}, + {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yyr, Yyr, Ym}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yxr, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yyr, Yyr}}, +} + +var _yvmovapd = []ytab{ + {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yxm}}, + {zcase: 
Zvex_r_v_rm, zoffset: 2, args: argList{Yyr, Yym}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr}}, + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, YxmEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxmEvex}}, + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YyrEvex, YymEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, YymEvex}}, + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{Yzr, Yzm}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, Yzm}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}}, +} + +var _yvmovd = []ytab{ + {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yml}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yml, Yxr}}, + {zcase: Zevex_r_v_rm, zoffset: 3, args: argList{YxrEvex, Yml}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yml, YxrEvex}}, +} + +var _yvmovddup = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}}, +} + +var _yvmovdqa = []ytab{ + {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yxm}}, + {zcase: 
Zvex_r_v_rm, zoffset: 2, args: argList{Yyr, Yym}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr}}, +} + +var _yvmovdqa32 = []ytab{ + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, YxmEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxmEvex}}, + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YyrEvex, YymEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, YymEvex}}, + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{Yzr, Yzm}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, Yzm}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}}, +} + +var _yvmovhlps = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxr, Yxr, Yxr}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxrEvex, YxrEvex, YxrEvex}}, +} + +var _yvmovhpd = []ytab{ + {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Ym}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yxr, Yxr}}, + {zcase: Zevex_r_v_rm, zoffset: 3, args: argList{YxrEvex, Ym}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Ym, YxrEvex, YxrEvex}}, +} + +var _yvmovmskpd = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxr, Yrl}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yyr, Yrl}}, +} + +var _yvmovntdq = []ytab{ + {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Ym}}, + {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yyr, Ym}}, + {zcase: Zevex_r_v_rm, zoffset: 3, args: argList{YxrEvex, Ym}}, + {zcase: Zevex_r_v_rm, zoffset: 3, args: argList{YyrEvex, Ym}}, + {zcase: Zevex_r_v_rm, zoffset: 
3, args: argList{Yzr, Ym}}, +} + +var _yvmovntdqa = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yyr}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Ym, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Ym, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Ym, Yzr}}, +} + +var _yvmovq = []ytab{ + {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yml}}, + {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yxm}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yml, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, + {zcase: Zevex_r_v_rm, zoffset: 3, args: argList{YxrEvex, Yml}}, + {zcase: Zevex_r_v_rm, zoffset: 3, args: argList{YxrEvex, YxmEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yml, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxmEvex, YxrEvex}}, +} + +var _yvmovsd = []ytab{ + {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yxr, Yxr}}, + {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Ym}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxr, Yxr, Yxr}}, + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_r_v_k_rm, zoffset: 3, args: argList{YxrEvex, YxrEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, Ym}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, Ym}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Ym, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Ym, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxrEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxrEvex, YxrEvex, Yknot0, YxrEvex}}, +} + +var _yvpbroadcastb = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yyr}}, + {zcase: Zevex_rm_v_r, zoffset: 
0, args: argList{Yrl, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yrl, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yrl, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yrl, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yrl, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yrl, Yknot0, Yzr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr}}, + {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, Yzr}}, +} + +var _yvpbroadcastmb2q = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yk, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yk, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yk, Yzr}}, +} + +var _yvpclmulqdq = []ytab{ + {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}}, + {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yym, Yyr, Yyr}}, + {zcase: Zevex_i_rm_v_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_i_rm_v_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_i_rm_v_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yzr}}, +} + +var _yvpcmpb = []ytab{ + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, Yk}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, Yknot0, Yk}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex, Yk}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, Yknot0, Yk}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr, Yk}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yknot0, Yk}}, 
+} + +var _yvpcmpeqb = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, Yk}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, Yk}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, Yk}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, Yk}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yk}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yk}}, +} + +var _yvperm2f128 = []ytab{ + {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yym, Yyr, Yyr}}, +} + +var _yvpermd = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, +} + +var _yvpermilpd = []ytab{ + {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yxm, Yxr}}, + {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yxm, Yxr}}, + {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yym, Yyr}}, + {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yym, Yyr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}}, + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: 
argList{Yu8, Yzm, Yknot0, Yzr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, +} + +var _yvpermpd = []ytab{ + {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yu8, Yym, Yyr}}, + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yknot0, Yzr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, +} + +var _yvpermq = []ytab{ + {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yym, Yyr}}, + {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yym, Yyr}}, + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yknot0, Yzr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, 
Yzr}}, +} + +var _yvpextrw = []ytab{ + {zcase: Zvex_i_r_rm, zoffset: 0, args: argList{Yu8, Yxr, Yml}}, + {zcase: Zvex_i_r_rm, zoffset: 2, args: argList{Yi8, Yxr, Yml}}, + {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yxr, Yrl}}, + {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yxr, Yrl}}, + {zcase: Zevex_i_r_rm, zoffset: 3, args: argList{Yu8, YxrEvex, Yml}}, + {zcase: Zevex_i_rm_r, zoffset: 3, args: argList{Yu8, YxrEvex, Yrl}}, +} + +var _yvpinsrb = []ytab{ + {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yml, Yxr, Yxr}}, + {zcase: Zevex_i_rm_v_r, zoffset: 3, args: argList{Yu8, Yml, YxrEvex, YxrEvex}}, +} + +var _yvpmovb2m = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxrEvex, Yk}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YyrEvex, Yk}}, + {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yzr, Yk}}, +} + +var _yvpmovdb = []ytab{ + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, YxmEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxmEvex}}, + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YyrEvex, YxmEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, YxmEvex}}, + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{Yzr, YxmEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, YxmEvex}}, +} + +var _yvpmovdw = []ytab{ + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, YxmEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxmEvex}}, + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YyrEvex, YxmEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, YxmEvex}}, + {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{Yzr, YymEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, YymEvex}}, +} + +var _yvprold = []ytab{ + {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex}}, + {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_i_rm_vo, 
zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, + {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, + {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, Yzm, Yknot0, Yzr}}, +} + +var _yvpscatterdd = []ytab{ + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxvmEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, YyvmEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, Yzvm}}, +} + +var _yvpscatterdq = []ytab{ + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxvmEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, YxvmEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, YyvmEvex}}, +} + +var _yvpscatterqd = []ytab{ + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxvmEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YyvmEvex}}, + {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, Yzvm}}, +} + +var _yvpshufbitqmb = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, Yk}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, Yk}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, Yk}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, Yk}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yk}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yk}}, +} + +var _yvpshufd = []ytab{ + {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yxm, Yxr}}, + {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yxm, Yxr}}, + {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yym, Yyr}}, + {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yym, Yyr}}, + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, 
YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, + {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yknot0, Yzr}}, +} + +var _yvpslld = []ytab{ + {zcase: Zvex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yxr, Yxr}}, + {zcase: Zvex_i_rm_vo, zoffset: 3, args: argList{Yi8, Yxr, Yxr}}, + {zcase: Zvex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yyr, Yyr}}, + {zcase: Zvex_i_rm_vo, zoffset: 3, args: argList{Yi8, Yyr, Yyr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yyr, Yyr}}, + {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex}}, + {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, + {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, + {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, Yzm, Yknot0, Yzr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YyrEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr, Yzr}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, Yzr, Yknot0, Yzr}}, +} + +var _yvpslldq = []ytab{ + {zcase: Zvex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yxr, Yxr}}, + {zcase: Zvex_i_rm_vo, zoffset: 3, args: argList{Yi8, Yxr, Yxr}}, + {zcase: Zvex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yyr, Yyr}}, + {zcase: Zvex_i_rm_vo, zoffset: 3, args: 
argList{Yi8, Yyr, Yyr}}, + {zcase: Zevex_i_rm_vo, zoffset: 4, args: argList{Yu8, YxmEvex, YxrEvex}}, + {zcase: Zevex_i_rm_vo, zoffset: 4, args: argList{Yu8, YymEvex, YyrEvex}}, + {zcase: Zevex_i_rm_vo, zoffset: 4, args: argList{Yu8, Yzm, Yzr}}, +} + +var _yvpsraq = []ytab{ + {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex}}, + {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, YxmEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, + {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, + {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, Yzm, Yknot0, Yzr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YyrEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr, Yzr}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, Yzr, Yknot0, Yzr}}, +} + +var _yvptest = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr}}, +} + +var _yvrcpss = []ytab{ + {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, +} + +var _yvroundpd = []ytab{ + {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yxm, Yxr}}, + {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yxm, Yxr}}, + {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yym, Yyr}}, + {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yym, Yyr}}, +} + +var _yvscalefpd = []ytab{ + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: 
argList{YxmEvex, YxrEvex, YxrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, + {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, +} + +var _yvshuff32x4 = []ytab{ + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex, YyrEvex}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, Yknot0, YyrEvex}}, + {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr, Yzr}}, + {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yknot0, Yzr}}, +} + +var _yvzeroall = []ytab{ + {zcase: Zvex, zoffset: 2, args: argList{}}, +} + +var avxOptab = [...]Optab{ + {as: AANDNL, ytab: _yandnl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F38 | vexW0, 0xF2, + }}, + {as: AANDNQ, ytab: _yandnl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F38 | vexW1, 0xF2, + }}, + {as: ABEXTRL, ytab: _ybextrl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F38 | vexW0, 0xF7, + }}, + {as: ABEXTRQ, ytab: _ybextrl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F38 | vexW1, 0xF7, + }}, + {as: ABLSIL, ytab: _yblsil, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F38 | vexW0, 0xF3, 03, + }}, + {as: ABLSIQ, ytab: _yblsil, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F38 | vexW1, 0xF3, 03, + }}, + {as: ABLSMSKL, ytab: _yblsil, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F38 | vexW0, 0xF3, 02, + }}, + {as: ABLSMSKQ, ytab: _yblsil, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F38 | vexW1, 0xF3, 02, + }}, + {as: ABLSRL, ytab: _yblsil, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F38 | vexW0, 0xF3, 01, + }}, + {as: ABLSRQ, ytab: _yblsil, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F38 | vexW1, 0xF3, 01, + }}, + {as: ABZHIL, ytab: _ybextrl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F38 | vexW0, 
0xF5, + }}, + {as: ABZHIQ, ytab: _ybextrl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F38 | vexW1, 0xF5, + }}, + {as: AKADDB, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x4A, + }}, + {as: AKADDD, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW1, 0x4A, + }}, + {as: AKADDQ, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex0F | vexW1, 0x4A, + }}, + {as: AKADDW, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex0F | vexW0, 0x4A, + }}, + {as: AKANDB, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x41, + }}, + {as: AKANDD, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW1, 0x41, + }}, + {as: AKANDNB, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x42, + }}, + {as: AKANDND, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW1, 0x42, + }}, + {as: AKANDNQ, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex0F | vexW1, 0x42, + }}, + {as: AKANDNW, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex0F | vexW0, 0x42, + }}, + {as: AKANDQ, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex0F | vexW1, 0x41, + }}, + {as: AKANDW, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex0F | vexW0, 0x41, + }}, + {as: AKMOVB, ytab: _ykmovb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x91, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x93, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x90, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x92, + }}, + {as: AKMOVD, ytab: _ykmovb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW1, 0x91, + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x93, + avxEscape | vex128 | vex66 | vex0F | vexW1, 0x90, + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x92, + }}, 
+ {as: AKMOVQ, ytab: _ykmovb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW1, 0x91, + avxEscape | vex128 | vexF2 | vex0F | vexW1, 0x93, + avxEscape | vex128 | vex0F | vexW1, 0x90, + avxEscape | vex128 | vexF2 | vex0F | vexW1, 0x92, + }}, + {as: AKMOVW, ytab: _ykmovb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x91, + avxEscape | vex128 | vex0F | vexW0, 0x93, + avxEscape | vex128 | vex0F | vexW0, 0x90, + avxEscape | vex128 | vex0F | vexW0, 0x92, + }}, + {as: AKNOTB, ytab: _yknotb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x44, + }}, + {as: AKNOTD, ytab: _yknotb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW1, 0x44, + }}, + {as: AKNOTQ, ytab: _yknotb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW1, 0x44, + }}, + {as: AKNOTW, ytab: _yknotb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x44, + }}, + {as: AKORB, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x45, + }}, + {as: AKORD, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW1, 0x45, + }}, + {as: AKORQ, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex0F | vexW1, 0x45, + }}, + {as: AKORTESTB, ytab: _yknotb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x98, + }}, + {as: AKORTESTD, ytab: _yknotb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW1, 0x98, + }}, + {as: AKORTESTQ, ytab: _yknotb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW1, 0x98, + }}, + {as: AKORTESTW, ytab: _yknotb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x98, + }}, + {as: AKORW, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex0F | vexW0, 0x45, + }}, + {as: AKSHIFTLB, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x32, + }}, + {as: AKSHIFTLD, ytab: _ykshiftlb, 
prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x33, + }}, + {as: AKSHIFTLQ, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0x33, + }}, + {as: AKSHIFTLW, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0x32, + }}, + {as: AKSHIFTRB, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x30, + }}, + {as: AKSHIFTRD, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x31, + }}, + {as: AKSHIFTRQ, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0x31, + }}, + {as: AKSHIFTRW, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0x30, + }}, + {as: AKTESTB, ytab: _yknotb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x99, + }}, + {as: AKTESTD, ytab: _yknotb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW1, 0x99, + }}, + {as: AKTESTQ, ytab: _yknotb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW1, 0x99, + }}, + {as: AKTESTW, ytab: _yknotb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x99, + }}, + {as: AKUNPCKBW, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x4B, + }}, + {as: AKUNPCKDQ, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex0F | vexW1, 0x4B, + }}, + {as: AKUNPCKWD, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex0F | vexW0, 0x4B, + }}, + {as: AKXNORB, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x46, + }}, + {as: AKXNORD, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW1, 0x46, + }}, + {as: AKXNORQ, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex0F | vexW1, 0x46, + }}, + {as: AKXNORW, ytab: _ykaddb, prefix: Pavx, op: 
opBytes{ + avxEscape | vex256 | vex0F | vexW0, 0x46, + }}, + {as: AKXORB, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x47, + }}, + {as: AKXORD, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW1, 0x47, + }}, + {as: AKXORQ, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex0F | vexW1, 0x47, + }}, + {as: AKXORW, ytab: _ykaddb, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex0F | vexW0, 0x47, + }}, + {as: AMULXL, ytab: _yandnl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F38 | vexW0, 0xF6, + }}, + {as: AMULXQ, ytab: _yandnl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F38 | vexW1, 0xF6, + }}, + {as: APDEPL, ytab: _yandnl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F38 | vexW0, 0xF5, + }}, + {as: APDEPQ, ytab: _yandnl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F38 | vexW1, 0xF5, + }}, + {as: APEXTL, ytab: _yandnl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F38 | vexW0, 0xF5, + }}, + {as: APEXTQ, ytab: _yandnl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F38 | vexW1, 0xF5, + }}, + {as: ARORXL, ytab: _yrorxl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F3A | vexW0, 0xF0, + }}, + {as: ARORXQ, ytab: _yrorxl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F3A | vexW1, 0xF0, + }}, + {as: ASARXL, ytab: _ybextrl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F38 | vexW0, 0xF7, + }}, + {as: ASARXQ, ytab: _ybextrl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F38 | vexW1, 0xF7, + }}, + {as: ASHLXL, ytab: _ybextrl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xF7, + }}, + {as: ASHLXQ, ytab: _ybextrl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xF7, + }}, + {as: ASHRXL, ytab: _ybextrl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | 
vex0F38 | vexW0, 0xF7, + }}, + {as: ASHRXQ, ytab: _ybextrl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F38 | vexW1, 0xF7, + }}, + {as: AV4FMADDPS, ytab: _yv4fmaddps, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evexF2 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x9A, + }}, + {as: AV4FMADDSS, ytab: _yv4fmaddss, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF2 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x9B, + }}, + {as: AV4FNMADDPS, ytab: _yv4fmaddps, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evexF2 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0xAA, + }}, + {as: AV4FNMADDSS, ytab: _yv4fmaddss, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF2 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0xAB, + }}, + {as: AVADDPD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x58, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x58, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x58, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x58, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x58, + }}, + {as: AVADDPS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x58, + avxEscape | vex256 | vex0F | vexW0, 0x58, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x58, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x58, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x58, + }}, + {as: AVADDSD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x58, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x58, + }}, + {as: AVADDSS, ytab: _yvaddsd, prefix: Pavx, op: 
opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x58, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x58, + }}, + {as: AVADDSUBPD, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD0, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD0, + }}, + {as: AVADDSUBPS, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0xD0, + avxEscape | vex256 | vexF2 | vex0F | vexW0, 0xD0, + }}, + {as: AVAESDEC, ytab: _yvaesdec, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xDE, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xDE, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16, 0xDE, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32, 0xDE, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64, 0xDE, + }}, + {as: AVAESDECLAST, ytab: _yvaesdec, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xDF, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xDF, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16, 0xDF, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32, 0xDF, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64, 0xDF, + }}, + {as: AVAESENC, ytab: _yvaesdec, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xDC, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xDC, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16, 0xDC, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32, 0xDC, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64, 0xDC, + }}, + {as: AVAESENCLAST, ytab: _yvaesdec, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xDD, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xDD, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16, 0xDD, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32, 0xDD, + avxEscape | evex512 | evex66 | 
evex0F38 | evexW0, evexN64, 0xDD, + }}, + {as: AVAESIMC, ytab: _yvaesimc, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xDB, + }}, + {as: AVAESKEYGENASSIST, ytab: _yvaeskeygenassist, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0xDF, + }}, + {as: AVALIGND, ytab: _yvalignd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x03, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x03, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x03, + }}, + {as: AVALIGNQ, ytab: _yvalignd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x03, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x03, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x03, + }}, + {as: AVANDNPD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x55, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x55, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x55, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x55, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x55, + }}, + {as: AVANDNPS, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x55, + avxEscape | vex256 | vex0F | vexW0, 0x55, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x55, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x55, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x55, + }}, + {as: AVANDPD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + 
avxEscape | vex128 | vex66 | vex0F | vexW0, 0x54, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x54, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x54, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x54, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x54, + }}, + {as: AVANDPS, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x54, + avxEscape | vex256 | vex0F | vexW0, 0x54, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x54, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x54, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x54, + }}, + {as: AVBLENDMPD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x65, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x65, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x65, + }}, + {as: AVBLENDMPS, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x65, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x65, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x65, + }}, + {as: AVBLENDPD, ytab: _yvblendpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x0D, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x0D, + }}, + {as: AVBLENDPS, ytab: _yvblendpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x0C, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x0C, + }}, + {as: AVBLENDVPD, ytab: _yvblendvpd, prefix: Pavx, op: opBytes{ + 
avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x4B, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x4B, + }}, + {as: AVBLENDVPS, ytab: _yvblendvpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x4A, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x4A, + }}, + {as: AVBROADCASTF128, ytab: _yvbroadcastf128, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x1A, + }}, + {as: AVBROADCASTF32X2, ytab: _yvbroadcastf32x2, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x19, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x19, + }}, + {as: AVBROADCASTF32X4, ytab: _yvbroadcastf32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x1A, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x1A, + }}, + {as: AVBROADCASTF32X8, ytab: _yvbroadcastf32x8, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x1B, + }}, + {as: AVBROADCASTF64X2, ytab: _yvbroadcastf32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x1A, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x1A, + }}, + {as: AVBROADCASTF64X4, ytab: _yvbroadcastf32x8, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x1B, + }}, + {as: AVBROADCASTI128, ytab: _yvbroadcastf128, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x5A, + }}, + {as: AVBROADCASTI32X2, ytab: _yvbroadcasti32x2, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x59, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x59, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN8 | 
evexZeroingEnabled, 0x59, + }}, + {as: AVBROADCASTI32X4, ytab: _yvbroadcastf32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x5A, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x5A, + }}, + {as: AVBROADCASTI32X8, ytab: _yvbroadcastf32x8, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x5B, + }}, + {as: AVBROADCASTI64X2, ytab: _yvbroadcastf32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x5A, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x5A, + }}, + {as: AVBROADCASTI64X4, ytab: _yvbroadcastf32x8, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x5B, + }}, + {as: AVBROADCASTSD, ytab: _yvbroadcastsd, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x19, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x19, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x19, + }}, + {as: AVBROADCASTSS, ytab: _yvbroadcastss, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x18, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x18, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x18, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x18, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x18, + }}, + {as: AVCMPPD, ytab: _yvcmppd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xC2, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xC2, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled, 0xC2, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8, 0xC2, + avxEscape | 
evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8, 0xC2, + }}, + {as: AVCMPPS, ytab: _yvcmppd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0xC2, + avxEscape | vex256 | vex0F | vexW0, 0xC2, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled, 0xC2, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4, 0xC2, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4, 0xC2, + }}, + {as: AVCMPSD, ytab: _yvcmpsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0xC2, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexSaeEnabled, 0xC2, + }}, + {as: AVCMPSS, ytab: _yvcmpsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0xC2, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexSaeEnabled, 0xC2, + }}, + {as: AVCOMISD, ytab: _yvcomisd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x2F, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8 | evexSaeEnabled, 0x2F, + }}, + {as: AVCOMISS, ytab: _yvcomisd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x2F, + avxEscape | evex128 | evex0F | evexW0, evexN4 | evexSaeEnabled, 0x2F, + }}, + {as: AVCOMPRESSPD, ytab: _yvcompresspd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x8A, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x8A, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x8A, + }}, + {as: AVCOMPRESSPS, ytab: _yvcompresspd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x8A, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x8A, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x8A, + }}, + {as: AVCVTDQ2PD, ytab: _yvcvtdq2pd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F 
| vexW0, 0xE6, + avxEscape | vex256 | vexF3 | vex0F | vexW0, 0xE6, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN8 | evexBcstN4 | evexZeroingEnabled, 0xE6, + avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xE6, + avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xE6, + }}, + {as: AVCVTDQ2PS, ytab: _yvcvtdq2ps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x5B, + avxEscape | vex256 | vex0F | vexW0, 0x5B, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x5B, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5B, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x5B, + }}, + {as: AVCVTPD2DQ, ytab: _yvcvtpd2dq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evexF2 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xE6, + }}, + {as: AVCVTPD2DQX, ytab: _yvcvtpd2dqx, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0xE6, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xE6, + }}, + {as: AVCVTPD2DQY, ytab: _yvcvtpd2dqy, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vexF2 | vex0F | vexW0, 0xE6, + avxEscape | evex256 | evexF2 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xE6, + }}, + {as: AVCVTPD2PS, ytab: _yvcvtpd2dq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x5A, + }}, + {as: AVCVTPD2PSX, ytab: _yvcvtpd2dqx, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x5A, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x5A, + }}, + {as: AVCVTPD2PSY, ytab: _yvcvtpd2dqy, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x5A, + 
avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x5A, + }}, + {as: AVCVTPD2QQ, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x7B, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x7B, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x7B, + }}, + {as: AVCVTPD2UDQ, ytab: _yvcvtpd2dq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x79, + }}, + {as: AVCVTPD2UDQX, ytab: _yvcvtpd2udqx, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x79, + }}, + {as: AVCVTPD2UDQY, ytab: _yvcvtpd2udqy, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x79, + }}, + {as: AVCVTPD2UQQ, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x79, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x79, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x79, + }}, + {as: AVCVTPH2PS, ytab: _yvcvtph2ps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x13, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x13, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexSaeEnabled | evexZeroingEnabled, 0x13, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x13, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x13, + }}, + {as: AVCVTPS2DQ, ytab: _yvcvtdq2ps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x5B, + avxEscape | vex256 | 
vex66 | vex0F | vexW0, 0x5B, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x5B, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5B, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x5B, + }}, + {as: AVCVTPS2PD, ytab: _yvcvtph2ps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x5A, + avxEscape | vex256 | vex0F | vexW0, 0x5A, + avxEscape | evex512 | evex0F | evexW0, evexN32 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x5A, + avxEscape | evex128 | evex0F | evexW0, evexN8 | evexBcstN4 | evexZeroingEnabled, 0x5A, + avxEscape | evex256 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5A, + }}, + {as: AVCVTPS2PH, ytab: _yvcvtps2ph, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x1D, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x1D, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN32 | evexSaeEnabled | evexZeroingEnabled, 0x1D, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN8 | evexZeroingEnabled, 0x1D, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x1D, + }}, + {as: AVCVTPS2QQ, ytab: _yvcvtps2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x7B, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN8 | evexBcstN4 | evexZeroingEnabled, 0x7B, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x7B, + }}, + {as: AVCVTPS2UDQ, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x79, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x79, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 
0x79, + }}, + {as: AVCVTPS2UQQ, ytab: _yvcvtps2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x79, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN8 | evexBcstN4 | evexZeroingEnabled, 0x79, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x79, + }}, + {as: AVCVTQQ2PD, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evexF3 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xE6, + avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xE6, + avxEscape | evex256 | evexF3 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xE6, + }}, + {as: AVCVTQQ2PS, ytab: _yvcvtpd2dq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x5B, + }}, + {as: AVCVTQQ2PSX, ytab: _yvcvtpd2udqx, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x5B, + }}, + {as: AVCVTQQ2PSY, ytab: _yvcvtpd2udqy, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x5B, + }}, + {as: AVCVTSD2SI, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x2D, + avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN8 | evexRoundingEnabled, 0x2D, + }}, + {as: AVCVTSD2SIQ, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW1, 0x2D, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled, 0x2D, + }}, + {as: AVCVTSD2SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x5A, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x5A, + }}, + {as: AVCVTSD2USIL, ytab: _yvcvtsd2usil, prefix: 
Pavx, op: opBytes{ + avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN8 | evexRoundingEnabled, 0x79, + }}, + {as: AVCVTSD2USIQ, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled, 0x79, + }}, + {as: AVCVTSI2SDL, ytab: _yvcvtsi2sdl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x2A, + avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN4, 0x2A, + }}, + {as: AVCVTSI2SDQ, ytab: _yvcvtsi2sdl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW1, 0x2A, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled, 0x2A, + }}, + {as: AVCVTSI2SSL, ytab: _yvcvtsi2sdl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x2A, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled, 0x2A, + }}, + {as: AVCVTSI2SSQ, ytab: _yvcvtsi2sdl, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW1, 0x2A, + avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN8 | evexRoundingEnabled, 0x2A, + }}, + {as: AVCVTSS2SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x5A, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x5A, + }}, + {as: AVCVTSS2SI, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x2D, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled, 0x2D, + }}, + {as: AVCVTSS2SIQ, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW1, 0x2D, + avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN4 | evexRoundingEnabled, 0x2D, + }}, + {as: AVCVTSS2USIL, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled, 0x79, + }}, + {as: AVCVTSS2USIQ, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | 
evex0F | evexW1, evexN4 | evexRoundingEnabled, 0x79, + }}, + {as: AVCVTTPD2DQ, ytab: _yvcvtpd2dq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0xE6, + }}, + {as: AVCVTTPD2DQX, ytab: _yvcvtpd2dqx, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE6, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xE6, + }}, + {as: AVCVTTPD2DQY, ytab: _yvcvtpd2dqy, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE6, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xE6, + }}, + {as: AVCVTTPD2QQ, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x7A, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x7A, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x7A, + }}, + {as: AVCVTTPD2UDQ, ytab: _yvcvtpd2dq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex0F | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x78, + }}, + {as: AVCVTTPD2UDQX, ytab: _yvcvtpd2udqx, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x78, + }}, + {as: AVCVTTPD2UDQY, ytab: _yvcvtpd2udqy, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x78, + }}, + {as: AVCVTTPD2UQQ, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x78, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x78, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x78, + }}, + {as: 
AVCVTTPS2DQ, ytab: _yvcvtdq2ps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x5B, + avxEscape | vex256 | vexF3 | vex0F | vexW0, 0x5B, + avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x5B, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5B, + avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x5B, + }}, + {as: AVCVTTPS2QQ, ytab: _yvcvtps2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x7A, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN8 | evexBcstN4 | evexZeroingEnabled, 0x7A, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x7A, + }}, + {as: AVCVTTPS2UDQ, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x78, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x78, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x78, + }}, + {as: AVCVTTPS2UQQ, ytab: _yvcvtps2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x78, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN8 | evexBcstN4 | evexZeroingEnabled, 0x78, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x78, + }}, + {as: AVCVTTSD2SI, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x2C, + avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN8 | evexSaeEnabled, 0x2C, + }}, + {as: AVCVTTSD2SIQ, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW1, 0x2C, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | 
evexSaeEnabled, 0x2C, + }}, + {as: AVCVTTSD2USIL, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN8 | evexSaeEnabled, 0x78, + }}, + {as: AVCVTTSD2USIQ, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexSaeEnabled, 0x78, + }}, + {as: AVCVTTSS2SI, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x2C, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexSaeEnabled, 0x2C, + }}, + {as: AVCVTTSS2SIQ, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW1, 0x2C, + avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN4 | evexSaeEnabled, 0x2C, + }}, + {as: AVCVTTSS2USIL, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexSaeEnabled, 0x78, + }}, + {as: AVCVTTSS2USIQ, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN4 | evexSaeEnabled, 0x78, + }}, + {as: AVCVTUDQ2PD, ytab: _yvcvtudq2pd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN8 | evexBcstN4 | evexZeroingEnabled, 0x7A, + avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x7A, + avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x7A, + }}, + {as: AVCVTUDQ2PS, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evexF2 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x7A, + avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x7A, + avxEscape | evex256 | evexF2 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x7A, + }}, + {as: AVCVTUQQ2PD, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evexF3 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | 
evexZeroingEnabled, 0x7A, + avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x7A, + avxEscape | evex256 | evexF3 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x7A, + }}, + {as: AVCVTUQQ2PS, ytab: _yvcvtpd2dq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evexF2 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x7A, + }}, + {as: AVCVTUQQ2PSX, ytab: _yvcvtpd2udqx, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x7A, + }}, + {as: AVCVTUQQ2PSY, ytab: _yvcvtpd2udqy, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evexF2 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x7A, + }}, + {as: AVCVTUSI2SDL, ytab: _yvcvtusi2sdl, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN4, 0x7B, + }}, + {as: AVCVTUSI2SDQ, ytab: _yvcvtusi2sdl, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled, 0x7B, + }}, + {as: AVCVTUSI2SSL, ytab: _yvcvtusi2sdl, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled, 0x7B, + }}, + {as: AVCVTUSI2SSQ, ytab: _yvcvtusi2sdl, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN8 | evexRoundingEnabled, 0x7B, + }}, + {as: AVDBPSADBW, ytab: _yvalignd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x42, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexZeroingEnabled, 0x42, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexZeroingEnabled, 0x42, + }}, + {as: AVDIVPD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x5E, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x5E, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | 
evexZeroingEnabled, 0x5E, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x5E, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x5E, + }}, + {as: AVDIVPS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x5E, + avxEscape | vex256 | vex0F | vexW0, 0x5E, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x5E, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5E, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x5E, + }}, + {as: AVDIVSD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x5E, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x5E, + }}, + {as: AVDIVSS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x5E, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x5E, + }}, + {as: AVDPPD, ytab: _yvdppd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x41, + }}, + {as: AVDPPS, ytab: _yvblendpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x40, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x40, + }}, + {as: AVEXP2PD, ytab: _yvexp2pd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0xC8, + }}, + {as: AVEXP2PS, ytab: _yvexp2pd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0xC8, + }}, + {as: AVEXPANDPD, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x88, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8 | 
evexZeroingEnabled, 0x88, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x88, + }}, + {as: AVEXPANDPS, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x88, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x88, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x88, + }}, + {as: AVEXTRACTF128, ytab: _yvextractf128, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x19, + }}, + {as: AVEXTRACTF32X4, ytab: _yvextractf32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x19, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x19, + }}, + {as: AVEXTRACTF32X8, ytab: _yvextractf32x8, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN32 | evexZeroingEnabled, 0x1B, + }}, + {as: AVEXTRACTF64X2, ytab: _yvextractf32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x19, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x19, + }}, + {as: AVEXTRACTF64X4, ytab: _yvextractf32x8, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN32 | evexZeroingEnabled, 0x1B, + }}, + {as: AVEXTRACTI128, ytab: _yvextractf128, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x39, + }}, + {as: AVEXTRACTI32X4, ytab: _yvextractf32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x39, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x39, + }}, + {as: AVEXTRACTI32X8, ytab: _yvextractf32x8, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN32 | evexZeroingEnabled, 0x3B, + }}, + {as: 
AVEXTRACTI64X2, ytab: _yvextractf32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x39, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x39, + }}, + {as: AVEXTRACTI64X4, ytab: _yvextractf32x8, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN32 | evexZeroingEnabled, 0x3B, + }}, + {as: AVEXTRACTPS, ytab: _yvextractps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x17, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4, 0x17, + }}, + {as: AVFIXUPIMMPD, ytab: _yvfixupimmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x54, + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x54, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x54, + }}, + {as: AVFIXUPIMMPS, ytab: _yvfixupimmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x54, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x54, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x54, + }}, + {as: AVFIXUPIMMSD, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x55, + }}, + {as: AVFIXUPIMMSS, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x55, + }}, + {as: AVFMADD132PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x98, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x98, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | 
evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x98, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x98, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x98, + }}, + {as: AVFMADD132PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x98, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x98, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x98, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x98, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x98, + }}, + {as: AVFMADD132SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x99, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x99, + }}, + {as: AVFMADD132SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x99, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x99, + }}, + {as: AVFMADD213PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xA8, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xA8, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xA8, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xA8, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xA8, + }}, + {as: AVFMADD213PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xA8, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xA8, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | 
evexRoundingEnabled | evexZeroingEnabled, 0xA8, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xA8, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xA8, + }}, + {as: AVFMADD213SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xA9, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xA9, + }}, + {as: AVFMADD213SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xA9, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xA9, + }}, + {as: AVFMADD231PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xB8, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xB8, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xB8, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xB8, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xB8, + }}, + {as: AVFMADD231PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xB8, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xB8, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xB8, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xB8, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xB8, + }}, + {as: AVFMADD231SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xB9, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xB9, + }}, + {as: AVFMADD231SS, ytab: _yvaddsd, 
prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xB9, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xB9, + }}, + {as: AVFMADDSUB132PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x96, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x96, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x96, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x96, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x96, + }}, + {as: AVFMADDSUB132PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x96, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x96, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x96, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x96, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x96, + }}, + {as: AVFMADDSUB213PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xA6, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xA6, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xA6, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xA6, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xA6, + }}, + {as: AVFMADDSUB213PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xA6, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xA6, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | 
evexZeroingEnabled, 0xA6, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xA6, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xA6, + }}, + {as: AVFMADDSUB231PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xB6, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xB6, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xB6, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xB6, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xB6, + }}, + {as: AVFMADDSUB231PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xB6, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xB6, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xB6, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xB6, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xB6, + }}, + {as: AVFMSUB132PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x9A, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x9A, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x9A, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x9A, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x9A, + }}, + {as: AVFMSUB132PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x9A, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x9A, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | 
evexRoundingEnabled | evexZeroingEnabled, 0x9A, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x9A, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x9A, + }}, + {as: AVFMSUB132SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x9B, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x9B, + }}, + {as: AVFMSUB132SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x9B, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x9B, + }}, + {as: AVFMSUB213PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xAA, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xAA, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xAA, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xAA, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xAA, + }}, + {as: AVFMSUB213PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xAA, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xAA, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xAA, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xAA, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xAA, + }}, + {as: AVFMSUB213SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xAB, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xAB, + }}, + {as: AVFMSUB213SS, ytab: _yvaddsd, 
prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xAB, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xAB, + }}, + {as: AVFMSUB231PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xBA, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xBA, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xBA, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xBA, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xBA, + }}, + {as: AVFMSUB231PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xBA, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xBA, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xBA, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xBA, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xBA, + }}, + {as: AVFMSUB231SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xBB, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xBB, + }}, + {as: AVFMSUB231SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xBB, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xBB, + }}, + {as: AVFMSUBADD132PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x97, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x97, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x97, + avxEscape | evex128 | evex66 | 
evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x97, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x97, + }}, + {as: AVFMSUBADD132PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x97, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x97, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x97, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x97, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x97, + }}, + {as: AVFMSUBADD213PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xA7, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xA7, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xA7, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xA7, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xA7, + }}, + {as: AVFMSUBADD213PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xA7, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xA7, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xA7, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xA7, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xA7, + }}, + {as: AVFMSUBADD231PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xB7, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xB7, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xB7, + 
avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xB7, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xB7, + }}, + {as: AVFMSUBADD231PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xB7, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xB7, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xB7, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xB7, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xB7, + }}, + {as: AVFNMADD132PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x9C, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x9C, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x9C, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x9C, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x9C, + }}, + {as: AVFNMADD132PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x9C, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x9C, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x9C, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x9C, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x9C, + }}, + {as: AVFNMADD132SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x9D, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x9D, + }}, + {as: AVFNMADD132SS, ytab: _yvaddsd, prefix: 
Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x9D, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x9D, + }}, + {as: AVFNMADD213PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xAC, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xAC, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xAC, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xAC, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xAC, + }}, + {as: AVFNMADD213PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xAC, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xAC, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xAC, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xAC, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xAC, + }}, + {as: AVFNMADD213SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xAD, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xAD, + }}, + {as: AVFNMADD213SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xAD, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xAD, + }}, + {as: AVFNMADD231PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xBC, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xBC, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xBC, + avxEscape | evex128 | evex66 | 
evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xBC, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xBC, + }}, + {as: AVFNMADD231PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xBC, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xBC, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xBC, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xBC, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xBC, + }}, + {as: AVFNMADD231SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xBD, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xBD, + }}, + {as: AVFNMADD231SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xBD, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xBD, + }}, + {as: AVFNMSUB132PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x9E, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x9E, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x9E, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x9E, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x9E, + }}, + {as: AVFNMSUB132PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x9E, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x9E, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x9E, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, 
evexN16 | evexBcstN4 | evexZeroingEnabled, 0x9E, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x9E, + }}, + {as: AVFNMSUB132SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x9F, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x9F, + }}, + {as: AVFNMSUB132SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x9F, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x9F, + }}, + {as: AVFNMSUB213PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xAE, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xAE, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xAE, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xAE, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xAE, + }}, + {as: AVFNMSUB213PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xAE, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xAE, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xAE, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xAE, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xAE, + }}, + {as: AVFNMSUB213SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xAF, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xAF, + }}, + {as: AVFNMSUB213SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xAF, + avxEscape | 
evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xAF, + }}, + {as: AVFNMSUB231PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xBE, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xBE, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xBE, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xBE, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xBE, + }}, + {as: AVFNMSUB231PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xBE, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xBE, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xBE, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xBE, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xBE, + }}, + {as: AVFNMSUB231SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xBF, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xBF, + }}, + {as: AVFNMSUB231SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xBF, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xBF, + }}, + {as: AVFPCLASSPDX, ytab: _yvfpclasspdx, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8, 0x66, + }}, + {as: AVFPCLASSPDY, ytab: _yvfpclasspdy, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8, 0x66, + }}, + {as: AVFPCLASSPDZ, ytab: _yvfpclasspdz, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | 
evexW1, evexN64 | evexBcstN8, 0x66, + }}, + {as: AVFPCLASSPSX, ytab: _yvfpclasspdx, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4, 0x66, + }}, + {as: AVFPCLASSPSY, ytab: _yvfpclasspdy, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4, 0x66, + }}, + {as: AVFPCLASSPSZ, ytab: _yvfpclasspdz, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4, 0x66, + }}, + {as: AVFPCLASSSD, ytab: _yvfpclasspdx, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8, 0x67, + }}, + {as: AVFPCLASSSS, ytab: _yvfpclasspdx, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4, 0x67, + }}, + {as: AVGATHERDPD, ytab: _yvgatherdpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x92, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x92, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0x92, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0x92, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0x92, + }}, + {as: AVGATHERDPS, ytab: _yvgatherdps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x92, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x92, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0x92, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0x92, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0x92, + }}, + {as: AVGATHERPF0DPD, ytab: _yvgatherpf0dpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC6, 01, + }}, + {as: AVGATHERPF0DPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC6, 01, + }}, + {as: AVGATHERPF0QPD, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC7, 01, + }}, + {as: 
AVGATHERPF0QPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC7, 01, + }}, + {as: AVGATHERPF1DPD, ytab: _yvgatherpf0dpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC6, 02, + }}, + {as: AVGATHERPF1DPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC6, 02, + }}, + {as: AVGATHERPF1QPD, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC7, 02, + }}, + {as: AVGATHERPF1QPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC7, 02, + }}, + {as: AVGATHERQPD, ytab: _yvgatherdps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x93, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x93, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0x93, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0x93, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0x93, + }}, + {as: AVGATHERQPS, ytab: _yvgatherqps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x93, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x93, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0x93, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0x93, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0x93, + }}, + {as: AVGETEXPPD, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x42, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x42, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x42, + }}, + {as: AVGETEXPPS, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | 
evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x42, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x42, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x42, + }}, + {as: AVGETEXPSD, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x43, + }}, + {as: AVGETEXPSS, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x43, + }}, + {as: AVGETMANTPD, ytab: _yvgetmantpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x26, + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x26, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x26, + }}, + {as: AVGETMANTPS, ytab: _yvgetmantpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x26, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x26, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x26, + }}, + {as: AVGETMANTSD, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x27, + }}, + {as: AVGETMANTSS, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x27, + }}, + {as: AVGF2P8AFFINEINVQB, ytab: _yvgf2p8affineinvqb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0xCF, + avxEscape | vex256 | vex66 | vex0F3A | vexW1, 0xCF, + avxEscape | evex128 | evex66 | 
evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xCF, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xCF, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xCF, + }}, + {as: AVGF2P8AFFINEQB, ytab: _yvgf2p8affineinvqb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0xCE, + avxEscape | vex256 | vex66 | vex0F3A | vexW1, 0xCE, + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xCE, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xCE, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xCE, + }}, + {as: AVGF2P8MULB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xCF, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xCF, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0xCF, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0xCF, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0xCF, + }}, + {as: AVHADDPD, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x7C, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x7C, + }}, + {as: AVHADDPS, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x7C, + avxEscape | vex256 | vexF2 | vex0F | vexW0, 0x7C, + }}, + {as: AVHSUBPD, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x7D, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x7D, + }}, + {as: AVHSUBPS, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x7D, + avxEscape | vex256 | vexF2 | vex0F | vexW0, 0x7D, + }}, + {as: AVINSERTF128, ytab: _yvinsertf128, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | 
vex66 | vex0F3A | vexW0, 0x18, + }}, + {as: AVINSERTF32X4, ytab: _yvinsertf32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x18, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x18, + }}, + {as: AVINSERTF32X8, ytab: _yvinsertf32x8, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN32 | evexZeroingEnabled, 0x1A, + }}, + {as: AVINSERTF64X2, ytab: _yvinsertf32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x18, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x18, + }}, + {as: AVINSERTF64X4, ytab: _yvinsertf32x8, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN32 | evexZeroingEnabled, 0x1A, + }}, + {as: AVINSERTI128, ytab: _yvinsertf128, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x38, + }}, + {as: AVINSERTI32X4, ytab: _yvinsertf32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x38, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x38, + }}, + {as: AVINSERTI32X8, ytab: _yvinsertf32x8, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN32 | evexZeroingEnabled, 0x3A, + }}, + {as: AVINSERTI64X2, ytab: _yvinsertf32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x38, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x38, + }}, + {as: AVINSERTI64X4, ytab: _yvinsertf32x8, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN32 | evexZeroingEnabled, 0x3A, + }}, + {as: AVINSERTPS, ytab: _yvinsertps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x21, + avxEscape | evex128 | evex66 | evex0F3A 
| evexW0, evexN4, 0x21, + }}, + {as: AVLDDQU, ytab: _yvlddqu, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0xF0, + avxEscape | vex256 | vexF2 | vex0F | vexW0, 0xF0, + }}, + {as: AVLDMXCSR, ytab: _yvldmxcsr, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0xAE, 02, + }}, + {as: AVMASKMOVDQU, ytab: _yvmaskmovdqu, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF7, + }}, + {as: AVMASKMOVPD, ytab: _yvmaskmovpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x2F, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x2F, + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x2D, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x2D, + }}, + {as: AVMASKMOVPS, ytab: _yvmaskmovpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x2E, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x2E, + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x2C, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x2C, + }}, + {as: AVMAXPD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x5F, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x5F, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x5F, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x5F, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x5F, + }}, + {as: AVMAXPS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x5F, + avxEscape | vex256 | vex0F | vexW0, 0x5F, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x5F, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5F, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x5F, + }}, + {as: AVMAXSD, ytab: _yvaddsd, prefix: Pavx, op: 
opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x5F, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x5F, + }}, + {as: AVMAXSS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x5F, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x5F, + }}, + {as: AVMINPD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x5D, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x5D, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x5D, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x5D, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x5D, + }}, + {as: AVMINPS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x5D, + avxEscape | vex256 | vex0F | vexW0, 0x5D, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x5D, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5D, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x5D, + }}, + {as: AVMINSD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x5D, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x5D, + }}, + {as: AVMINSS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x5D, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x5D, + }}, + {as: AVMOVAPD, ytab: _yvmovapd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x29, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x29, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x28, + avxEscape | vex256 | vex66 | 
vex0F | vexW0, 0x28, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x29, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x29, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x29, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x28, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x28, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x28, + }}, + {as: AVMOVAPS, ytab: _yvmovapd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x29, + avxEscape | vex256 | vex0F | vexW0, 0x29, + avxEscape | vex128 | vex0F | vexW0, 0x28, + avxEscape | vex256 | vex0F | vexW0, 0x28, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x29, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x29, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x29, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x28, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x28, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x28, + }}, + {as: AVMOVD, ytab: _yvmovd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x7E, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x6E, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN4, 0x7E, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN4, 0x6E, + }}, + {as: AVMOVDDUP, ytab: _yvmovddup, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x12, + avxEscape | vex256 | vexF2 | vex0F | vexW0, 0x12, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexZeroingEnabled, 0x12, + avxEscape | evex256 | evexF2 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x12, + avxEscape | evex512 | evexF2 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x12, + }}, + {as: AVMOVDQA, ytab: 
_yvmovdqa, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x7F, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x7F, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x6F, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x6F, + }}, + {as: AVMOVDQA32, ytab: _yvmovdqa32, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x7F, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x7F, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x7F, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x6F, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x6F, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x6F, + }}, + {as: AVMOVDQA64, ytab: _yvmovdqa32, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x7F, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x7F, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x7F, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x6F, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x6F, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x6F, + }}, + {as: AVMOVDQU, ytab: _yvmovdqa, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x7F, + avxEscape | vex256 | vexF3 | vex0F | vexW0, 0x7F, + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x6F, + avxEscape | vex256 | vexF3 | vex0F | vexW0, 0x6F, + }}, + {as: AVMOVDQU16, ytab: _yvmovdqa32, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x7F, + avxEscape | evex256 | evexF2 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x7F, + avxEscape | evex512 | evexF2 | evex0F | evexW1, evexN64 | 
evexZeroingEnabled, 0x7F, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x6F, + avxEscape | evex256 | evexF2 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x6F, + avxEscape | evex512 | evexF2 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x6F, + }}, + {as: AVMOVDQU32, ytab: _yvmovdqa32, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x7F, + avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x7F, + avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x7F, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x6F, + avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x6F, + avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x6F, + }}, + {as: AVMOVDQU64, ytab: _yvmovdqa32, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x7F, + avxEscape | evex256 | evexF3 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x7F, + avxEscape | evex512 | evexF3 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x7F, + avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x6F, + avxEscape | evex256 | evexF3 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x6F, + avxEscape | evex512 | evexF3 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x6F, + }}, + {as: AVMOVDQU8, ytab: _yvmovdqa32, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x7F, + avxEscape | evex256 | evexF2 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x7F, + avxEscape | evex512 | evexF2 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x7F, + avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x6F, + avxEscape | evex256 | evexF2 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x6F, + avxEscape | evex512 | evexF2 | evex0F | 
evexW0, evexN64 | evexZeroingEnabled, 0x6F, + }}, + {as: AVMOVHLPS, ytab: _yvmovhlps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x12, + avxEscape | evex128 | evex0F | evexW0, 0, 0x12, + }}, + {as: AVMOVHPD, ytab: _yvmovhpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x17, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x16, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8, 0x17, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8, 0x16, + }}, + {as: AVMOVHPS, ytab: _yvmovhpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x17, + avxEscape | vex128 | vex0F | vexW0, 0x16, + avxEscape | evex128 | evex0F | evexW0, evexN8, 0x17, + avxEscape | evex128 | evex0F | evexW0, evexN8, 0x16, + }}, + {as: AVMOVLHPS, ytab: _yvmovhlps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x16, + avxEscape | evex128 | evex0F | evexW0, 0, 0x16, + }}, + {as: AVMOVLPD, ytab: _yvmovhpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x13, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x12, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8, 0x13, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8, 0x12, + }}, + {as: AVMOVLPS, ytab: _yvmovhpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x13, + avxEscape | vex128 | vex0F | vexW0, 0x12, + avxEscape | evex128 | evex0F | evexW0, evexN8, 0x13, + avxEscape | evex128 | evex0F | evexW0, evexN8, 0x12, + }}, + {as: AVMOVMSKPD, ytab: _yvmovmskpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x50, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x50, + }}, + {as: AVMOVMSKPS, ytab: _yvmovmskpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x50, + avxEscape | vex256 | vex0F | vexW0, 0x50, + }}, + {as: AVMOVNTDQ, ytab: _yvmovntdq, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE7, + avxEscape | vex256 | vex66 | vex0F | vexW0, 
0xE7, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0xE7, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0xE7, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0xE7, + }}, + {as: AVMOVNTDQA, ytab: _yvmovntdqa, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x2A, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x2A, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16, 0x2A, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32, 0x2A, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64, 0x2A, + }}, + {as: AVMOVNTPD, ytab: _yvmovntdq, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x2B, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x2B, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16, 0x2B, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32, 0x2B, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64, 0x2B, + }}, + {as: AVMOVNTPS, ytab: _yvmovntdq, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x2B, + avxEscape | vex256 | vex0F | vexW0, 0x2B, + avxEscape | evex128 | evex0F | evexW0, evexN16, 0x2B, + avxEscape | evex256 | evex0F | evexW0, evexN32, 0x2B, + avxEscape | evex512 | evex0F | evexW0, evexN64, 0x2B, + }}, + {as: AVMOVQ, ytab: _yvmovq, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW1, 0x7E, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD6, + avxEscape | vex128 | vex66 | vex0F | vexW1, 0x6E, + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x7E, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8, 0x7E, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8, 0xD6, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8, 0x6E, + avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN8, 0x7E, + }}, + {as: AVMOVSD, ytab: _yvmovsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x11, + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x11, + avxEscape | vex128 | vexF2 
| vex0F | vexW0, 0x10, + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x10, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexZeroingEnabled, 0x11, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8, 0x11, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexZeroingEnabled, 0x10, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexZeroingEnabled, 0x10, + }}, + {as: AVMOVSHDUP, ytab: _yvmovddup, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x16, + avxEscape | vex256 | vexF3 | vex0F | vexW0, 0x16, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x16, + avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x16, + avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x16, + }}, + {as: AVMOVSLDUP, ytab: _yvmovddup, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x12, + avxEscape | vex256 | vexF3 | vex0F | vexW0, 0x12, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x12, + avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x12, + avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x12, + }}, + {as: AVMOVSS, ytab: _yvmovsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x11, + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x11, + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x10, + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x10, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexZeroingEnabled, 0x11, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4, 0x11, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexZeroingEnabled, 0x10, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexZeroingEnabled, 0x10, + }}, + {as: AVMOVUPD, ytab: _yvmovapd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x11, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x11, + avxEscape | 
vex128 | vex66 | vex0F | vexW0, 0x10, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x10, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x11, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x11, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x11, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x10, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x10, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x10, + }}, + {as: AVMOVUPS, ytab: _yvmovapd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x11, + avxEscape | vex256 | vex0F | vexW0, 0x11, + avxEscape | vex128 | vex0F | vexW0, 0x10, + avxEscape | vex256 | vex0F | vexW0, 0x10, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x11, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x11, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x11, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x10, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x10, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x10, + }}, + {as: AVMPSADBW, ytab: _yvblendpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x42, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x42, + }}, + {as: AVMULPD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x59, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x59, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x59, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x59, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x59, + }}, + {as: AVMULPS, 
ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x59, + avxEscape | vex256 | vex0F | vexW0, 0x59, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x59, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x59, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x59, + }}, + {as: AVMULSD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x59, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x59, + }}, + {as: AVMULSS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x59, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x59, + }}, + {as: AVORPD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x56, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x56, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x56, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x56, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x56, + }}, + {as: AVORPS, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x56, + avxEscape | vex256 | vex0F | vexW0, 0x56, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x56, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x56, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x56, + }}, + {as: AVP4DPWSSD, ytab: _yv4fmaddps, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evexF2 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x52, + }}, + {as: AVP4DPWSSDS, ytab: _yv4fmaddps, prefix: Pavx, op: opBytes{ + 
avxEscape | evex512 | evexF2 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x53, + }}, + {as: AVPABSB, ytab: _yvmovddup, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x1C, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x1C, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x1C, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x1C, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x1C, + }}, + {as: AVPABSD, ytab: _yvmovddup, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x1E, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x1E, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x1E, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x1E, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x1E, + }}, + {as: AVPABSQ, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x1F, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x1F, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x1F, + }}, + {as: AVPABSW, ytab: _yvmovddup, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x1D, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x1D, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x1D, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x1D, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x1D, + }}, + {as: AVPACKSSDW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x6B, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x6B, + avxEscape | 
evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x6B, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x6B, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x6B, + }}, + {as: AVPACKSSWB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x63, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x63, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x63, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x63, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x63, + }}, + {as: AVPACKUSDW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x2B, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x2B, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x2B, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x2B, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x2B, + }}, + {as: AVPACKUSWB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x67, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x67, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x67, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x67, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x67, + }}, + {as: AVPADDB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xFC, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xFC, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xFC, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xFC, + avxEscape | evex512 | evex66 | 
evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xFC, + }}, + {as: AVPADDD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xFE, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xFE, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xFE, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xFE, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xFE, + }}, + {as: AVPADDQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD4, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD4, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xD4, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xD4, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xD4, + }}, + {as: AVPADDSB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xEC, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xEC, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xEC, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xEC, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xEC, + }}, + {as: AVPADDSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xED, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xED, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xED, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xED, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xED, + }}, + {as: AVPADDUSB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xDC, + avxEscape | vex256 | vex66 | 
vex0F | vexW0, 0xDC, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xDC, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xDC, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xDC, + }}, + {as: AVPADDUSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xDD, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xDD, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xDD, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xDD, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xDD, + }}, + {as: AVPADDW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xFD, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xFD, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xFD, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xFD, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xFD, + }}, + {as: AVPALIGNR, ytab: _yvgf2p8affineinvqb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x0F, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x0F, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x0F, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexZeroingEnabled, 0x0F, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexZeroingEnabled, 0x0F, + }}, + {as: AVPAND, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xDB, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xDB, + }}, + {as: AVPANDD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xDB, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | 
evexZeroingEnabled, 0xDB, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xDB, + }}, + {as: AVPANDN, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xDF, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xDF, + }}, + {as: AVPANDND, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xDF, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xDF, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xDF, + }}, + {as: AVPANDNQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xDF, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xDF, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xDF, + }}, + {as: AVPANDQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xDB, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xDB, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xDB, + }}, + {as: AVPAVGB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE0, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE0, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE0, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xE0, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xE0, + }}, + {as: AVPAVGW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE3, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE3, + 
avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE3, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xE3, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xE3, + }}, + {as: AVPBLENDD, ytab: _yvblendpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x02, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x02, + }}, + {as: AVPBLENDMB, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x66, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x66, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x66, + }}, + {as: AVPBLENDMD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x64, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x64, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x64, + }}, + {as: AVPBLENDMQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x64, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x64, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x64, + }}, + {as: AVPBLENDMW, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x66, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x66, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x66, + }}, + {as: AVPBLENDVB, ytab: _yvblendvpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x4C, + avxEscape | vex256 | 
vex66 | vex0F3A | vexW0, 0x4C, + }}, + {as: AVPBLENDW, ytab: _yvblendpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x0E, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x0E, + }}, + {as: AVPBROADCASTB, ytab: _yvpbroadcastb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x78, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x78, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7A, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7A, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7A, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x78, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x78, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x78, + }}, + {as: AVPBROADCASTD, ytab: _yvpbroadcastb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x58, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x58, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7C, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7C, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7C, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x58, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x58, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x58, + }}, + {as: AVPBROADCASTMB2Q, ytab: _yvpbroadcastmb2q, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW1, 0, 0x2A, + avxEscape | evex256 | evexF3 | evex0F38 | evexW1, 0, 0x2A, + avxEscape | evex512 | evexF3 | evex0F38 | evexW1, 0, 0x2A, + }}, + {as: AVPBROADCASTMW2D, ytab: _yvpbroadcastmb2q, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, 0, 0x3A, + avxEscape | evex256 
| evexF3 | evex0F38 | evexW0, 0, 0x3A, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, 0, 0x3A, + }}, + {as: AVPBROADCASTQ, ytab: _yvpbroadcastb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x59, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x59, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexZeroingEnabled, 0x7C, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexZeroingEnabled, 0x7C, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexZeroingEnabled, 0x7C, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x59, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x59, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x59, + }}, + {as: AVPBROADCASTW, ytab: _yvpbroadcastb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x79, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x79, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7B, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7B, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7B, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x79, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x79, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x79, + }}, + {as: AVPCLMULQDQ, ytab: _yvpclmulqdq, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x44, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x44, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16, 0x44, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32, 0x44, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64, 0x44, + }}, + {as: AVPCMPB, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16, 0x3F, + avxEscape | evex256 | 
evex66 | evex0F3A | evexW0, evexN32, 0x3F, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64, 0x3F, + }}, + {as: AVPCMPD, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4, 0x1F, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4, 0x1F, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4, 0x1F, + }}, + {as: AVPCMPEQB, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x74, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x74, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0x74, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0x74, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0x74, + }}, + {as: AVPCMPEQD, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x76, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x76, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4, 0x76, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4, 0x76, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4, 0x76, + }}, + {as: AVPCMPEQQ, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x29, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x29, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8, 0x29, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8, 0x29, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8, 0x29, + }}, + {as: AVPCMPEQW, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x75, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x75, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0x75, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0x75, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0x75, + }}, + {as: 
AVPCMPESTRI, ytab: _yvaeskeygenassist, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexWIG, 0x61, + }}, + {as: AVPCMPESTRM, ytab: _yvaeskeygenassist, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexWIG, 0x60, + }}, + {as: AVPCMPGTB, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x64, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x64, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0x64, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0x64, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0x64, + }}, + {as: AVPCMPGTD, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x66, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x66, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4, 0x66, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4, 0x66, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4, 0x66, + }}, + {as: AVPCMPGTQ, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x37, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x37, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8, 0x37, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8, 0x37, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8, 0x37, + }}, + {as: AVPCMPGTW, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x65, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x65, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0x65, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0x65, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0x65, + }}, + {as: AVPCMPISTRI, ytab: _yvaeskeygenassist, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexWIG, 0x63, + }}, + {as: AVPCMPISTRM, ytab: 
_yvaeskeygenassist, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x62, + }}, + {as: AVPCMPQ, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8, 0x1F, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8, 0x1F, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8, 0x1F, + }}, + {as: AVPCMPUB, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16, 0x3E, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32, 0x3E, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64, 0x3E, + }}, + {as: AVPCMPUD, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4, 0x1E, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4, 0x1E, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4, 0x1E, + }}, + {as: AVPCMPUQ, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8, 0x1E, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8, 0x1E, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8, 0x1E, + }}, + {as: AVPCMPUW, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16, 0x3E, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32, 0x3E, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64, 0x3E, + }}, + {as: AVPCMPW, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16, 0x3F, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32, 0x3F, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64, 0x3F, + }}, + {as: AVPCOMPRESSB, ytab: _yvcompresspd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x63, + avxEscape | 
evex256 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x63, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x63, + }}, + {as: AVPCOMPRESSD, ytab: _yvcompresspd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x8B, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x8B, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x8B, + }}, + {as: AVPCOMPRESSQ, ytab: _yvcompresspd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x8B, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x8B, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x8B, + }}, + {as: AVPCOMPRESSW, ytab: _yvcompresspd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN2 | evexZeroingEnabled, 0x63, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN2 | evexZeroingEnabled, 0x63, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN2 | evexZeroingEnabled, 0x63, + }}, + {as: AVPCONFLICTD, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xC4, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xC4, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xC4, + }}, + {as: AVPCONFLICTQ, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xC4, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xC4, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xC4, + }}, + {as: AVPDPBUSD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + 
avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x50, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x50, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x50, + }}, + {as: AVPDPBUSDS, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x51, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x51, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x51, + }}, + {as: AVPDPWSSD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x52, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x52, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x52, + }}, + {as: AVPDPWSSDS, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x53, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x53, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x53, + }}, + {as: AVPERM2F128, ytab: _yvperm2f128, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x06, + }}, + {as: AVPERM2I128, ytab: _yvperm2f128, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x46, + }}, + {as: AVPERMB, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x8D, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x8D, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | 
evexZeroingEnabled, 0x8D, + }}, + {as: AVPERMD, ytab: _yvpermd, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x36, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x36, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x36, + }}, + {as: AVPERMI2B, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x75, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x75, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x75, + }}, + {as: AVPERMI2D, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x76, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x76, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x76, + }}, + {as: AVPERMI2PD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x77, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x77, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x77, + }}, + {as: AVPERMI2PS, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x77, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x77, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x77, + }}, + {as: AVPERMI2Q, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x76, + avxEscape | evex256 | 
evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x76, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x76, + }}, + {as: AVPERMI2W, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x75, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x75, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x75, + }}, + {as: AVPERMILPD, ytab: _yvpermilpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x05, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x05, + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x0D, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x0D, + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x05, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x05, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x05, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x0D, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x0D, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x0D, + }}, + {as: AVPERMILPS, ytab: _yvpermilpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x04, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x04, + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x0C, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x0C, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x04, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x04, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x04, + 
avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x0C, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x0C, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x0C, + }}, + {as: AVPERMPD, ytab: _yvpermq, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F3A | vexW1, 0x01, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x01, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x01, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x16, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x16, + }}, + {as: AVPERMPS, ytab: _yvpermd, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x16, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x16, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x16, + }}, + {as: AVPERMQ, ytab: _yvpermq, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex66 | vex0F3A | vexW1, 0x00, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x00, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x00, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x36, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x36, + }}, + {as: AVPERMT2B, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x7D, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x7D, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 
0x7D, + }}, + {as: AVPERMT2D, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x7E, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x7E, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x7E, + }}, + {as: AVPERMT2PD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x7F, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x7F, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x7F, + }}, + {as: AVPERMT2PS, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x7F, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x7F, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x7F, + }}, + {as: AVPERMT2Q, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x7E, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x7E, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x7E, + }}, + {as: AVPERMT2W, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x7D, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x7D, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x7D, + }}, + {as: AVPERMW, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x8D, + 
avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x8D, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x8D, + }}, + {as: AVPEXPANDB, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x62, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x62, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x62, + }}, + {as: AVPEXPANDD, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x89, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x89, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x89, + }}, + {as: AVPEXPANDQ, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x89, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x89, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x89, + }}, + {as: AVPEXPANDW, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN2 | evexZeroingEnabled, 0x62, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN2 | evexZeroingEnabled, 0x62, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN2 | evexZeroingEnabled, 0x62, + }}, + {as: AVPEXTRB, ytab: _yvextractps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x14, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN1, 0x14, + }}, + {as: AVPEXTRD, ytab: _yvextractps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x16, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4, 0x16, + }}, + {as: AVPEXTRQ, ytab: _yvextractps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW1, 
0x16, + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8, 0x16, + }}, + {as: AVPEXTRW, ytab: _yvpextrw, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x15, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xC5, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN2, 0x15, + avxEscape | evex128 | evex66 | evex0F | evexW0, 0, 0xC5, + }}, + {as: AVPGATHERDD, ytab: _yvgatherdps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x90, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x90, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0x90, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0x90, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0x90, + }}, + {as: AVPGATHERDQ, ytab: _yvgatherdpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x90, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x90, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0x90, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0x90, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0x90, + }}, + {as: AVPGATHERQD, ytab: _yvgatherqps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x91, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x91, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0x91, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0x91, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0x91, + }}, + {as: AVPGATHERQQ, ytab: _yvgatherdps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x91, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x91, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0x91, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0x91, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0x91, + }}, + {as: AVPHADDD, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | 
vexW0, 0x02, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x02, + }}, + {as: AVPHADDSW, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x03, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x03, + }}, + {as: AVPHADDW, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x01, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x01, + }}, + {as: AVPHMINPOSUW, ytab: _yvaesimc, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x41, + }}, + {as: AVPHSUBD, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x06, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x06, + }}, + {as: AVPHSUBSW, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x07, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x07, + }}, + {as: AVPHSUBW, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x05, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x05, + }}, + {as: AVPINSRB, ytab: _yvpinsrb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x20, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN1, 0x20, + }}, + {as: AVPINSRD, ytab: _yvpinsrb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x22, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4, 0x22, + }}, + {as: AVPINSRQ, ytab: _yvpinsrb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0x22, + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8, 0x22, + }}, + {as: AVPINSRW, ytab: _yvpinsrb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xC4, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN2, 0xC4, + }}, + {as: AVPLZCNTD, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x44, + 
avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x44, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x44, + }}, + {as: AVPLZCNTQ, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x44, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x44, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x44, + }}, + {as: AVPMADD52HUQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xB5, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xB5, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xB5, + }}, + {as: AVPMADD52LUQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xB4, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xB4, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xB4, + }}, + {as: AVPMADDUBSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x04, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x04, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x04, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x04, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x04, + }}, + {as: AVPMADDWD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF5, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF5, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | 
evexZeroingEnabled, 0xF5, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xF5, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xF5, + }}, + {as: AVPMASKMOVD, ytab: _yvmaskmovpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x8E, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x8E, + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x8C, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x8C, + }}, + {as: AVPMASKMOVQ, ytab: _yvmaskmovpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x8E, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x8E, + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x8C, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x8C, + }}, + {as: AVPMAXSB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x3C, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x3C, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x3C, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x3C, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x3C, + }}, + {as: AVPMAXSD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x3D, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x3D, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x3D, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x3D, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x3D, + }}, + {as: AVPMAXSQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x3D, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x3D, + avxEscape | evex512 | evex66 | 
evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x3D, + }}, + {as: AVPMAXSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xEE, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xEE, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xEE, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xEE, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xEE, + }}, + {as: AVPMAXUB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xDE, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xDE, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xDE, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xDE, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xDE, + }}, + {as: AVPMAXUD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x3F, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x3F, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x3F, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x3F, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x3F, + }}, + {as: AVPMAXUQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x3F, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x3F, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x3F, + }}, + {as: AVPMAXUW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x3E, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x3E, + avxEscape | evex128 | evex66 | evex0F38 | 
evexW0, evexN16 | evexZeroingEnabled, 0x3E, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x3E, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x3E, + }}, + {as: AVPMINSB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x38, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x38, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x38, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x38, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x38, + }}, + {as: AVPMINSD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x39, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x39, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x39, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x39, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x39, + }}, + {as: AVPMINSQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x39, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x39, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x39, + }}, + {as: AVPMINSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xEA, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xEA, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xEA, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xEA, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xEA, + }}, + {as: AVPMINUB, ytab: _yvandnpd, prefix: Pavx, op: 
opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xDA, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xDA, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xDA, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xDA, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xDA, + }}, + {as: AVPMINUD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x3B, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x3B, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x3B, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x3B, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x3B, + }}, + {as: AVPMINUQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x3B, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x3B, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x3B, + }}, + {as: AVPMINUW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x3A, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x3A, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x3A, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x3A, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x3A, + }}, + {as: AVPMOVB2M, ytab: _yvpmovb2m, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, 0, 0x29, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, 0, 0x29, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, 0, 0x29, + }}, + {as: AVPMOVD2M, ytab: _yvpmovb2m, prefix: Pavx, op: opBytes{ + avxEscape | 
evex128 | evexF3 | evex0F38 | evexW0, 0, 0x39, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, 0, 0x39, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, 0, 0x39, + }}, + {as: AVPMOVDB, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x31, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x31, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x31, + }}, + {as: AVPMOVDW, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x33, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x33, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x33, + }}, + {as: AVPMOVM2B, ytab: _yvpbroadcastmb2q, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, 0, 0x28, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, 0, 0x28, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, 0, 0x28, + }}, + {as: AVPMOVM2D, ytab: _yvpbroadcastmb2q, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, 0, 0x38, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, 0, 0x38, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, 0, 0x38, + }}, + {as: AVPMOVM2Q, ytab: _yvpbroadcastmb2q, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW1, 0, 0x38, + avxEscape | evex256 | evexF3 | evex0F38 | evexW1, 0, 0x38, + avxEscape | evex512 | evexF3 | evex0F38 | evexW1, 0, 0x38, + }}, + {as: AVPMOVM2W, ytab: _yvpbroadcastmb2q, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW1, 0, 0x28, + avxEscape | evex256 | evexF3 | evex0F38 | evexW1, 0, 0x28, + avxEscape | evex512 | evexF3 | evex0F38 | evexW1, 0, 0x28, + }}, + {as: AVPMOVMSKB, ytab: _yvmovmskpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD7, + avxEscape 
| vex256 | vex66 | vex0F | vexW0, 0xD7, + }}, + {as: AVPMOVQ2M, ytab: _yvpmovb2m, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW1, 0, 0x39, + avxEscape | evex256 | evexF3 | evex0F38 | evexW1, 0, 0x39, + avxEscape | evex512 | evexF3 | evex0F38 | evexW1, 0, 0x39, + }}, + {as: AVPMOVQB, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x32, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x32, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x32, + }}, + {as: AVPMOVQD, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x35, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x35, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x35, + }}, + {as: AVPMOVQW, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x34, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x34, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x34, + }}, + {as: AVPMOVSDB, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x21, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x21, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x21, + }}, + {as: AVPMOVSDW, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x23, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x23, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x23, + }}, + {as: AVPMOVSQB, ytab: _yvpmovdb, prefix: Pavx, op: 
opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x22, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x22, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x22, + }}, + {as: AVPMOVSQD, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x25, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x25, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x25, + }}, + {as: AVPMOVSQW, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x24, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x24, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x24, + }}, + {as: AVPMOVSWB, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x20, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x20, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x20, + }}, + {as: AVPMOVSXBD, ytab: _yvbroadcastss, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x21, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x21, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x21, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x21, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x21, + }}, + {as: AVPMOVSXBQ, ytab: _yvbroadcastss, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x22, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x22, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x22, + avxEscape | evex256 | 
evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x22, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x22, + }}, + {as: AVPMOVSXBW, ytab: _yvcvtdq2pd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x20, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x20, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x20, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x20, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x20, + }}, + {as: AVPMOVSXDQ, ytab: _yvcvtdq2pd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x25, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x25, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x25, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x25, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x25, + }}, + {as: AVPMOVSXWD, ytab: _yvcvtdq2pd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x23, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x23, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x23, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x23, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x23, + }}, + {as: AVPMOVSXWQ, ytab: _yvbroadcastss, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x24, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x24, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x24, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x24, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x24, + }}, + {as: AVPMOVUSDB, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ + 
avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x11, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x11, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x11, + }}, + {as: AVPMOVUSDW, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x13, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x13, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x13, + }}, + {as: AVPMOVUSQB, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x12, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x12, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x12, + }}, + {as: AVPMOVUSQD, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x15, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x15, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x15, + }}, + {as: AVPMOVUSQW, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x14, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x14, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x14, + }}, + {as: AVPMOVUSWB, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x10, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x10, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x10, + }}, + {as: AVPMOVW2M, ytab: _yvpmovb2m, prefix: Pavx, op: opBytes{ + avxEscape | 
evex128 | evexF3 | evex0F38 | evexW1, 0, 0x29, + avxEscape | evex256 | evexF3 | evex0F38 | evexW1, 0, 0x29, + avxEscape | evex512 | evexF3 | evex0F38 | evexW1, 0, 0x29, + }}, + {as: AVPMOVWB, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x30, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x30, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x30, + }}, + {as: AVPMOVZXBD, ytab: _yvbroadcastss, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x31, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x31, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x31, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x31, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x31, + }}, + {as: AVPMOVZXBQ, ytab: _yvbroadcastss, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x32, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x32, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x32, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x32, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x32, + }}, + {as: AVPMOVZXBW, ytab: _yvcvtdq2pd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x30, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x30, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x30, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x30, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x30, + }}, + {as: AVPMOVZXDQ, ytab: _yvcvtdq2pd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x35, + avxEscape | vex256 | vex66 | vex0F38 | 
vexW0, 0x35, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x35, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x35, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x35, + }}, + {as: AVPMOVZXWD, ytab: _yvcvtdq2pd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x33, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x33, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x33, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x33, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x33, + }}, + {as: AVPMOVZXWQ, ytab: _yvbroadcastss, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x34, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x34, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x34, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x34, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x34, + }}, + {as: AVPMULDQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x28, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x28, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x28, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x28, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x28, + }}, + {as: AVPMULHRSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x0B, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x0B, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x0B, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x0B, + 
avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x0B, + }}, + {as: AVPMULHUW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE4, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE4, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE4, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xE4, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xE4, + }}, + {as: AVPMULHW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE5, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE5, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE5, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xE5, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xE5, + }}, + {as: AVPMULLD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x40, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x40, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x40, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x40, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x40, + }}, + {as: AVPMULLQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x40, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x40, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x40, + }}, + {as: AVPMULLW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD5, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD5, + avxEscape | evex128 | evex66 | 
evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD5, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xD5, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xD5, + }}, + {as: AVPMULTISHIFTQB, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x83, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x83, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x83, + }}, + {as: AVPMULUDQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF4, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF4, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xF4, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xF4, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xF4, + }}, + {as: AVPOPCNTB, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x54, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x54, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x54, + }}, + {as: AVPOPCNTD, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x55, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x55, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x55, + }}, + {as: AVPOPCNTQ, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x55, + avxEscape | evex256 | evex66 | evex0F38 | 
evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x55, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x55, + }}, + {as: AVPOPCNTW, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x54, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x54, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x54, + }}, + {as: AVPOR, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xEB, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xEB, + }}, + {as: AVPORD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xEB, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xEB, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xEB, + }}, + {as: AVPORQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xEB, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xEB, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xEB, + }}, + {as: AVPROLD, ytab: _yvprold, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x72, 01, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x72, 01, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x72, 01, + }}, + {as: AVPROLQ, ytab: _yvprold, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x72, 01, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | 
evexBcstN8 | evexZeroingEnabled, 0x72, 01, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x72, 01, + }}, + {as: AVPROLVD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x15, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x15, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x15, + }}, + {as: AVPROLVQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x15, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x15, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x15, + }}, + {as: AVPRORD, ytab: _yvprold, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x72, 00, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x72, 00, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x72, 00, + }}, + {as: AVPRORQ, ytab: _yvprold, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x72, 00, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x72, 00, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x72, 00, + }}, + {as: AVPRORVD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x14, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x14, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | 
evexZeroingEnabled, 0x14, + }}, + {as: AVPRORVQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x14, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x14, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x14, + }}, + {as: AVPSADBW, ytab: _yvaesdec, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF6, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF6, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0xF6, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0xF6, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0xF6, + }}, + {as: AVPSCATTERDD, ytab: _yvpscatterdd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0xA0, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0xA0, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xA0, + }}, + {as: AVPSCATTERDQ, ytab: _yvpscatterdq, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0xA0, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0xA0, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xA0, + }}, + {as: AVPSCATTERQD, ytab: _yvpscatterqd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0xA1, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0xA1, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xA1, + }}, + {as: AVPSCATTERQQ, ytab: _yvpscatterdd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0xA1, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0xA1, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xA1, + }}, + {as: AVPSHLDD, ytab: _yvalignd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | 
evexZeroingEnabled, 0x71, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x71, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x71, + }}, + {as: AVPSHLDQ, ytab: _yvalignd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x71, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x71, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x71, + }}, + {as: AVPSHLDVD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x71, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x71, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x71, + }}, + {as: AVPSHLDVQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x71, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x71, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x71, + }}, + {as: AVPSHLDVW, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x70, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x70, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x70, + }}, + {as: AVPSHLDW, ytab: _yvalignd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x70, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexZeroingEnabled, 0x70, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | 
evexZeroingEnabled, 0x70, + }}, + {as: AVPSHRDD, ytab: _yvalignd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x73, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x73, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x73, + }}, + {as: AVPSHRDQ, ytab: _yvalignd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x73, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x73, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x73, + }}, + {as: AVPSHRDVD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x73, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x73, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x73, + }}, + {as: AVPSHRDVQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x73, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x73, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x73, + }}, + {as: AVPSHRDVW, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x72, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x72, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x72, + }}, + {as: AVPSHRDW, ytab: _yvalignd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 
0x72, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexZeroingEnabled, 0x72, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexZeroingEnabled, 0x72, + }}, + {as: AVPSHUFB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x00, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x00, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x00, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x00, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x00, + }}, + {as: AVPSHUFBITQMB, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16, 0x8F, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32, 0x8F, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64, 0x8F, + }}, + {as: AVPSHUFD, ytab: _yvpshufd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x70, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x70, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x70, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x70, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x70, + }}, + {as: AVPSHUFHW, ytab: _yvpshufd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x70, + avxEscape | vex256 | vexF3 | vex0F | vexW0, 0x70, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x70, + avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x70, + avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x70, + }}, + {as: AVPSHUFLW, ytab: _yvpshufd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x70, + avxEscape | vex256 | vexF2 | vex0F | vexW0, 0x70, + avxEscape | evex128 | evexF2 
| evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x70, + avxEscape | evex256 | evexF2 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x70, + avxEscape | evex512 | evexF2 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x70, + }}, + {as: AVPSIGNB, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x08, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x08, + }}, + {as: AVPSIGND, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x0A, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x0A, + }}, + {as: AVPSIGNW, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x09, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x09, + }}, + {as: AVPSLLD, ytab: _yvpslld, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x72, 06, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x72, 06, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF2, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF2, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x72, 06, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x72, 06, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x72, 06, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF2, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF2, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF2, + }}, + {as: AVPSLLDQ, ytab: _yvpslldq, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x73, 07, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x73, 07, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0x73, 07, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0x73, 07, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0x73, 07, + }}, + 
{as: AVPSLLQ, ytab: _yvpslld, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x73, 06, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x73, 06, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF3, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF3, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x73, 06, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x73, 06, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x73, 06, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xF3, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xF3, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xF3, + }}, + {as: AVPSLLVD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x47, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x47, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x47, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x47, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x47, + }}, + {as: AVPSLLVQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x47, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x47, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x47, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x47, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x47, + }}, + {as: AVPSLLVW, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x12, + avxEscape | evex256 | evex66 | evex0F38 | 
evexW1, evexN32 | evexZeroingEnabled, 0x12, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x12, + }}, + {as: AVPSLLW, ytab: _yvpslld, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x71, 06, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x71, 06, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF1, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF1, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x71, 06, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x71, 06, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x71, 06, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF1, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF1, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF1, + }}, + {as: AVPSRAD, ytab: _yvpslld, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x72, 04, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x72, 04, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE2, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE2, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x72, 04, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x72, 04, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x72, 04, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE2, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE2, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE2, + }}, + {as: AVPSRAQ, ytab: _yvpsraq, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x72, 04, + avxEscape | evex256 | evex66 | evex0F | 
evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x72, 04, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x72, 04, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xE2, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xE2, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xE2, + }}, + {as: AVPSRAVD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x46, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x46, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x46, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x46, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x46, + }}, + {as: AVPSRAVQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x46, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x46, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x46, + }}, + {as: AVPSRAVW, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x11, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x11, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x11, + }}, + {as: AVPSRAW, ytab: _yvpslld, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x71, 04, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x71, 04, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE1, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE1, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x71, 04, + avxEscape | evex256 | 
evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x71, 04, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x71, 04, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE1, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE1, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE1, + }}, + {as: AVPSRLD, ytab: _yvpslld, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x72, 02, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x72, 02, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD2, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD2, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x72, 02, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x72, 02, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x72, 02, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD2, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD2, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD2, + }}, + {as: AVPSRLDQ, ytab: _yvpslldq, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x73, 03, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x73, 03, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0x73, 03, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0x73, 03, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0x73, 03, + }}, + {as: AVPSRLQ, ytab: _yvpslld, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x73, 02, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x73, 02, + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD3, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD3, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 
| evexZeroingEnabled, 0x73, 02, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x73, 02, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x73, 02, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xD3, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xD3, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xD3, + }}, + {as: AVPSRLVD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x45, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x45, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x45, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x45, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x45, + }}, + {as: AVPSRLVQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x45, + avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x45, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x45, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x45, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x45, + }}, + {as: AVPSRLVW, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x10, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x10, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x10, + }}, + {as: AVPSRLW, ytab: _yvpslld, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x71, 02, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x71, 02, + avxEscape | vex128 | vex66 | vex0F 
| vexW0, 0xD1, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD1, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x71, 02, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x71, 02, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x71, 02, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD1, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD1, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD1, + }}, + {as: AVPSUBB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF8, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF8, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF8, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xF8, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xF8, + }}, + {as: AVPSUBD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xFA, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xFA, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xFA, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xFA, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xFA, + }}, + {as: AVPSUBQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xFB, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xFB, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xFB, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xFB, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xFB, + }}, + {as: AVPSUBSB, ytab: _yvandnpd, 
prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE8, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE8, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE8, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xE8, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xE8, + }}, + {as: AVPSUBSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE9, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE9, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE9, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xE9, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xE9, + }}, + {as: AVPSUBUSB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD8, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD8, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD8, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xD8, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xD8, + }}, + {as: AVPSUBUSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD9, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD9, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD9, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xD9, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xD9, + }}, + {as: AVPSUBW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF9, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF9, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF9, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | 
evexZeroingEnabled, 0xF9, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xF9, + }}, + {as: AVPTERNLOGD, ytab: _yvalignd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x25, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x25, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x25, + }}, + {as: AVPTERNLOGQ, ytab: _yvalignd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x25, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x25, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x25, + }}, + {as: AVPTEST, ytab: _yvptest, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x17, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x17, + }}, + {as: AVPTESTMB, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16, 0x26, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32, 0x26, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64, 0x26, + }}, + {as: AVPTESTMD, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4, 0x27, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4, 0x27, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4, 0x27, + }}, + {as: AVPTESTMQ, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8, 0x27, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8, 0x27, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8, 0x27, + }}, + {as: AVPTESTMW, ytab: _yvpshufbitqmb, prefix: Pavx, op: 
opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16, 0x26, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32, 0x26, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64, 0x26, + }}, + {as: AVPTESTNMB, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN16, 0x26, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN32, 0x26, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN64, 0x26, + }}, + {as: AVPTESTNMD, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN16 | evexBcstN4, 0x27, + avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN32 | evexBcstN4, 0x27, + avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN64 | evexBcstN4, 0x27, + }}, + {as: AVPTESTNMQ, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW1, evexN16 | evexBcstN8, 0x27, + avxEscape | evex256 | evexF3 | evex0F38 | evexW1, evexN32 | evexBcstN8, 0x27, + avxEscape | evex512 | evexF3 | evex0F38 | evexW1, evexN64 | evexBcstN8, 0x27, + }}, + {as: AVPTESTNMW, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evexF3 | evex0F38 | evexW1, evexN16, 0x26, + avxEscape | evex256 | evexF3 | evex0F38 | evexW1, evexN32, 0x26, + avxEscape | evex512 | evexF3 | evex0F38 | evexW1, evexN64, 0x26, + }}, + {as: AVPUNPCKHBW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x68, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x68, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x68, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x68, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x68, + }}, + {as: AVPUNPCKHDQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x6A, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x6A, + 
avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x6A, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x6A, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x6A, + }}, + {as: AVPUNPCKHQDQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x6D, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x6D, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x6D, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x6D, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x6D, + }}, + {as: AVPUNPCKHWD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x69, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x69, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x69, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x69, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x69, + }}, + {as: AVPUNPCKLBW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x60, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x60, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x60, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x60, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x60, + }}, + {as: AVPUNPCKLDQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x62, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x62, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x62, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 
0x62, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x62, + }}, + {as: AVPUNPCKLQDQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x6C, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x6C, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x6C, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x6C, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x6C, + }}, + {as: AVPUNPCKLWD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x61, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x61, + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x61, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x61, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x61, + }}, + {as: AVPXOR, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xEF, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xEF, + }}, + {as: AVPXORD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xEF, + avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xEF, + avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xEF, + }}, + {as: AVPXORQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xEF, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xEF, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xEF, + }}, + {as: AVRANGEPD, ytab: _yvfixupimmpd, prefix: Pavx, op: opBytes{ + avxEscape | 
evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x50, + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x50, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x50, + }}, + {as: AVRANGEPS, ytab: _yvfixupimmpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x50, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x50, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x50, + }}, + {as: AVRANGESD, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x51, + }}, + {as: AVRANGESS, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x51, + }}, + {as: AVRCP14PD, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x4C, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x4C, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x4C, + }}, + {as: AVRCP14PS, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x4C, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x4C, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x4C, + }}, + {as: AVRCP14SD, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x4D, + }}, + {as: AVRCP14SS, ytab: _yvgetexpsd, 
prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x4D, + }}, + {as: AVRCP28PD, ytab: _yvexp2pd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0xCA, + }}, + {as: AVRCP28PS, ytab: _yvexp2pd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0xCA, + }}, + {as: AVRCP28SD, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0xCB, + }}, + {as: AVRCP28SS, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0xCB, + }}, + {as: AVRCPPS, ytab: _yvptest, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x53, + avxEscape | vex256 | vex0F | vexW0, 0x53, + }}, + {as: AVRCPSS, ytab: _yvrcpss, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x53, + }}, + {as: AVREDUCEPD, ytab: _yvgetmantpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x56, + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x56, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x56, + }}, + {as: AVREDUCEPS, ytab: _yvgetmantpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x56, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x56, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x56, + }}, + {as: AVREDUCESD, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | 
evex0F3A | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x57, + }}, + {as: AVREDUCESS, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x57, + }}, + {as: AVRNDSCALEPD, ytab: _yvgetmantpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x09, + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x09, + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x09, + }}, + {as: AVRNDSCALEPS, ytab: _yvgetmantpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x08, + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x08, + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x08, + }}, + {as: AVRNDSCALESD, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x0B, + }}, + {as: AVRNDSCALESS, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x0A, + }}, + {as: AVROUNDPD, ytab: _yvroundpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x09, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x09, + }}, + {as: AVROUNDPS, ytab: _yvroundpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x08, + avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x08, + }}, + {as: AVROUNDSD, ytab: _yvdppd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x0B, + }}, + {as: AVROUNDSS, ytab: _yvdppd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x0A, + }}, + 
{as: AVRSQRT14PD, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x4E, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x4E, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x4E, + }}, + {as: AVRSQRT14PS, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x4E, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x4E, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x4E, + }}, + {as: AVRSQRT14SD, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x4F, + }}, + {as: AVRSQRT14SS, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x4F, + }}, + {as: AVRSQRT28PD, ytab: _yvexp2pd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0xCC, + }}, + {as: AVRSQRT28PS, ytab: _yvexp2pd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0xCC, + }}, + {as: AVRSQRT28SD, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0xCD, + }}, + {as: AVRSQRT28SS, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0xCD, + }}, + {as: AVRSQRTPS, ytab: _yvptest, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x52, + avxEscape | vex256 | vex0F | vexW0, 0x52, + }}, + {as: AVRSQRTSS, ytab: _yvrcpss, prefix: Pavx, 
op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x52, + }}, + {as: AVSCALEFPD, ytab: _yvscalefpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x2C, + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x2C, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x2C, + }}, + {as: AVSCALEFPS, ytab: _yvscalefpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x2C, + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x2C, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x2C, + }}, + {as: AVSCALEFSD, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x2D, + }}, + {as: AVSCALEFSS, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x2D, + }}, + {as: AVSCATTERDPD, ytab: _yvpscatterdq, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0xA2, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0xA2, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xA2, + }}, + {as: AVSCATTERDPS, ytab: _yvpscatterdd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0xA2, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0xA2, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xA2, + }}, + {as: AVSCATTERPF0DPD, ytab: _yvgatherpf0dpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC6, 05, + }}, + {as: AVSCATTERPF0DPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ + 
avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC6, 05, + }}, + {as: AVSCATTERPF0QPD, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC7, 05, + }}, + {as: AVSCATTERPF0QPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC7, 05, + }}, + {as: AVSCATTERPF1DPD, ytab: _yvgatherpf0dpd, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC6, 06, + }}, + {as: AVSCATTERPF1DPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC6, 06, + }}, + {as: AVSCATTERPF1QPD, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC7, 06, + }}, + {as: AVSCATTERPF1QPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC7, 06, + }}, + {as: AVSCATTERQPD, ytab: _yvpscatterdd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0xA3, + avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0xA3, + avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xA3, + }}, + {as: AVSCATTERQPS, ytab: _yvpscatterqd, prefix: Pavx, op: opBytes{ + avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0xA3, + avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0xA3, + avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xA3, + }}, + {as: AVSHUFF32X4, ytab: _yvshuff32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x23, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x23, + }}, + {as: AVSHUFF64X2, ytab: _yvshuff32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x23, + avxEscape | evex512 | evex66 | 
evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x23, + }}, + {as: AVSHUFI32X4, ytab: _yvshuff32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x43, + avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x43, + }}, + {as: AVSHUFI64X2, ytab: _yvshuff32x4, prefix: Pavx, op: opBytes{ + avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x43, + avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x43, + }}, + {as: AVSHUFPD, ytab: _yvgf2p8affineinvqb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0xC6, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0xC6, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xC6, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xC6, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xC6, + }}, + {as: AVSHUFPS, ytab: _yvgf2p8affineinvqb, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0xC6, + avxEscape | vex256 | vex0F | vexW0, 0xC6, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xC6, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xC6, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xC6, + }}, + {as: AVSQRTPD, ytab: _yvcvtdq2ps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x51, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x51, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x51, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x51, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | 
evexZeroingEnabled, 0x51, + }}, + {as: AVSQRTPS, ytab: _yvcvtdq2ps, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x51, + avxEscape | vex256 | vex0F | vexW0, 0x51, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x51, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x51, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x51, + }}, + {as: AVSQRTSD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x51, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x51, + }}, + {as: AVSQRTSS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x51, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x51, + }}, + {as: AVSTMXCSR, ytab: _yvldmxcsr, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0xAE, 03, + }}, + {as: AVSUBPD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x5C, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x5C, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x5C, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x5C, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x5C, + }}, + {as: AVSUBPS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x5C, + avxEscape | vex256 | vex0F | vexW0, 0x5C, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x5C, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5C, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x5C, + }}, + {as: AVSUBSD, 
ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x5C, + avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x5C, + }}, + {as: AVSUBSS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x5C, + avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x5C, + }}, + {as: AVTESTPD, ytab: _yvptest, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x0F, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x0F, + }}, + {as: AVTESTPS, ytab: _yvptest, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x0E, + avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x0E, + }}, + {as: AVUCOMISD, ytab: _yvcomisd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x2E, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8 | evexSaeEnabled, 0x2E, + }}, + {as: AVUCOMISS, ytab: _yvcomisd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x2E, + avxEscape | evex128 | evex0F | evexW0, evexN4 | evexSaeEnabled, 0x2E, + }}, + {as: AVUNPCKHPD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x15, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x15, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x15, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x15, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x15, + }}, + {as: AVUNPCKHPS, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x15, + avxEscape | vex256 | vex0F | vexW0, 0x15, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x15, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x15, + avxEscape | evex512 | evex0F | 
evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x15, + }}, + {as: AVUNPCKLPD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x14, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x14, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x14, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x14, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x14, + }}, + {as: AVUNPCKLPS, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x14, + avxEscape | vex256 | vex0F | vexW0, 0x14, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x14, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x14, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x14, + }}, + {as: AVXORPD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex66 | vex0F | vexW0, 0x57, + avxEscape | vex256 | vex66 | vex0F | vexW0, 0x57, + avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x57, + avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x57, + avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x57, + }}, + {as: AVXORPS, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ + avxEscape | vex128 | vex0F | vexW0, 0x57, + avxEscape | vex256 | vex0F | vexW0, 0x57, + avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x57, + avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x57, + avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x57, + }}, + {as: AVZEROALL, ytab: _yvzeroall, prefix: Pavx, op: opBytes{ + avxEscape | vex256 | vex0F | vexW0, 0x77, + }}, + {as: AVZEROUPPER, ytab: 
_yvzeroall, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x77,
	}},
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/evex.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/evex.go
new file mode 100644
index 0000000000000000000000000000000000000000..aa93cd8819ac20c8391ac7af8daf4db577b9bb02
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/evex.go
@@ -0,0 +1,383 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package x86

import (
	"cmd/internal/obj"
	"errors"
	"fmt"
	"strings"
)

// evexBits stores EVEX prefix info that is used during instruction encoding.
// b1 packs the W, mm, L'L and pp fields; b2 packs the disp8 N value,
// broadcast N value and the Z/R/S feature-permission flags (see the
// accessor methods and the evex* constants below for the exact layout).
type evexBits struct {
	b1 byte // [W1mmLLpp]
	b2 byte // [NNNbbZRS]

	// Associated instruction opcode.
	opcode byte
}

// newEVEXBits creates evexBits object from enc bytes at z position.
// NOTE(review): assumes enc holds at least z+3 bytes — guaranteed by the
// optab opBytes layout; confirm when adding new Z-cases.
func newEVEXBits(z int, enc *opBytes) evexBits {
	return evexBits{
		b1:     enc[z+0],
		b2:     enc[z+1],
		opcode: enc[z+2],
	}
}

// P returns EVEX.pp value.
func (evex evexBits) P() byte { return (evex.b1 & evexP) >> 0 }

// L returns EVEX.L'L value.
func (evex evexBits) L() byte { return (evex.b1 & evexL) >> 2 }

// M returns EVEX.mm value.
func (evex evexBits) M() byte { return (evex.b1 & evexM) >> 4 }

// W returns EVEX.W value.
func (evex evexBits) W() byte { return (evex.b1 & evexW) >> 7 }

// BroadcastEnabled reports whether BCST suffix is permitted.
func (evex evexBits) BroadcastEnabled() bool {
	return evex.b2&evexBcst != 0
}

// ZeroingEnabled reports whether Z suffix is permitted.
func (evex evexBits) ZeroingEnabled() bool {
	// evexZeroing is a single bit; the shift just normalizes before the comparison.
	return (evex.b2&evexZeroing)>>2 != 0
}

// RoundingEnabled reports whether RN_SAE, RZ_SAE, RD_SAE and RU_SAE suffixes
// are permitted.
+func (evex evexBits) RoundingEnabled() bool { + return (evex.b2&evexRounding)>>1 != 0 +} + +// SaeEnabled reports whether SAE suffix is permitted. +func (evex evexBits) SaeEnabled() bool { + return (evex.b2&evexSae)>>0 != 0 +} + +// DispMultiplier returns displacement multiplier that is calculated +// based on tuple type, EVEX.W and input size. +// If embedded broadcast is used, bcst should be true. +func (evex evexBits) DispMultiplier(bcst bool) int32 { + if bcst { + switch evex.b2 & evexBcst { + case evexBcstN4: + return 4 + case evexBcstN8: + return 8 + } + return 1 + } + + switch evex.b2 & evexN { + case evexN1: + return 1 + case evexN2: + return 2 + case evexN4: + return 4 + case evexN8: + return 8 + case evexN16: + return 16 + case evexN32: + return 32 + case evexN64: + return 64 + case evexN128: + return 128 + } + return 1 +} + +// EVEX is described by using 2-byte sequence. +// See evexBits for more details. +const ( + evexW = 0x80 // b1[W... ....] + evexWIG = 0 << 7 + evexW0 = 0 << 7 + evexW1 = 1 << 7 + + evexM = 0x30 // b2[..mm ...] + evex0F = 1 << 4 + evex0F38 = 2 << 4 + evex0F3A = 3 << 4 + + evexL = 0x0C // b1[.... LL..] + evexLIG = 0 << 2 + evex128 = 0 << 2 + evex256 = 1 << 2 + evex512 = 2 << 2 + + evexP = 0x03 // b1[.... ..pp] + evex66 = 1 << 0 + evexF3 = 2 << 0 + evexF2 = 3 << 0 + + // Precalculated Disp8 N value. + // N acts like a multiplier for 8bit displacement. + // Note that some N are not used, but their bits are reserved. + evexN = 0xE0 // b2[NNN. ....] + evexN1 = 0 << 5 + evexN2 = 1 << 5 + evexN4 = 2 << 5 + evexN8 = 3 << 5 + evexN16 = 4 << 5 + evexN32 = 5 << 5 + evexN64 = 6 << 5 + evexN128 = 7 << 5 + + // Disp8 for broadcasts. + evexBcst = 0x18 // b2[...b b...] + evexBcstN4 = 1 << 3 + evexBcstN8 = 2 << 3 + + // Flags that permit certain AVX512 features. + // It's semantically illegal to combine evexZeroing and evexSae. + evexZeroing = 0x4 // b2[.... .Z..] + evexZeroingEnabled = 1 << 2 + evexRounding = 0x2 // b2[.... ..R.] 
	evexRoundingEnabled = 1 << 1
	evexSae             = 0x1 // b2[.... ...S]
	evexSaeEnabled      = 1 << 0
)

// compressedDisp8 calculates EVEX compressed displacement, if applicable.
// The displacement must be an exact multiple of elemSize and the scaled
// value must fit into a signed byte.
func compressedDisp8(disp, elemSize int32) (disp8 byte, ok bool) {
	if disp%elemSize == 0 {
		v := disp / elemSize
		if v >= -128 && v <= 127 {
			return byte(v), true
		}
	}
	return 0, false
}

// evexZcase reports whether given Z-case belongs to EVEX group.
func evexZcase(zcase uint8) bool {
	return zcase > Zevex_first && zcase < Zevex_last
}

// evexSuffix carries instruction EVEX suffix set flags.
//
// Examples:
//
//	"RU_SAE.Z" => {rounding: rcRUSAE, zeroing: true}
//	"Z" => {zeroing: true}
//	"BCST" => {broadcast: true}
//	"SAE.Z" => {sae: true, zeroing: true}
type evexSuffix struct {
	rounding  byte
	sae       bool
	zeroing   bool
	broadcast bool
}

// Rounding control values.
// Match exact value for EVEX.L'L field (with exception of rcUnset).
const (
	rcRNSAE = 0 // Round towards nearest
	rcRDSAE = 1 // Round towards -Inf
	rcRUSAE = 2 // Round towards +Inf
	rcRZSAE = 3 // Round towards zero
	rcUnset = 4
)

// newEVEXSuffix returns proper zero value for evexSuffix.
func newEVEXSuffix() evexSuffix {
	return evexSuffix{rounding: rcUnset}
}

// evexSuffixMap maps obj.X86suffix to its decoded version.
// Filled during init().
// NOTE(review): sized 255 (indices 0..254) rather than 256; a Scond of 255
// would be out of range, but opSuffixTable is far smaller so valid suffix
// values can never reach that index — confirm if the table ever grows.
var evexSuffixMap [255]evexSuffix

func init() {
	// Decode all valid suffixes for later use.
+ for i := range opSuffixTable { + suffix := newEVEXSuffix() + parts := strings.Split(opSuffixTable[i], ".") + for j := range parts { + switch parts[j] { + case "Z": + suffix.zeroing = true + case "BCST": + suffix.broadcast = true + case "SAE": + suffix.sae = true + + case "RN_SAE": + suffix.rounding = rcRNSAE + case "RD_SAE": + suffix.rounding = rcRDSAE + case "RU_SAE": + suffix.rounding = rcRUSAE + case "RZ_SAE": + suffix.rounding = rcRZSAE + } + } + evexSuffixMap[i] = suffix + } +} + +// toDisp8 tries to convert disp to proper 8-bit displacement value. +func toDisp8(disp int32, p *obj.Prog, asmbuf *AsmBuf) (disp8 byte, ok bool) { + if asmbuf.evexflag { + bcst := evexSuffixMap[p.Scond].broadcast + elemSize := asmbuf.evex.DispMultiplier(bcst) + return compressedDisp8(disp, elemSize) + } + return byte(disp), disp >= -128 && disp < 128 +} + +// EncodeRegisterRange packs [reg0-reg1] list into 64-bit value that +// is intended to be stored inside obj.Addr.Offset with TYPE_REGLIST. +func EncodeRegisterRange(reg0, reg1 int16) int64 { + return (int64(reg0) << 0) | + (int64(reg1) << 16) | + obj.RegListX86Lo +} + +// decodeRegisterRange unpacks [reg0-reg1] list from 64-bit value created by EncodeRegisterRange. +func decodeRegisterRange(list int64) (reg0, reg1 int) { + return int((list >> 0) & 0xFFFF), + int((list >> 16) & 0xFFFF) +} + +// ParseSuffix handles the special suffix for the 386/AMD64. +// Suffix bits are stored into p.Scond. +// +// Leading "." in cond is ignored. +func ParseSuffix(p *obj.Prog, cond string) error { + cond = strings.TrimPrefix(cond, ".") + + suffix := newOpSuffix(cond) + if !suffix.IsValid() { + return inferSuffixError(cond) + } + + p.Scond = uint8(suffix) + return nil +} + +// inferSuffixError returns non-nil error that describes what could be +// the cause of suffix parse failure. +// +// At the point this function is executed there is already assembly error, +// so we can burn some clocks to construct good error message. 
//
// Reported issues:
//   - duplicated suffixes
//   - illegal rounding/SAE+broadcast combinations
//   - unknown suffixes
//   - misplaced suffix (e.g. wrong Z suffix position)
func inferSuffixError(cond string) error {
	seen := make(map[string]bool)     // for duplicate detection
	reported := make(map[string]bool) // unknown suffixes already reported
	var (
		sawBroadcast bool
		sawRounding  bool
		problems     []string // error message parts, joined at the end
	)

	parts := strings.Split(cond, ".")
	for i, part := range parts {
		switch part {
		case "Z":
			if i != len(parts)-1 {
				problems = append(problems, "Z suffix should be the last")
			}
		case "BCST":
			sawBroadcast = true
		case "SAE", "RN_SAE", "RZ_SAE", "RD_SAE", "RU_SAE":
			sawRounding = true
		default:
			// Report each unknown suffix only once.
			if !reported[part] {
				problems = append(problems, fmt.Sprintf("unknown suffix %q", part))
				reported[part] = true
			}
		}

		if seen[part] {
			problems = append(problems, fmt.Sprintf("duplicate suffix %q", part))
		}
		seen[part] = true
	}

	if sawBroadcast && sawRounding {
		problems = append(problems, "can't combine rounding/SAE and broadcast")
	}

	// Fall back to a generic message when no specific issue was spotted.
	if len(problems) == 0 {
		return errors.New("bad suffix combination")
	}
	return errors.New(strings.Join(problems, "; "))
}

// opSuffixTable is a complete list of possible opcode suffix combinations.
// It "maps" uint8 suffix bits to their string representation.
// With the exception of first and last elements, order is not important.
var opSuffixTable = [...]string{
	"", // Map empty suffix to empty string.

	"Z",

	"SAE",
	"SAE.Z",

	"RN_SAE",
	"RZ_SAE",
	"RD_SAE",
	"RU_SAE",
	"RN_SAE.Z",
	"RZ_SAE.Z",
	"RD_SAE.Z",
	"RU_SAE.Z",

	"BCST",
	"BCST.Z",

	"",
}

// opSuffix represents instruction opcode suffix.
// Compound (multi-part) suffixes expressed with single opSuffix value.
//
// uint8 type is used to fit obj.Prog.Scond.
type opSuffix uint8

// badOpSuffix is used to represent all invalid suffix combinations.
const badOpSuffix = opSuffix(len(opSuffixTable) - 1)

// newOpSuffix returns opSuffix object that matches suffixes string.
//
// If no matching suffix is found, special "invalid" suffix is returned.
// Use IsValid method to check against this case.
func newOpSuffix(suffixes string) opSuffix {
	for i := range opSuffixTable {
		if opSuffixTable[i] == suffixes {
			return opSuffix(i)
		}
	}
	return badOpSuffix
}

// IsValid reports whether suffix is valid.
// Empty suffixes are valid.
func (suffix opSuffix) IsValid() bool {
	return suffix != badOpSuffix
}

// String returns suffix printed representation.
//
// For valid suffixes it matches the string that was used to create the
// suffix via newOpSuffix (see ParseSuffix).
// For all invalid suffixes, special marker is returned.
func (suffix opSuffix) String() string {
	return opSuffixTable[suffix]
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/list6.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/list6.go
new file mode 100644
index 0000000000000000000000000000000000000000..60280312c9bd789224daf9181011ede3375f79e3
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/list6.go
@@ -0,0 +1,264 @@
// Inferno utils/6c/list.c
// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/list.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package x86 + +import ( + "cmd/internal/obj" + "fmt" +) + +var Register = []string{ + "AL", // [D_AL] + "CL", + "DL", + "BL", + "SPB", + "BPB", + "SIB", + "DIB", + "R8B", + "R9B", + "R10B", + "R11B", + "R12B", + "R13B", + "R14B", + "R15B", + "AX", // [D_AX] + "CX", + "DX", + "BX", + "SP", + "BP", + "SI", + "DI", + "R8", + "R9", + "R10", + "R11", + "R12", + "R13", + "R14", + "R15", + "AH", + "CH", + "DH", + "BH", + "F0", // [D_F0] + "F1", + "F2", + "F3", + "F4", + "F5", + "F6", + "F7", + "M0", + "M1", + "M2", + "M3", + "M4", + "M5", + "M6", + "M7", + "K0", + "K1", + "K2", + "K3", + "K4", + "K5", + "K6", + "K7", + "X0", + "X1", + "X2", + "X3", + "X4", + "X5", + "X6", + "X7", + "X8", + "X9", + "X10", + "X11", + "X12", + "X13", + "X14", + "X15", + "X16", + "X17", + "X18", + "X19", + "X20", + "X21", + "X22", + "X23", + "X24", + "X25", + "X26", + "X27", + "X28", + "X29", + "X30", + "X31", + "Y0", + "Y1", + "Y2", + "Y3", + "Y4", + "Y5", + "Y6", + "Y7", + "Y8", + "Y9", + "Y10", + "Y11", + "Y12", + "Y13", + "Y14", + "Y15", + "Y16", + "Y17", + "Y18", + "Y19", + "Y20", + "Y21", + "Y22", + "Y23", + "Y24", + "Y25", + "Y26", + "Y27", + "Y28", + "Y29", + "Y30", + "Y31", + "Z0", + "Z1", + "Z2", + "Z3", + "Z4", + "Z5", + "Z6", + "Z7", + "Z8", + "Z9", + "Z10", + "Z11", + "Z12", + "Z13", + "Z14", + "Z15", + "Z16", + "Z17", + "Z18", + "Z19", + "Z20", + "Z21", + "Z22", + "Z23", + "Z24", + "Z25", + "Z26", + "Z27", + "Z28", + "Z29", + "Z30", + "Z31", + "CS", // [D_CS] + "SS", + "DS", + "ES", + "FS", + "GS", + "GDTR", // [D_GDTR] + "IDTR", // [D_IDTR] + "LDTR", // [D_LDTR] + "MSW", // [D_MSW] + "TASK", // [D_TASK] + "CR0", // [D_CR] + "CR1", + "CR2", + "CR3", + "CR4", + "CR5", + "CR6", + "CR7", + "CR8", + "CR9", + "CR10", + "CR11", + "CR12", + "CR13", + "CR14", + "CR15", + "DR0", // [D_DR] + "DR1", + "DR2", + "DR3", + "DR4", + "DR5", + "DR6", + "DR7", + "TR0", // [D_TR] + "TR1", + "TR2", + "TR3", + "TR4", + "TR5", + "TR6", + "TR7", + "TLS", // [D_TLS] + "MAXREG", // [MAXREG] +} + +func 
init() { + obj.RegisterRegister(REG_AL, REG_AL+len(Register), rconv) + obj.RegisterOpcode(obj.ABaseAMD64, Anames) + obj.RegisterRegisterList(obj.RegListX86Lo, obj.RegListX86Hi, rlconv) + obj.RegisterOpSuffix("386", opSuffixString) + obj.RegisterOpSuffix("amd64", opSuffixString) +} + +func rconv(r int) string { + if REG_AL <= r && r-REG_AL < len(Register) { + return Register[r-REG_AL] + } + return fmt.Sprintf("Rgok(%d)", r-obj.RBaseAMD64) +} + +func rlconv(bits int64) string { + reg0, reg1 := decodeRegisterRange(bits) + return fmt.Sprintf("[%s-%s]", rconv(reg0), rconv(reg1)) +} + +func opSuffixString(s uint8) string { + return "." + opSuffix(s).String() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/obj6.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/obj6.go new file mode 100644 index 0000000000000000000000000000000000000000..e6ea8985e4f78c326a2df0c6b015db0d963ed083 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/obj6.go @@ -0,0 +1,1546 @@ +// Inferno utils/6l/pass.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/pass.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package x86 + +import ( + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" + "cmd/internal/sys" + "internal/abi" + "log" + "math" + "path" + "strings" +) + +func CanUse1InsnTLS(ctxt *obj.Link) bool { + if isAndroid { + // Android uses a global variable for the tls offset. + return false + } + + if ctxt.Arch.Family == sys.I386 { + switch ctxt.Headtype { + case objabi.Hlinux, + objabi.Hplan9, + objabi.Hwindows: + return false + } + + return true + } + + switch ctxt.Headtype { + case objabi.Hplan9, objabi.Hwindows: + return false + case objabi.Hlinux, objabi.Hfreebsd: + return !ctxt.Flag_shared + } + + return true +} + +func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { + // Thread-local storage references use the TLS pseudo-register. 
+ // As a register, TLS refers to the thread-local storage base, and it + // can only be loaded into another register: + // + // MOVQ TLS, AX + // + // An offset from the thread-local storage base is written off(reg)(TLS*1). + // Semantically it is off(reg), but the (TLS*1) annotation marks this as + // indexing from the loaded TLS base. This emits a relocation so that + // if the linker needs to adjust the offset, it can. For example: + // + // MOVQ TLS, AX + // MOVQ 0(AX)(TLS*1), CX // load g into CX + // + // On systems that support direct access to the TLS memory, this + // pair of instructions can be reduced to a direct TLS memory reference: + // + // MOVQ 0(TLS), CX // load g into CX + // + // The 2-instruction and 1-instruction forms correspond to the two code + // sequences for loading a TLS variable in the local exec model given in "ELF + // Handling For Thread-Local Storage". + // + // We apply this rewrite on systems that support the 1-instruction form. + // The decision is made using only the operating system and the -shared flag, + // not the link mode. If some link modes on a particular operating system + // require the 2-instruction form, then all builds for that operating system + // will use the 2-instruction form, so that the link mode decision can be + // delayed to link time. + // + // In this way, all supported systems use identical instructions to + // access TLS, and they are rewritten appropriately first here in + // liblink and then finally using relocations in the linker. + // + // When -shared is passed, we leave the code in the 2-instruction form but + // assemble (and relocate) them in different ways to generate the initial + // exec code sequence. It's a bit of a fluke that this is possible without + // rewriting the instructions more comprehensively, and it only does because + // we only support a single TLS variable (g). + + if CanUse1InsnTLS(ctxt) { + // Reduce 2-instruction sequence to 1-instruction sequence. 
+ // Sequences like + // MOVQ TLS, BX + // ... off(BX)(TLS*1) ... + // become + // NOP + // ... off(TLS) ... + // + // TODO(rsc): Remove the Hsolaris special case. It exists only to + // guarantee we are producing byte-identical binaries as before this code. + // But it should be unnecessary. + if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 && ctxt.Headtype != objabi.Hsolaris { + obj.Nopout(p) + } + if p.From.Type == obj.TYPE_MEM && p.From.Index == REG_TLS && REG_AX <= p.From.Reg && p.From.Reg <= REG_R15 { + p.From.Reg = REG_TLS + p.From.Scale = 0 + p.From.Index = REG_NONE + } + + if p.To.Type == obj.TYPE_MEM && p.To.Index == REG_TLS && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 { + p.To.Reg = REG_TLS + p.To.Scale = 0 + p.To.Index = REG_NONE + } + } else { + // load_g, below, always inserts the 1-instruction sequence. Rewrite it + // as the 2-instruction sequence if necessary. + // MOVQ 0(TLS), BX + // becomes + // MOVQ TLS, BX + // MOVQ 0(BX)(TLS*1), BX + if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 { + q := obj.Appendp(p, newprog) + q.As = p.As + q.From = p.From + q.From.Type = obj.TYPE_MEM + q.From.Reg = p.To.Reg + q.From.Index = REG_TLS + q.From.Scale = 2 // TODO: use 1 + q.To = p.To + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_TLS + p.From.Index = REG_NONE + p.From.Offset = 0 + } + } + + // Android and Windows use a tls offset determined at runtime. 
Rewrite + // MOVQ TLS, BX + to + // MOVQ runtime.tls_g(SB), BX + if (isAndroid || ctxt.Headtype == objabi.Hwindows) && + (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 { + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_EXTERN + p.From.Reg = REG_NONE + p.From.Sym = ctxt.Lookup("runtime.tls_g") + p.From.Index = REG_NONE + if ctxt.Headtype == objabi.Hwindows { + // Windows requires an additional indirection + // to retrieve the TLS pointer, + // as runtime.tls_g contains the TLS offset from GS or FS. + // on AMD64 add + // MOVQ 0(BX)(GS*1), BX + // on 386 add + // MOVQ 0(BX)(FS*1), BX + q := obj.Appendp(p, newprog) + q.As = p.As + q.From = obj.Addr{} + q.From.Type = obj.TYPE_MEM + q.From.Reg = p.To.Reg + if ctxt.Arch.Family == sys.AMD64 { + q.From.Index = REG_GS + } else { + q.From.Index = REG_FS + } + q.From.Scale = 1 + q.From.Offset = 0 + q.To = p.To + } + } + + // TODO: Remove. + if ctxt.Headtype == objabi.Hwindows && ctxt.Arch.Family == sys.AMD64 || ctxt.Headtype == objabi.Hplan9 { + if p.From.Scale == 1 && p.From.Index == REG_TLS { + p.From.Scale = 2 + } + if p.To.Scale == 1 && p.To.Index == REG_TLS { + p.To.Scale = 2 + } + } + + // Rewrite 0 to $0 in 3rd argument to CMPPS etc. + // That's what the tables expect. + switch p.As { + case ACMPPD, ACMPPS, ACMPSD, ACMPSS: + if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE && p.To.Reg == REG_NONE && p.To.Index == REG_NONE && p.To.Sym == nil { + p.To.Type = obj.TYPE_CONST + } + } + + // Rewrite CALL/JMP/RET to symbol as TYPE_BRANCH. + switch p.As { + case obj.ACALL, obj.AJMP, obj.ARET: + if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil { + p.To.Type = obj.TYPE_BRANCH + } + } + + // Rewrite MOVL/MOVQ $XXX(FP/SP) as LEAL/LEAQ. 
+ if p.From.Type == obj.TYPE_ADDR && (ctxt.Arch.Family == sys.AMD64 || p.From.Name != obj.NAME_EXTERN && p.From.Name != obj.NAME_STATIC) { + switch p.As { + case AMOVL: + p.As = ALEAL + p.From.Type = obj.TYPE_MEM + case AMOVQ: + p.As = ALEAQ + p.From.Type = obj.TYPE_MEM + } + } + + // Rewrite float constants to values stored in memory. + switch p.As { + // Convert AMOVSS $(0), Xx to AXORPS Xx, Xx + case AMOVSS: + if p.From.Type == obj.TYPE_FCONST { + // f == 0 can't be used here due to -0, so use Float64bits + if f := p.From.Val.(float64); math.Float64bits(f) == 0 { + if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 { + p.As = AXORPS + p.From = p.To + break + } + } + } + fallthrough + + case AFMOVF, + AFADDF, + AFSUBF, + AFSUBRF, + AFMULF, + AFDIVF, + AFDIVRF, + AFCOMF, + AFCOMFP, + AADDSS, + ASUBSS, + AMULSS, + ADIVSS, + ACOMISS, + AUCOMISS: + if p.From.Type == obj.TYPE_FCONST { + f32 := float32(p.From.Val.(float64)) + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_EXTERN + p.From.Sym = ctxt.Float32Sym(f32) + p.From.Offset = 0 + } + + case AMOVSD: + // Convert AMOVSD $(0), Xx to AXORPS Xx, Xx + if p.From.Type == obj.TYPE_FCONST { + // f == 0 can't be used here due to -0, so use Float64bits + if f := p.From.Val.(float64); math.Float64bits(f) == 0 { + if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 { + p.As = AXORPS + p.From = p.To + break + } + } + } + fallthrough + + case AFMOVD, + AFADDD, + AFSUBD, + AFSUBRD, + AFMULD, + AFDIVD, + AFDIVRD, + AFCOMD, + AFCOMDP, + AADDSD, + ASUBSD, + AMULSD, + ADIVSD, + ACOMISD, + AUCOMISD: + if p.From.Type == obj.TYPE_FCONST { + f64 := p.From.Val.(float64) + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_EXTERN + p.From.Sym = ctxt.Float64Sym(f64) + p.From.Offset = 0 + } + } + + if ctxt.Flag_dynlink { + rewriteToUseGot(ctxt, p, newprog) + } + + if ctxt.Flag_shared && ctxt.Arch.Family == sys.I386 { + rewriteToPcrel(ctxt, p, newprog) + } +} + +// Rewrite p, if necessary, to 
access global data via the global offset table. +func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { + var lea, mov obj.As + var reg int16 + if ctxt.Arch.Family == sys.AMD64 { + lea = ALEAQ + mov = AMOVQ + reg = REG_R15 + } else { + lea = ALEAL + mov = AMOVL + reg = REG_CX + if p.As == ALEAL && p.To.Reg != p.From.Reg && p.To.Reg != p.From.Index { + // Special case: clobber the destination register with + // the PC so we don't have to clobber CX. + // The SSA backend depends on CX not being clobbered across LEAL. + // See cmd/compile/internal/ssa/gen/386.rules (search for Flag_shared). + reg = p.To.Reg + } + } + + if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO { + // ADUFFxxx $offset + // becomes + // $MOV runtime.duffxxx@GOT, $reg + // $LEA $offset($reg), $reg + // CALL $reg + // (we use LEAx rather than ADDx because ADDx clobbers + // flags and duffzero on 386 does not otherwise do so). + var sym *obj.LSym + if p.As == obj.ADUFFZERO { + sym = ctxt.LookupABI("runtime.duffzero", obj.ABIInternal) + } else { + sym = ctxt.LookupABI("runtime.duffcopy", obj.ABIInternal) + } + offset := p.To.Offset + p.As = mov + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_GOTREF + p.From.Sym = sym + p.To.Type = obj.TYPE_REG + p.To.Reg = reg + p.To.Offset = 0 + p.To.Sym = nil + p1 := obj.Appendp(p, newprog) + p1.As = lea + p1.From.Type = obj.TYPE_MEM + p1.From.Offset = offset + p1.From.Reg = reg + p1.To.Type = obj.TYPE_REG + p1.To.Reg = reg + p2 := obj.Appendp(p1, newprog) + p2.As = obj.ACALL + p2.To.Type = obj.TYPE_REG + p2.To.Reg = reg + } + + // We only care about global data: NAME_EXTERN means a global + // symbol in the Go sense, and p.Sym.Local is true for a few + // internally defined symbols. 
+ if p.As == lea && p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() { + // $LEA sym, Rx becomes $MOV $sym, Rx which will be rewritten below + p.As = mov + p.From.Type = obj.TYPE_ADDR + } + if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() { + // $MOV $sym, Rx becomes $MOV sym@GOT, Rx + // $MOV $sym+, Rx becomes $MOV sym@GOT, Rx; $LEA (Rx), Rx + // On 386 only, more complicated things like PUSHL $sym become $MOV sym@GOT, CX; PUSHL CX + cmplxdest := false + pAs := p.As + var dest obj.Addr + if p.To.Type != obj.TYPE_REG || pAs != mov { + if ctxt.Arch.Family == sys.AMD64 { + ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p) + } + cmplxdest = true + dest = p.To + p.As = mov + p.To.Type = obj.TYPE_REG + p.To.Reg = reg + p.To.Sym = nil + p.To.Name = obj.NAME_NONE + } + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_GOTREF + q := p + if p.From.Offset != 0 { + q = obj.Appendp(p, newprog) + q.As = lea + q.From.Type = obj.TYPE_MEM + q.From.Reg = p.To.Reg + q.From.Offset = p.From.Offset + q.To = p.To + p.From.Offset = 0 + } + if cmplxdest { + q = obj.Appendp(q, newprog) + q.As = pAs + q.To = dest + q.From.Type = obj.TYPE_REG + q.From.Reg = reg + } + } + if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN { + ctxt.Diag("don't know how to handle %v with -dynlink", p) + } + var source *obj.Addr + // MOVx sym, Ry becomes $MOV sym@GOT, R15; MOVx (R15), Ry + // MOVx Ry, sym becomes $MOV sym@GOT, R15; MOVx Ry, (R15) + // An addition may be inserted between the two MOVs if there is an offset. 
+ if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() { + if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { + ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p) + } + source = &p.From + } else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { + source = &p.To + } else { + return + } + if p.As == obj.ACALL { + // When dynlinking on 386, almost any call might end up being a call + // to a PLT, so make sure the GOT pointer is loaded into BX. + // RegTo2 is set on the replacement call insn to stop it being + // processed when it is in turn passed to progedit. + // + // We disable open-coded defers in buildssa() on 386 ONLY with shared + // libraries because of this extra code added before deferreturn calls. + if ctxt.Arch.Family == sys.AMD64 || (p.To.Sym != nil && p.To.Sym.Local()) || p.RegTo2 != 0 { + return + } + p1 := obj.Appendp(p, newprog) + p2 := obj.Appendp(p1, newprog) + + p1.As = ALEAL + p1.From.Type = obj.TYPE_MEM + p1.From.Name = obj.NAME_STATIC + p1.From.Sym = ctxt.Lookup("_GLOBAL_OFFSET_TABLE_") + p1.To.Type = obj.TYPE_REG + p1.To.Reg = REG_BX + + p2.As = p.As + p2.Scond = p.Scond + p2.From = p.From + if p.RestArgs != nil { + p2.RestArgs = append(p2.RestArgs, p.RestArgs...) + } + p2.Reg = p.Reg + p2.To = p.To + // p.To.Type was set to TYPE_BRANCH above, but that makes checkaddr + // in ../pass.go complain, so set it back to TYPE_MEM here, until p2 + // itself gets passed to progedit. 
+ p2.To.Type = obj.TYPE_MEM + p2.RegTo2 = 1 + + obj.Nopout(p) + return + + } + if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ARET || p.As == obj.AJMP { + return + } + if source.Type != obj.TYPE_MEM { + ctxt.Diag("don't know how to handle %v with -dynlink", p) + } + p1 := obj.Appendp(p, newprog) + p2 := obj.Appendp(p1, newprog) + + p1.As = mov + p1.From.Type = obj.TYPE_MEM + p1.From.Sym = source.Sym + p1.From.Name = obj.NAME_GOTREF + p1.To.Type = obj.TYPE_REG + p1.To.Reg = reg + + p2.As = p.As + p2.From = p.From + p2.To = p.To + if from3 := p.GetFrom3(); from3 != nil { + p2.AddRestSource(*from3) + } + if p.From.Name == obj.NAME_EXTERN { + p2.From.Reg = reg + p2.From.Name = obj.NAME_NONE + p2.From.Sym = nil + } else if p.To.Name == obj.NAME_EXTERN { + p2.To.Reg = reg + p2.To.Name = obj.NAME_NONE + p2.To.Sym = nil + } else { + return + } + obj.Nopout(p) +} + +func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { + // RegTo2 is set on the instructions we insert here so they don't get + // processed twice. + if p.RegTo2 != 0 { + return + } + if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP { + return + } + // Any Prog (aside from the above special cases) with an Addr with Name == + // NAME_EXTERN, NAME_STATIC or NAME_GOTREF has a CALL __x86.get_pc_thunk.XX + // inserted before it. + isName := func(a *obj.Addr) bool { + if a.Sym == nil || (a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR) || a.Reg != 0 { + return false + } + if a.Sym.Type == objabi.STLSBSS { + return false + } + return a.Name == obj.NAME_EXTERN || a.Name == obj.NAME_STATIC || a.Name == obj.NAME_GOTREF + } + + if isName(&p.From) && p.From.Type == obj.TYPE_ADDR { + // Handle things like "MOVL $sym, (SP)" or "PUSHL $sym" by rewriting + // to "MOVL $sym, CX; MOVL CX, (SP)" or "MOVL $sym, CX; PUSHL CX" + // respectively. 
+ if p.To.Type != obj.TYPE_REG { + q := obj.Appendp(p, newprog) + q.As = p.As + q.From.Type = obj.TYPE_REG + q.From.Reg = REG_CX + q.To = p.To + p.As = AMOVL + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_CX + p.To.Sym = nil + p.To.Name = obj.NAME_NONE + } + } + + if !isName(&p.From) && !isName(&p.To) && (p.GetFrom3() == nil || !isName(p.GetFrom3())) { + return + } + var dst int16 = REG_CX + if (p.As == ALEAL || p.As == AMOVL) && p.To.Reg != p.From.Reg && p.To.Reg != p.From.Index { + dst = p.To.Reg + // Why? See the comment near the top of rewriteToUseGot above. + // AMOVLs might be introduced by the GOT rewrites. + } + q := obj.Appendp(p, newprog) + q.RegTo2 = 1 + r := obj.Appendp(q, newprog) + r.RegTo2 = 1 + q.As = obj.ACALL + thunkname := "__x86.get_pc_thunk." + strings.ToLower(rconv(int(dst))) + q.To.Sym = ctxt.LookupInit(thunkname, func(s *obj.LSym) { s.Set(obj.AttrLocal, true) }) + q.To.Type = obj.TYPE_MEM + q.To.Name = obj.NAME_EXTERN + r.As = p.As + r.Scond = p.Scond + r.From = p.From + r.RestArgs = p.RestArgs + r.Reg = p.Reg + r.To = p.To + if isName(&p.From) { + r.From.Reg = dst + } + if isName(&p.To) { + r.To.Reg = dst + } + if p.GetFrom3() != nil && isName(p.GetFrom3()) { + r.GetFrom3().Reg = dst + } + obj.Nopout(p) +} + +// Prog.mark +const ( + markBit = 1 << 0 // used in errorCheck to avoid duplicate work +) + +func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { + if cursym.Func().Text == nil || cursym.Func().Text.Link == nil { + return + } + + p := cursym.Func().Text + autoffset := int32(p.To.Offset) + if autoffset < 0 { + autoffset = 0 + } + + hasCall := false + for q := p; q != nil; q = q.Link { + if q.As == obj.ACALL || q.As == obj.ADUFFCOPY || q.As == obj.ADUFFZERO { + hasCall = true + break + } + } + + var bpsize int + if ctxt.Arch.Family == sys.AMD64 && + !p.From.Sym.NoFrame() && // (1) below + !(autoffset == 0 && !hasCall) { // (2) below + // Make room to save a base pointer. 
+ // There are 2 cases we must avoid: + // 1) If noframe is set (which we do for functions which tail call). + // For performance, we also want to avoid: + // 2) Frameless leaf functions + bpsize = ctxt.Arch.PtrSize + autoffset += int32(bpsize) + p.To.Offset += int64(bpsize) + } else { + bpsize = 0 + p.From.Sym.Set(obj.AttrNoFrame, true) + } + + textarg := int64(p.To.Val.(int32)) + cursym.Func().Args = int32(textarg) + cursym.Func().Locals = int32(p.To.Offset) + + // TODO(rsc): Remove. + if ctxt.Arch.Family == sys.I386 && cursym.Func().Locals < 0 { + cursym.Func().Locals = 0 + } + + // TODO(rsc): Remove 'ctxt.Arch.Family == sys.AMD64 &&'. + if ctxt.Arch.Family == sys.AMD64 && autoffset < abi.StackSmall && !p.From.Sym.NoSplit() { + leaf := true + LeafSearch: + for q := p; q != nil; q = q.Link { + switch q.As { + case obj.ACALL: + // Treat common runtime calls that take no arguments + // the same as duffcopy and duffzero. + if !isZeroArgRuntimeCall(q.To.Sym) { + leaf = false + break LeafSearch + } + fallthrough + case obj.ADUFFCOPY, obj.ADUFFZERO: + if autoffset >= abi.StackSmall-8 { + leaf = false + break LeafSearch + } + } + } + + if leaf { + p.From.Sym.Set(obj.AttrNoSplit, true) + } + } + + var regEntryTmp0, regEntryTmp1 int16 + if ctxt.Arch.Family == sys.AMD64 { + regEntryTmp0, regEntryTmp1 = REGENTRYTMP0, REGENTRYTMP1 + } else { + regEntryTmp0, regEntryTmp1 = REG_BX, REG_DI + } + + var regg int16 + if !p.From.Sym.NoSplit() { + // Emit split check and load G register + p, regg = stacksplit(ctxt, cursym, p, newprog, autoffset, int32(textarg)) + } else if p.From.Sym.Wrapper() { + // Load G register for the wrapper code + p, regg = loadG(ctxt, cursym, p, newprog) + } + + if bpsize > 0 { + // Save caller's BP + p = obj.Appendp(p, newprog) + + p.As = APUSHQ + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_BP + + // Move current frame to BP + p = obj.Appendp(p, newprog) + + p.As = AMOVQ + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_SP + p.To.Type = obj.TYPE_REG + 
p.To.Reg = REG_BP + } + + if autoffset%int32(ctxt.Arch.RegSize) != 0 { + ctxt.Diag("unaligned stack size %d", autoffset) + } + + // localoffset is autoffset discounting the frame pointer, + // which has already been allocated in the stack. + localoffset := autoffset - int32(bpsize) + if localoffset != 0 { + p = obj.Appendp(p, newprog) + p.As = AADJSP + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(localoffset) + p.Spadj = localoffset + } + + // Delve debugger would like the next instruction to be noted as the end of the function prologue. + // TODO: are there other cases (e.g., wrapper functions) that need marking? + if autoffset != 0 { + p.Pos = p.Pos.WithXlogue(src.PosPrologueEnd) + } + + if cursym.Func().Text.From.Sym.Wrapper() { + // if g._panic != nil && g._panic.argp == FP { + // g._panic.argp = bottom-of-frame + // } + // + // MOVQ g_panic(g), regEntryTmp0 + // TESTQ regEntryTmp0, regEntryTmp0 + // JNE checkargp + // end: + // NOP + // ... rest of function ... + // checkargp: + // LEAQ (autoffset+8)(SP), regEntryTmp1 + // CMPQ panic_argp(regEntryTmp0), regEntryTmp1 + // JNE end + // MOVQ SP, panic_argp(regEntryTmp0) + // JMP end + // + // The NOP is needed to give the jumps somewhere to land. + // It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes. + // + // The layout is chosen to help static branch prediction: + // Both conditional jumps are unlikely, so they are arranged to be forward jumps. 
+ + // MOVQ g_panic(g), regEntryTmp0 + p = obj.Appendp(p, newprog) + p.As = AMOVQ + p.From.Type = obj.TYPE_MEM + p.From.Reg = regg + p.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // g_panic + p.To.Type = obj.TYPE_REG + p.To.Reg = regEntryTmp0 + if ctxt.Arch.Family == sys.I386 { + p.As = AMOVL + } + + // TESTQ regEntryTmp0, regEntryTmp0 + p = obj.Appendp(p, newprog) + p.As = ATESTQ + p.From.Type = obj.TYPE_REG + p.From.Reg = regEntryTmp0 + p.To.Type = obj.TYPE_REG + p.To.Reg = regEntryTmp0 + if ctxt.Arch.Family == sys.I386 { + p.As = ATESTL + } + + // JNE checkargp (checkargp to be resolved later) + jne := obj.Appendp(p, newprog) + jne.As = AJNE + jne.To.Type = obj.TYPE_BRANCH + + // end: + // NOP + end := obj.Appendp(jne, newprog) + end.As = obj.ANOP + + // Fast forward to end of function. + var last *obj.Prog + for last = end; last.Link != nil; last = last.Link { + } + + // LEAQ (autoffset+8)(SP), regEntryTmp1 + p = obj.Appendp(last, newprog) + p.As = ALEAQ + p.From.Type = obj.TYPE_MEM + p.From.Reg = REG_SP + p.From.Offset = int64(autoffset) + int64(ctxt.Arch.RegSize) + p.To.Type = obj.TYPE_REG + p.To.Reg = regEntryTmp1 + if ctxt.Arch.Family == sys.I386 { + p.As = ALEAL + } + + // Set jne branch target. 
+ jne.To.SetTarget(p) + + // CMPQ panic_argp(regEntryTmp0), regEntryTmp1 + p = obj.Appendp(p, newprog) + p.As = ACMPQ + p.From.Type = obj.TYPE_MEM + p.From.Reg = regEntryTmp0 + p.From.Offset = 0 // Panic.argp + p.To.Type = obj.TYPE_REG + p.To.Reg = regEntryTmp1 + if ctxt.Arch.Family == sys.I386 { + p.As = ACMPL + } + + // JNE end + p = obj.Appendp(p, newprog) + p.As = AJNE + p.To.Type = obj.TYPE_BRANCH + p.To.SetTarget(end) + + // MOVQ SP, panic_argp(regEntryTmp0) + p = obj.Appendp(p, newprog) + p.As = AMOVQ + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_SP + p.To.Type = obj.TYPE_MEM + p.To.Reg = regEntryTmp0 + p.To.Offset = 0 // Panic.argp + if ctxt.Arch.Family == sys.I386 { + p.As = AMOVL + } + + // JMP end + p = obj.Appendp(p, newprog) + p.As = obj.AJMP + p.To.Type = obj.TYPE_BRANCH + p.To.SetTarget(end) + + // Reset p for following code. + p = end + } + + var deltasp int32 + for p = cursym.Func().Text; p != nil; p = p.Link { + pcsize := ctxt.Arch.RegSize + switch p.From.Name { + case obj.NAME_AUTO: + p.From.Offset += int64(deltasp) - int64(bpsize) + case obj.NAME_PARAM: + p.From.Offset += int64(deltasp) + int64(pcsize) + } + if p.GetFrom3() != nil { + switch p.GetFrom3().Name { + case obj.NAME_AUTO: + p.GetFrom3().Offset += int64(deltasp) - int64(bpsize) + case obj.NAME_PARAM: + p.GetFrom3().Offset += int64(deltasp) + int64(pcsize) + } + } + switch p.To.Name { + case obj.NAME_AUTO: + p.To.Offset += int64(deltasp) - int64(bpsize) + case obj.NAME_PARAM: + p.To.Offset += int64(deltasp) + int64(pcsize) + } + + switch p.As { + default: + if p.To.Type == obj.TYPE_REG && p.To.Reg == REG_SP && p.As != ACMPL && p.As != ACMPQ { + f := cursym.Func() + if f.FuncFlag&abi.FuncFlagSPWrite == 0 { + f.FuncFlag |= abi.FuncFlagSPWrite + if ctxt.Debugvlog || !ctxt.IsAsm { + ctxt.Logf("auto-SPWRITE: %s %v\n", cursym.Name, p) + if !ctxt.IsAsm { + ctxt.Diag("invalid auto-SPWRITE in non-assembly") + ctxt.DiagFlush() + log.Fatalf("bad SPWRITE") + } + } + } + } + continue + + case 
APUSHL, APUSHFL: + deltasp += 4 + p.Spadj = 4 + continue + + case APUSHQ, APUSHFQ: + deltasp += 8 + p.Spadj = 8 + continue + + case APUSHW, APUSHFW: + deltasp += 2 + p.Spadj = 2 + continue + + case APOPL, APOPFL: + deltasp -= 4 + p.Spadj = -4 + continue + + case APOPQ, APOPFQ: + deltasp -= 8 + p.Spadj = -8 + continue + + case APOPW, APOPFW: + deltasp -= 2 + p.Spadj = -2 + continue + + case AADJSP: + p.Spadj = int32(p.From.Offset) + deltasp += int32(p.From.Offset) + continue + + case obj.ARET: + // do nothing + } + + if autoffset != deltasp { + ctxt.Diag("%s: unbalanced PUSH/POP", cursym) + } + + if autoffset != 0 { + to := p.To // Keep To attached to RET for retjmp below + p.To = obj.Addr{} + if localoffset != 0 { + p.As = AADJSP + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(-localoffset) + p.Spadj = -localoffset + p = obj.Appendp(p, newprog) + } + + if bpsize > 0 { + // Restore caller's BP + p.As = APOPQ + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_BP + p.Spadj = -int32(bpsize) + p = obj.Appendp(p, newprog) + } + + p.As = obj.ARET + p.To = to + + // If there are instructions following + // this ARET, they come from a branch + // with the same stackframe, so undo + // the cleanup. + p.Spadj = +autoffset + } + + if p.To.Sym != nil { // retjmp + p.As = obj.AJMP + } + } +} + +func isZeroArgRuntimeCall(s *obj.LSym) bool { + if s == nil { + return false + } + switch s.Name { + case "runtime.panicdivide", "runtime.panicwrap", "runtime.panicshift": + return true + } + if strings.HasPrefix(s.Name, "runtime.panicIndex") || strings.HasPrefix(s.Name, "runtime.panicSlice") { + // These functions do take arguments (in registers), + // but use no stack before they do a stack check. We + // should include them. See issue 31219. + return true + } + return false +} + +func indir_cx(ctxt *obj.Link, a *obj.Addr) { + a.Type = obj.TYPE_MEM + a.Reg = REG_CX +} + +// loadG ensures the G is loaded into a register (either CX or REGG), +// appending instructions to p if necessary. 
It returns the new last +// instruction and the G register. +func loadG(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgAlloc) (*obj.Prog, int16) { + if ctxt.Arch.Family == sys.AMD64 && cursym.ABI() == obj.ABIInternal { + // Use the G register directly in ABIInternal + return p, REGG + } + + var regg int16 = REG_CX + if ctxt.Arch.Family == sys.AMD64 { + regg = REGG // == REG_R14 + } + + p = obj.Appendp(p, newprog) + p.As = AMOVQ + if ctxt.Arch.PtrSize == 4 { + p.As = AMOVL + } + p.From.Type = obj.TYPE_MEM + p.From.Reg = REG_TLS + p.From.Offset = 0 + p.To.Type = obj.TYPE_REG + p.To.Reg = regg + + // Rewrite TLS instruction if necessary. + next := p.Link + progedit(ctxt, p, newprog) + for p.Link != next { + p = p.Link + progedit(ctxt, p, newprog) + } + + if p.From.Index == REG_TLS { + p.From.Scale = 2 + } + + return p, regg +} + +// Append code to p to check for stack split. +// Appends to (does not overwrite) p. +// Assumes g is in rg. +// Returns last new instruction and G register. +func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgAlloc, framesize int32, textarg int32) (*obj.Prog, int16) { + cmp := ACMPQ + lea := ALEAQ + mov := AMOVQ + sub := ASUBQ + push, pop := APUSHQ, APOPQ + + if ctxt.Arch.Family == sys.I386 { + cmp = ACMPL + lea = ALEAL + mov = AMOVL + sub = ASUBL + push, pop = APUSHL, APOPL + } + + tmp := int16(REG_AX) // use AX for 32-bit + if ctxt.Arch.Family == sys.AMD64 { + // Avoid register parameters. + tmp = int16(REGENTRYTMP0) + } + + if ctxt.Flag_maymorestack != "" { + p = cursym.Func().SpillRegisterArgs(p, newprog) + + if cursym.Func().Text.From.Sym.NeedCtxt() { + p = obj.Appendp(p, newprog) + p.As = push + p.From.Type = obj.TYPE_REG + p.From.Reg = REGCTXT + } + + // We call maymorestack with an ABI matching the + // caller's ABI. Since this is the first thing that + // happens in the function, we have to be consistent + // with the caller about CPU state (notably, + // fixed-meaning registers). 
+ + p = obj.Appendp(p, newprog) + p.As = obj.ACALL + p.To.Type = obj.TYPE_BRANCH + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ctxt.LookupABI(ctxt.Flag_maymorestack, cursym.ABI()) + + if cursym.Func().Text.From.Sym.NeedCtxt() { + p = obj.Appendp(p, newprog) + p.As = pop + p.To.Type = obj.TYPE_REG + p.To.Reg = REGCTXT + } + + p = cursym.Func().UnspillRegisterArgs(p, newprog) + } + + // Jump back to here after morestack returns. + startPred := p + + // Load G register + var rg int16 + p, rg = loadG(ctxt, cursym, p, newprog) + + var q1 *obj.Prog + if framesize <= abi.StackSmall { + // small stack: SP <= stackguard + // CMPQ SP, stackguard + p = obj.Appendp(p, newprog) + + p.As = cmp + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_SP + p.To.Type = obj.TYPE_MEM + p.To.Reg = rg + p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0 + if cursym.CFunc() { + p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1 + } + + // Mark the stack bound check and morestack call async nonpreemptible. + // If we get preempted here, when resumed the preemption request is + // cleared, but we'll still call morestack, which will double the stack + // unnecessarily. See issue #35470. + p = ctxt.StartUnsafePoint(p, newprog) + } else if framesize <= abi.StackBig { + // large stack: SP-framesize <= stackguard-StackSmall + // LEAQ -xxx(SP), tmp + // CMPQ tmp, stackguard + p = obj.Appendp(p, newprog) + + p.As = lea + p.From.Type = obj.TYPE_MEM + p.From.Reg = REG_SP + p.From.Offset = -(int64(framesize) - abi.StackSmall) + p.To.Type = obj.TYPE_REG + p.To.Reg = tmp + + p = obj.Appendp(p, newprog) + p.As = cmp + p.From.Type = obj.TYPE_REG + p.From.Reg = tmp + p.To.Type = obj.TYPE_MEM + p.To.Reg = rg + p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0 + if cursym.CFunc() { + p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1 + } + + p = ctxt.StartUnsafePoint(p, newprog) // see the comment above + } else { + // Such a large stack we need to protect against underflow. 
+ // The runtime guarantees SP > objabi.StackBig, but + // framesize is large enough that SP-framesize may + // underflow, causing a direct comparison with the + // stack guard to incorrectly succeed. We explicitly + // guard against underflow. + // + // MOVQ SP, tmp + // SUBQ $(framesize - StackSmall), tmp + // // If subtraction wrapped (carry set), morestack. + // JCS label-of-call-to-morestack + // CMPQ tmp, stackguard + + p = obj.Appendp(p, newprog) + + p.As = mov + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_SP + p.To.Type = obj.TYPE_REG + p.To.Reg = tmp + + p = ctxt.StartUnsafePoint(p, newprog) // see the comment above + + p = obj.Appendp(p, newprog) + p.As = sub + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(framesize) - abi.StackSmall + p.To.Type = obj.TYPE_REG + p.To.Reg = tmp + + p = obj.Appendp(p, newprog) + p.As = AJCS + p.To.Type = obj.TYPE_BRANCH + q1 = p + + p = obj.Appendp(p, newprog) + p.As = cmp + p.From.Type = obj.TYPE_REG + p.From.Reg = tmp + p.To.Type = obj.TYPE_MEM + p.To.Reg = rg + p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0 + if cursym.CFunc() { + p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1 + } + } + + // common + jls := obj.Appendp(p, newprog) + jls.As = AJLS + jls.To.Type = obj.TYPE_BRANCH + + end := ctxt.EndUnsafePoint(jls, newprog, -1) + + var last *obj.Prog + for last = cursym.Func().Text; last.Link != nil; last = last.Link { + } + + // Now we are at the end of the function, but logically + // we are still in function prologue. We need to fix the + // SP data and PCDATA. 
+ spfix := obj.Appendp(last, newprog) + spfix.As = obj.ANOP + spfix.Spadj = -framesize + + pcdata := ctxt.EmitEntryStackMap(cursym, spfix, newprog) + spill := ctxt.StartUnsafePoint(pcdata, newprog) + pcdata = cursym.Func().SpillRegisterArgs(spill, newprog) + + call := obj.Appendp(pcdata, newprog) + call.Pos = cursym.Func().Text.Pos + call.As = obj.ACALL + call.To.Type = obj.TYPE_BRANCH + call.To.Name = obj.NAME_EXTERN + morestack := "runtime.morestack" + switch { + case cursym.CFunc(): + morestack = "runtime.morestackc" + case !cursym.Func().Text.From.Sym.NeedCtxt(): + morestack = "runtime.morestack_noctxt" + } + call.To.Sym = ctxt.Lookup(morestack) + // When compiling 386 code for dynamic linking, the call needs to be adjusted + // to follow PIC rules. This in turn can insert more instructions, so we need + // to keep track of the start of the call (where the jump will be to) and the + // end (which following instructions are appended to). + callend := call + progedit(ctxt, callend, newprog) + for ; callend.Link != nil; callend = callend.Link { + progedit(ctxt, callend.Link, newprog) + } + + // The instructions which unspill regs should be preemptible. 
+ pcdata = ctxt.EndUnsafePoint(callend, newprog, -1) + unspill := cursym.Func().UnspillRegisterArgs(pcdata, newprog) + + jmp := obj.Appendp(unspill, newprog) + jmp.As = obj.AJMP + jmp.To.Type = obj.TYPE_BRANCH + jmp.To.SetTarget(startPred.Link) + jmp.Spadj = +framesize + + jls.To.SetTarget(spill) + if q1 != nil { + q1.To.SetTarget(spill) + } + + return end, rg +} + +func isR15(r int16) bool { + return r == REG_R15 || r == REG_R15B +} +func addrMentionsR15(a *obj.Addr) bool { + if a == nil { + return false + } + return isR15(a.Reg) || isR15(a.Index) +} +func progMentionsR15(p *obj.Prog) bool { + return addrMentionsR15(&p.From) || addrMentionsR15(&p.To) || isR15(p.Reg) || addrMentionsR15(p.GetFrom3()) +} + +func addrUsesGlobal(a *obj.Addr) bool { + if a == nil { + return false + } + return a.Name == obj.NAME_EXTERN && !a.Sym.Local() +} +func progUsesGlobal(p *obj.Prog) bool { + if p.As == obj.ACALL || p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ARET || p.As == obj.AJMP { + // These opcodes don't use a GOT to access their argument (see rewriteToUseGot), + // or R15 would be dead at them anyway. + return false + } + if p.As == ALEAQ { + // The GOT entry is placed directly in the destination register; R15 is not used. + return false + } + return addrUsesGlobal(&p.From) || addrUsesGlobal(&p.To) || addrUsesGlobal(p.GetFrom3()) +} + +type rwMask int + +const ( + readFrom rwMask = 1 << iota + readTo + readReg + readFrom3 + writeFrom + writeTo + writeReg + writeFrom3 +) + +// progRW returns a mask describing the effects of the instruction p. +// Note: this isn't exhaustively accurate. It is only currently used for detecting +// reads/writes to R15, so SSE register behavior isn't fully correct, and +// other weird cases (e.g. writes to DX by CLD) also aren't captured. 
+func progRW(p *obj.Prog) rwMask { + var m rwMask + // Default for most instructions + if p.From.Type != obj.TYPE_NONE { + m |= readFrom + } + if p.To.Type != obj.TYPE_NONE { + // Most x86 instructions update the To value + m |= readTo | writeTo + } + if p.Reg != 0 { + m |= readReg + } + if p.GetFrom3() != nil { + m |= readFrom3 + } + + // Lots of exceptions to the above defaults. + name := p.As.String() + if strings.HasPrefix(name, "MOV") || strings.HasPrefix(name, "PMOV") { + // MOV instructions don't read To. + m &^= readTo + } + switch p.As { + case APOPW, APOPL, APOPQ, + ALEAL, ALEAQ, + AIMUL3W, AIMUL3L, AIMUL3Q, + APEXTRB, APEXTRW, APEXTRD, APEXTRQ, AVPEXTRB, AVPEXTRW, AVPEXTRD, AVPEXTRQ, AEXTRACTPS, + ABSFW, ABSFL, ABSFQ, ABSRW, ABSRL, ABSRQ, APOPCNTW, APOPCNTL, APOPCNTQ, ALZCNTW, ALZCNTL, ALZCNTQ, + ASHLXL, ASHLXQ, ASHRXL, ASHRXQ, ASARXL, ASARXQ: + // These instructions are pure writes to To. They don't use its old value. + m &^= readTo + case AXORL, AXORQ: + // Register-clearing idiom doesn't read previous value. + if p.From.Type == obj.TYPE_REG && p.To.Type == obj.TYPE_REG && p.From.Reg == p.To.Reg { + m &^= readFrom | readTo + } + case AMULXL, AMULXQ: + // These are write-only to both To and From3. + m &^= readTo | readFrom3 + m |= writeFrom3 + } + return m +} + +// progReadsR15 reports whether p reads the register R15. 
+func progReadsR15(p *obj.Prog) bool { + m := progRW(p) + if m&readFrom != 0 && p.From.Type == obj.TYPE_REG && isR15(p.From.Reg) { + return true + } + if m&readTo != 0 && p.To.Type == obj.TYPE_REG && isR15(p.To.Reg) { + return true + } + if m&readReg != 0 && isR15(p.Reg) { + return true + } + if m&readFrom3 != 0 && p.GetFrom3().Type == obj.TYPE_REG && isR15(p.GetFrom3().Reg) { + return true + } + // reads of the index registers + if p.From.Type == obj.TYPE_MEM && (isR15(p.From.Reg) || isR15(p.From.Index)) { + return true + } + if p.To.Type == obj.TYPE_MEM && (isR15(p.To.Reg) || isR15(p.To.Index)) { + return true + } + if f3 := p.GetFrom3(); f3 != nil && f3.Type == obj.TYPE_MEM && (isR15(f3.Reg) || isR15(f3.Index)) { + return true + } + return false +} + +// progWritesR15 reports whether p writes the register R15. +func progWritesR15(p *obj.Prog) bool { + m := progRW(p) + if m&writeFrom != 0 && p.From.Type == obj.TYPE_REG && isR15(p.From.Reg) { + return true + } + if m&writeTo != 0 && p.To.Type == obj.TYPE_REG && isR15(p.To.Reg) { + return true + } + if m&writeReg != 0 && isR15(p.Reg) { + return true + } + if m&writeFrom3 != 0 && p.GetFrom3().Type == obj.TYPE_REG && isR15(p.GetFrom3().Reg) { + return true + } + return false +} + +func errorCheck(ctxt *obj.Link, s *obj.LSym) { + // When dynamic linking, R15 is used to access globals. Reject code that + // uses R15 after a global variable access. + if !ctxt.Flag_dynlink { + return + } + + // Flood fill all the instructions where R15's value is junk. + // If there are any uses of R15 in that set, report an error. 
+ var work []*obj.Prog + var mentionsR15 bool + for p := s.Func().Text; p != nil; p = p.Link { + if progUsesGlobal(p) { + work = append(work, p) + p.Mark |= markBit + } + if progMentionsR15(p) { + mentionsR15 = true + } + } + if mentionsR15 { + for len(work) > 0 { + p := work[len(work)-1] + work = work[:len(work)-1] + if progReadsR15(p) { + pos := ctxt.PosTable.Pos(p.Pos) + ctxt.Diag("%s:%s: when dynamic linking, R15 is clobbered by a global variable access and is used here: %v", path.Base(pos.Filename()), pos.LineNumber(), p) + break // only report one error + } + if progWritesR15(p) { + // R15 is overwritten by this instruction. Its value is not junk any more. + continue + } + if q := p.To.Target(); q != nil && q.Mark&markBit == 0 { + q.Mark |= markBit + work = append(work, q) + } + if p.As == obj.AJMP || p.As == obj.ARET { + continue // no fallthrough + } + if q := p.Link; q != nil && q.Mark&markBit == 0 { + q.Mark |= markBit + work = append(work, q) + } + } + } + + // Clean up. + for p := s.Func().Text; p != nil; p = p.Link { + p.Mark &^= markBit + } +} + +var unaryDst = map[obj.As]bool{ + ABSWAPL: true, + ABSWAPQ: true, + ACLDEMOTE: true, + ACLFLUSH: true, + ACLFLUSHOPT: true, + ACLWB: true, + ACMPXCHG16B: true, + ACMPXCHG8B: true, + ADECB: true, + ADECL: true, + ADECQ: true, + ADECW: true, + AFBSTP: true, + AFFREE: true, + AFLDENV: true, + AFSAVE: true, + AFSTCW: true, + AFSTENV: true, + AFSTSW: true, + AFXSAVE64: true, + AFXSAVE: true, + AINCB: true, + AINCL: true, + AINCQ: true, + AINCW: true, + ANEGB: true, + ANEGL: true, + ANEGQ: true, + ANEGW: true, + ANOTB: true, + ANOTL: true, + ANOTQ: true, + ANOTW: true, + APOPL: true, + APOPQ: true, + APOPW: true, + ARDFSBASEL: true, + ARDFSBASEQ: true, + ARDGSBASEL: true, + ARDGSBASEQ: true, + ARDPID: true, + ARDRANDL: true, + ARDRANDQ: true, + ARDRANDW: true, + ARDSEEDL: true, + ARDSEEDQ: true, + ARDSEEDW: true, + ASETCC: true, + ASETCS: true, + ASETEQ: true, + ASETGE: true, + ASETGT: true, + ASETHI: true, + 
ASETLE: true, + ASETLS: true, + ASETLT: true, + ASETMI: true, + ASETNE: true, + ASETOC: true, + ASETOS: true, + ASETPC: true, + ASETPL: true, + ASETPS: true, + ASGDT: true, + ASIDT: true, + ASLDTL: true, + ASLDTQ: true, + ASLDTW: true, + ASMSWL: true, + ASMSWQ: true, + ASMSWW: true, + ASTMXCSR: true, + ASTRL: true, + ASTRQ: true, + ASTRW: true, + AXSAVE64: true, + AXSAVE: true, + AXSAVEC64: true, + AXSAVEC: true, + AXSAVEOPT64: true, + AXSAVEOPT: true, + AXSAVES64: true, + AXSAVES: true, +} + +var Linkamd64 = obj.LinkArch{ + Arch: sys.ArchAMD64, + Init: instinit, + ErrorCheck: errorCheck, + Preprocess: preprocess, + Assemble: span6, + Progedit: progedit, + SEH: populateSeh, + UnaryDst: unaryDst, + DWARFRegisters: AMD64DWARFRegisters, +} + +var Link386 = obj.LinkArch{ + Arch: sys.Arch386, + Init: instinit, + Preprocess: preprocess, + Assemble: span6, + Progedit: progedit, + UnaryDst: unaryDst, + DWARFRegisters: X86DWARFRegisters, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/obj6_test.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/obj6_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d1246be77bd8d1488a350e096b320d324990cc26 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/obj6_test.go @@ -0,0 +1,167 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package x86_test + +import ( + "bufio" + "bytes" + "fmt" + "internal/testenv" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" +) + +const testdata = ` +MOVQ AX, AX -> MOVQ AX, AX + +LEAQ name(SB), AX -> MOVQ name@GOT(SB), AX +LEAQ name+10(SB), AX -> MOVQ name@GOT(SB), AX; LEAQ 10(AX), AX +MOVQ $name(SB), AX -> MOVQ name@GOT(SB), AX +MOVQ $name+10(SB), AX -> MOVQ name@GOT(SB), AX; LEAQ 10(AX), AX + +MOVQ name(SB), AX -> NOP; MOVQ name@GOT(SB), R15; MOVQ (R15), AX +MOVQ name+10(SB), AX -> NOP; MOVQ name@GOT(SB), R15; MOVQ 10(R15), AX + +CMPQ name(SB), $0 -> NOP; MOVQ name@GOT(SB), R15; CMPQ (R15), $0 + +MOVQ $1, name(SB) -> NOP; MOVQ name@GOT(SB), R15; MOVQ $1, (R15) +MOVQ $1, name+10(SB) -> NOP; MOVQ name@GOT(SB), R15; MOVQ $1, 10(R15) +` + +type ParsedTestData struct { + input string + marks []int + marker_to_input map[int][]string + marker_to_expected map[int][]string + marker_to_output map[int][]string +} + +const marker_start = 1234 + +func parseTestData(t *testing.T) *ParsedTestData { + r := &ParsedTestData{} + scanner := bufio.NewScanner(strings.NewReader(testdata)) + r.marker_to_input = make(map[int][]string) + r.marker_to_expected = make(map[int][]string) + marker := marker_start + input_insns := []string{} + for scanner.Scan() { + line := scanner.Text() + if len(strings.TrimSpace(line)) == 0 { + continue + } + parts := strings.Split(line, "->") + if len(parts) != 2 { + t.Fatalf("malformed line %v", line) + } + r.marks = append(r.marks, marker) + marker_insn := fmt.Sprintf("MOVQ $%d, AX", marker) + input_insns = append(input_insns, marker_insn) + for _, input_insn := range strings.Split(parts[0], ";") { + input_insns = append(input_insns, input_insn) + r.marker_to_input[marker] = append(r.marker_to_input[marker], normalize(input_insn)) + } + for _, expected_insn := range strings.Split(parts[1], ";") { + r.marker_to_expected[marker] = append(r.marker_to_expected[marker], normalize(expected_insn)) + } + marker++ + } + r.input = "TEXT 
·foo(SB),$0\n" + strings.Join(input_insns, "\n") + "\n" + return r +} + +var spaces_re *regexp.Regexp = regexp.MustCompile(`\s+`) + +func normalize(s string) string { + return spaces_re.ReplaceAllLiteralString(strings.TrimSpace(s), " ") +} + +func asmOutput(t *testing.T, s string) []byte { + tmpdir, err := os.MkdirTemp("", "progedittest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpfile, err := os.Create(filepath.Join(tmpdir, "input.s")) + if err != nil { + t.Fatal(err) + } + defer tmpfile.Close() + _, err = tmpfile.WriteString(s) + if err != nil { + t.Fatal(err) + } + cmd := testenv.Command(t, + testenv.GoToolPath(t), "tool", "asm", "-S", "-dynlink", + "-o", filepath.Join(tmpdir, "output.6"), tmpfile.Name()) + + cmd.Env = append(os.Environ(), + "GOARCH=amd64", "GOOS=linux", "GOPATH="+filepath.Join(tmpdir, "_gopath")) + asmout, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("error %s output %s", err, asmout) + } + return asmout +} + +func parseOutput(t *testing.T, td *ParsedTestData, asmout []byte) { + scanner := bufio.NewScanner(bytes.NewReader(asmout)) + marker := regexp.MustCompile(`MOVQ \$([0-9]+), AX`) + mark := -1 + td.marker_to_output = make(map[int][]string) + for scanner.Scan() { + line := scanner.Text() + if line[0] != '\t' { + continue + } + parts := strings.SplitN(line, "\t", 3) + if len(parts) != 3 { + continue + } + n := normalize(parts[2]) + mark_matches := marker.FindStringSubmatch(n) + if mark_matches != nil { + mark, _ = strconv.Atoi(mark_matches[1]) + if _, ok := td.marker_to_input[mark]; !ok { + t.Fatalf("unexpected marker %d", mark) + } + } else if mark != -1 { + td.marker_to_output[mark] = append(td.marker_to_output[mark], n) + } + } +} + +func TestDynlink(t *testing.T) { + testenv.MustHaveGoBuild(t) + + if os.Getenv("GOHOSTARCH") != "" { + // TODO: make this work? It was failing due to the + // GOARCH= filtering above and skipping is easiest for + // now. 
+ t.Skip("skipping when GOHOSTARCH is set") + } + + testdata := parseTestData(t) + asmout := asmOutput(t, testdata.input) + parseOutput(t, testdata, asmout) + for _, m := range testdata.marks { + i := strings.Join(testdata.marker_to_input[m], "; ") + o := strings.Join(testdata.marker_to_output[m], "; ") + e := strings.Join(testdata.marker_to_expected[m], "; ") + if o != e { + if o == i { + t.Errorf("%s was unchanged; should have become %s", i, e) + } else { + t.Errorf("%s became %s; should have become %s", i, o, e) + } + } else if i != e { + t.Logf("%s correctly became %s", i, o) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/pcrelative_test.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/pcrelative_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3827100123f26d874a30239bff703111e5006fb6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/pcrelative_test.go @@ -0,0 +1,105 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package x86_test + +import ( + "bytes" + "fmt" + "internal/testenv" + "os" + "path/filepath" + "testing" +) + +const asmData = ` +GLOBL zeros<>(SB),8,$64 +TEXT ·testASM(SB),4,$0 +VMOVUPS zeros<>(SB), %s // PC relative relocation is off by 1, for Y8-Y15, Z8-15 and Z24-Z31 +RET +` + +const goData = ` +package main + +func testASM() + +func main() { + testASM() +} +` + +func objdumpOutput(t *testing.T, mname, source string) []byte { + tmpdir, err := os.MkdirTemp("", mname) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + err = os.WriteFile(filepath.Join(tmpdir, "go.mod"), []byte(fmt.Sprintf("module %s\n", mname)), 0666) + if err != nil { + t.Fatal(err) + } + tmpfile, err := os.Create(filepath.Join(tmpdir, "input.s")) + if err != nil { + t.Fatal(err) + } + defer tmpfile.Close() + _, err = tmpfile.WriteString(source) + if err != nil { + t.Fatal(err) + } + tmpfile2, err := os.Create(filepath.Join(tmpdir, "input.go")) + if err != nil { + t.Fatal(err) + } + defer tmpfile2.Close() + _, err = tmpfile2.WriteString(goData) + if err != nil { + t.Fatal(err) + } + + cmd := testenv.Command(t, + testenv.GoToolPath(t), "build", "-o", + filepath.Join(tmpdir, "output")) + + cmd.Env = append(os.Environ(), + "GOARCH=amd64", "GOOS=linux", "GOPATH="+filepath.Join(tmpdir, "_gopath")) + cmd.Dir = tmpdir + + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("error %s output %s", err, out) + } + cmd2 := testenv.Command(t, + testenv.GoToolPath(t), "tool", "objdump", "-s", "testASM", + filepath.Join(tmpdir, "output")) + cmd2.Env = cmd.Env + cmd2.Dir = tmpdir + objout, err := cmd2.CombinedOutput() + if err != nil { + t.Fatalf("error %s output %s", err, objout) + } + + return objout +} + +func TestVexEvexPCrelative(t *testing.T) { + testenv.MustHaveGoBuild(t) +LOOP: + for _, reg := range []string{"Y0", "Y8", "Z0", "Z8", "Z16", "Z24"} { + asm := fmt.Sprintf(asmData, reg) + objout := objdumpOutput(t, "pcrelative", asm) + data := bytes.Split(objout, []byte("\n")) + 
for idx := len(data) - 1; idx >= 0; idx-- { + // check that RET wasn't overwritten. + if bytes.Index(data[idx], []byte("RET")) != -1 { + if testing.Short() { + break LOOP + } + continue LOOP + } + } + t.Errorf("VMOVUPS zeros<>(SB), %s overwrote RET", reg) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/seh.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/seh.go new file mode 100644 index 0000000000000000000000000000000000000000..71cdd3664202a1785d7a999d2d516a2b22dca144 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/seh.go @@ -0,0 +1,165 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x86 + +import ( + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" + "encoding/base64" + "fmt" + "math" +) + +type sehbuf struct { + ctxt *obj.Link + data []byte + off int +} + +func newsehbuf(ctxt *obj.Link, nodes uint8) sehbuf { + // - 8 bytes for the header + // - 2 bytes for each node + // - 2 bytes in case nodes is not even + size := 8 + nodes*2 + if nodes%2 != 0 { + size += 2 + } + return sehbuf{ctxt, make([]byte, size), 0} +} + +func (b *sehbuf) write8(v uint8) { + b.data[b.off] = v + b.off++ +} + +func (b *sehbuf) write32(v uint32) { + b.ctxt.Arch.ByteOrder.PutUint32(b.data[b.off:], v) + b.off += 4 +} + +func (b *sehbuf) writecode(op, value uint8) { + b.write8(value<<4 | op) +} + +// populateSeh generates the SEH unwind information for s. +func populateSeh(ctxt *obj.Link, s *obj.LSym) (sehsym *obj.LSym) { + if s.NoFrame() { + return + } + + // This implementation expects the following function prologue layout: + // - Stack split code (optional) + // - PUSHQ BP + // - MOVQ SP, BP + // + // If the prologue layout change, the unwind information should be updated + // accordingly. + + // Search for the PUSHQ BP instruction inside the prologue. 
+ var pushbp *obj.Prog + for p := s.Func().Text; p != nil; p = p.Link { + if p.As == APUSHQ && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_BP { + pushbp = p + break + } + if p.Pos.Xlogue() == src.PosPrologueEnd { + break + } + } + if pushbp == nil { + ctxt.Diag("missing frame pointer instruction: PUSHQ BP") + return + } + + // It must be followed by a MOVQ SP, BP. + movbp := pushbp.Link + if movbp == nil { + ctxt.Diag("missing frame pointer instruction: MOVQ SP, BP") + return + } + if !(movbp.As == AMOVQ && movbp.From.Type == obj.TYPE_REG && movbp.From.Reg == REG_SP && + movbp.To.Type == obj.TYPE_REG && movbp.To.Reg == REG_BP && movbp.From.Offset == 0) { + ctxt.Diag("unexpected frame pointer instruction\n%v", movbp) + return + } + if movbp.Link.Pc > math.MaxUint8 { + // SEH unwind information don't support prologues that are more than 255 bytes long. + // These are very rare, but still possible, e.g., when compiling functions with many + // parameters with -gcflags=-d=maymorestack=runtime.mayMoreStackPreempt. + // Return without reporting an error. + return + } + + // Reference: + // https://learn.microsoft.com/en-us/cpp/build/exception-handling-x64#struct-unwind_info + + const ( + UWOP_PUSH_NONVOL = 0 + UWOP_SET_FPREG = 3 + SEH_REG_BP = 5 + UNW_FLAG_EHANDLER = 1 << 3 + ) + + var exceptionHandler *obj.LSym + var flags uint8 + if s.Name == "runtime.asmcgocall_landingpad" { + // Most cgo calls go through runtime.asmcgocall_landingpad, + // we can use it to catch exceptions from C code. + // TODO: use a more generic approach to identify which calls need an exception handler. + exceptionHandler = ctxt.Lookup("runtime.sehtramp") + if exceptionHandler == nil { + ctxt.Diag("missing runtime.sehtramp\n") + return + } + flags = UNW_FLAG_EHANDLER + } + + // Fow now we only support operations which are encoded + // using a single 2-byte node, so the number of nodes + // is the number of operations. 
+ nodes := uint8(2) + buf := newsehbuf(ctxt, nodes) + buf.write8(flags | 1) // Flags + version + buf.write8(uint8(movbp.Link.Pc)) // Size of prolog + buf.write8(nodes) // Count of nodes + buf.write8(SEH_REG_BP) // FP register + + // Notes are written in reverse order of appearance. + buf.write8(uint8(movbp.Link.Pc)) + buf.writecode(UWOP_SET_FPREG, 0) + + buf.write8(uint8(pushbp.Link.Pc)) + buf.writecode(UWOP_PUSH_NONVOL, SEH_REG_BP) + + // The following 4 bytes reference the RVA of the exception handler. + // The value is set to 0 for now, if an exception handler is needed, + // it will be updated later with a R_PEIMAGEOFF relocation to the + // exception handler. + buf.write32(0) + + // The list of unwind infos in a PE binary have very low cardinality + // as each info only contains frame pointer operations, + // which are very similar across functions. + // Dedup them when possible. + hash := base64.StdEncoding.EncodeToString(buf.data) + symname := fmt.Sprintf("%d.%s", len(buf.data), hash) + return ctxt.LookupInit("go:sehuw."+symname, func(s *obj.LSym) { + s.WriteBytes(ctxt, 0, buf.data) + s.Type = objabi.SSEHUNWINDINFO + s.Set(obj.AttrDuplicateOK, true) + s.Set(obj.AttrLocal, true) + if exceptionHandler != nil { + r := obj.Addrel(s) + r.Off = int32(len(buf.data) - 4) + r.Siz = 4 + r.Sym = exceptionHandler + r.Type = objabi.R_PEIMAGEOFF + } + // Note: AttrContentAddressable cannot be set here, + // because the content-addressable-handling code + // does not know about aux symbols. + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/ytab.go b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/ytab.go new file mode 100644 index 0000000000000000000000000000000000000000..7d0b75bf46415292b6389af461bf35d446520816 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/obj/x86/ytab.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x86 + +// argListMax specifies upper arg count limit expected to be carried by obj.Prog. +// Max len(obj.Prog.RestArgs) can be inferred from this to be 4. +const argListMax int = 6 + +type argList [argListMax]uint8 + +type ytab struct { + zcase uint8 + zoffset uint8 + + // Last arg is usually destination. + // For unary instructions unaryDst is used to determine + // if single argument is a source or destination. + args argList +} + +// Returns true if yt is compatible with args. +// +// Elements from args and yt.args are used +// to index ycover table like `ycover[args[i]+yt.args[i]]`. +// This means that args should contain values that already +// multiplied by Ymax. +func (yt *ytab) match(args []int) bool { + // Trailing Yxxx check is required to avoid a case + // where shorter arg list is matched. + // If we had exact yt.args length, it could be `yt.argc != len(args)`. + if len(args) < len(yt.args) && yt.args[len(args)] != Yxxx { + return false + } + + for i := range args { + if ycover[args[i]+int(yt.args[i])] == 0 { + return false + } + } + + return true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/ascii.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/ascii.json new file mode 100644 index 0000000000000000000000000000000000000000..94695a10c5541ef57ddb4fe6e66c4e96b65f4e8b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/ascii.json @@ -0,0 +1,11 @@ +{"Action":"start"} +{"Action":"run","Test":"TestAscii"} +{"Action":"output","Test":"TestAscii","Output":"=== RUN TestAscii\n"} +{"Action":"output","Test":"TestAscii","Output":"I can eat glass, and it doesn't hurt me. I can eat glass, and it doesn't hurt me.\n"} +{"Action":"output","Test":"TestAscii","Output":"I CAN EAT GLASS, AND IT DOESN'T HURT ME. 
I CAN EAT GLASS, AND IT DOESN'T HURT ME.\n"} +{"Action":"output","Test":"TestAscii","Output":"--- PASS: TestAscii\n"} +{"Action":"output","Test":"TestAscii","Output":" i can eat glass, and it doesn't hurt me. i can eat glass, and it doesn't hurt me.\n"} +{"Action":"output","Test":"TestAscii","Output":" V PNA RNG TYNFF, NAQ VG QBRFA'G UHEG ZR. V PNA RNG TYNFF, NAQ VG QBRFA'G UHEG ZR.\n"} +{"Action":"pass","Test":"TestAscii"} +{"Action":"output","Output":"PASS\n"} +{"Action":"pass"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/ascii.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/ascii.test new file mode 100644 index 0000000000000000000000000000000000000000..4ff7453430cc8bbb92d089587edc39fa12eda7fc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/ascii.test @@ -0,0 +1,7 @@ +=== RUN TestAscii +I can eat glass, and it doesn't hurt me. I can eat glass, and it doesn't hurt me. +I CAN EAT GLASS, AND IT DOESN'T HURT ME. I CAN EAT GLASS, AND IT DOESN'T HURT ME. +--- PASS: TestAscii + i can eat glass, and it doesn't hurt me. i can eat glass, and it doesn't hurt me. + V PNA RNG TYNFF, NAQ VG QBRFA'G UHEG ZR. V PNA RNG TYNFF, NAQ VG QBRFA'G UHEG ZR. 
+PASS diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/bench.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/bench.json new file mode 100644 index 0000000000000000000000000000000000000000..102e189ed7ad3ca2746fa65eef98a753564437e0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/bench.json @@ -0,0 +1,15 @@ +{"Action":"start"} +{"Action":"output","Output":"goos: darwin\n"} +{"Action":"output","Output":"goarch: 386\n"} +{"Action":"output","Output":"BenchmarkFoo-8 \t2000000000\t 0.00 ns/op\n"} +{"Action":"output","Test":"BenchmarkFoo-8","Output":"--- BENCH: BenchmarkFoo-8\n"} +{"Action":"output","Test":"BenchmarkFoo-8","Output":"\tx_test.go:8: My benchmark\n"} +{"Action":"output","Test":"BenchmarkFoo-8","Output":"\tx_test.go:8: My benchmark\n"} +{"Action":"output","Test":"BenchmarkFoo-8","Output":"\tx_test.go:8: My benchmark\n"} +{"Action":"output","Test":"BenchmarkFoo-8","Output":"\tx_test.go:8: My benchmark\n"} +{"Action":"output","Test":"BenchmarkFoo-8","Output":"\tx_test.go:8: My benchmark\n"} +{"Action":"output","Test":"BenchmarkFoo-8","Output":"\tx_test.go:8: My benchmark\n"} +{"Action":"bench","Test":"BenchmarkFoo-8"} +{"Action":"output","Output":"PASS\n"} +{"Action":"output","Output":"ok \tcommand-line-arguments\t0.009s\n"} +{"Action":"pass"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/bench.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/bench.test new file mode 100644 index 0000000000000000000000000000000000000000..453bd5928a5d0f0c3c3f4004aee5f4ffe9680336 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/bench.test @@ -0,0 +1,12 @@ +goos: darwin +goarch: 386 +BenchmarkFoo-8 2000000000 0.00 ns/op +--- BENCH: BenchmarkFoo-8 + x_test.go:8: My benchmark + x_test.go:8: My benchmark + x_test.go:8: My benchmark + x_test.go:8: My benchmark + x_test.go:8: My benchmark + x_test.go:8: 
My benchmark +PASS +ok command-line-arguments 0.009s diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/benchfail.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/benchfail.json new file mode 100644 index 0000000000000000000000000000000000000000..d2d968191c4ffc8f9f7ea9c6773f4f4b440ea041 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/benchfail.json @@ -0,0 +1,7 @@ +{"Action":"start"} +{"Action":"output","Test":"BenchmarkFoo","Output":"--- FAIL: BenchmarkFoo\n"} +{"Action":"output","Test":"BenchmarkFoo","Output":"\tx_test.go:8: My benchmark\n"} +{"Action":"fail","Test":"BenchmarkFoo"} +{"Action":"output","Output":"FAIL\n"} +{"Action":"output","Output":"FAIL\tcommand-line-arguments\t0.008s\n"} +{"Action":"fail"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/benchfail.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/benchfail.test new file mode 100644 index 0000000000000000000000000000000000000000..538d95772020febe9d7e59f2d07dc194447e58aa --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/benchfail.test @@ -0,0 +1,4 @@ +--- FAIL: BenchmarkFoo + x_test.go:8: My benchmark +FAIL +FAIL command-line-arguments 0.008s diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/benchshort.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/benchshort.json new file mode 100644 index 0000000000000000000000000000000000000000..6c4e19357393246726e38e9831675db9dc7ef658 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/benchshort.json @@ -0,0 +1,7 @@ +{"Action":"start"} +{"Action":"output","Output":"# This file ends in an early EOF to trigger the Benchmark prefix test,\n"} +{"Action":"output","Output":"# which only happens when a benchmark prefix is seen ahead of the \\n.\n"} +{"Action":"output","Output":"# Normally that's due to 
the benchmark running and the \\n coming later,\n"} +{"Action":"output","Output":"# but to avoid questions of timing, we just use a file with no \\n at all.\n"} +{"Action":"output","Output":"BenchmarkFoo \t"} +{"Action":"output","Output":"10000 early EOF"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/benchshort.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/benchshort.test new file mode 100644 index 0000000000000000000000000000000000000000..0b173ab20d374ded23ca221b561f522204285ef1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/benchshort.test @@ -0,0 +1,5 @@ +# This file ends in an early EOF to trigger the Benchmark prefix test, +# which only happens when a benchmark prefix is seen ahead of the \n. +# Normally that's due to the benchmark running and the \n coming later, +# but to avoid questions of timing, we just use a file with no \n at all. +BenchmarkFoo 10000 early EOF \ No newline at end of file diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/empty.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/empty.json new file mode 100644 index 0000000000000000000000000000000000000000..198f8d7b55384e7190a2d90b923d5fe9be39ceb1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/empty.json @@ -0,0 +1 @@ +{"Action":"start"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/empty.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/empty.test new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/frame.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/frame.json new file mode 100644 index 0000000000000000000000000000000000000000..9dd29ae97758b4c1e41fb75f11e8cd411ef37d8a --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/frame.json @@ -0,0 +1,10 @@ +{"Action":"start"} +{"Action":"run","Test":"TestAscii"} +{"Action":"output","Test":"TestAscii","Output":"=== RUN TestAscii\n"} +{"Action":"output","Test":"TestAscii","Output":"=== RUN TestNotReally\n"} +{"Action":"output","Test":"TestAscii","Output":"--- PASS: TestAscii\n"} +{"Action":"output","Test":"TestAscii","Output":" i can eat glass, and it doesn't hurt me. i can eat glass, and it doesn't hurt me.\n"} +{"Action":"output","Test":"TestAscii","Output":"FAIL\n"} +{"Action":"pass","Test":"TestAscii"} +{"Action":"output","Output":"PASS\n"} +{"Action":"pass"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/frame.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/frame.test new file mode 100644 index 0000000000000000000000000000000000000000..ec7d4530273b7ac4b62702a74fe4884c04db8c8c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/frame.test @@ -0,0 +1,6 @@ +=== RUN TestAscii +=== RUN TestNotReally +--- PASS: TestAscii + i can eat glass, and it doesn't hurt me. i can eat glass, and it doesn't hurt me. 
+FAIL +PASS diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/framebig.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/framebig.json new file mode 100644 index 0000000000000000000000000000000000000000..54a8a524fe4708a733d62ae60b33438e764ee635 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/framebig.json @@ -0,0 +1,168 @@ +{"Action":"start"} +{"Action":"run","Test":"TestIndex"} +{"Action":"output","Test":"TestIndex","Output":"=== RUN TestIndex\n"} +{"Action":"output","Test":"TestIndex","Output":"--- PASS: TestIndex (0.00s)\n"} +{"Action":"pass","Test":"TestIndex"} +{"Action":"pass","Test":"TestIndex"} +{"Action":"output","Test":"TestIndex","Output":"=== PASS TestIndex\n"} +{"Action":"run","Test":"TestLastIndex"} +{"Action":"output","Test":"TestLastIndex","Output":"=== RUN TestLastIndex\n"} +{"Action":"output","Test":"TestLastIndex","Output":"--- PASS: TestLastIndex (0.00s)\n"} +{"Action":"pass","Test":"TestLastIndex"} +{"Action":"pass","Test":"TestLastIndex"} +{"Action":"output","Test":"TestLastIndex","Output":"=== PASS TestLastIndex\n"} +{"Action":"run","Test":"TestIndexAny"} +{"Action":"output","Test":"TestIndexAny","Output":"=== RUN TestIndexAny\n"} +{"Action":"output","Test":"TestIndexAny","Output":"--- PASS: TestIndexAny (0.00s)\n"} +{"Action":"pass","Test":"TestIndexAny"} +{"Action":"pass","Test":"TestIndexAny"} +{"Action":"output","Test":"TestIndexAny","Output":"=== PASS TestIndexAny\n"} +{"Action":"run","Test":"TestLastIndexAny"} +{"Action":"output","Test":"TestLastIndexAny","Output":"=== RUN TestLastIndexAny\n"} +{"Action":"output","Test":"TestLastIndexAny","Output":"--- PASS: TestLastIndexAny (0.00s)\n"} +{"Action":"pass","Test":"TestLastIndexAny"} +{"Action":"pass","Test":"TestLastIndexAny"} +{"Action":"output","Test":"TestLastIndexAny","Output":"=== PASS TestLastIndexAny\n"} +{"Action":"run","Test":"TestIndexByte"} 
+{"Action":"output","Test":"TestIndexByte","Output":"=== RUN TestIndexByte\n"} +{"Action":"output","Test":"TestIndexByte","Output":"--- PASS: TestIndexByte (0.00s)\n"} +{"Action":"pass","Test":"TestIndexByte"} +{"Action":"pass","Test":"TestIndexByte"} +{"Action":"output","Test":"TestIndexByte","Output":"=== PASS TestIndexByte\n"} +{"Action":"run","Test":"TestLastIndexByte"} +{"Action":"output","Test":"TestLastIndexByte","Output":"=== RUN TestLastIndexByte\n"} +{"Action":"output","Test":"TestLastIndexByte","Output":"--- PASS: TestLastIndexByte (0.00s)\n"} +{"Action":"pass","Test":"TestLastIndexByte"} +{"Action":"pass","Test":"TestLastIndexByte"} +{"Action":"output","Test":"TestLastIndexByte","Output":"=== PASS TestLastIndexByte\n"} +{"Action":"run","Test":"TestIndexRandom"} +{"Action":"output","Test":"TestIndexRandom","Output":"=== RUN TestIndexRandom\n"} +{"Action":"output","Test":"TestIndexRandom","Output":"--- PASS: TestIndexRandom (0.00s)\n"} +{"Action":"pass","Test":"TestIndexRandom"} +{"Action":"pass","Test":"TestIndexRandom"} +{"Action":"output","Test":"TestIndexRandom","Output":"=== PASS TestIndexRandom\n"} +{"Action":"run","Test":"TestIndexRune"} +{"Action":"output","Test":"TestIndexRune","Output":"=== RUN TestIndexRune\n"} +{"Action":"output","Test":"TestIndexRune","Output":"--- PASS: TestIndexRune (0.00s)\n"} +{"Action":"pass","Test":"TestIndexRune"} +{"Action":"pass","Test":"TestIndexRune"} +{"Action":"output","Test":"TestIndexRune","Output":"=== PASS TestIndexRune\n"} +{"Action":"run","Test":"TestIndexFunc"} +{"Action":"output","Test":"TestIndexFunc","Output":"=== RUN TestIndexFunc\n"} +{"Action":"output","Test":"TestIndexFunc","Output":"--- PASS: TestIndexFunc (0.00s)\n"} +{"Action":"pass","Test":"TestIndexFunc"} +{"Action":"pass","Test":"TestIndexFunc"} +{"Action":"output","Test":"TestIndexFunc","Output":"=== PASS TestIndexFunc\n"} +{"Action":"run","Test":"ExampleIndex"} +{"Action":"output","Test":"ExampleIndex","Output":"=== RUN ExampleIndex\n"} 
+{"Action":"output","Test":"ExampleIndex","Output":"--- PASS: ExampleIndex (0.00s)\n"} +{"Action":"pass","Test":"ExampleIndex"} +{"Action":"run","Test":"ExampleIndexFunc"} +{"Action":"output","Test":"ExampleIndexFunc","Output":"=== RUN ExampleIndexFunc\n"} +{"Action":"output","Test":"ExampleIndexFunc","Output":"--- PASS: ExampleIndexFunc (0.00s)\n"} +{"Action":"pass","Test":"ExampleIndexFunc"} +{"Action":"run","Test":"ExampleIndexAny"} +{"Action":"output","Test":"ExampleIndexAny","Output":"=== RUN ExampleIndexAny\n"} +{"Action":"output","Test":"ExampleIndexAny","Output":"--- PASS: ExampleIndexAny (0.00s)\n"} +{"Action":"pass","Test":"ExampleIndexAny"} +{"Action":"run","Test":"ExampleIndexByte"} +{"Action":"output","Test":"ExampleIndexByte","Output":"=== RUN ExampleIndexByte\n"} +{"Action":"output","Test":"ExampleIndexByte","Output":"--- PASS: ExampleIndexByte (0.00s)\n"} +{"Action":"pass","Test":"ExampleIndexByte"} +{"Action":"run","Test":"ExampleIndexRune"} +{"Action":"output","Test":"ExampleIndexRune","Output":"=== RUN ExampleIndexRune\n"} +{"Action":"output","Test":"ExampleIndexRune","Output":"--- PASS: ExampleIndexRune (0.00s)\n"} +{"Action":"pass","Test":"ExampleIndexRune"} +{"Action":"run","Test":"ExampleLastIndex"} +{"Action":"output","Test":"ExampleLastIndex","Output":"=== RUN ExampleLastIndex\n"} +{"Action":"output","Test":"ExampleLastIndex","Output":"--- PASS: ExampleLastIndex (0.00s)\n"} +{"Action":"pass","Test":"ExampleLastIndex"} +{"Action":"run","Test":"ExampleLastIndexAny"} +{"Action":"output","Test":"ExampleLastIndexAny","Output":"=== RUN ExampleLastIndexAny\n"} +{"Action":"output","Test":"ExampleLastIndexAny","Output":"--- PASS: ExampleLastIndexAny (0.00s)\n"} +{"Action":"pass","Test":"ExampleLastIndexAny"} +{"Action":"run","Test":"ExampleLastIndexByte"} +{"Action":"output","Test":"ExampleLastIndexByte","Output":"=== RUN ExampleLastIndexByte\n"} +{"Action":"output","Test":"ExampleLastIndexByte","Output":"--- PASS: ExampleLastIndexByte (0.00s)\n"} 
+{"Action":"pass","Test":"ExampleLastIndexByte"} +{"Action":"run","Test":"ExampleLastIndexFunc"} +{"Action":"output","Test":"ExampleLastIndexFunc","Output":"=== RUN ExampleLastIndexFunc\n"} +{"Action":"output","Test":"ExampleLastIndexFunc","Output":"--- PASS: ExampleLastIndexFunc (0.00s)\n"} +{"Action":"pass","Test":"ExampleLastIndexFunc"} +{"Action":"output","Output":"goos: darwin\n"} +{"Action":"output","Output":"goarch: amd64\n"} +{"Action":"output","Output":"pkg: strings\n"} +{"Action":"output","Output":"cpu: Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz\n"} +{"Action":"run","Test":"BenchmarkIndexRune"} +{"Action":"output","Test":"BenchmarkIndexRune","Output":"=== RUN BenchmarkIndexRune\n"} +{"Action":"output","Test":"BenchmarkIndexRune","Output":"BenchmarkIndexRune\n"} +{"Action":"output","Test":"BenchmarkIndexRune","Output":"BenchmarkIndexRune-16 \t87335496\t 14.27 ns/op\n"} +{"Action":"run","Test":"BenchmarkIndexRuneLongString"} +{"Action":"output","Test":"BenchmarkIndexRuneLongString","Output":"=== RUN BenchmarkIndexRuneLongString\n"} +{"Action":"output","Test":"BenchmarkIndexRuneLongString","Output":"BenchmarkIndexRuneLongString\n"} +{"Action":"output","Test":"BenchmarkIndexRuneLongString","Output":"BenchmarkIndexRuneLongString-16 \t57104472\t 18.66 ns/op\n"} +{"Action":"run","Test":"BenchmarkIndexRuneFastPath"} +{"Action":"output","Test":"BenchmarkIndexRuneFastPath","Output":"=== RUN BenchmarkIndexRuneFastPath\n"} +{"Action":"output","Test":"BenchmarkIndexRuneFastPath","Output":"BenchmarkIndexRuneFastPath\n"} +{"Action":"output","Test":"BenchmarkIndexRuneFastPath","Output":"BenchmarkIndexRuneFastPath-16 \t262380160\t 4.499 ns/op\n"} +{"Action":"run","Test":"BenchmarkIndex"} +{"Action":"output","Test":"BenchmarkIndex","Output":"=== RUN BenchmarkIndex\n"} +{"Action":"output","Test":"BenchmarkIndex","Output":"BenchmarkIndex\n"} +{"Action":"output","Test":"BenchmarkIndex","Output":"BenchmarkIndex-16 \t248529364\t 4.697 ns/op\n"} 
+{"Action":"run","Test":"BenchmarkLastIndex"} +{"Action":"output","Test":"BenchmarkLastIndex","Output":"=== RUN BenchmarkLastIndex\n"} +{"Action":"output","Test":"BenchmarkLastIndex","Output":"BenchmarkLastIndex\n"} +{"Action":"output","Test":"BenchmarkLastIndex","Output":"BenchmarkLastIndex-16 \t293688756\t 4.166 ns/op\n"} +{"Action":"run","Test":"BenchmarkIndexByte"} +{"Action":"output","Test":"BenchmarkIndexByte","Output":"=== RUN BenchmarkIndexByte\n"} +{"Action":"output","Test":"BenchmarkIndexByte","Output":"BenchmarkIndexByte\n"} +{"Action":"output","Test":"BenchmarkIndexByte","Output":"BenchmarkIndexByte-16 \t310338391\t 3.608 ns/op\n"} +{"Action":"run","Test":"BenchmarkIndexHard1"} +{"Action":"output","Test":"BenchmarkIndexHard1","Output":"=== RUN BenchmarkIndexHard1\n"} +{"Action":"output","Test":"BenchmarkIndexHard1","Output":"BenchmarkIndexHard1\n"} +{"Action":"output","Test":"BenchmarkIndexHard1","Output":"BenchmarkIndexHard1-16 \t 12852\t 92380 ns/op\n"} +{"Action":"run","Test":"BenchmarkIndexHard2"} +{"Action":"output","Test":"BenchmarkIndexHard2","Output":"=== RUN BenchmarkIndexHard2\n"} +{"Action":"output","Test":"BenchmarkIndexHard2","Output":"BenchmarkIndexHard2\n"} +{"Action":"output","Test":"BenchmarkIndexHard2","Output":"BenchmarkIndexHard2-16 \t 8977\t 135080 ns/op\n"} +{"Action":"run","Test":"BenchmarkIndexHard3"} +{"Action":"output","Test":"BenchmarkIndexHard3","Output":"=== RUN BenchmarkIndexHard3\n"} +{"Action":"output","Test":"BenchmarkIndexHard3","Output":"BenchmarkIndexHard3\n"} +{"Action":"output","Test":"BenchmarkIndexHard3","Output":"BenchmarkIndexHard3-16 \t 1885\t 532079 ns/op\n"} +{"Action":"run","Test":"BenchmarkIndexHard4"} +{"Action":"output","Test":"BenchmarkIndexHard4","Output":"=== RUN BenchmarkIndexHard4\n"} +{"Action":"output","Test":"BenchmarkIndexHard4","Output":"BenchmarkIndexHard4\n"} +{"Action":"output","Test":"BenchmarkIndexHard4","Output":"BenchmarkIndexHard4-16 \t 2298\t 533435 ns/op\n"} 
+{"Action":"run","Test":"BenchmarkLastIndexHard1"} +{"Action":"output","Test":"BenchmarkLastIndexHard1","Output":"=== RUN BenchmarkLastIndexHard1\n"} +{"Action":"output","Test":"BenchmarkLastIndexHard1","Output":"BenchmarkLastIndexHard1\n"} +{"Action":"output","Test":"BenchmarkLastIndexHard1","Output":"BenchmarkLastIndexHard1-16 \t 813\t 1295767 ns/op\n"} +{"Action":"run","Test":"BenchmarkLastIndexHard2"} +{"Action":"output","Test":"BenchmarkLastIndexHard2","Output":"=== RUN BenchmarkLastIndexHard2\n"} +{"Action":"output","Test":"BenchmarkLastIndexHard2","Output":"BenchmarkLastIndexHard2\n"} +{"Action":"output","Test":"BenchmarkLastIndexHard2","Output":"BenchmarkLastIndexHard2-16 \t 784\t 1389403 ns/op\n"} +{"Action":"run","Test":"BenchmarkLastIndexHard3"} +{"Action":"output","Test":"BenchmarkLastIndexHard3","Output":"=== RUN BenchmarkLastIndexHard3\n"} +{"Action":"output","Test":"BenchmarkLastIndexHard3","Output":"BenchmarkLastIndexHard3\n"} +{"Action":"output","Test":"BenchmarkLastIndexHard3","Output":"BenchmarkLastIndexHard3-16 \t 913\t 1316608 ns/op\n"} +{"Action":"run","Test":"BenchmarkIndexTorture"} +{"Action":"output","Test":"BenchmarkIndexTorture","Output":"=== RUN BenchmarkIndexTorture\n"} +{"Action":"output","Test":"BenchmarkIndexTorture","Output":"BenchmarkIndexTorture\n"} +{"Action":"output","Test":"BenchmarkIndexTorture","Output":"BenchmarkIndexTorture-16 \t 98090\t 10201 ns/op\n"} +{"Action":"run","Test":"BenchmarkIndexAnyASCII"} +{"Action":"output","Test":"BenchmarkIndexAnyASCII","Output":"=== RUN BenchmarkIndexAnyASCII\n"} +{"Action":"output","Test":"BenchmarkIndexAnyASCII","Output":"BenchmarkIndexAnyASCII\n"} +{"Action":"run","Test":"BenchmarkIndexAnyASCII/1:1"} +{"Action":"output","Test":"BenchmarkIndexAnyASCII/1:1","Output":"=== RUN BenchmarkIndexAnyASCII/1:1\n"} +{"Action":"output","Test":"BenchmarkIndexAnyASCII/1:1","Output":"BenchmarkIndexAnyASCII/1:1\n"} 
+{"Action":"output","Test":"BenchmarkIndexAnyASCII/1:1","Output":"BenchmarkIndexAnyASCII/1:1-16 \t214829462\t 5.592 ns/op\n"} +{"Action":"run","Test":"BenchmarkIndexAnyASCII/1:2"} +{"Action":"output","Test":"BenchmarkIndexAnyASCII/1:2","Output":"=== RUN BenchmarkIndexAnyASCII/1:2\n"} +{"Action":"output","Test":"BenchmarkIndexAnyASCII/1:2","Output":"BenchmarkIndexAnyASCII/1:2\n"} +{"Action":"output","Test":"BenchmarkIndexAnyASCII/1:2","Output":"BenchmarkIndexAnyASCII/1:2-16 \t155499682\t 7.214 ns/op\n"} +{"Action":"run","Test":"BenchmarkIndexAnyASCII/1:4"} +{"Action":"output","Test":"BenchmarkIndexAnyASCII/1:4","Output":"=== RUN BenchmarkIndexAnyASCII/1:4\n"} +{"Action":"output","Test":"BenchmarkIndexAnyASCII/1:4","Output":"BenchmarkIndexAnyASCII/1:4\n"} +{"Action":"output","Test":"BenchmarkIndexAnyASCII/1:4","Output":"BenchmarkIndexAnyASCII/1:4-16 \t172757770\t 7.092 ns/op\n"} +{"Action":"output","Output":"PASS\n"} +{"Action":"pass"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/framebig.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/framebig.test new file mode 100644 index 0000000000000000000000000000000000000000..cb0b11a6a08e390b30a978b0fe50b5a182b00bd7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/framebig.test @@ -0,0 +1,138 @@ +=== RUN TestIndex +--- PASS: TestIndex (0.00s) +=== PASS TestIndex +=== NAME +=== RUN TestLastIndex +--- PASS: TestLastIndex (0.00s) +=== PASS TestLastIndex +=== NAME +=== RUN TestIndexAny +--- PASS: TestIndexAny (0.00s) +=== PASS TestIndexAny +=== NAME +=== RUN TestLastIndexAny +--- PASS: TestLastIndexAny (0.00s) +=== PASS TestLastIndexAny +=== NAME +=== RUN TestIndexByte +--- PASS: TestIndexByte (0.00s) +=== PASS TestIndexByte +=== NAME +=== RUN TestLastIndexByte +--- PASS: TestLastIndexByte (0.00s) +=== PASS TestLastIndexByte +=== NAME +=== RUN TestIndexRandom +--- PASS: TestIndexRandom (0.00s) +=== PASS TestIndexRandom +=== NAME +=== 
RUN TestIndexRune +--- PASS: TestIndexRune (0.00s) +=== PASS TestIndexRune +=== NAME +=== RUN TestIndexFunc +--- PASS: TestIndexFunc (0.00s) +=== PASS TestIndexFunc +=== NAME +=== RUN ExampleIndex +--- PASS: ExampleIndex (0.00s) +=== NAME +=== RUN ExampleIndexFunc +--- PASS: ExampleIndexFunc (0.00s) +=== NAME +=== RUN ExampleIndexAny +--- PASS: ExampleIndexAny (0.00s) +=== NAME +=== RUN ExampleIndexByte +--- PASS: ExampleIndexByte (0.00s) +=== NAME +=== RUN ExampleIndexRune +--- PASS: ExampleIndexRune (0.00s) +=== NAME +=== RUN ExampleLastIndex +--- PASS: ExampleLastIndex (0.00s) +=== NAME +=== RUN ExampleLastIndexAny +--- PASS: ExampleLastIndexAny (0.00s) +=== NAME +=== RUN ExampleLastIndexByte +--- PASS: ExampleLastIndexByte (0.00s) +=== NAME +=== RUN ExampleLastIndexFunc +--- PASS: ExampleLastIndexFunc (0.00s) +=== NAME +goos: darwin +goarch: amd64 +pkg: strings +cpu: Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz +=== RUN BenchmarkIndexRune +BenchmarkIndexRune +BenchmarkIndexRune-16 87335496 14.27 ns/op +=== NAME +=== RUN BenchmarkIndexRuneLongString +BenchmarkIndexRuneLongString +BenchmarkIndexRuneLongString-16 57104472 18.66 ns/op +=== NAME +=== RUN BenchmarkIndexRuneFastPath +BenchmarkIndexRuneFastPath +BenchmarkIndexRuneFastPath-16 262380160 4.499 ns/op +=== NAME +=== RUN BenchmarkIndex +BenchmarkIndex +BenchmarkIndex-16 248529364 4.697 ns/op +=== NAME +=== RUN BenchmarkLastIndex +BenchmarkLastIndex +BenchmarkLastIndex-16 293688756 4.166 ns/op +=== NAME +=== RUN BenchmarkIndexByte +BenchmarkIndexByte +BenchmarkIndexByte-16 310338391 3.608 ns/op +=== NAME +=== RUN BenchmarkIndexHard1 +BenchmarkIndexHard1 +BenchmarkIndexHard1-16 12852 92380 ns/op +=== NAME +=== RUN BenchmarkIndexHard2 +BenchmarkIndexHard2 +BenchmarkIndexHard2-16 8977 135080 ns/op +=== NAME +=== RUN BenchmarkIndexHard3 +BenchmarkIndexHard3 +BenchmarkIndexHard3-16 1885 532079 ns/op +=== NAME +=== RUN BenchmarkIndexHard4 +BenchmarkIndexHard4 +BenchmarkIndexHard4-16 2298 533435 ns/op +=== NAME +=== 
RUN BenchmarkLastIndexHard1 +BenchmarkLastIndexHard1 +BenchmarkLastIndexHard1-16 813 1295767 ns/op +=== NAME +=== RUN BenchmarkLastIndexHard2 +BenchmarkLastIndexHard2 +BenchmarkLastIndexHard2-16 784 1389403 ns/op +=== NAME +=== RUN BenchmarkLastIndexHard3 +BenchmarkLastIndexHard3 +BenchmarkLastIndexHard3-16 913 1316608 ns/op +=== NAME +=== RUN BenchmarkIndexTorture +BenchmarkIndexTorture +BenchmarkIndexTorture-16 98090 10201 ns/op +=== NAME +=== RUN BenchmarkIndexAnyASCII +BenchmarkIndexAnyASCII +=== RUN BenchmarkIndexAnyASCII/1:1 +BenchmarkIndexAnyASCII/1:1 +BenchmarkIndexAnyASCII/1:1-16 214829462 5.592 ns/op +=== NAME +=== RUN BenchmarkIndexAnyASCII/1:2 +BenchmarkIndexAnyASCII/1:2 +BenchmarkIndexAnyASCII/1:2-16 155499682 7.214 ns/op +=== NAME +=== RUN BenchmarkIndexAnyASCII/1:4 +BenchmarkIndexAnyASCII/1:4 +BenchmarkIndexAnyASCII/1:4-16 172757770 7.092 ns/op +=== NAME +PASS diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/framefuzz.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/framefuzz.json new file mode 100644 index 0000000000000000000000000000000000000000..25869ee7408d5b82ec34e25388f5f44041c45eff --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/framefuzz.json @@ -0,0 +1,69 @@ +{"Action":"start"} +{"Action":"run","Test":"TestAddrStringAllocs"} +{"Action":"output","Test":"TestAddrStringAllocs","Output":"=== RUN TestAddrStringAllocs\n"} +{"Action":"run","Test":"TestAddrStringAllocs/zero"} +{"Action":"output","Test":"TestAddrStringAllocs/zero","Output":"=== RUN TestAddrStringAllocs/zero\n"} +{"Action":"run","Test":"TestAddrStringAllocs/ipv4"} +{"Action":"output","Test":"TestAddrStringAllocs/ipv4","Output":"=== RUN TestAddrStringAllocs/ipv4\n"} +{"Action":"run","Test":"TestAddrStringAllocs/ipv6"} +{"Action":"output","Test":"TestAddrStringAllocs/ipv6","Output":"=== RUN TestAddrStringAllocs/ipv6\n"} +{"Action":"run","Test":"TestAddrStringAllocs/ipv6+zone"} 
+{"Action":"output","Test":"TestAddrStringAllocs/ipv6+zone","Output":"=== RUN TestAddrStringAllocs/ipv6+zone\n"} +{"Action":"run","Test":"TestAddrStringAllocs/ipv4-in-ipv6"} +{"Action":"output","Test":"TestAddrStringAllocs/ipv4-in-ipv6","Output":"=== RUN TestAddrStringAllocs/ipv4-in-ipv6\n"} +{"Action":"run","Test":"TestAddrStringAllocs/ipv4-in-ipv6+zone"} +{"Action":"output","Test":"TestAddrStringAllocs/ipv4-in-ipv6+zone","Output":"=== RUN TestAddrStringAllocs/ipv4-in-ipv6+zone\n"} +{"Action":"output","Test":"TestAddrStringAllocs","Output":"--- PASS: TestAddrStringAllocs (0.00s)\n"} +{"Action":"output","Test":"TestAddrStringAllocs/zero","Output":" --- PASS: TestAddrStringAllocs/zero (0.00s)\n"} +{"Action":"pass","Test":"TestAddrStringAllocs/zero"} +{"Action":"output","Test":"TestAddrStringAllocs/ipv4","Output":" --- PASS: TestAddrStringAllocs/ipv4 (0.00s)\n"} +{"Action":"pass","Test":"TestAddrStringAllocs/ipv4"} +{"Action":"output","Test":"TestAddrStringAllocs/ipv6","Output":" --- PASS: TestAddrStringAllocs/ipv6 (0.00s)\n"} +{"Action":"pass","Test":"TestAddrStringAllocs/ipv6"} +{"Action":"output","Test":"TestAddrStringAllocs/ipv6+zone","Output":" --- PASS: TestAddrStringAllocs/ipv6+zone (0.00s)\n"} +{"Action":"pass","Test":"TestAddrStringAllocs/ipv6+zone"} +{"Action":"output","Test":"TestAddrStringAllocs/ipv4-in-ipv6","Output":" --- PASS: TestAddrStringAllocs/ipv4-in-ipv6 (0.00s)\n"} +{"Action":"pass","Test":"TestAddrStringAllocs/ipv4-in-ipv6"} +{"Action":"output","Test":"TestAddrStringAllocs/ipv4-in-ipv6+zone","Output":" --- PASS: TestAddrStringAllocs/ipv4-in-ipv6+zone (0.00s)\n"} +{"Action":"pass","Test":"TestAddrStringAllocs/ipv4-in-ipv6+zone"} +{"Action":"pass","Test":"TestAddrStringAllocs"} +{"Action":"run","Test":"TestPrefixString"} +{"Action":"output","Test":"TestPrefixString","Output":"=== RUN TestPrefixString\n"} +{"Action":"output","Test":"TestPrefixString","Output":"--- PASS: TestPrefixString (0.00s)\n"} +{"Action":"pass","Test":"TestPrefixString"} 
+{"Action":"run","Test":"TestInvalidAddrPortString"} +{"Action":"output","Test":"TestInvalidAddrPortString","Output":"=== RUN TestInvalidAddrPortString\n"} +{"Action":"output","Test":"TestInvalidAddrPortString","Output":"--- PASS: TestInvalidAddrPortString (0.00s)\n"} +{"Action":"pass","Test":"TestInvalidAddrPortString"} +{"Action":"run","Test":"TestAsSlice"} +{"Action":"output","Test":"TestAsSlice","Output":"=== RUN TestAsSlice\n"} +{"Action":"output","Test":"TestAsSlice","Output":"--- PASS: TestAsSlice (0.00s)\n"} +{"Action":"pass","Test":"TestAsSlice"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"Addr.string4\"\n"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"Prefix.isZero\"\n"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"IPv4Unspecified\"\n"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"joinHostPort\"\n"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"Addr.MarshalBinary\"\n"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"bePutUint64\"\n"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"mask6\"\n"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"AddrPort.isZero\"\n"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"stringsLastIndexByte\"\n"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"Addr.isZero\"\n"} +{"Action":"output","Test":"TestInlining","Output":" 
inlining_test.go:102: not in expected set, but also inlinable: \"bePutUint32\"\n"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"leUint16\"\n"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"Addr.string6\"\n"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"beUint64\"\n"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"appendHexPad\"\n"} +{"Action":"output","Test":"TestInlining","Output":" inlining_test.go:102: not in expected set, but also inlinable: \"lePutUint16\"\n"} +{"Action":"output","Test":"TestInlining","Output":"--- PASS: TestInlining (0.10s)\n"} +{"Action":"pass","Test":"TestInlining"} +{"Action":"run","Test":"FuzzParse"} +{"Action":"output","Test":"FuzzParse","Output":"=== RUN FuzzParse\n"} +{"Action":"output","Test":"FuzzParse","Output":"fuzz: elapsed: 0s, gathering baseline coverage: 0/390 completed\n"} +{"Action":"output","Test":"FuzzParse","Output":"fuzz: elapsed: 0s, gathering baseline coverage: 390/390 completed, now fuzzing with 16 workers\n"} +{"Action":"output","Test":"FuzzParse","Output":"fuzz: elapsed: 3s, execs: 438666 (146173/sec), new interesting: 12 (total: 402)\n"} +{"Action":"output","Test":"FuzzParse","Output":"\u0003fuzz: elapsed: 4s, execs: 558467 (147850/sec), new interesting: 15 (total: 405)\n"} +{"Action":"output","Test":"FuzzParse","Output":"--- PASS: FuzzParse (3.85s)\n"} +{"Action":"pass","Test":"FuzzParse"} +{"Action":"output","Output":"PASS\n"} +{"Action":"pass"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/framefuzz.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/framefuzz.test new file mode 100644 index 0000000000000000000000000000000000000000..1eb3a125e2776ff5d391c9df9804504be0d1608d --- 
/dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/framefuzz.test @@ -0,0 +1,56 @@ +=== RUN TestAddrStringAllocs +=== RUN TestAddrStringAllocs/zero +=== NAME TestAddrStringAllocs +=== RUN TestAddrStringAllocs/ipv4 +=== NAME TestAddrStringAllocs +=== RUN TestAddrStringAllocs/ipv6 +=== NAME TestAddrStringAllocs +=== RUN TestAddrStringAllocs/ipv6+zone +=== NAME TestAddrStringAllocs +=== RUN TestAddrStringAllocs/ipv4-in-ipv6 +=== NAME TestAddrStringAllocs +=== RUN TestAddrStringAllocs/ipv4-in-ipv6+zone +=== NAME TestAddrStringAllocs +--- PASS: TestAddrStringAllocs (0.00s) + --- PASS: TestAddrStringAllocs/zero (0.00s) + --- PASS: TestAddrStringAllocs/ipv4 (0.00s) + --- PASS: TestAddrStringAllocs/ipv6 (0.00s) + --- PASS: TestAddrStringAllocs/ipv6+zone (0.00s) + --- PASS: TestAddrStringAllocs/ipv4-in-ipv6 (0.00s) + --- PASS: TestAddrStringAllocs/ipv4-in-ipv6+zone (0.00s) +=== NAME +=== RUN TestPrefixString +--- PASS: TestPrefixString (0.00s) +=== NAME +=== RUN TestInvalidAddrPortString +--- PASS: TestInvalidAddrPortString (0.00s) +=== NAME +=== RUN TestAsSlice +--- PASS: TestAsSlice (0.00s) +=== NAME +=== NAME TestInlining + inlining_test.go:102: not in expected set, but also inlinable: "Addr.string4" + inlining_test.go:102: not in expected set, but also inlinable: "Prefix.isZero" + inlining_test.go:102: not in expected set, but also inlinable: "IPv4Unspecified" + inlining_test.go:102: not in expected set, but also inlinable: "joinHostPort" + inlining_test.go:102: not in expected set, but also inlinable: "Addr.MarshalBinary" + inlining_test.go:102: not in expected set, but also inlinable: "bePutUint64" + inlining_test.go:102: not in expected set, but also inlinable: "mask6" + inlining_test.go:102: not in expected set, but also inlinable: "AddrPort.isZero" + inlining_test.go:102: not in expected set, but also inlinable: "stringsLastIndexByte" + inlining_test.go:102: not in expected set, but also inlinable: "Addr.isZero" + 
inlining_test.go:102: not in expected set, but also inlinable: "bePutUint32" + inlining_test.go:102: not in expected set, but also inlinable: "leUint16" + inlining_test.go:102: not in expected set, but also inlinable: "Addr.string6" + inlining_test.go:102: not in expected set, but also inlinable: "beUint64" + inlining_test.go:102: not in expected set, but also inlinable: "appendHexPad" + inlining_test.go:102: not in expected set, but also inlinable: "lePutUint16" +--- PASS: TestInlining (0.10s) +=== RUN FuzzParse +fuzz: elapsed: 0s, gathering baseline coverage: 0/390 completed +fuzz: elapsed: 0s, gathering baseline coverage: 390/390 completed, now fuzzing with 16 workers +fuzz: elapsed: 3s, execs: 438666 (146173/sec), new interesting: 12 (total: 402) +fuzz: elapsed: 4s, execs: 558467 (147850/sec), new interesting: 15 (total: 405) +--- PASS: FuzzParse (3.85s) +=== NAME +PASS diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue23036.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue23036.json new file mode 100644 index 0000000000000000000000000000000000000000..bfdc3e5e0f8fd44aac0402487b59303a4dfdbfbf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue23036.json @@ -0,0 +1,13 @@ +{"Action":"start"} +{"Action":"run","Test":"TestActualCase"} +{"Action":"output","Test":"TestActualCase","Output":"=== RUN TestActualCase\n"} +{"Action":"output","Test":"TestActualCase","Output":"--- FAIL: TestActualCase (0.00s)\n"} +{"Action":"output","Test":"TestActualCase","Output":" foo_test.go:14: Differed.\n"} +{"Action":"output","Test":"TestActualCase","Output":" Expected: MyTest:\n"} +{"Action":"output","Test":"TestActualCase","Output":" --- FAIL: Test output from other tool\n"} +{"Action":"output","Test":"TestActualCase","Output":" Actual: not expected\n"} +{"Action":"fail","Test":"TestActualCase"} +{"Action":"output","Output":"FAIL\n"} +{"Action":"output","Output":"exit status 1\n"} 
+{"Action":"output","Output":"FAIL github.com/org/project/badtest 0.049s\n"} +{"Action":"fail"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue23036.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue23036.test new file mode 100644 index 0000000000000000000000000000000000000000..fd4774f4b63834c9b333e79f68020709e0b7ce50 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue23036.test @@ -0,0 +1,9 @@ +=== RUN TestActualCase +--- FAIL: TestActualCase (0.00s) + foo_test.go:14: Differed. + Expected: MyTest: + --- FAIL: Test output from other tool + Actual: not expected +FAIL +exit status 1 +FAIL github.com/org/project/badtest 0.049s diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue23920.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue23920.json new file mode 100644 index 0000000000000000000000000000000000000000..17e8de6e917c663e249c59597ac695e21ef72bac --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue23920.json @@ -0,0 +1,15 @@ +{"Action":"start"} +{"Action":"run","Test":"TestWithColons"} +{"Action":"output","Test":"TestWithColons","Output":"=== RUN TestWithColons\n"} +{"Action":"run","Test":"TestWithColons/[::1]"} +{"Action":"output","Test":"TestWithColons/[::1]","Output":"=== RUN TestWithColons/[::1]\n"} +{"Action":"run","Test":"TestWithColons/127.0.0.1:0"} +{"Action":"output","Test":"TestWithColons/127.0.0.1:0","Output":"=== RUN TestWithColons/127.0.0.1:0\n"} +{"Action":"output","Test":"TestWithColons","Output":"--- PASS: TestWithColons (0.00s)\n"} +{"Action":"output","Test":"TestWithColons/[::1]","Output":" --- PASS: TestWithColons/[::1] (0.00s)\n"} +{"Action":"pass","Test":"TestWithColons/[::1]"} +{"Action":"output","Test":"TestWithColons/127.0.0.1:0","Output":" --- PASS: TestWithColons/127.0.0.1:0 (0.00s)\n"} 
+{"Action":"pass","Test":"TestWithColons/127.0.0.1:0"} +{"Action":"pass","Test":"TestWithColons"} +{"Action":"output","Output":"PASS\n"} +{"Action":"pass"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue23920.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue23920.test new file mode 100644 index 0000000000000000000000000000000000000000..43bf0580343f489e2d650947cb2cb31b7397d9a6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue23920.test @@ -0,0 +1,7 @@ +=== RUN TestWithColons +=== RUN TestWithColons/[::1] +=== RUN TestWithColons/127.0.0.1:0 +--- PASS: TestWithColons (0.00s) + --- PASS: TestWithColons/[::1] (0.00s) + --- PASS: TestWithColons/127.0.0.1:0 (0.00s) +PASS diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue29755.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue29755.json new file mode 100644 index 0000000000000000000000000000000000000000..c49bf92ce2a49a86f6143281f611945a1bc438ae --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue29755.json @@ -0,0 +1,39 @@ +{"Action":"start"} +{"Action":"run","Test":"TestOutputWithSubtest"} +{"Action":"output","Test":"TestOutputWithSubtest","Output":"=== RUN TestOutputWithSubtest\n"} +{"Action":"run","Test":"TestOutputWithSubtest/sub_test"} +{"Action":"output","Test":"TestOutputWithSubtest/sub_test","Output":"=== RUN TestOutputWithSubtest/sub_test\n"} +{"Action":"run","Test":"TestOutputWithSubtest/sub_test/sub2"} +{"Action":"output","Test":"TestOutputWithSubtest/sub_test/sub2","Output":"=== RUN TestOutputWithSubtest/sub_test/sub2\n"} +{"Action":"run","Test":"TestOutputWithSubtest/sub_test2"} +{"Action":"output","Test":"TestOutputWithSubtest/sub_test2","Output":"=== RUN TestOutputWithSubtest/sub_test2\n"} +{"Action":"run","Test":"TestOutputWithSubtest/sub_test2/sub2"} 
+{"Action":"output","Test":"TestOutputWithSubtest/sub_test2/sub2","Output":"=== RUN TestOutputWithSubtest/sub_test2/sub2\n"} +{"Action":"output","Test":"TestOutputWithSubtest","Output":"--- FAIL: TestOutputWithSubtest (0.00s)\n"} +{"Action":"output","Test":"TestOutputWithSubtest","Output":" foo_test.go:6: output before sub tests\n"} +{"Action":"output","Test":"TestOutputWithSubtest","Output":" foo_test.go:10: output from root test\n"} +{"Action":"output","Test":"TestOutputWithSubtest","Output":" foo_test.go:15: output from root test\n"} +{"Action":"output","Test":"TestOutputWithSubtest/sub_test","Output":" --- PASS: TestOutputWithSubtest/sub_test (0.00s)\n"} +{"Action":"output","Test":"TestOutputWithSubtest/sub_test","Output":" foo_test.go:9: output from sub test\n"} +{"Action":"output","Test":"TestOutputWithSubtest/sub_test","Output":" foo_test.go:11: more output from sub test\n"} +{"Action":"output","Test":"TestOutputWithSubtest/sub_test","Output":" foo_test.go:16: more output from sub test\n"} +{"Action":"output","Test":"TestOutputWithSubtest/sub_test/sub2","Output":" --- PASS: TestOutputWithSubtest/sub_test/sub2 (0.00s)\n"} +{"Action":"output","Test":"TestOutputWithSubtest/sub_test/sub2","Output":" foo_test.go:14: output from sub2 test\n"} +{"Action":"output","Test":"TestOutputWithSubtest","Output":" foo_test.go:22: output from root test\n"} +{"Action":"output","Test":"TestOutputWithSubtest","Output":" foo_test.go:27: output from root test\n"} +{"Action":"pass","Test":"TestOutputWithSubtest/sub_test/sub2"} +{"Action":"pass","Test":"TestOutputWithSubtest/sub_test"} +{"Action":"output","Test":"TestOutputWithSubtest/sub_test2","Output":" --- PASS: TestOutputWithSubtest/sub_test2 (0.00s)\n"} +{"Action":"output","Test":"TestOutputWithSubtest/sub_test2","Output":" foo_test.go:21: output from sub test2\n"} +{"Action":"output","Test":"TestOutputWithSubtest/sub_test2","Output":" foo_test.go:23: more output from sub test2\n"} 
+{"Action":"output","Test":"TestOutputWithSubtest/sub_test2","Output":" foo_test.go:28: more output from sub test2\n"} +{"Action":"output","Test":"TestOutputWithSubtest/sub_test2/sub2","Output":" --- PASS: TestOutputWithSubtest/sub_test2/sub2 (0.00s)\n"} +{"Action":"output","Test":"TestOutputWithSubtest/sub_test2/sub2","Output":" foo_test.go:26: output from sub2 test\n"} +{"Action":"output","Test":"TestOutputWithSubtest","Output":" foo_test.go:32: output after sub test\n"} +{"Action":"pass","Test":"TestOutputWithSubtest/sub_test2/sub2"} +{"Action":"pass","Test":"TestOutputWithSubtest/sub_test2"} +{"Action":"fail","Test":"TestOutputWithSubtest"} +{"Action":"output","Output":"FAIL\n"} +{"Action":"output","Output":"FAIL\tgotest.tools/gotestsum/foo\t0.001s\n"} +{"Action":"output","Output":"FAIL\n"} +{"Action":"fail"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue29755.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue29755.test new file mode 100644 index 0000000000000000000000000000000000000000..b0c596ce45dbbeae83dd145d81be6c0085cb88da --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/issue29755.test @@ -0,0 +1,27 @@ +=== RUN TestOutputWithSubtest +=== RUN TestOutputWithSubtest/sub_test +=== RUN TestOutputWithSubtest/sub_test/sub2 +=== RUN TestOutputWithSubtest/sub_test2 +=== RUN TestOutputWithSubtest/sub_test2/sub2 +--- FAIL: TestOutputWithSubtest (0.00s) + foo_test.go:6: output before sub tests + foo_test.go:10: output from root test + foo_test.go:15: output from root test + --- PASS: TestOutputWithSubtest/sub_test (0.00s) + foo_test.go:9: output from sub test + foo_test.go:11: more output from sub test + foo_test.go:16: more output from sub test + --- PASS: TestOutputWithSubtest/sub_test/sub2 (0.00s) + foo_test.go:14: output from sub2 test + foo_test.go:22: output from root test + foo_test.go:27: output from root test + --- PASS: TestOutputWithSubtest/sub_test2 
(0.00s) + foo_test.go:21: output from sub test2 + foo_test.go:23: more output from sub test2 + foo_test.go:28: more output from sub test2 + --- PASS: TestOutputWithSubtest/sub_test2/sub2 (0.00s) + foo_test.go:26: output from sub2 test + foo_test.go:32: output after sub test +FAIL +FAIL gotest.tools/gotestsum/foo 0.001s +FAIL diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/panic.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/panic.json new file mode 100644 index 0000000000000000000000000000000000000000..1cd43846298b5cc2e970f9476300ffcbf3bddc2b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/panic.json @@ -0,0 +1,20 @@ +{"Action":"start"} +{"Action":"output","Test":"TestPanic","Output":"--- FAIL: TestPanic (0.00s)\n"} +{"Action":"output","Test":"TestPanic","Output":"panic: oops [recovered]\n"} +{"Action":"output","Test":"TestPanic","Output":"\tpanic: oops\n"} +{"Action":"output","Test":"TestPanic","Output":"\n"} +{"Action":"output","Test":"TestPanic","Output":"goroutine 7 [running]:\n"} +{"Action":"output","Test":"TestPanic","Output":"testing.tRunner.func1(0xc000092100)\n"} +{"Action":"output","Test":"TestPanic","Output":"\t/go/src/testing/testing.go:874 +0x3a3\n"} +{"Action":"output","Test":"TestPanic","Output":"panic(0x1110ea0, 0x116aea0)\n"} +{"Action":"output","Test":"TestPanic","Output":"\t/go/src/runtime/panic.go:679 +0x1b2\n"} +{"Action":"output","Test":"TestPanic","Output":"command-line-arguments.TestPanic(0xc000092100)\n"} +{"Action":"output","Test":"TestPanic","Output":"\ta_test.go:6 +0x39\n"} +{"Action":"output","Test":"TestPanic","Output":"testing.tRunner(0xc000092100, 0x114f500)\n"} +{"Action":"output","Test":"TestPanic","Output":"\tgo/src/testing/testing.go:909 +0xc9\n"} +{"Action":"output","Test":"TestPanic","Output":"created by testing.(*T).Run\n"} +{"Action":"output","Test":"TestPanic","Output":"\tgo/src/testing/testing.go:960 +0x350\n"} 
+{"Action":"fail","Test":"TestPanic"} +{"Action":"output","Output":"FAIL\tcommand-line-arguments\t0.042s\n"} +{"Action":"output","Output":"FAIL\n"} +{"Action":"fail"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/panic.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/panic.test new file mode 100644 index 0000000000000000000000000000000000000000..517ebafeb53f45f791ef1a30c2389a00a1b18f48 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/panic.test @@ -0,0 +1,17 @@ +--- FAIL: TestPanic (0.00s) +panic: oops [recovered] + panic: oops + +goroutine 7 [running]: +testing.tRunner.func1(0xc000092100) + /go/src/testing/testing.go:874 +0x3a3 +panic(0x1110ea0, 0x116aea0) + /go/src/runtime/panic.go:679 +0x1b2 +command-line-arguments.TestPanic(0xc000092100) + a_test.go:6 +0x39 +testing.tRunner(0xc000092100, 0x114f500) + go/src/testing/testing.go:909 +0xc9 +created by testing.(*T).Run + go/src/testing/testing.go:960 +0x350 +FAIL command-line-arguments 0.042s +FAIL diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/smiley.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/smiley.json new file mode 100644 index 0000000000000000000000000000000000000000..858843f3af2174b8fa2c9d52ccf611dace9b06a8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/smiley.json @@ -0,0 +1,183 @@ +{"Action":"start"} +{"Action":"run","Test":"Test☺☹"} +{"Action":"output","Test":"Test☺☹","Output":"=== RUN Test☺☹\n"} +{"Action":"output","Test":"Test☺☹","Output":"=== PAUSE Test☺☹\n"} +{"Action":"pause","Test":"Test☺☹"} +{"Action":"run","Test":"Test☺☹Asm"} +{"Action":"output","Test":"Test☺☹Asm","Output":"=== RUN Test☺☹Asm\n"} +{"Action":"output","Test":"Test☺☹Asm","Output":"=== PAUSE Test☺☹Asm\n"} +{"Action":"pause","Test":"Test☺☹Asm"} +{"Action":"run","Test":"Test☺☹Dirs"} +{"Action":"output","Test":"Test☺☹Dirs","Output":"=== RUN 
Test☺☹Dirs\n"} +{"Action":"output","Test":"Test☺☹Dirs","Output":"=== PAUSE Test☺☹Dirs\n"} +{"Action":"pause","Test":"Test☺☹Dirs"} +{"Action":"run","Test":"TestTags"} +{"Action":"output","Test":"TestTags","Output":"=== RUN TestTags\n"} +{"Action":"output","Test":"TestTags","Output":"=== PAUSE TestTags\n"} +{"Action":"pause","Test":"TestTags"} +{"Action":"run","Test":"Test☺☹Verbose"} +{"Action":"output","Test":"Test☺☹Verbose","Output":"=== RUN Test☺☹Verbose\n"} +{"Action":"output","Test":"Test☺☹Verbose","Output":"=== PAUSE Test☺☹Verbose\n"} +{"Action":"pause","Test":"Test☺☹Verbose"} +{"Action":"cont","Test":"Test☺☹"} +{"Action":"output","Test":"Test☺☹","Output":"=== CONT Test☺☹\n"} +{"Action":"cont","Test":"TestTags"} +{"Action":"output","Test":"TestTags","Output":"=== CONT TestTags\n"} +{"Action":"cont","Test":"Test☺☹Verbose"} +{"Action":"output","Test":"Test☺☹Verbose","Output":"=== CONT Test☺☹Verbose\n"} +{"Action":"run","Test":"TestTags/testtag"} +{"Action":"output","Test":"TestTags/testtag","Output":"=== RUN TestTags/testtag\n"} +{"Action":"output","Test":"TestTags/testtag","Output":"=== PAUSE TestTags/testtag\n"} +{"Action":"pause","Test":"TestTags/testtag"} +{"Action":"cont","Test":"Test☺☹Dirs"} +{"Action":"output","Test":"Test☺☹Dirs","Output":"=== CONT Test☺☹Dirs\n"} +{"Action":"cont","Test":"Test☺☹Asm"} +{"Action":"output","Test":"Test☺☹Asm","Output":"=== CONT Test☺☹Asm\n"} +{"Action":"run","Test":"Test☺☹/0"} +{"Action":"output","Test":"Test☺☹/0","Output":"=== RUN Test☺☹/0\n"} +{"Action":"output","Test":"Test☺☹/0","Output":"=== PAUSE Test☺☹/0\n"} +{"Action":"pause","Test":"Test☺☹/0"} +{"Action":"run","Test":"Test☺☹/1"} +{"Action":"output","Test":"Test☺☹/1","Output":"=== RUN Test☺☹/1\n"} +{"Action":"output","Test":"Test☺☹/1","Output":"=== PAUSE Test☺☹/1\n"} +{"Action":"pause","Test":"Test☺☹/1"} +{"Action":"run","Test":"Test☺☹/2"} +{"Action":"output","Test":"Test☺☹/2","Output":"=== RUN Test☺☹/2\n"} +{"Action":"output","Test":"Test☺☹/2","Output":"=== PAUSE 
Test☺☹/2\n"} +{"Action":"pause","Test":"Test☺☹/2"} +{"Action":"run","Test":"Test☺☹/3"} +{"Action":"output","Test":"Test☺☹/3","Output":"=== RUN Test☺☹/3\n"} +{"Action":"output","Test":"Test☺☹/3","Output":"=== PAUSE Test☺☹/3\n"} +{"Action":"pause","Test":"Test☺☹/3"} +{"Action":"run","Test":"Test☺☹/4"} +{"Action":"output","Test":"Test☺☹/4","Output":"=== RUN Test☺☹/4\n"} +{"Action":"run","Test":"TestTags/x_testtag_y"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":"=== RUN TestTags/x_testtag_y\n"} +{"Action":"output","Test":"Test☺☹/4","Output":"=== PAUSE Test☺☹/4\n"} +{"Action":"pause","Test":"Test☺☹/4"} +{"Action":"run","Test":"Test☺☹/5"} +{"Action":"output","Test":"Test☺☹/5","Output":"=== RUN Test☺☹/5\n"} +{"Action":"output","Test":"Test☺☹/5","Output":"=== PAUSE Test☺☹/5\n"} +{"Action":"pause","Test":"Test☺☹/5"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":"=== PAUSE TestTags/x_testtag_y\n"} +{"Action":"pause","Test":"TestTags/x_testtag_y"} +{"Action":"run","Test":"Test☺☹/6"} +{"Action":"output","Test":"Test☺☹/6","Output":"=== RUN Test☺☹/6\n"} +{"Action":"run","Test":"TestTags/x,testtag,y"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":"=== RUN TestTags/x,testtag,y\n"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":"=== PAUSE TestTags/x,testtag,y\n"} +{"Action":"pause","Test":"TestTags/x,testtag,y"} +{"Action":"run","Test":"Test☺☹Dirs/testingpkg"} +{"Action":"output","Test":"Test☺☹Dirs/testingpkg","Output":"=== RUN Test☺☹Dirs/testingpkg\n"} +{"Action":"output","Test":"Test☺☹/6","Output":"=== PAUSE Test☺☹/6\n"} +{"Action":"pause","Test":"Test☺☹/6"} +{"Action":"cont","Test":"TestTags/x,testtag,y"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":"=== CONT TestTags/x,testtag,y\n"} +{"Action":"output","Test":"Test☺☹Dirs/testingpkg","Output":"=== PAUSE Test☺☹Dirs/testingpkg\n"} +{"Action":"pause","Test":"Test☺☹Dirs/testingpkg"} +{"Action":"run","Test":"Test☺☹Dirs/divergent"} 
+{"Action":"output","Test":"Test☺☹Dirs/divergent","Output":"=== RUN Test☺☹Dirs/divergent\n"} +{"Action":"run","Test":"Test☺☹/7"} +{"Action":"output","Test":"Test☺☹/7","Output":"=== RUN Test☺☹/7\n"} +{"Action":"output","Test":"Test☺☹/7","Output":"=== PAUSE Test☺☹/7\n"} +{"Action":"pause","Test":"Test☺☹/7"} +{"Action":"output","Test":"Test☺☹Dirs/divergent","Output":"=== PAUSE Test☺☹Dirs/divergent\n"} +{"Action":"pause","Test":"Test☺☹Dirs/divergent"} +{"Action":"cont","Test":"TestTags/x_testtag_y"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":"=== CONT TestTags/x_testtag_y\n"} +{"Action":"cont","Test":"TestTags/testtag"} +{"Action":"output","Test":"TestTags/testtag","Output":"=== CONT TestTags/testtag\n"} +{"Action":"run","Test":"Test☺☹Dirs/buildtag"} +{"Action":"output","Test":"Test☺☹Dirs/buildtag","Output":"=== RUN Test☺☹Dirs/buildtag\n"} +{"Action":"output","Test":"Test☺☹Dirs/buildtag","Output":"=== PAUSE Test☺☹Dirs/buildtag\n"} +{"Action":"pause","Test":"Test☺☹Dirs/buildtag"} +{"Action":"cont","Test":"Test☺☹/0"} +{"Action":"output","Test":"Test☺☹/0","Output":"=== CONT Test☺☹/0\n"} +{"Action":"cont","Test":"Test☺☹/4"} +{"Action":"output","Test":"Test☺☹/4","Output":"=== CONT Test☺☹/4\n"} +{"Action":"run","Test":"Test☺☹Dirs/incomplete"} +{"Action":"output","Test":"Test☺☹Dirs/incomplete","Output":"=== RUN Test☺☹Dirs/incomplete\n"} +{"Action":"output","Test":"Test☺☹Dirs/incomplete","Output":"=== PAUSE Test☺☹Dirs/incomplete\n"} +{"Action":"pause","Test":"Test☺☹Dirs/incomplete"} +{"Action":"run","Test":"Test☺☹Dirs/cgo"} +{"Action":"output","Test":"Test☺☹Dirs/cgo","Output":"=== RUN Test☺☹Dirs/cgo\n"} +{"Action":"output","Test":"Test☺☹Dirs/cgo","Output":"=== PAUSE Test☺☹Dirs/cgo\n"} +{"Action":"pause","Test":"Test☺☹Dirs/cgo"} +{"Action":"cont","Test":"Test☺☹/7"} +{"Action":"output","Test":"Test☺☹/7","Output":"=== CONT Test☺☹/7\n"} +{"Action":"cont","Test":"Test☺☹/6"} +{"Action":"output","Test":"Test☺☹/6","Output":"=== CONT Test☺☹/6\n"} 
+{"Action":"output","Test":"Test☺☹Verbose","Output":"--- PASS: Test☺☹Verbose (0.04s)\n"} +{"Action":"pass","Test":"Test☺☹Verbose"} +{"Action":"cont","Test":"Test☺☹/5"} +{"Action":"output","Test":"Test☺☹/5","Output":"=== CONT Test☺☹/5\n"} +{"Action":"cont","Test":"Test☺☹/3"} +{"Action":"output","Test":"Test☺☹/3","Output":"=== CONT Test☺☹/3\n"} +{"Action":"cont","Test":"Test☺☹/2"} +{"Action":"output","Test":"Test☺☹/2","Output":"=== CONT Test☺☹/2\n"} +{"Action":"output","Test":"TestTags","Output":"--- PASS: TestTags (0.00s)\n"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":" --- PASS: TestTags/x_testtag_y (0.04s)\n"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":" vet_test.go:187: -tags=x testtag y\n"} +{"Action":"pass","Test":"TestTags/x_testtag_y"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":" --- PASS: TestTags/x,testtag,y (0.04s)\n"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":" vet_test.go:187: -tags=x,testtag,y\n"} +{"Action":"pass","Test":"TestTags/x,testtag,y"} +{"Action":"output","Test":"TestTags/testtag","Output":" --- PASS: TestTags/testtag (0.04s)\n"} +{"Action":"output","Test":"TestTags/testtag","Output":" vet_test.go:187: -tags=testtag\n"} +{"Action":"pass","Test":"TestTags/testtag"} +{"Action":"pass","Test":"TestTags"} +{"Action":"cont","Test":"Test☺☹/1"} +{"Action":"output","Test":"Test☺☹/1","Output":"=== CONT Test☺☹/1\n"} +{"Action":"cont","Test":"Test☺☹Dirs/testingpkg"} +{"Action":"output","Test":"Test☺☹Dirs/testingpkg","Output":"=== CONT Test☺☹Dirs/testingpkg\n"} +{"Action":"cont","Test":"Test☺☹Dirs/buildtag"} +{"Action":"output","Test":"Test☺☹Dirs/buildtag","Output":"=== CONT Test☺☹Dirs/buildtag\n"} +{"Action":"cont","Test":"Test☺☹Dirs/divergent"} +{"Action":"output","Test":"Test☺☹Dirs/divergent","Output":"=== CONT Test☺☹Dirs/divergent\n"} +{"Action":"cont","Test":"Test☺☹Dirs/incomplete"} +{"Action":"output","Test":"Test☺☹Dirs/incomplete","Output":"=== CONT Test☺☹Dirs/incomplete\n"} 
+{"Action":"cont","Test":"Test☺☹Dirs/cgo"} +{"Action":"output","Test":"Test☺☹Dirs/cgo","Output":"=== CONT Test☺☹Dirs/cgo\n"} +{"Action":"output","Test":"Test☺☹","Output":"--- PASS: Test☺☹ (0.39s)\n"} +{"Action":"output","Test":"Test☺☹/5","Output":" --- PASS: Test☺☹/5 (0.07s)\n"} +{"Action":"output","Test":"Test☺☹/5","Output":" vet_test.go:114: φιλεσ: [\"testdata/copylock_func.go\" \"testdata/rangeloop.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/5"} +{"Action":"output","Test":"Test☺☹/3","Output":" --- PASS: Test☺☹/3 (0.07s)\n"} +{"Action":"output","Test":"Test☺☹/3","Output":" vet_test.go:114: φιλεσ: [\"testdata/composite.go\" \"testdata/nilfunc.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/3"} +{"Action":"output","Test":"Test☺☹/6","Output":" --- PASS: Test☺☹/6 (0.07s)\n"} +{"Action":"output","Test":"Test☺☹/6","Output":" vet_test.go:114: φιλεσ: [\"testdata/copylock_range.go\" \"testdata/shadow.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/6"} +{"Action":"output","Test":"Test☺☹/2","Output":" --- PASS: Test☺☹/2 (0.07s)\n"} +{"Action":"output","Test":"Test☺☹/2","Output":" vet_test.go:114: φιλεσ: [\"testdata/bool.go\" \"testdata/method.go\" \"testdata/unused.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/2"} +{"Action":"output","Test":"Test☺☹/0","Output":" --- PASS: Test☺☹/0 (0.13s)\n"} +{"Action":"output","Test":"Test☺☹/0","Output":" vet_test.go:114: φιλεσ: [\"testdata/assign.go\" \"testdata/httpresponse.go\" \"testdata/structtag.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/0"} +{"Action":"output","Test":"Test☺☹/4","Output":" --- PASS: Test☺☹/4 (0.16s)\n"} +{"Action":"output","Test":"Test☺☹/4","Output":" vet_test.go:114: φιλεσ: [\"testdata/copylock.go\" \"testdata/print.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/4"} +{"Action":"output","Test":"Test☺☹/1","Output":" --- PASS: Test☺☹/1 (0.07s)\n"} +{"Action":"output","Test":"Test☺☹/1","Output":" vet_test.go:114: φιλεσ: [\"testdata/atomic.go\" \"testdata/lostcancel.go\" \"testdata/unsafeptr.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/1"} 
+{"Action":"output","Test":"Test☺☹/7","Output":" --- PASS: Test☺☹/7 (0.19s)\n"} +{"Action":"output","Test":"Test☺☹/7","Output":" vet_test.go:114: φιλεσ: [\"testdata/deadcode.go\" \"testdata/shift.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/7"} +{"Action":"pass","Test":"Test☺☹"} +{"Action":"output","Test":"Test☺☹Dirs","Output":"--- PASS: Test☺☹Dirs (0.01s)\n"} +{"Action":"output","Test":"Test☺☹Dirs/testingpkg","Output":" --- PASS: Test☺☹Dirs/testingpkg (0.06s)\n"} +{"Action":"pass","Test":"Test☺☹Dirs/testingpkg"} +{"Action":"output","Test":"Test☺☹Dirs/divergent","Output":" --- PASS: Test☺☹Dirs/divergent (0.05s)\n"} +{"Action":"pass","Test":"Test☺☹Dirs/divergent"} +{"Action":"output","Test":"Test☺☹Dirs/buildtag","Output":" --- PASS: Test☺☹Dirs/buildtag (0.06s)\n"} +{"Action":"pass","Test":"Test☺☹Dirs/buildtag"} +{"Action":"output","Test":"Test☺☹Dirs/incomplete","Output":" --- PASS: Test☺☹Dirs/incomplete (0.05s)\n"} +{"Action":"pass","Test":"Test☺☹Dirs/incomplete"} +{"Action":"output","Test":"Test☺☹Dirs/cgo","Output":" --- PASS: Test☺☹Dirs/cgo (0.04s)\n"} +{"Action":"pass","Test":"Test☺☹Dirs/cgo"} +{"Action":"pass","Test":"Test☺☹Dirs"} +{"Action":"output","Test":"Test☺☹Asm","Output":"--- PASS: Test☺☹Asm (0.75s)\n"} +{"Action":"pass","Test":"Test☺☹Asm"} +{"Action":"output","Output":"PASS\n"} +{"Action":"output","Output":"ok \tcmd/vet\t(cached)\n"} +{"Action":"pass"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/smiley.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/smiley.test new file mode 100644 index 0000000000000000000000000000000000000000..bd1ed2dd9a9d9a9daef06a21f846c1215f3eceeb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/smiley.test @@ -0,0 +1,97 @@ +=== RUN Test☺☹ +=== PAUSE Test☺☹ +=== RUN Test☺☹Asm +=== PAUSE Test☺☹Asm +=== RUN Test☺☹Dirs +=== PAUSE Test☺☹Dirs +=== RUN TestTags +=== PAUSE TestTags +=== RUN Test☺☹Verbose +=== PAUSE Test☺☹Verbose +=== CONT Test☺☹ 
+=== CONT TestTags +=== CONT Test☺☹Verbose +=== RUN TestTags/testtag +=== PAUSE TestTags/testtag +=== CONT Test☺☹Dirs +=== CONT Test☺☹Asm +=== RUN Test☺☹/0 +=== PAUSE Test☺☹/0 +=== RUN Test☺☹/1 +=== PAUSE Test☺☹/1 +=== RUN Test☺☹/2 +=== PAUSE Test☺☹/2 +=== RUN Test☺☹/3 +=== PAUSE Test☺☹/3 +=== RUN Test☺☹/4 +=== RUN TestTags/x_testtag_y +=== PAUSE Test☺☹/4 +=== RUN Test☺☹/5 +=== PAUSE Test☺☹/5 +=== PAUSE TestTags/x_testtag_y +=== RUN Test☺☹/6 +=== RUN TestTags/x,testtag,y +=== PAUSE TestTags/x,testtag,y +=== RUN Test☺☹Dirs/testingpkg +=== PAUSE Test☺☹/6 +=== CONT TestTags/x,testtag,y +=== PAUSE Test☺☹Dirs/testingpkg +=== RUN Test☺☹Dirs/divergent +=== RUN Test☺☹/7 +=== PAUSE Test☺☹/7 +=== PAUSE Test☺☹Dirs/divergent +=== CONT TestTags/x_testtag_y +=== CONT TestTags/testtag +=== RUN Test☺☹Dirs/buildtag +=== PAUSE Test☺☹Dirs/buildtag +=== CONT Test☺☹/0 +=== CONT Test☺☹/4 +=== RUN Test☺☹Dirs/incomplete +=== PAUSE Test☺☹Dirs/incomplete +=== RUN Test☺☹Dirs/cgo +=== PAUSE Test☺☹Dirs/cgo +=== CONT Test☺☹/7 +=== CONT Test☺☹/6 +--- PASS: Test☺☹Verbose (0.04s) +=== CONT Test☺☹/5 +=== CONT Test☺☹/3 +=== CONT Test☺☹/2 +--- PASS: TestTags (0.00s) + --- PASS: TestTags/x_testtag_y (0.04s) + vet_test.go:187: -tags=x testtag y + --- PASS: TestTags/x,testtag,y (0.04s) + vet_test.go:187: -tags=x,testtag,y + --- PASS: TestTags/testtag (0.04s) + vet_test.go:187: -tags=testtag +=== CONT Test☺☹/1 +=== CONT Test☺☹Dirs/testingpkg +=== CONT Test☺☹Dirs/buildtag +=== CONT Test☺☹Dirs/divergent +=== CONT Test☺☹Dirs/incomplete +=== CONT Test☺☹Dirs/cgo +--- PASS: Test☺☹ (0.39s) + --- PASS: Test☺☹/5 (0.07s) + vet_test.go:114: φιλεσ: ["testdata/copylock_func.go" "testdata/rangeloop.go"] + --- PASS: Test☺☹/3 (0.07s) + vet_test.go:114: φιλεσ: ["testdata/composite.go" "testdata/nilfunc.go"] + --- PASS: Test☺☹/6 (0.07s) + vet_test.go:114: φιλεσ: ["testdata/copylock_range.go" "testdata/shadow.go"] + --- PASS: Test☺☹/2 (0.07s) + vet_test.go:114: φιλεσ: ["testdata/bool.go" "testdata/method.go" 
"testdata/unused.go"] + --- PASS: Test☺☹/0 (0.13s) + vet_test.go:114: φιλεσ: ["testdata/assign.go" "testdata/httpresponse.go" "testdata/structtag.go"] + --- PASS: Test☺☹/4 (0.16s) + vet_test.go:114: φιλεσ: ["testdata/copylock.go" "testdata/print.go"] + --- PASS: Test☺☹/1 (0.07s) + vet_test.go:114: φιλεσ: ["testdata/atomic.go" "testdata/lostcancel.go" "testdata/unsafeptr.go"] + --- PASS: Test☺☹/7 (0.19s) + vet_test.go:114: φιλεσ: ["testdata/deadcode.go" "testdata/shift.go"] +--- PASS: Test☺☹Dirs (0.01s) + --- PASS: Test☺☹Dirs/testingpkg (0.06s) + --- PASS: Test☺☹Dirs/divergent (0.05s) + --- PASS: Test☺☹Dirs/buildtag (0.06s) + --- PASS: Test☺☹Dirs/incomplete (0.05s) + --- PASS: Test☺☹Dirs/cgo (0.04s) +--- PASS: Test☺☹Asm (0.75s) +PASS +ok cmd/vet (cached) diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/timeout.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/timeout.json new file mode 100644 index 0000000000000000000000000000000000000000..dc225262f7c3263f88c8e37db10eba6cc868dd29 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/timeout.json @@ -0,0 +1,8 @@ +{"Action":"start"} +{"Action":"run","Test":"Test"} +{"Action":"output","Test":"Test","Output":"=== RUN Test\n"} +{"Action":"output","Test":"Test","Output":"panic: test timed out after 1s\n"} +{"Action":"output","Test":"Test","Output":"\n"} +{"Action":"output","Output":"FAIL\tp\t1.111s\n"} +{"Action":"output","Output":"FAIL\n"} +{"Action":"fail"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/timeout.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/timeout.test new file mode 100644 index 0000000000000000000000000000000000000000..7f3debf37d9ac7ddcaf1c31b1998f31f83703b0e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/timeout.test @@ -0,0 +1,5 @@ +=== RUN Test +panic: test timed out after 1s + +FAIL p 1.111s +FAIL diff --git 
a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/unicode.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/unicode.json new file mode 100644 index 0000000000000000000000000000000000000000..2cc3e7322ddf486078164117c879127842de911d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/unicode.json @@ -0,0 +1,11 @@ +{"Action":"start"} +{"Action":"run","Test":"TestUnicode"} +{"Action":"output","Test":"TestUnicode","Output":"=== RUN TestUnicode\n"} +{"Action":"output","Test":"TestUnicode","Output":"Μπορώ να φάω σπασμένα γυαλιά χωρίς να πάθω τίποτα. Μπορώ να φάω σπασμένα γυαλιά χωρίς να πάθω τίποτα.\n"} +{"Action":"output","Test":"TestUnicode","Output":"私はガラスを食べられます。それは私を傷つけません。私はガラスを食べられます。それは私を傷つけません。\n"} +{"Action":"output","Test":"TestUnicode","Output":"--- PASS: TestUnicode\n"} +{"Action":"output","Test":"TestUnicode","Output":" ฉันกินกระจกได้ แต่มันไม่ทำให้ฉันเจ็บ ฉันกินกระจกได้ แต่มันไม่ทำให้ฉันเจ็บ\n"} +{"Action":"output","Test":"TestUnicode","Output":" אני יכול לאכול זכוכית וזה לא מזיק לי. אני יכול לאכול זכוכית וזה לא מזיק לי.\n"} +{"Action":"pass","Test":"TestUnicode"} +{"Action":"output","Output":"PASS\n"} +{"Action":"pass"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/unicode.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/unicode.test new file mode 100644 index 0000000000000000000000000000000000000000..58c620d5f743cb679d232041455689e83f59f580 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/unicode.test @@ -0,0 +1,7 @@ +=== RUN TestUnicode +Μπορώ να φάω σπασμένα γυαλιά χωρίς να πάθω τίποτα. Μπορώ να φάω σπασμένα γυαλιά χωρίς να πάθω τίποτα. +私はガラスを食べられます。それは私を傷つけません。私はガラスを食べられます。それは私を傷つけません。 +--- PASS: TestUnicode + ฉันกินกระจกได้ แต่มันไม่ทำให้ฉันเจ็บ ฉันกินกระจกได้ แต่มันไม่ทำให้ฉันเจ็บ + אני יכול לאכול זכוכית וזה לא מזיק לי. אני יכול לאכול זכוכית וזה לא מזיק לי. 
+PASS diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/vet.json b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/vet.json new file mode 100644 index 0000000000000000000000000000000000000000..5b09104522af94af01d0224d58c5de043700314b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/vet.json @@ -0,0 +1,183 @@ +{"Action":"start"} +{"Action":"run","Test":"TestVet"} +{"Action":"output","Test":"TestVet","Output":"=== RUN TestVet\n"} +{"Action":"output","Test":"TestVet","Output":"=== PAUSE TestVet\n"} +{"Action":"pause","Test":"TestVet"} +{"Action":"run","Test":"TestVetAsm"} +{"Action":"output","Test":"TestVetAsm","Output":"=== RUN TestVetAsm\n"} +{"Action":"output","Test":"TestVetAsm","Output":"=== PAUSE TestVetAsm\n"} +{"Action":"pause","Test":"TestVetAsm"} +{"Action":"run","Test":"TestVetDirs"} +{"Action":"output","Test":"TestVetDirs","Output":"=== RUN TestVetDirs\n"} +{"Action":"output","Test":"TestVetDirs","Output":"=== PAUSE TestVetDirs\n"} +{"Action":"pause","Test":"TestVetDirs"} +{"Action":"run","Test":"TestTags"} +{"Action":"output","Test":"TestTags","Output":"=== RUN TestTags\n"} +{"Action":"output","Test":"TestTags","Output":"=== PAUSE TestTags\n"} +{"Action":"pause","Test":"TestTags"} +{"Action":"run","Test":"TestVetVerbose"} +{"Action":"output","Test":"TestVetVerbose","Output":"=== RUN TestVetVerbose\n"} +{"Action":"output","Test":"TestVetVerbose","Output":"=== PAUSE TestVetVerbose\n"} +{"Action":"pause","Test":"TestVetVerbose"} +{"Action":"cont","Test":"TestVet"} +{"Action":"output","Test":"TestVet","Output":"=== CONT TestVet\n"} +{"Action":"cont","Test":"TestTags"} +{"Action":"output","Test":"TestTags","Output":"=== CONT TestTags\n"} +{"Action":"cont","Test":"TestVetVerbose"} +{"Action":"output","Test":"TestVetVerbose","Output":"=== CONT TestVetVerbose\n"} +{"Action":"run","Test":"TestTags/testtag"} +{"Action":"output","Test":"TestTags/testtag","Output":"=== RUN 
TestTags/testtag\n"} +{"Action":"output","Test":"TestTags/testtag","Output":"=== PAUSE TestTags/testtag\n"} +{"Action":"pause","Test":"TestTags/testtag"} +{"Action":"cont","Test":"TestVetDirs"} +{"Action":"output","Test":"TestVetDirs","Output":"=== CONT TestVetDirs\n"} +{"Action":"cont","Test":"TestVetAsm"} +{"Action":"output","Test":"TestVetAsm","Output":"=== CONT TestVetAsm\n"} +{"Action":"run","Test":"TestVet/0"} +{"Action":"output","Test":"TestVet/0","Output":"=== RUN TestVet/0\n"} +{"Action":"output","Test":"TestVet/0","Output":"=== PAUSE TestVet/0\n"} +{"Action":"pause","Test":"TestVet/0"} +{"Action":"run","Test":"TestVet/1"} +{"Action":"output","Test":"TestVet/1","Output":"=== RUN TestVet/1\n"} +{"Action":"output","Test":"TestVet/1","Output":"=== PAUSE TestVet/1\n"} +{"Action":"pause","Test":"TestVet/1"} +{"Action":"run","Test":"TestVet/2"} +{"Action":"output","Test":"TestVet/2","Output":"=== RUN TestVet/2\n"} +{"Action":"output","Test":"TestVet/2","Output":"=== PAUSE TestVet/2\n"} +{"Action":"pause","Test":"TestVet/2"} +{"Action":"run","Test":"TestVet/3"} +{"Action":"output","Test":"TestVet/3","Output":"=== RUN TestVet/3\n"} +{"Action":"output","Test":"TestVet/3","Output":"=== PAUSE TestVet/3\n"} +{"Action":"pause","Test":"TestVet/3"} +{"Action":"run","Test":"TestVet/4"} +{"Action":"output","Test":"TestVet/4","Output":"=== RUN TestVet/4\n"} +{"Action":"run","Test":"TestTags/x_testtag_y"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":"=== RUN TestTags/x_testtag_y\n"} +{"Action":"output","Test":"TestVet/4","Output":"=== PAUSE TestVet/4\n"} +{"Action":"pause","Test":"TestVet/4"} +{"Action":"run","Test":"TestVet/5"} +{"Action":"output","Test":"TestVet/5","Output":"=== RUN TestVet/5\n"} +{"Action":"output","Test":"TestVet/5","Output":"=== PAUSE TestVet/5\n"} +{"Action":"pause","Test":"TestVet/5"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":"=== PAUSE TestTags/x_testtag_y\n"} +{"Action":"pause","Test":"TestTags/x_testtag_y"} 
+{"Action":"run","Test":"TestVet/6"} +{"Action":"output","Test":"TestVet/6","Output":"=== RUN TestVet/6\n"} +{"Action":"run","Test":"TestTags/x,testtag,y"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":"=== RUN TestTags/x,testtag,y\n"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":"=== PAUSE TestTags/x,testtag,y\n"} +{"Action":"pause","Test":"TestTags/x,testtag,y"} +{"Action":"run","Test":"TestVetDirs/testingpkg"} +{"Action":"output","Test":"TestVetDirs/testingpkg","Output":"=== RUN TestVetDirs/testingpkg\n"} +{"Action":"output","Test":"TestVet/6","Output":"=== PAUSE TestVet/6\n"} +{"Action":"pause","Test":"TestVet/6"} +{"Action":"cont","Test":"TestTags/x,testtag,y"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":"=== CONT TestTags/x,testtag,y\n"} +{"Action":"output","Test":"TestVetDirs/testingpkg","Output":"=== PAUSE TestVetDirs/testingpkg\n"} +{"Action":"pause","Test":"TestVetDirs/testingpkg"} +{"Action":"run","Test":"TestVetDirs/divergent"} +{"Action":"output","Test":"TestVetDirs/divergent","Output":"=== RUN TestVetDirs/divergent\n"} +{"Action":"run","Test":"TestVet/7"} +{"Action":"output","Test":"TestVet/7","Output":"=== RUN TestVet/7\n"} +{"Action":"output","Test":"TestVet/7","Output":"=== PAUSE TestVet/7\n"} +{"Action":"pause","Test":"TestVet/7"} +{"Action":"output","Test":"TestVetDirs/divergent","Output":"=== PAUSE TestVetDirs/divergent\n"} +{"Action":"pause","Test":"TestVetDirs/divergent"} +{"Action":"cont","Test":"TestTags/x_testtag_y"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":"=== CONT TestTags/x_testtag_y\n"} +{"Action":"cont","Test":"TestTags/testtag"} +{"Action":"output","Test":"TestTags/testtag","Output":"=== CONT TestTags/testtag\n"} +{"Action":"run","Test":"TestVetDirs/buildtag"} +{"Action":"output","Test":"TestVetDirs/buildtag","Output":"=== RUN TestVetDirs/buildtag\n"} +{"Action":"output","Test":"TestVetDirs/buildtag","Output":"=== PAUSE TestVetDirs/buildtag\n"} 
+{"Action":"pause","Test":"TestVetDirs/buildtag"} +{"Action":"cont","Test":"TestVet/0"} +{"Action":"output","Test":"TestVet/0","Output":"=== CONT TestVet/0\n"} +{"Action":"cont","Test":"TestVet/4"} +{"Action":"output","Test":"TestVet/4","Output":"=== CONT TestVet/4\n"} +{"Action":"run","Test":"TestVetDirs/incomplete"} +{"Action":"output","Test":"TestVetDirs/incomplete","Output":"=== RUN TestVetDirs/incomplete\n"} +{"Action":"output","Test":"TestVetDirs/incomplete","Output":"=== PAUSE TestVetDirs/incomplete\n"} +{"Action":"pause","Test":"TestVetDirs/incomplete"} +{"Action":"run","Test":"TestVetDirs/cgo"} +{"Action":"output","Test":"TestVetDirs/cgo","Output":"=== RUN TestVetDirs/cgo\n"} +{"Action":"output","Test":"TestVetDirs/cgo","Output":"=== PAUSE TestVetDirs/cgo\n"} +{"Action":"pause","Test":"TestVetDirs/cgo"} +{"Action":"cont","Test":"TestVet/7"} +{"Action":"output","Test":"TestVet/7","Output":"=== CONT TestVet/7\n"} +{"Action":"cont","Test":"TestVet/6"} +{"Action":"output","Test":"TestVet/6","Output":"=== CONT TestVet/6\n"} +{"Action":"output","Test":"TestVetVerbose","Output":"--- PASS: TestVetVerbose (0.04s)\n"} +{"Action":"pass","Test":"TestVetVerbose"} +{"Action":"cont","Test":"TestVet/5"} +{"Action":"output","Test":"TestVet/5","Output":"=== CONT TestVet/5\n"} +{"Action":"cont","Test":"TestVet/3"} +{"Action":"output","Test":"TestVet/3","Output":"=== CONT TestVet/3\n"} +{"Action":"cont","Test":"TestVet/2"} +{"Action":"output","Test":"TestVet/2","Output":"=== CONT TestVet/2\n"} +{"Action":"output","Test":"TestTags","Output":"--- PASS: TestTags (0.00s)\n"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":" --- PASS: TestTags/x_testtag_y (0.04s)\n"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":" vet_test.go:187: -tags=x testtag y\n"} +{"Action":"pass","Test":"TestTags/x_testtag_y"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":" --- PASS: TestTags/x,testtag,y (0.04s)\n"} 
+{"Action":"output","Test":"TestTags/x,testtag,y","Output":" vet_test.go:187: -tags=x,testtag,y\n"} +{"Action":"pass","Test":"TestTags/x,testtag,y"} +{"Action":"output","Test":"TestTags/testtag","Output":" --- PASS: TestTags/testtag (0.04s)\n"} +{"Action":"output","Test":"TestTags/testtag","Output":" vet_test.go:187: -tags=testtag\n"} +{"Action":"pass","Test":"TestTags/testtag"} +{"Action":"pass","Test":"TestTags"} +{"Action":"cont","Test":"TestVet/1"} +{"Action":"output","Test":"TestVet/1","Output":"=== CONT TestVet/1\n"} +{"Action":"cont","Test":"TestVetDirs/testingpkg"} +{"Action":"output","Test":"TestVetDirs/testingpkg","Output":"=== CONT TestVetDirs/testingpkg\n"} +{"Action":"cont","Test":"TestVetDirs/buildtag"} +{"Action":"output","Test":"TestVetDirs/buildtag","Output":"=== CONT TestVetDirs/buildtag\n"} +{"Action":"cont","Test":"TestVetDirs/divergent"} +{"Action":"output","Test":"TestVetDirs/divergent","Output":"=== CONT TestVetDirs/divergent\n"} +{"Action":"cont","Test":"TestVetDirs/incomplete"} +{"Action":"output","Test":"TestVetDirs/incomplete","Output":"=== CONT TestVetDirs/incomplete\n"} +{"Action":"cont","Test":"TestVetDirs/cgo"} +{"Action":"output","Test":"TestVetDirs/cgo","Output":"=== CONT TestVetDirs/cgo\n"} +{"Action":"output","Test":"TestVet","Output":"--- PASS: TestVet (0.39s)\n"} +{"Action":"output","Test":"TestVet/5","Output":" --- PASS: TestVet/5 (0.07s)\n"} +{"Action":"output","Test":"TestVet/5","Output":" vet_test.go:114: files: [\"testdata/copylock_func.go\" \"testdata/rangeloop.go\"]\n"} +{"Action":"pass","Test":"TestVet/5"} +{"Action":"output","Test":"TestVet/3","Output":" --- PASS: TestVet/3 (0.07s)\n"} +{"Action":"output","Test":"TestVet/3","Output":" vet_test.go:114: files: [\"testdata/composite.go\" \"testdata/nilfunc.go\"]\n"} +{"Action":"pass","Test":"TestVet/3"} +{"Action":"output","Test":"TestVet/6","Output":" --- PASS: TestVet/6 (0.07s)\n"} +{"Action":"output","Test":"TestVet/6","Output":" vet_test.go:114: files: 
[\"testdata/copylock_range.go\" \"testdata/shadow.go\"]\n"} +{"Action":"pass","Test":"TestVet/6"} +{"Action":"output","Test":"TestVet/2","Output":" --- PASS: TestVet/2 (0.07s)\n"} +{"Action":"output","Test":"TestVet/2","Output":" vet_test.go:114: files: [\"testdata/bool.go\" \"testdata/method.go\" \"testdata/unused.go\"]\n"} +{"Action":"pass","Test":"TestVet/2"} +{"Action":"output","Test":"TestVet/0","Output":" --- PASS: TestVet/0 (0.13s)\n"} +{"Action":"output","Test":"TestVet/0","Output":" vet_test.go:114: files: [\"testdata/assign.go\" \"testdata/httpresponse.go\" \"testdata/structtag.go\"]\n"} +{"Action":"pass","Test":"TestVet/0"} +{"Action":"output","Test":"TestVet/4","Output":" --- PASS: TestVet/4 (0.16s)\n"} +{"Action":"output","Test":"TestVet/4","Output":" vet_test.go:114: files: [\"testdata/copylock.go\" \"testdata/print.go\"]\n"} +{"Action":"pass","Test":"TestVet/4"} +{"Action":"output","Test":"TestVet/1","Output":" --- PASS: TestVet/1 (0.07s)\n"} +{"Action":"output","Test":"TestVet/1","Output":" vet_test.go:114: files: [\"testdata/atomic.go\" \"testdata/lostcancel.go\" \"testdata/unsafeptr.go\"]\n"} +{"Action":"pass","Test":"TestVet/1"} +{"Action":"output","Test":"TestVet/7","Output":" --- PASS: TestVet/7 (0.19s)\n"} +{"Action":"output","Test":"TestVet/7","Output":" vet_test.go:114: files: [\"testdata/deadcode.go\" \"testdata/shift.go\"]\n"} +{"Action":"pass","Test":"TestVet/7"} +{"Action":"pass","Test":"TestVet"} +{"Action":"output","Test":"TestVetDirs","Output":"--- PASS: TestVetDirs (0.01s)\n"} +{"Action":"output","Test":"TestVetDirs/testingpkg","Output":" --- PASS: TestVetDirs/testingpkg (0.06s)\n"} +{"Action":"pass","Test":"TestVetDirs/testingpkg"} +{"Action":"output","Test":"TestVetDirs/divergent","Output":" --- PASS: TestVetDirs/divergent (0.05s)\n"} +{"Action":"pass","Test":"TestVetDirs/divergent"} +{"Action":"output","Test":"TestVetDirs/buildtag","Output":" --- PASS: TestVetDirs/buildtag (0.06s)\n"} 
+{"Action":"pass","Test":"TestVetDirs/buildtag"} +{"Action":"output","Test":"TestVetDirs/incomplete","Output":" --- PASS: TestVetDirs/incomplete (0.05s)\n"} +{"Action":"pass","Test":"TestVetDirs/incomplete"} +{"Action":"output","Test":"TestVetDirs/cgo","Output":" --- PASS: TestVetDirs/cgo (0.04s)\n"} +{"Action":"pass","Test":"TestVetDirs/cgo"} +{"Action":"pass","Test":"TestVetDirs"} +{"Action":"output","Test":"TestVetAsm","Output":"--- PASS: TestVetAsm (0.75s)\n"} +{"Action":"pass","Test":"TestVetAsm"} +{"Action":"output","Output":"PASS\n"} +{"Action":"output","Output":"ok \tcmd/vet\t(cached)\n"} +{"Action":"pass"} diff --git a/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/vet.test b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/vet.test new file mode 100644 index 0000000000000000000000000000000000000000..59d187e0a33b637e695f25e1838f6a8e97ef2c0f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/internal/test2json/testdata/vet.test @@ -0,0 +1,97 @@ +=== RUN TestVet +=== PAUSE TestVet +=== RUN TestVetAsm +=== PAUSE TestVetAsm +=== RUN TestVetDirs +=== PAUSE TestVetDirs +=== RUN TestTags +=== PAUSE TestTags +=== RUN TestVetVerbose +=== PAUSE TestVetVerbose +=== CONT TestVet +=== CONT TestTags +=== CONT TestVetVerbose +=== RUN TestTags/testtag +=== PAUSE TestTags/testtag +=== CONT TestVetDirs +=== CONT TestVetAsm +=== RUN TestVet/0 +=== PAUSE TestVet/0 +=== RUN TestVet/1 +=== PAUSE TestVet/1 +=== RUN TestVet/2 +=== PAUSE TestVet/2 +=== RUN TestVet/3 +=== PAUSE TestVet/3 +=== RUN TestVet/4 +=== RUN TestTags/x_testtag_y +=== PAUSE TestVet/4 +=== RUN TestVet/5 +=== PAUSE TestVet/5 +=== PAUSE TestTags/x_testtag_y +=== RUN TestVet/6 +=== RUN TestTags/x,testtag,y +=== PAUSE TestTags/x,testtag,y +=== RUN TestVetDirs/testingpkg +=== PAUSE TestVet/6 +=== CONT TestTags/x,testtag,y +=== PAUSE TestVetDirs/testingpkg +=== RUN TestVetDirs/divergent +=== RUN TestVet/7 +=== PAUSE TestVet/7 +=== PAUSE TestVetDirs/divergent +=== CONT 
TestTags/x_testtag_y +=== CONT TestTags/testtag +=== RUN TestVetDirs/buildtag +=== PAUSE TestVetDirs/buildtag +=== CONT TestVet/0 +=== CONT TestVet/4 +=== RUN TestVetDirs/incomplete +=== PAUSE TestVetDirs/incomplete +=== RUN TestVetDirs/cgo +=== PAUSE TestVetDirs/cgo +=== CONT TestVet/7 +=== CONT TestVet/6 +--- PASS: TestVetVerbose (0.04s) +=== CONT TestVet/5 +=== CONT TestVet/3 +=== CONT TestVet/2 +--- PASS: TestTags (0.00s) + --- PASS: TestTags/x_testtag_y (0.04s) + vet_test.go:187: -tags=x testtag y + --- PASS: TestTags/x,testtag,y (0.04s) + vet_test.go:187: -tags=x,testtag,y + --- PASS: TestTags/testtag (0.04s) + vet_test.go:187: -tags=testtag +=== CONT TestVet/1 +=== CONT TestVetDirs/testingpkg +=== CONT TestVetDirs/buildtag +=== CONT TestVetDirs/divergent +=== CONT TestVetDirs/incomplete +=== CONT TestVetDirs/cgo +--- PASS: TestVet (0.39s) + --- PASS: TestVet/5 (0.07s) + vet_test.go:114: files: ["testdata/copylock_func.go" "testdata/rangeloop.go"] + --- PASS: TestVet/3 (0.07s) + vet_test.go:114: files: ["testdata/composite.go" "testdata/nilfunc.go"] + --- PASS: TestVet/6 (0.07s) + vet_test.go:114: files: ["testdata/copylock_range.go" "testdata/shadow.go"] + --- PASS: TestVet/2 (0.07s) + vet_test.go:114: files: ["testdata/bool.go" "testdata/method.go" "testdata/unused.go"] + --- PASS: TestVet/0 (0.13s) + vet_test.go:114: files: ["testdata/assign.go" "testdata/httpresponse.go" "testdata/structtag.go"] + --- PASS: TestVet/4 (0.16s) + vet_test.go:114: files: ["testdata/copylock.go" "testdata/print.go"] + --- PASS: TestVet/1 (0.07s) + vet_test.go:114: files: ["testdata/atomic.go" "testdata/lostcancel.go" "testdata/unsafeptr.go"] + --- PASS: TestVet/7 (0.19s) + vet_test.go:114: files: ["testdata/deadcode.go" "testdata/shift.go"] +--- PASS: TestVetDirs (0.01s) + --- PASS: TestVetDirs/testingpkg (0.06s) + --- PASS: TestVetDirs/divergent (0.05s) + --- PASS: TestVetDirs/buildtag (0.06s) + --- PASS: TestVetDirs/incomplete (0.05s) + --- PASS: TestVetDirs/cgo (0.04s) +--- 
PASS: TestVetAsm (0.75s) +PASS +ok cmd/vet (cached) diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/amd64/asm.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/amd64/asm.go new file mode 100644 index 0000000000000000000000000000000000000000..9da0541f5263c26d12740cb279074fa859740f83 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/amd64/asm.go @@ -0,0 +1,707 @@ +// Inferno utils/6l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package amd64 + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" + "log" +) + +func PADDR(x uint32) uint32 { + return x &^ 0x80000000 +} + +func gentext(ctxt *ld.Link, ldr *loader.Loader) { + initfunc, addmoduledata := ld.PrepareAddmoduledata(ctxt) + if initfunc == nil { + return + } + + o := func(op ...uint8) { + for _, op1 := range op { + initfunc.AddUint8(op1) + } + } + + // 0000000000000000 : + // 0: 48 8d 3d 00 00 00 00 lea 0x0(%rip),%rdi # 7 + // 3: R_X86_64_PC32 runtime.firstmoduledata-0x4 + o(0x48, 0x8d, 0x3d) + initfunc.AddPCRelPlus(ctxt.Arch, ctxt.Moduledata, 0) + // 7: e8 00 00 00 00 callq c + // 8: R_X86_64_PLT32 runtime.addmoduledata-0x4 + o(0xe8) + initfunc.AddSymRef(ctxt.Arch, addmoduledata, 0, objabi.R_CALL, 4) + // c: c3 retq + o(0xc3) +} + +func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { + targ := r.Sym() + var targType sym.SymKind + if targ != 0 { + targType = ldr.SymType(targ) + } + + switch rt := r.Type(); rt { + default: + if rt >= objabi.ElfRelocOffset { + ldr.Errorf(s, "unexpected relocation type %d (%s)", r.Type(), sym.RelocName(target.Arch, r.Type())) + return false + } + + // Handle relocations found in ELF object files. 
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_X86_64_PC32): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected R_X86_64_PC32 relocation for dynamic symbol %s", ldr.SymName(targ)) + } + if targType == 0 || targType == sym.SXREF { + ldr.Errorf(s, "unknown symbol %s in pcrel", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocAdd(rIdx, r.Add()+4) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_X86_64_PC64): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected R_X86_64_PC64 relocation for dynamic symbol %s", ldr.SymName(targ)) + } + if targType == 0 || targType == sym.SXREF { + ldr.Errorf(s, "unknown symbol %s in pcrel", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocAdd(rIdx, r.Add()+8) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_X86_64_PLT32): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocAdd(rIdx, r.Add()+4) + if targType == sym.SDYNIMPORT { + addpltsym(target, ldr, syms, targ) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymPlt(targ))) + } + + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_X86_64_GOTPCREL), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_X86_64_GOTPCRELX), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_X86_64_REX_GOTPCRELX): + su := ldr.MakeSymbolUpdater(s) + if targType != sym.SDYNIMPORT { + // have symbol + sData := ldr.Data(s) + if r.Off() >= 2 && sData[r.Off()-2] == 0x8b { + su.MakeWritable() + // turn MOVQ of GOT entry into LEAQ of symbol itself + writeableData := su.Data() + writeableData[r.Off()-2] = 0x8d + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocAdd(rIdx, r.Add()+4) + return true + } + } + + // fall back to using GOT and hope for the best (CMOV*) + // TODO: just needs relocation, no need to put in .dynsym + ld.AddGotSym(target, ldr, syms, targ, 
uint32(elf.R_X86_64_GLOB_DAT)) + + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+4+int64(ldr.SymGot(targ))) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_X86_64_64): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected R_X86_64_64 relocation for dynamic symbol %s", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ADDR) + if target.IsPIE() && target.IsInternal() { + // For internal linking PIE, this R_ADDR relocation cannot + // be resolved statically. We need to generate a dynamic + // relocation. Let the code below handle it. + break + } + return true + + // Handle relocations found in Mach-O object files. + case objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_UNSIGNED*2 + 0, + objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_SIGNED*2 + 0, + objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_BRANCH*2 + 0: + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ADDR) + + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected reloc for dynamic symbol %s", ldr.SymName(targ)) + } + if target.IsPIE() && target.IsInternal() { + // For internal linking PIE, this R_ADDR relocation cannot + // be resolved statically. We need to generate a dynamic + // relocation. Let the code below handle it. + if rt == objabi.MachoRelocOffset+ld.MACHO_X86_64_RELOC_UNSIGNED*2 { + break + } else { + // MACHO_X86_64_RELOC_SIGNED or MACHO_X86_64_RELOC_BRANCH + // Can this happen? The object is expected to be PIC. 
+ ldr.Errorf(s, "unsupported relocation for PIE: %v", rt) + } + } + return true + + case objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_BRANCH*2 + 1: + if targType == sym.SDYNIMPORT { + addpltsym(target, ldr, syms, targ) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocAdd(rIdx, int64(ldr.SymPlt(targ))) + return true + } + fallthrough + + case objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_UNSIGNED*2 + 1, + objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_SIGNED*2 + 1, + objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_SIGNED_1*2 + 1, + objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_SIGNED_2*2 + 1, + objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_SIGNED_4*2 + 1: + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected pc-relative reloc for dynamic symbol %s", ldr.SymName(targ)) + } + return true + + case objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_GOT_LOAD*2 + 1: + if targType != sym.SDYNIMPORT { + // have symbol + // turn MOVQ of GOT entry into LEAQ of symbol itself + sdata := ldr.Data(s) + if r.Off() < 2 || sdata[r.Off()-2] != 0x8b { + ldr.Errorf(s, "unexpected GOT_LOAD reloc for non-dynamic symbol %s", ldr.SymName(targ)) + return false + } + + su := ldr.MakeSymbolUpdater(s) + su.MakeWritable() + sdata = su.Data() + sdata[r.Off()-2] = 0x8d + su.SetRelocType(rIdx, objabi.R_PCREL) + return true + } + fallthrough + + case objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_GOT*2 + 1: + if targType != sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected GOT reloc for non-dynamic symbol %s", ldr.SymName(targ)) + } + ld.AddGotSym(target, ldr, syms, targ, 0) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ))) + return true + } + + // Reread the reloc to incorporate any changes in type above. 
+ relocs := ldr.Relocs(s) + r = relocs.At(rIdx) + + switch r.Type() { + case objabi.R_CALL: + if targType != sym.SDYNIMPORT { + // nothing to do, the relocation will be laid out in reloc + return true + } + if target.IsExternal() { + // External linker will do this relocation. + return true + } + // Internal linking, for both ELF and Mach-O. + // Build a PLT entry and change the relocation target to that entry. + addpltsym(target, ldr, syms, targ) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, int64(ldr.SymPlt(targ))) + return true + + case objabi.R_PCREL: + if targType == sym.SDYNIMPORT && ldr.SymType(s) == sym.STEXT && target.IsDarwin() { + // Loading the address of a dynamic symbol. Rewrite to use GOT. + // turn LEAQ symbol address to MOVQ of GOT entry + if r.Add() != 0 { + ldr.Errorf(s, "unexpected nonzero addend for dynamic symbol %s", ldr.SymName(targ)) + return false + } + su := ldr.MakeSymbolUpdater(s) + if r.Off() >= 2 && su.Data()[r.Off()-2] == 0x8d { + su.MakeWritable() + su.Data()[r.Off()-2] = 0x8b + if target.IsInternal() { + ld.AddGotSym(target, ldr, syms, targ, 0) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, int64(ldr.SymGot(targ))) + } else { + su.SetRelocType(rIdx, objabi.R_GOTPCREL) + } + return true + } + ldr.Errorf(s, "unexpected R_PCREL reloc for dynamic symbol %s: not preceded by LEAQ instruction", ldr.SymName(targ)) + } + + case objabi.R_ADDR: + if ldr.SymType(s) == sym.STEXT && target.IsElf() { + su := ldr.MakeSymbolUpdater(s) + if target.IsSolaris() { + addpltsym(target, ldr, syms, targ) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymPlt(targ))) + return true + } + // The code is asking for the address of an external + // function. We provide it with the address of the + // correspondent GOT symbol. 
+ ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_X86_64_GLOB_DAT)) + + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ))) + return true + } + + // Process dynamic relocations for the data sections. + if target.IsPIE() && target.IsInternal() { + // When internally linking, generate dynamic relocations + // for all typical R_ADDR relocations. The exception + // are those R_ADDR that are created as part of generating + // the dynamic relocations and must be resolved statically. + // + // There are three phases relevant to understanding this: + // + // dodata() // we are here + // address() // symbol address assignment + // reloc() // resolution of static R_ADDR relocs + // + // At this point symbol addresses have not been + // assigned yet (as the final size of the .rela section + // will affect the addresses), and so we cannot write + // the Elf64_Rela.r_offset now. Instead we delay it + // until after the 'address' phase of the linker is + // complete. We do this via Addaddrplus, which creates + // a new R_ADDR relocation which will be resolved in + // the 'reloc' phase. + // + // These synthetic static R_ADDR relocs must be skipped + // now, or else we will be caught in an infinite loop + // of generating synthetic relocs for our synthetic + // relocs. + // + // Furthermore, the rela sections contain dynamic + // relocations with R_ADDR relocations on + // Elf64_Rela.r_offset. This field should contain the + // symbol offset as determined by reloc(), not the + // final dynamically linked address as a dynamic + // relocation would provide. 
+ switch ldr.SymName(s) { + case ".dynsym", ".rela", ".rela.plt", ".got.plt", ".dynamic": + return false + } + } else { + // Either internally linking a static executable, + // in which case we can resolve these relocations + // statically in the 'reloc' phase, or externally + // linking, in which case the relocation will be + // prepared in the 'reloc' phase and passed to the + // external linker in the 'asmb' phase. + if ldr.SymType(s) != sym.SDATA && ldr.SymType(s) != sym.SRODATA { + break + } + } + + if target.IsElf() { + // Generate R_X86_64_RELATIVE relocations for best + // efficiency in the dynamic linker. + // + // As noted above, symbol addresses have not been + // assigned yet, so we can't generate the final reloc + // entry yet. We ultimately want: + // + // r_offset = s + r.Off + // r_info = R_X86_64_RELATIVE + // r_addend = targ + r.Add + // + // The dynamic linker will set *offset = base address + + // addend. + // + // AddAddrPlus is used for r_offset and r_addend to + // generate new R_ADDR relocations that will update + // these fields in the 'reloc' phase. + rela := ldr.MakeSymbolUpdater(syms.Rela) + rela.AddAddrPlus(target.Arch, s, int64(r.Off())) + if r.Siz() == 8 { + rela.AddUint64(target.Arch, elf.R_INFO(0, uint32(elf.R_X86_64_RELATIVE))) + } else { + ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ)) + } + rela.AddAddrPlus(target.Arch, targ, int64(r.Add())) + // Not mark r done here. So we still apply it statically, + // so in the file content we'll also have the right offset + // to the relocation target. So it can be examined statically + // (e.g. go version). + return true + } + + if target.IsDarwin() { + // Mach-O relocations are a royal pain to lay out. + // They use a compact stateful bytecode representation. + // Here we record what are needed and encode them later. + ld.MachoAddRebase(s, int64(r.Off())) + // Not mark r done here. 
So we still apply it statically, + // so in the file content we'll also have the right offset + // to the relocation target. So it can be examined statically + // (e.g. go version). + return true + } + case objabi.R_GOTPCREL: + if target.IsExternal() { + // External linker will do this relocation. + return true + } + // We only need to handle external linking mode, as R_GOTPCREL can + // only occur in plugin or shared build modes. + } + + return false +} + +func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool { + out.Write64(uint64(sectoff)) + + elfsym := ld.ElfSymForReloc(ctxt, r.Xsym) + siz := r.Size + switch r.Type { + default: + return false + case objabi.R_ADDR, objabi.R_DWARFSECREF: + if siz == 4 { + out.Write64(uint64(elf.R_X86_64_32) | uint64(elfsym)<<32) + } else if siz == 8 { + out.Write64(uint64(elf.R_X86_64_64) | uint64(elfsym)<<32) + } else { + return false + } + case objabi.R_TLS_LE: + if siz == 4 { + out.Write64(uint64(elf.R_X86_64_TPOFF32) | uint64(elfsym)<<32) + } else { + return false + } + case objabi.R_TLS_IE: + if siz == 4 { + out.Write64(uint64(elf.R_X86_64_GOTTPOFF) | uint64(elfsym)<<32) + } else { + return false + } + case objabi.R_CALL: + if siz == 4 { + if ldr.SymType(r.Xsym) == sym.SDYNIMPORT { + out.Write64(uint64(elf.R_X86_64_PLT32) | uint64(elfsym)<<32) + } else { + out.Write64(uint64(elf.R_X86_64_PC32) | uint64(elfsym)<<32) + } + } else { + return false + } + case objabi.R_PCREL: + if siz == 4 { + if ldr.SymType(r.Xsym) == sym.SDYNIMPORT && ldr.SymElfType(r.Xsym) == elf.STT_FUNC { + out.Write64(uint64(elf.R_X86_64_PLT32) | uint64(elfsym)<<32) + } else { + out.Write64(uint64(elf.R_X86_64_PC32) | uint64(elfsym)<<32) + } + } else { + return false + } + case objabi.R_GOTPCREL: + if siz == 4 { + out.Write64(uint64(elf.R_X86_64_GOTPCREL) | uint64(elfsym)<<32) + } else { + return false + } + } + + out.Write64(uint64(r.Xadd)) + return true +} + +func machoreloc1(arch 
*sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, sectoff int64) bool { + var v uint32 + + rs := r.Xsym + rt := r.Type + + if !ldr.SymType(s).IsDWARF() { + if ldr.SymDynid(rs) < 0 { + ldr.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs)) + return false + } + + v = uint32(ldr.SymDynid(rs)) + v |= 1 << 27 // external relocation + } else { + v = uint32(ldr.SymSect(rs).Extnum) + if v == 0 { + ldr.Errorf(s, "reloc %d (%s) to symbol %s in non-macho section %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymSect(rs).Name, ldr.SymType(rs), ldr.SymType(rs)) + return false + } + } + + switch rt { + default: + return false + + case objabi.R_ADDR: + v |= ld.MACHO_X86_64_RELOC_UNSIGNED << 28 + + case objabi.R_CALL: + v |= 1 << 24 // pc-relative bit + v |= ld.MACHO_X86_64_RELOC_BRANCH << 28 + + // NOTE: Only works with 'external' relocation. Forced above. + case objabi.R_PCREL: + v |= 1 << 24 // pc-relative bit + v |= ld.MACHO_X86_64_RELOC_SIGNED << 28 + case objabi.R_GOTPCREL: + v |= 1 << 24 // pc-relative bit + v |= ld.MACHO_X86_64_RELOC_GOT_LOAD << 28 + } + + switch r.Size { + default: + return false + + case 1: + v |= 0 << 25 + + case 2: + v |= 1 << 25 + + case 4: + v |= 2 << 25 + + case 8: + v |= 3 << 25 + } + + out.Write32(uint32(sectoff)) + out.Write32(v) + return true +} + +func pereloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, sectoff int64) bool { + var v uint32 + + rs := r.Xsym + rt := r.Type + + if ldr.SymDynid(rs) < 0 { + ldr.Errorf(s, "reloc %d (%s) to non-coff symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs)) + return false + } + + out.Write32(uint32(sectoff)) + out.Write32(uint32(ldr.SymDynid(rs))) + + switch rt { + default: + return false + + case objabi.R_DWARFSECREF: + v = ld.IMAGE_REL_AMD64_SECREL + + case objabi.R_ADDR: 
+ if r.Size == 8 { + v = ld.IMAGE_REL_AMD64_ADDR64 + } else { + v = ld.IMAGE_REL_AMD64_ADDR32 + } + + case objabi.R_PEIMAGEOFF: + v = ld.IMAGE_REL_AMD64_ADDR32NB + + case objabi.R_CALL, + objabi.R_PCREL: + v = ld.IMAGE_REL_AMD64_REL32 + } + + out.Write16(uint16(v)) + + return true +} + +func archreloc(*ld.Target, *loader.Loader, *ld.ArchSyms, loader.Reloc, loader.Sym, int64) (int64, int, bool) { + return -1, 0, false +} + +func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { + log.Fatalf("unexpected relocation variant") + return -1 +} + +func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, got *loader.SymbolBuilder, dynamic loader.Sym) { + if plt.Size() == 0 { + // pushq got+8(IP) + plt.AddUint8(0xff) + + plt.AddUint8(0x35) + plt.AddPCRelPlus(ctxt.Arch, got.Sym(), 8) + + // jmpq got+16(IP) + plt.AddUint8(0xff) + + plt.AddUint8(0x25) + plt.AddPCRelPlus(ctxt.Arch, got.Sym(), 16) + + // nopl 0(AX) + plt.AddUint32(ctxt.Arch, 0x00401f0f) + + // assume got->size == 0 too + got.AddAddrPlus(ctxt.Arch, dynamic, 0) + + got.AddUint64(ctxt.Arch, 0) + got.AddUint64(ctxt.Arch, 0) + } +} + +func addpltsym(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym) { + if ldr.SymPlt(s) >= 0 { + return + } + + ld.Adddynsym(ldr, target, syms, s) + + if target.IsElf() { + plt := ldr.MakeSymbolUpdater(syms.PLT) + got := ldr.MakeSymbolUpdater(syms.GOTPLT) + rela := ldr.MakeSymbolUpdater(syms.RelaPLT) + if plt.Size() == 0 { + panic("plt is not set up") + } + + // jmpq *got+size(IP) + plt.AddUint8(0xff) + + plt.AddUint8(0x25) + plt.AddPCRelPlus(target.Arch, got.Sym(), got.Size()) + + // add to got: pointer to current pos in plt + got.AddAddrPlus(target.Arch, plt.Sym(), plt.Size()) + + // pushq $x + plt.AddUint8(0x68) + + plt.AddUint32(target.Arch, uint32((got.Size()-24-8)/8)) + + // jmpq .plt + plt.AddUint8(0xe9) + + plt.AddUint32(target.Arch, uint32(-(plt.Size() + 4))) + + // rela + 
rela.AddAddrPlus(target.Arch, got.Sym(), got.Size()-8) + + sDynid := ldr.SymDynid(s) + rela.AddUint64(target.Arch, elf.R_INFO(uint32(sDynid), uint32(elf.R_X86_64_JMP_SLOT))) + rela.AddUint64(target.Arch, 0) + + ldr.SetPlt(s, int32(plt.Size()-16)) + } else if target.IsDarwin() { + ld.AddGotSym(target, ldr, syms, s, 0) + + sDynid := ldr.SymDynid(s) + lep := ldr.MakeSymbolUpdater(syms.LinkEditPLT) + lep.AddUint32(target.Arch, uint32(sDynid)) + + plt := ldr.MakeSymbolUpdater(syms.PLT) + ldr.SetPlt(s, int32(plt.Size())) + + // jmpq *got+size(IP) + plt.AddUint8(0xff) + plt.AddUint8(0x25) + plt.AddPCRelPlus(target.Arch, syms.GOT, int64(ldr.SymGot(s))) + } else { + ldr.Errorf(s, "addpltsym: unsupported binary format") + } +} + +func tlsIEtoLE(P []byte, off, size int) { + // Transform the PC-relative instruction into a constant load. + // That is, + // + // MOVQ X(IP), REG -> MOVQ $Y, REG + // + // To determine the instruction and register, we study the op codes. + // Consult an AMD64 instruction encoding guide to decipher this. + if off < 3 { + log.Fatal("R_X86_64_GOTTPOFF reloc not preceded by MOVQ or ADDQ instruction") + } + op := P[off-3 : off] + reg := op[2] >> 3 + + if op[1] == 0x8b || reg == 4 { + // MOVQ + if op[0] == 0x4c { + op[0] = 0x49 + } else if size == 4 && op[0] == 0x44 { + op[0] = 0x41 + } + if op[1] == 0x8b { + op[1] = 0xc7 + } else { + op[1] = 0x81 // special case for SP + } + op[2] = 0xc0 | reg + } else { + // An alternate op is ADDQ. This is handled by GNU gold, + // but right now is not generated by the Go compiler: + // ADDQ X(IP), REG -> ADDQ $Y, REG + // Consider adding support for it here. 
+ log.Fatalf("expected TLS IE op to be MOVQ, got %v", op) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/amd64/l.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/amd64/l.go new file mode 100644 index 0000000000000000000000000000000000000000..c9ea90a9c811b60059698e2ddcce146e78439d0f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/amd64/l.go @@ -0,0 +1,43 @@ +// Inferno utils/6l/l.h +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/l.h +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package amd64 + +const ( + maxAlign = 32 // max data alignment + minAlign = 1 // min data alignment + funcAlign = 32 +) + +/* Used by ../internal/ld/dwarf.go */ +const ( + dwarfRegSP = 7 + dwarfRegLR = 16 +) diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/amd64/obj.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/amd64/obj.go new file mode 100644 index 0000000000000000000000000000000000000000..3a6141b9091eb6068821c9bfbcea3265491bd0a1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/amd64/obj.go @@ -0,0 +1,125 @@ +// Inferno utils/6l/obj.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/obj.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package amd64 + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" +) + +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchAMD64 + + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, + // 0xCC is INT $3 - breakpoint instruction + CodePad: []byte{0xCC}, + + Plan9Magic: uint32(4*26*26 + 7), + Plan9_64Bit: true, + + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Gentext: gentext, + Machoreloc1: machoreloc1, + MachorelocSize: 8, + PEreloc1: pereloc1, + TLSIEtoLE: tlsIEtoLE, + + ELF: ld.ELFArch{ + Linuxdynld: "/lib64/ld-linux-x86-64.so.2", + LinuxdynldMusl: "/lib/ld-musl-x86_64.so.1", + Freebsddynld: "/libexec/ld-elf.so.1", + Openbsddynld: "/usr/libexec/ld.so", + Netbsddynld: "/libexec/ld.elf_so", + Dragonflydynld: "/usr/libexec/ld-elf.so.2", + Solarisdynld: "/lib/amd64/ld.so.1", + + Reloc1: elfreloc1, + RelocSize: 24, + SetupPLT: elfsetupplt, + }, + } + + return arch, theArch +} + +func archinit(ctxt *ld.Link) { + switch ctxt.HeadType { + default: + ld.Exitf("unknown -H option: %v", ctxt.HeadType) + + case objabi.Hplan9: /* plan 9 */ + ld.HEADR = 32 + 8 + if *ld.FlagRound == -1 { + *ld.FlagRound = 0x200000 + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = ld.Rnd(0x200000, *ld.FlagRound) + int64(ld.HEADR) + } + + case objabi.Hdarwin: /* apple MACH */ + ld.HEADR = ld.INITIAL_MACHO_HEADR + if *ld.FlagRound == -1 { + *ld.FlagRound = 4096 + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = ld.Rnd(0x1000000, *ld.FlagRound) + int64(ld.HEADR) + } + + case objabi.Hlinux, /* elf64 executable */ + objabi.Hfreebsd, /* freebsd */ + objabi.Hnetbsd, /* netbsd */ + objabi.Hopenbsd, /* openbsd */ + objabi.Hdragonfly, /* dragonfly */ + objabi.Hsolaris: /* solaris */ + ld.Elfinit(ctxt) + + ld.HEADR = ld.ELFRESERVE + if *ld.FlagRound == -1 { + *ld.FlagRound = 4096 + } + if *ld.FlagTextAddr == -1 { 
+ *ld.FlagTextAddr = ld.Rnd(1<<22, *ld.FlagRound) + int64(ld.HEADR) + } + + case objabi.Hwindows: /* PE executable */ + // ld.HEADR, ld.FlagTextAddr, ld.FlagRound are set in ld.Peinit + return + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/arm/asm.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/arm/asm.go new file mode 100644 index 0000000000000000000000000000000000000000..0443e49197c195cc1eb824b0e668731422ff9b7e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/arm/asm.go @@ -0,0 +1,697 @@ +// Inferno utils/5l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package arm + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" + "fmt" + "log" +) + +// This assembler: +// +// .align 2 +// local.dso_init: +// ldr r0, .Lmoduledata +// .Lloadfrom: +// ldr r0, [r0] +// b runtime.addmoduledata@plt +// .align 2 +// .Lmoduledata: +// .word local.moduledata(GOT_PREL) + (. - (.Lloadfrom + 4)) +// assembles to: +// +// 00000000 : +// 0: e59f0004 ldr r0, [pc, #4] ; c +// 4: e5900000 ldr r0, [r0] +// 8: eafffffe b 0 +// 8: R_ARM_JUMP24 runtime.addmoduledata +// c: 00000004 .word 0x00000004 +// c: R_ARM_GOT_PREL local.moduledata + +func gentext(ctxt *ld.Link, ldr *loader.Loader) { + initfunc, addmoduledata := ld.PrepareAddmoduledata(ctxt) + if initfunc == nil { + return + } + + o := func(op uint32) { + initfunc.AddUint32(ctxt.Arch, op) + } + o(0xe59f0004) + o(0xe08f0000) + + o(0xeafffffe) + rel, _ := initfunc.AddRel(objabi.R_CALLARM) + rel.SetOff(8) + rel.SetSiz(4) + rel.SetSym(addmoduledata) + rel.SetAdd(0xeafffffe) // vomit + + o(0x00000000) + + rel2, _ := initfunc.AddRel(objabi.R_PCREL) + rel2.SetOff(12) + rel2.SetSiz(4) + rel2.SetSym(ctxt.Moduledata) + rel2.SetAdd(4) +} + +// Preserve highest 8 bits of a, and do addition to lower 24-bit +// of a and b; used to adjust ARM branch instruction's target. 
+func braddoff(a int32, b int32) int32 { + return int32((uint32(a))&0xff000000 | 0x00ffffff&uint32(a+b)) +} + +func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { + + targ := r.Sym() + var targType sym.SymKind + if targ != 0 { + targType = ldr.SymType(targ) + } + + switch r.Type() { + default: + if r.Type() >= objabi.ElfRelocOffset { + ldr.Errorf(s, "unexpected relocation type %d (%s)", r.Type(), sym.RelocName(target.Arch, r.Type())) + return false + } + + // Handle relocations found in ELF object files. + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_PLT32): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_CALLARM) + + if targType == sym.SDYNIMPORT { + addpltsym(target, ldr, syms, targ) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, int64(braddoff(int32(r.Add()), ldr.SymPlt(targ)/4))) + } + + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_THM_PC22): // R_ARM_THM_CALL + ld.Exitf("R_ARM_THM_CALL, are you using -marm?") + return false + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_GOT32): // R_ARM_GOT_BREL + if targType != sym.SDYNIMPORT { + addgotsyminternal(target, ldr, syms, targ) + } else { + ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_ARM_GLOB_DAT)) + } + + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_CONST) // write r->add during relocsym + su.SetRelocSym(rIdx, 0) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ))) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_GOT_PREL): // GOT(nil) + A - nil + if targType != sym.SDYNIMPORT { + addgotsyminternal(target, ldr, syms, targ) + } else { + ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_ARM_GLOB_DAT)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+4+int64(ldr.SymGot(targ))) + return true + + case objabi.ElfRelocOffset + 
objabi.RelocType(elf.R_ARM_GOTOFF): // R_ARM_GOTOFF32 + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_GOTOFF) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_GOTPC): // R_ARM_BASE_PREL + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+4) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_CALL): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_CALLARM) + if targType == sym.SDYNIMPORT { + addpltsym(target, ldr, syms, targ) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, int64(braddoff(int32(r.Add()), ldr.SymPlt(targ)/4))) + } + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_REL32): // R_ARM_REL32 + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocAdd(rIdx, r.Add()+4) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_ABS32): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected R_ARM_ABS32 relocation for dynamic symbol %s", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ADDR) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_PC24), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_JUMP24): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_CALLARM) + if targType == sym.SDYNIMPORT { + addpltsym(target, ldr, syms, targ) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, int64(braddoff(int32(r.Add()), ldr.SymPlt(targ)/4))) + } + + return true + } + + // Handle references to ELF symbols from our own object files. + if targType != sym.SDYNIMPORT { + return true + } + + // Reread the reloc to incorporate any changes in type above. + relocs := ldr.Relocs(s) + r = relocs.At(rIdx) + + switch r.Type() { + case objabi.R_CALLARM: + if target.IsExternal() { + // External linker will do this relocation. 
+ return true + } + addpltsym(target, ldr, syms, targ) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, int64(braddoff(int32(r.Add()), ldr.SymPlt(targ)/4))) // TODO: don't use r.Add for instruction bytes (issue 19811) + return true + + case objabi.R_ADDR: + if ldr.SymType(s) != sym.SDATA { + break + } + if target.IsElf() { + ld.Adddynsym(ldr, target, syms, targ) + rel := ldr.MakeSymbolUpdater(syms.Rel) + rel.AddAddrPlus(target.Arch, s, int64(r.Off())) + rel.AddUint32(target.Arch, elf.R_INFO32(uint32(ldr.SymDynid(targ)), uint32(elf.R_ARM_GLOB_DAT))) // we need a nil + A dynamic reloc + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_CONST) // write r->add during relocsym + su.SetRelocSym(rIdx, 0) + return true + } + + case objabi.R_GOTPCREL: + if target.IsExternal() { + // External linker will do this relocation. + return true + } + if targType != sym.SDYNIMPORT { + ldr.Errorf(s, "R_GOTPCREL target is not SDYNIMPORT symbol: %v", ldr.SymName(targ)) + } + ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_ARM_GLOB_DAT)) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ))) + return true + } + + return false +} + +func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool { + out.Write32(uint32(sectoff)) + + elfsym := ld.ElfSymForReloc(ctxt, r.Xsym) + siz := r.Size + switch r.Type { + default: + return false + case objabi.R_ADDR, objabi.R_DWARFSECREF: + if siz == 4 { + out.Write32(uint32(elf.R_ARM_ABS32) | uint32(elfsym)<<8) + } else { + return false + } + case objabi.R_PCREL: + if siz == 4 { + out.Write32(uint32(elf.R_ARM_REL32) | uint32(elfsym)<<8) + } else { + return false + } + case objabi.R_CALLARM: + if siz == 4 { + relocs := ldr.Relocs(s) + r := relocs.At(ri) + if r.Add()&0xff000000 == 0xeb000000 { // BL // TODO: using r.Add here is 
bad (issue 19811) + out.Write32(uint32(elf.R_ARM_CALL) | uint32(elfsym)<<8) + } else { + out.Write32(uint32(elf.R_ARM_JUMP24) | uint32(elfsym)<<8) + } + } else { + return false + } + case objabi.R_TLS_LE: + out.Write32(uint32(elf.R_ARM_TLS_LE32) | uint32(elfsym)<<8) + case objabi.R_TLS_IE: + out.Write32(uint32(elf.R_ARM_TLS_IE32) | uint32(elfsym)<<8) + case objabi.R_GOTPCREL: + if siz == 4 { + out.Write32(uint32(elf.R_ARM_GOT_PREL) | uint32(elfsym)<<8) + } else { + return false + } + } + + return true +} + +func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, got *loader.SymbolBuilder, dynamic loader.Sym) { + if plt.Size() == 0 { + // str lr, [sp, #-4]! + plt.AddUint32(ctxt.Arch, 0xe52de004) + + // ldr lr, [pc, #4] + plt.AddUint32(ctxt.Arch, 0xe59fe004) + + // add lr, pc, lr + plt.AddUint32(ctxt.Arch, 0xe08fe00e) + + // ldr pc, [lr, #8]! + plt.AddUint32(ctxt.Arch, 0xe5bef008) + + // .word &GLOBAL_OFFSET_TABLE[0] - . + plt.AddPCRelPlus(ctxt.Arch, got.Sym(), 4) + + // the first .plt entry requires 3 .plt.got entries + got.AddUint32(ctxt.Arch, 0) + + got.AddUint32(ctxt.Arch, 0) + got.AddUint32(ctxt.Arch, 0) + } +} + +func machoreloc1(*sys.Arch, *ld.OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool { + return false +} + +func pereloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, sectoff int64) bool { + rs := r.Xsym + rt := r.Type + + if ldr.SymDynid(rs) < 0 { + ldr.Errorf(s, "reloc %d (%s) to non-coff symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs)) + return false + } + + out.Write32(uint32(sectoff)) + out.Write32(uint32(ldr.SymDynid(rs))) + + var v uint32 + switch rt { + default: + // unsupported relocation type + return false + + case objabi.R_DWARFSECREF: + v = ld.IMAGE_REL_ARM_SECREL + + case objabi.R_ADDR: + v = ld.IMAGE_REL_ARM_ADDR32 + + case objabi.R_PEIMAGEOFF: + v = ld.IMAGE_REL_ARM_ADDR32NB + } + + out.Write16(uint16(v)) + + return true +} + +// 
sign extend a 24-bit integer. +func signext24(x int64) int32 { + return (int32(x) << 8) >> 8 +} + +// encode an immediate in ARM's imm12 format. copied from ../../../internal/obj/arm/asm5.go +func immrot(v uint32) uint32 { + for i := 0; i < 16; i++ { + if v&^0xff == 0 { + return uint32(i<<8) | v | 1<<25 + } + v = v<<2 | v>>30 + } + return 0 +} + +// Convert the direct jump relocation r to refer to a trampoline if the target is too far. +func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) { + relocs := ldr.Relocs(s) + r := relocs.At(ri) + switch r.Type() { + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_CALL), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_PC24), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_JUMP24): + // Host object relocations that will be turned into a PLT call. + // The PLT may be too far. Insert a trampoline for them. + fallthrough + case objabi.R_CALLARM: + var t int64 + // ldr.SymValue(rs) == 0 indicates a cross-package jump to a function that is not yet + // laid out. Conservatively use a trampoline. This should be rare, as we lay out packages + // in dependency order. + if ldr.SymValue(rs) != 0 { + // Workaround for issue #58425: it appears that the + // external linker doesn't always take into account the + // relocation addend when doing reachability checks. This + // means that if you have a call from function XYZ at + // offset 8 to runtime.duffzero with addend 800 (for + // example), where the distance between the start of XYZ + // and the start of runtime.duffzero is just over the + // limit (by 100 bytes, say), you can get "relocation + // doesn't fit" errors from the external linker. To deal + // with this, ignore the addend when performing the + // distance calculation (this assumes that we're only + // handling backward jumps; ideally we might want to check + // both with and without the addend). 
+ if ctxt.IsExternal() { + t = (ldr.SymValue(rs) - (ldr.SymValue(s) + int64(r.Off()))) / 4 + } else { + // r.Add is the instruction + // low 24-bit encodes the target address + t = (ldr.SymValue(rs) + int64(signext24(r.Add()&0xffffff)*4) - (ldr.SymValue(s) + int64(r.Off()))) / 4 + } + } + if t > 0x7fffff || t <= -0x800000 || ldr.SymValue(rs) == 0 || (*ld.FlagDebugTramp > 1 && ldr.SymPkg(s) != ldr.SymPkg(rs)) { + // direct call too far, need to insert trampoline. + // look up existing trampolines first. if we found one within the range + // of direct call, we can reuse it. otherwise create a new one. + offset := (signext24(r.Add()&0xffffff) + 2) * 4 + var tramp loader.Sym + for i := 0; ; i++ { + oName := ldr.SymName(rs) + name := oName + fmt.Sprintf("%+d-tramp%d", offset, i) + tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs))) + ldr.SetAttrReachable(tramp, true) + if ldr.SymType(tramp) == sym.SDYNIMPORT { + // don't reuse trampoline defined in other module + continue + } + if oName == "runtime.deferreturn" { + ldr.SetIsDeferReturnTramp(tramp, true) + } + if ldr.SymValue(tramp) == 0 { + // either the trampoline does not exist -- we need to create one, + // or found one the address which is not assigned -- this will be + // laid down immediately after the current function. use this one. 
+ break + } + + t = (ldr.SymValue(tramp) - 8 - (ldr.SymValue(s) + int64(r.Off()))) / 4 + if t >= -0x800000 && t < 0x7fffff { + // found an existing trampoline that is not too far + // we can just use it + break + } + } + if ldr.SymType(tramp) == 0 { + // trampoline does not exist, create one + trampb := ldr.MakeSymbolUpdater(tramp) + ctxt.AddTramp(trampb) + if ctxt.DynlinkingGo() || ldr.SymType(rs) == sym.SDYNIMPORT { + if immrot(uint32(offset)) == 0 { + ctxt.Errorf(s, "odd offset in dynlink direct call: %v+%d", ldr.SymName(rs), offset) + } + gentrampdyn(ctxt.Arch, trampb, rs, int64(offset)) + } else if ctxt.BuildMode == ld.BuildModeCArchive || ctxt.BuildMode == ld.BuildModeCShared || ctxt.BuildMode == ld.BuildModePIE { + gentramppic(ctxt.Arch, trampb, rs, int64(offset)) + } else { + gentramp(ctxt.Arch, ctxt.LinkMode, ldr, trampb, rs, int64(offset)) + } + } + // modify reloc to point to tramp, which will be resolved later + sb := ldr.MakeSymbolUpdater(s) + relocs := sb.Relocs() + r := relocs.At(ri) + r.SetSym(tramp) + r.SetAdd(r.Add()&0xff000000 | 0xfffffe) // clear the offset embedded in the instruction + } + default: + ctxt.Errorf(s, "trampoline called with non-jump reloc: %d (%s)", r.Type(), sym.RelocName(ctxt.Arch, r.Type())) + } +} + +// generate a trampoline to target+offset. 
+func gentramp(arch *sys.Arch, linkmode ld.LinkMode, ldr *loader.Loader, tramp *loader.SymbolBuilder, target loader.Sym, offset int64) { + tramp.SetSize(12) // 3 instructions + P := make([]byte, tramp.Size()) + t := ldr.SymValue(target) + offset + o1 := uint32(0xe5900000 | 12<<12 | 15<<16) // MOVW (R15), R12 // R15 is actual pc + 8 + o2 := uint32(0xe12fff10 | 12) // JMP (R12) + o3 := uint32(t) // WORD $target + arch.ByteOrder.PutUint32(P, o1) + arch.ByteOrder.PutUint32(P[4:], o2) + arch.ByteOrder.PutUint32(P[8:], o3) + tramp.SetData(P) + + if linkmode == ld.LinkExternal || ldr.SymValue(target) == 0 { + r, _ := tramp.AddRel(objabi.R_ADDR) + r.SetOff(8) + r.SetSiz(4) + r.SetSym(target) + r.SetAdd(offset) + } +} + +// generate a trampoline to target+offset in position independent code. +func gentramppic(arch *sys.Arch, tramp *loader.SymbolBuilder, target loader.Sym, offset int64) { + tramp.SetSize(16) // 4 instructions + P := make([]byte, tramp.Size()) + o1 := uint32(0xe5900000 | 12<<12 | 15<<16 | 4) // MOVW 4(R15), R12 // R15 is actual pc + 8 + o2 := uint32(0xe0800000 | 12<<12 | 15<<16 | 12) // ADD R15, R12, R12 + o3 := uint32(0xe12fff10 | 12) // JMP (R12) + o4 := uint32(0) // WORD $(target-pc) // filled in with relocation + arch.ByteOrder.PutUint32(P, o1) + arch.ByteOrder.PutUint32(P[4:], o2) + arch.ByteOrder.PutUint32(P[8:], o3) + arch.ByteOrder.PutUint32(P[12:], o4) + tramp.SetData(P) + + r, _ := tramp.AddRel(objabi.R_PCREL) + r.SetOff(12) + r.SetSiz(4) + r.SetSym(target) + r.SetAdd(offset + 4) +} + +// generate a trampoline to target+offset in dynlink mode (using GOT). 
+func gentrampdyn(arch *sys.Arch, tramp *loader.SymbolBuilder, target loader.Sym, offset int64) { + tramp.SetSize(20) // 5 instructions + o1 := uint32(0xe5900000 | 12<<12 | 15<<16 | 8) // MOVW 8(R15), R12 // R15 is actual pc + 8 + o2 := uint32(0xe0800000 | 12<<12 | 15<<16 | 12) // ADD R15, R12, R12 + o3 := uint32(0xe5900000 | 12<<12 | 12<<16) // MOVW (R12), R12 + o4 := uint32(0xe12fff10 | 12) // JMP (R12) + o5 := uint32(0) // WORD $target@GOT // filled in with relocation + o6 := uint32(0) + if offset != 0 { + // insert an instruction to add offset + tramp.SetSize(24) // 6 instructions + o6 = o5 + o5 = o4 + o4 = 0xe2800000 | 12<<12 | 12<<16 | immrot(uint32(offset)) // ADD $offset, R12, R12 + o1 = uint32(0xe5900000 | 12<<12 | 15<<16 | 12) // MOVW 12(R15), R12 + } + P := make([]byte, tramp.Size()) + arch.ByteOrder.PutUint32(P, o1) + arch.ByteOrder.PutUint32(P[4:], o2) + arch.ByteOrder.PutUint32(P[8:], o3) + arch.ByteOrder.PutUint32(P[12:], o4) + arch.ByteOrder.PutUint32(P[16:], o5) + if offset != 0 { + arch.ByteOrder.PutUint32(P[20:], o6) + } + tramp.SetData(P) + + r, _ := tramp.AddRel(objabi.R_GOTPCREL) + r.SetOff(16) + r.SetSiz(4) + r.SetSym(target) + r.SetAdd(8) + if offset != 0 { + // increase reloc offset by 4 as we inserted an ADD instruction + r.SetOff(20) + r.SetAdd(12) + } +} + +func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) (o int64, nExtReloc int, ok bool) { + rs := r.Sym() + if target.IsExternal() { + switch r.Type() { + case objabi.R_CALLARM: + // set up addend for eventual relocation via outer symbol. 
+ _, off := ld.FoldSubSymbolOffset(ldr, rs) + xadd := int64(signext24(r.Add()&0xffffff))*4 + off + if xadd/4 > 0x7fffff || xadd/4 < -0x800000 { + ldr.Errorf(s, "direct call too far %d", xadd/4) + } + return int64(braddoff(int32(0xff000000&uint32(r.Add())), int32(0xffffff&uint32(xadd/4)))), 1, true + } + return -1, 0, false + } + + const isOk = true + const noExtReloc = 0 + switch r.Type() { + // The following three arch specific relocations are only for generation of + // Linux/ARM ELF's PLT entry (3 assembler instruction) + case objabi.R_PLT0: // add ip, pc, #0xXX00000 + if ldr.SymValue(syms.GOTPLT) < ldr.SymValue(syms.PLT) { + ldr.Errorf(s, ".got.plt should be placed after .plt section.") + } + return 0xe28fc600 + (0xff & (int64(uint32(ldr.SymValue(rs)-(ldr.SymValue(syms.PLT)+int64(r.Off()))+r.Add())) >> 20)), noExtReloc, isOk + case objabi.R_PLT1: // add ip, ip, #0xYY000 + return 0xe28cca00 + (0xff & (int64(uint32(ldr.SymValue(rs)-(ldr.SymValue(syms.PLT)+int64(r.Off()))+r.Add()+4)) >> 12)), noExtReloc, isOk + case objabi.R_PLT2: // ldr pc, [ip, #0xZZZ]! 
+ return 0xe5bcf000 + (0xfff & int64(uint32(ldr.SymValue(rs)-(ldr.SymValue(syms.PLT)+int64(r.Off()))+r.Add()+8))), noExtReloc, isOk + case objabi.R_CALLARM: // bl XXXXXX or b YYYYYY + // r.Add is the instruction + // low 24-bit encodes the target address + t := (ldr.SymValue(rs) + int64(signext24(r.Add()&0xffffff)*4) - (ldr.SymValue(s) + int64(r.Off()))) / 4 + if t > 0x7fffff || t < -0x800000 { + ldr.Errorf(s, "direct call too far: %s %x", ldr.SymName(rs), t) + } + return int64(braddoff(int32(0xff000000&uint32(r.Add())), int32(0xffffff&t))), noExtReloc, isOk + } + + return val, 0, false +} + +func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { + log.Fatalf("unexpected relocation variant") + return -1 +} + +func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (loader.ExtReloc, bool) { + rs := r.Sym() + var rr loader.ExtReloc + switch r.Type() { + case objabi.R_CALLARM: + // set up addend for eventual relocation via outer symbol. 
+ rs, off := ld.FoldSubSymbolOffset(ldr, rs) + rr.Xadd = int64(signext24(r.Add()&0xffffff))*4 + off + rst := ldr.SymType(rs) + if rst != sym.SHOSTOBJ && rst != sym.SDYNIMPORT && rst != sym.SUNDEFEXT && ldr.SymSect(rs) == nil { + ldr.Errorf(s, "missing section for %s", ldr.SymName(rs)) + } + rr.Xsym = rs + rr.Type = r.Type() + rr.Size = r.Siz() + return rr, true + } + return rr, false +} + +func addpltreloc(ldr *loader.Loader, plt *loader.SymbolBuilder, got *loader.SymbolBuilder, s loader.Sym, typ objabi.RelocType) { + r, _ := plt.AddRel(typ) + r.SetSym(got.Sym()) + r.SetOff(int32(plt.Size())) + r.SetSiz(4) + r.SetAdd(int64(ldr.SymGot(s)) - 8) + + plt.SetReachable(true) + plt.SetSize(plt.Size() + 4) + plt.Grow(plt.Size()) +} + +func addpltsym(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym) { + if ldr.SymPlt(s) >= 0 { + return + } + + ld.Adddynsym(ldr, target, syms, s) + + if target.IsElf() { + plt := ldr.MakeSymbolUpdater(syms.PLT) + got := ldr.MakeSymbolUpdater(syms.GOTPLT) + rel := ldr.MakeSymbolUpdater(syms.RelPLT) + if plt.Size() == 0 { + panic("plt is not set up") + } + + // .got entry + ldr.SetGot(s, int32(got.Size())) + + // In theory, all GOT should point to the first PLT entry, + // Linux/ARM's dynamic linker will do that for us, but FreeBSD/ARM's + // dynamic linker won't, so we'd better do it ourselves. + got.AddAddrPlus(target.Arch, plt.Sym(), 0) + + // .plt entry, this depends on the .got entry + ldr.SetPlt(s, int32(plt.Size())) + + addpltreloc(ldr, plt, got, s, objabi.R_PLT0) // add lr, pc, #0xXX00000 + addpltreloc(ldr, plt, got, s, objabi.R_PLT1) // add lr, lr, #0xYY000 + addpltreloc(ldr, plt, got, s, objabi.R_PLT2) // ldr pc, [lr, #0xZZZ]! 
+ + // rel + rel.AddAddrPlus(target.Arch, got.Sym(), int64(ldr.SymGot(s))) + + rel.AddUint32(target.Arch, elf.R_INFO32(uint32(ldr.SymDynid(s)), uint32(elf.R_ARM_JUMP_SLOT))) + } else { + ldr.Errorf(s, "addpltsym: unsupported binary format") + } +} + +func addgotsyminternal(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym) { + if ldr.SymGot(s) >= 0 { + return + } + + got := ldr.MakeSymbolUpdater(syms.GOT) + ldr.SetGot(s, int32(got.Size())) + got.AddAddrPlus(target.Arch, s, 0) + + if target.IsElf() { + } else { + ldr.Errorf(s, "addgotsyminternal: unsupported binary format") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/arm/l.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/arm/l.go new file mode 100644 index 0000000000000000000000000000000000000000..d16dc2726e70b1f65eaa5d92acea773993f762d3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/arm/l.go @@ -0,0 +1,75 @@ +// Inferno utils/5l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package arm + +// Writing object files. + +// Inferno utils/5l/l.h +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/l.h +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +const ( + maxAlign = 8 // max data alignment + minAlign = 1 // min data alignment + funcAlign = 4 // single-instruction alignment +) + +/* Used by ../internal/ld/dwarf.go */ +const ( + dwarfRegSP = 13 + dwarfRegLR = 14 +) diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/arm/obj.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/arm/obj.go new file mode 100644 index 0000000000000000000000000000000000000000..3a1830ce10e11756ca62240952abc03f0396e18b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/arm/obj.go @@ -0,0 +1,113 @@ +// Inferno utils/5l/obj.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/obj.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package arm + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" +) + +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchARM + + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, + TrampLimit: 0x1c00000, // 24-bit signed offset * 4, leave room for PLT etc. 
+ + Plan9Magic: 0x647, + + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Extreloc: extreloc, + Trampoline: trampoline, + Gentext: gentext, + Machoreloc1: machoreloc1, + PEreloc1: pereloc1, + + ELF: ld.ELFArch{ + Linuxdynld: "/lib/ld-linux.so.3", // 2 for OABI, 3 for EABI + LinuxdynldMusl: "/lib/ld-musl-arm.so.1", + Freebsddynld: "/usr/libexec/ld-elf.so.1", + Openbsddynld: "/usr/libexec/ld.so", + Netbsddynld: "/libexec/ld.elf_so", + Dragonflydynld: "XXX", + Solarisdynld: "XXX", + + Reloc1: elfreloc1, + RelocSize: 8, + SetupPLT: elfsetupplt, + }, + } + + return arch, theArch +} + +func archinit(ctxt *ld.Link) { + switch ctxt.HeadType { + default: + ld.Exitf("unknown -H option: %v", ctxt.HeadType) + + case objabi.Hplan9: /* plan 9 */ + ld.HEADR = 32 + if *ld.FlagRound == -1 { + *ld.FlagRound = 4096 + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = ld.Rnd(4096, *ld.FlagRound) + int64(ld.HEADR) + } + + case objabi.Hlinux, /* arm elf */ + objabi.Hfreebsd, + objabi.Hnetbsd, + objabi.Hopenbsd: + *ld.FlagD = false + // with dynamic linking + ld.Elfinit(ctxt) + ld.HEADR = ld.ELFRESERVE + if *ld.FlagRound == -1 { + *ld.FlagRound = 0x10000 + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR) + } + + case objabi.Hwindows: /* PE executable */ + // ld.HEADR, ld.FlagTextAddr, ld.FlagRound are set in ld.Peinit + return + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/arm64/asm.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/arm64/asm.go new file mode 100644 index 0000000000000000000000000000000000000000..6645795506abd86cf058ed1d9028a73921a42ba1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/arm64/asm.go @@ -0,0 +1,1423 @@ +// Inferno utils/5l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package arm64 + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" + "fmt" + "log" +) + +func gentext(ctxt *ld.Link, ldr *loader.Loader) { + initfunc, addmoduledata := ld.PrepareAddmoduledata(ctxt) + if initfunc == nil { + return + } + + o := func(op uint32) { + initfunc.AddUint32(ctxt.Arch, op) + } + // 0000000000000000 : + // 0: 90000000 adrp x0, 0 + // 0: R_AARCH64_ADR_PREL_PG_HI21 local.moduledata + // 4: 91000000 add x0, x0, #0x0 + // 4: R_AARCH64_ADD_ABS_LO12_NC local.moduledata + o(0x90000000) + o(0x91000000) + rel, _ := initfunc.AddRel(objabi.R_ADDRARM64) + rel.SetOff(0) + rel.SetSiz(8) + rel.SetSym(ctxt.Moduledata) + + // 8: 14000000 b 0 + // 8: R_AARCH64_CALL26 runtime.addmoduledata + o(0x14000000) + rel2, _ := initfunc.AddRel(objabi.R_CALLARM64) + rel2.SetOff(8) + rel2.SetSiz(4) + rel2.SetSym(addmoduledata) +} + +func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { + targ := r.Sym() + var targType sym.SymKind + if targ != 0 { + targType = ldr.SymType(targ) + } + + const pcrel = 1 + switch r.Type() { + default: + if r.Type() >= objabi.ElfRelocOffset { + ldr.Errorf(s, "unexpected relocation type %d (%s)", r.Type(), sym.RelocName(target.Arch, r.Type())) + return false + } + + // Handle relocations found in ELF object files. 
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_PREL32): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected R_AARCH64_PREL32 relocation for dynamic symbol %s", ldr.SymName(targ)) + } + if targType == 0 || targType == sym.SXREF { + ldr.Errorf(s, "unknown symbol %s in pcrel", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocAdd(rIdx, r.Add()+4) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_PREL64): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected R_AARCH64_PREL64 relocation for dynamic symbol %s", ldr.SymName(targ)) + } + if targType == 0 || targType == sym.SXREF { + ldr.Errorf(s, "unknown symbol %s in pcrel", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocAdd(rIdx, r.Add()+8) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_CALL26), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_JUMP26): + if targType == sym.SDYNIMPORT { + addpltsym(target, ldr, syms, targ) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymPlt(targ))) + } + if targType == 0 || targType == sym.SXREF { + ldr.Errorf(s, "unknown symbol %s in callarm64", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_CALLARM64) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_ADR_GOT_PAGE), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LD64_GOT_LO12_NC): + if targType != sym.SDYNIMPORT { + // have symbol + // TODO: turn LDR of GOT entry into ADR of symbol itself + } + + // fall back to using GOT + // TODO: just needs relocation, no need to put in .dynsym + ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_AARCH64_GLOB_DAT)) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ARM64_GOT) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, 
r.Add()+int64(ldr.SymGot(targ))) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_ADR_PREL_PG_HI21), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_ADD_ABS_LO12_NC): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ)) + } + if targType == 0 || targType == sym.SXREF { + ldr.Errorf(s, "unknown symbol %s", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ARM64_PCREL) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_ABS64): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected R_AARCH64_ABS64 relocation for dynamic symbol %s", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ADDR) + if target.IsPIE() && target.IsInternal() { + // For internal linking PIE, this R_ADDR relocation cannot + // be resolved statically. We need to generate a dynamic + // relocation. Let the code below handle it. 
+ break + } + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LDST8_ABS_LO12_NC): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ARM64_LDST8) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LDST16_ABS_LO12_NC): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ARM64_LDST16) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LDST32_ABS_LO12_NC): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ARM64_LDST32) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LDST64_ABS_LO12_NC): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ARM64_LDST64) + + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LDST128_ABS_LO12_NC): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ARM64_LDST128) + return true + + // Handle relocations found in Mach-O object files. + case objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_UNSIGNED*2: + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected reloc for dynamic symbol %s", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ADDR) + if target.IsPIE() && target.IsInternal() { + // For internal linking PIE, this R_ADDR relocation cannot + // be resolved statically. 
We need to generate a dynamic + // relocation. Let the code below handle it. + break + } + return true + + case objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_BRANCH26*2 + pcrel: + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_CALLARM64) + if targType == sym.SDYNIMPORT { + addpltsym(target, ldr, syms, targ) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, int64(ldr.SymPlt(targ))) + } + return true + + case objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_PAGE21*2 + pcrel, + objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_PAGEOFF12*2: + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ARM64_PCREL) + return true + + case objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_GOT_LOAD_PAGE21*2 + pcrel, + objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_GOT_LOAD_PAGEOFF12*2: + if targType != sym.SDYNIMPORT { + // have symbol + // turn MOVD sym@GOT (adrp+ldr) into MOVD $sym (adrp+add) + data := ldr.Data(s) + off := r.Off() + if int(off+3) >= len(data) { + ldr.Errorf(s, "unexpected GOT_LOAD reloc for non-dynamic symbol %s", ldr.SymName(targ)) + return false + } + o := target.Arch.ByteOrder.Uint32(data[off:]) + su := ldr.MakeSymbolUpdater(s) + switch { + case (o>>24)&0x9f == 0x90: // adrp + // keep instruction unchanged, change relocation type below + case o>>24 == 0xf9: // ldr + // rewrite to add + o = (0x91 << 24) | (o & (1<<22 - 1)) + su.MakeWritable() + su.SetUint32(target.Arch, int64(off), o) + default: + ldr.Errorf(s, "unexpected GOT_LOAD reloc for non-dynamic symbol %s", ldr.SymName(targ)) + return false + } + su.SetRelocType(rIdx, objabi.R_ARM64_PCREL) + return true + } + ld.AddGotSym(target, ldr, syms, targ, 0) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ARM64_GOT) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, int64(ldr.SymGot(targ))) + return true + } + + // Reread the reloc to 
incorporate any changes in type above. + relocs := ldr.Relocs(s) + r = relocs.At(rIdx) + + switch r.Type() { + case objabi.R_CALLARM64: + if targType != sym.SDYNIMPORT { + // nothing to do, the relocation will be laid out in reloc + return true + } + if target.IsExternal() { + // External linker will do this relocation. + return true + } + // Internal linking. + if r.Add() != 0 { + ldr.Errorf(s, "PLT call with non-zero addend (%v)", r.Add()) + } + // Build a PLT entry and change the relocation target to that entry. + addpltsym(target, ldr, syms, targ) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, int64(ldr.SymPlt(targ))) + return true + + case objabi.R_ADDRARM64: + if targType == sym.SDYNIMPORT && ldr.SymType(s) == sym.STEXT && target.IsDarwin() { + // Loading the address of a dynamic symbol. Rewrite to use GOT. + // turn MOVD $sym (adrp+add) into MOVD sym@GOT (adrp+ldr) + if r.Add() != 0 { + ldr.Errorf(s, "unexpected nonzero addend for dynamic symbol %s", ldr.SymName(targ)) + return false + } + su := ldr.MakeSymbolUpdater(s) + data := ldr.Data(s) + off := r.Off() + if int(off+8) > len(data) { + ldr.Errorf(s, "unexpected R_ADDRARM64 reloc for dynamic symbol %s", ldr.SymName(targ)) + return false + } + o := target.Arch.ByteOrder.Uint32(data[off+4:]) + if o>>24 == 0x91 { // add + // rewrite to ldr + o = (0xf9 << 24) | 1<<22 | (o & (1<<22 - 1)) + su.MakeWritable() + su.SetUint32(target.Arch, int64(off+4), o) + if target.IsInternal() { + ld.AddGotSym(target, ldr, syms, targ, 0) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, int64(ldr.SymGot(targ))) + su.SetRelocType(rIdx, objabi.R_ARM64_PCREL_LDST64) + } else { + su.SetRelocType(rIdx, objabi.R_ARM64_GOTPCREL) + } + return true + } + ldr.Errorf(s, "unexpected R_ADDRARM64 reloc for dynamic symbol %s", ldr.SymName(targ)) + } + + case objabi.R_ADDR: + if ldr.SymType(s) == sym.STEXT && target.IsElf() { + // The code is asking for the address of an external + // function. 
We provide it with the address of the + // correspondent GOT symbol. + ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_AARCH64_GLOB_DAT)) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ))) + return true + } + + // Process dynamic relocations for the data sections. + if target.IsPIE() && target.IsInternal() { + // When internally linking, generate dynamic relocations + // for all typical R_ADDR relocations. The exception + // are those R_ADDR that are created as part of generating + // the dynamic relocations and must be resolved statically. + // + // There are three phases relevant to understanding this: + // + // dodata() // we are here + // address() // symbol address assignment + // reloc() // resolution of static R_ADDR relocs + // + // At this point symbol addresses have not been + // assigned yet (as the final size of the .rela section + // will affect the addresses), and so we cannot write + // the Elf64_Rela.r_offset now. Instead we delay it + // until after the 'address' phase of the linker is + // complete. We do this via Addaddrplus, which creates + // a new R_ADDR relocation which will be resolved in + // the 'reloc' phase. + // + // These synthetic static R_ADDR relocs must be skipped + // now, or else we will be caught in an infinite loop + // of generating synthetic relocs for our synthetic + // relocs. + // + // Furthermore, the rela sections contain dynamic + // relocations with R_ADDR relocations on + // Elf64_Rela.r_offset. This field should contain the + // symbol offset as determined by reloc(), not the + // final dynamically linked address as a dynamic + // relocation would provide. 
+ switch ldr.SymName(s) { + case ".dynsym", ".rela", ".rela.plt", ".got.plt", ".dynamic": + return false + } + } else { + // Either internally linking a static executable, + // in which case we can resolve these relocations + // statically in the 'reloc' phase, or externally + // linking, in which case the relocation will be + // prepared in the 'reloc' phase and passed to the + // external linker in the 'asmb' phase. + if ldr.SymType(s) != sym.SDATA && ldr.SymType(s) != sym.SRODATA { + break + } + } + + if target.IsElf() { + // Generate R_AARCH64_RELATIVE relocations for best + // efficiency in the dynamic linker. + // + // As noted above, symbol addresses have not been + // assigned yet, so we can't generate the final reloc + // entry yet. We ultimately want: + // + // r_offset = s + r.Off + // r_info = R_AARCH64_RELATIVE + // r_addend = targ + r.Add + // + // The dynamic linker will set *offset = base address + + // addend. + // + // AddAddrPlus is used for r_offset and r_addend to + // generate new R_ADDR relocations that will update + // these fields in the 'reloc' phase. + rela := ldr.MakeSymbolUpdater(syms.Rela) + rela.AddAddrPlus(target.Arch, s, int64(r.Off())) + if r.Siz() == 8 { + rela.AddUint64(target.Arch, elf.R_INFO(0, uint32(elf.R_AARCH64_RELATIVE))) + } else { + ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ)) + } + rela.AddAddrPlus(target.Arch, targ, int64(r.Add())) + // Not mark r done here. So we still apply it statically, + // so in the file content we'll also have the right offset + // to the relocation target. So it can be examined statically + // (e.g. go version). + return true + } + + if target.IsDarwin() { + // Mach-O relocations are a royal pain to lay out. + // They use a compact stateful bytecode representation. + // Here we record what are needed and encode them later. + ld.MachoAddRebase(s, int64(r.Off())) + // Not mark r done here. 
So we still apply it statically, + // so in the file content we'll also have the right offset + // to the relocation target. So it can be examined statically + // (e.g. go version). + return true + } + + case objabi.R_ARM64_GOTPCREL: + if target.IsExternal() { + // External linker will do this relocation. + return true + } + if targType != sym.SDYNIMPORT { + ldr.Errorf(s, "R_ARM64_GOTPCREL target is not SDYNIMPORT symbol: %v", ldr.SymName(targ)) + } + if r.Add() != 0 { + ldr.Errorf(s, "R_ARM64_GOTPCREL with non-zero addend (%v)", r.Add()) + } + if target.IsElf() { + ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_AARCH64_GLOB_DAT)) + } else { + ld.AddGotSym(target, ldr, syms, targ, 0) + } + // turn into two relocations, one for each instruction. + su := ldr.MakeSymbolUpdater(s) + r.SetType(objabi.R_ARM64_GOT) + r.SetSiz(4) + r.SetSym(syms.GOT) + r.SetAdd(int64(ldr.SymGot(targ))) + r2, _ := su.AddRel(objabi.R_ARM64_GOT) + r2.SetSiz(4) + r2.SetOff(r.Off() + 4) + r2.SetSym(syms.GOT) + r2.SetAdd(int64(ldr.SymGot(targ))) + return true + } + return false +} + +func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool { + out.Write64(uint64(sectoff)) + + elfsym := ld.ElfSymForReloc(ctxt, r.Xsym) + siz := r.Size + switch r.Type { + default: + return false + case objabi.R_ADDR, objabi.R_DWARFSECREF: + switch siz { + case 4: + out.Write64(uint64(elf.R_AARCH64_ABS32) | uint64(elfsym)<<32) + case 8: + out.Write64(uint64(elf.R_AARCH64_ABS64) | uint64(elfsym)<<32) + default: + return false + } + case objabi.R_ADDRARM64: + // two relocations: R_AARCH64_ADR_PREL_PG_HI21 and R_AARCH64_ADD_ABS_LO12_NC + out.Write64(uint64(elf.R_AARCH64_ADR_PREL_PG_HI21) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + out.Write64(uint64(elf.R_AARCH64_ADD_ABS_LO12_NC) | uint64(elfsym)<<32) + + case objabi.R_ARM64_PCREL_LDST8, + objabi.R_ARM64_PCREL_LDST16, + objabi.R_ARM64_PCREL_LDST32, + 
objabi.R_ARM64_PCREL_LDST64: + // two relocations: R_AARCH64_ADR_PREL_PG_HI21 and R_AARCH64_LDST{64/32/16/8}_ABS_LO12_NC + out.Write64(uint64(elf.R_AARCH64_ADR_PREL_PG_HI21) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + var ldstType elf.R_AARCH64 + switch r.Type { + case objabi.R_ARM64_PCREL_LDST8: + ldstType = elf.R_AARCH64_LDST8_ABS_LO12_NC + case objabi.R_ARM64_PCREL_LDST16: + ldstType = elf.R_AARCH64_LDST16_ABS_LO12_NC + case objabi.R_ARM64_PCREL_LDST32: + ldstType = elf.R_AARCH64_LDST32_ABS_LO12_NC + case objabi.R_ARM64_PCREL_LDST64: + ldstType = elf.R_AARCH64_LDST64_ABS_LO12_NC + } + out.Write64(uint64(ldstType) | uint64(elfsym)<<32) + + case objabi.R_ARM64_TLS_LE: + out.Write64(uint64(elf.R_AARCH64_TLSLE_MOVW_TPREL_G0) | uint64(elfsym)<<32) + case objabi.R_ARM64_TLS_IE: + out.Write64(uint64(elf.R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + out.Write64(uint64(elf.R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) | uint64(elfsym)<<32) + case objabi.R_ARM64_GOTPCREL: + out.Write64(uint64(elf.R_AARCH64_ADR_GOT_PAGE) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + out.Write64(uint64(elf.R_AARCH64_LD64_GOT_LO12_NC) | uint64(elfsym)<<32) + case objabi.R_CALLARM64: + if siz != 4 { + return false + } + out.Write64(uint64(elf.R_AARCH64_CALL26) | uint64(elfsym)<<32) + + } + out.Write64(uint64(r.Xadd)) + + return true +} + +// sign-extends from 21, 24-bit. 
+func signext21(x int64) int64 { return x << (64 - 21) >> (64 - 21) } +func signext24(x int64) int64 { return x << (64 - 24) >> (64 - 24) } + +func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, sectoff int64) bool { + var v uint32 + + rs := r.Xsym + rt := r.Type + siz := r.Size + xadd := r.Xadd + + if xadd != signext24(xadd) && rt != objabi.R_ADDR { + // If the relocation target would overflow the addend, then target + // a linker-manufactured label symbol with a smaller addend instead. + // R_ADDR has full-width addend encoded in data content, so it doesn't + // use a label symbol. + label := ldr.Lookup(offsetLabelName(ldr, rs, xadd/machoRelocLimit*machoRelocLimit), ldr.SymVersion(rs)) + if label != 0 { + xadd = ldr.SymValue(rs) + xadd - ldr.SymValue(label) + rs = label + } + if xadd != signext24(xadd) { + ldr.Errorf(s, "internal error: relocation addend overflow: %s+0x%x", ldr.SymName(rs), xadd) + } + } + if rt == objabi.R_CALLARM64 && xadd != 0 { + label := ldr.Lookup(offsetLabelName(ldr, rs, xadd), ldr.SymVersion(rs)) + if label != 0 { + xadd = ldr.SymValue(rs) + xadd - ldr.SymValue(label) // should always be 0 (checked below) + rs = label + } + } + + if !ldr.SymType(s).IsDWARF() { + if ldr.SymDynid(rs) < 0 { + ldr.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs)) + return false + } + + v = uint32(ldr.SymDynid(rs)) + v |= 1 << 27 // external relocation + } else { + v = uint32(ldr.SymSect(rs).Extnum) + if v == 0 { + ldr.Errorf(s, "reloc %d (%s) to symbol %s in non-macho section %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymSect(rs).Name, ldr.SymType(rs), ldr.SymType(rs)) + return false + } + } + + switch rt { + default: + return false + case objabi.R_ADDR: + v |= ld.MACHO_ARM64_RELOC_UNSIGNED << 28 + case objabi.R_CALLARM64: + if xadd != 0 { + // Addend should be handled above via label symbols. 
+ ldr.Errorf(s, "unexpected non-zero addend: %s+%d", ldr.SymName(rs), xadd) + } + v |= 1 << 24 // pc-relative bit + v |= ld.MACHO_ARM64_RELOC_BRANCH26 << 28 + case objabi.R_ADDRARM64, + objabi.R_ARM64_PCREL_LDST8, + objabi.R_ARM64_PCREL_LDST16, + objabi.R_ARM64_PCREL_LDST32, + objabi.R_ARM64_PCREL_LDST64: + siz = 4 + // Two relocation entries: MACHO_ARM64_RELOC_PAGEOFF12 MACHO_ARM64_RELOC_PAGE21 + // if r.Xadd is non-zero, add two MACHO_ARM64_RELOC_ADDEND. + if r.Xadd != 0 { + out.Write32(uint32(sectoff + 4)) + out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(xadd&0xffffff)) + } + out.Write32(uint32(sectoff + 4)) + out.Write32(v | (ld.MACHO_ARM64_RELOC_PAGEOFF12 << 28) | (2 << 25)) + if r.Xadd != 0 { + out.Write32(uint32(sectoff)) + out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(xadd&0xffffff)) + } + v |= 1 << 24 // pc-relative bit + v |= ld.MACHO_ARM64_RELOC_PAGE21 << 28 + case objabi.R_ARM64_GOTPCREL: + siz = 4 + // Two relocation entries: MACHO_ARM64_RELOC_GOT_LOAD_PAGEOFF12 MACHO_ARM64_RELOC_GOT_LOAD_PAGE21 + // if r.Xadd is non-zero, add two MACHO_ARM64_RELOC_ADDEND. 
+ if r.Xadd != 0 { + out.Write32(uint32(sectoff + 4)) + out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(xadd&0xffffff)) + } + out.Write32(uint32(sectoff + 4)) + out.Write32(v | (ld.MACHO_ARM64_RELOC_GOT_LOAD_PAGEOFF12 << 28) | (2 << 25)) + if r.Xadd != 0 { + out.Write32(uint32(sectoff)) + out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(xadd&0xffffff)) + } + v |= 1 << 24 // pc-relative bit + v |= ld.MACHO_ARM64_RELOC_GOT_LOAD_PAGE21 << 28 + } + + switch siz { + default: + return false + case 1: + v |= 0 << 25 + case 2: + v |= 1 << 25 + case 4: + v |= 2 << 25 + case 8: + v |= 3 << 25 + } + + out.Write32(uint32(sectoff)) + out.Write32(v) + return true +} + +func pereloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, sectoff int64) bool { + rs := r.Xsym + rt := r.Type + + if (rt == objabi.R_ADDRARM64 || rt == objabi.R_ARM64_PCREL_LDST8 || rt == objabi.R_ARM64_PCREL_LDST16 || + rt == objabi.R_ARM64_PCREL_LDST32 || rt == objabi.R_ARM64_PCREL_LDST64) && r.Xadd != signext21(r.Xadd) { + // If the relocation target would overflow the addend, then target + // a linker-manufactured label symbol with a smaller addend instead. 
+ label := ldr.Lookup(offsetLabelName(ldr, rs, r.Xadd/peRelocLimit*peRelocLimit), ldr.SymVersion(rs)) + if label == 0 { + ldr.Errorf(s, "invalid relocation: %v %s+0x%x", rt, ldr.SymName(rs), r.Xadd) + return false + } + rs = label + } + if rt == objabi.R_CALLARM64 && r.Xadd != 0 { + label := ldr.Lookup(offsetLabelName(ldr, rs, r.Xadd), ldr.SymVersion(rs)) + if label == 0 { + ldr.Errorf(s, "invalid relocation: %v %s+0x%x", rt, ldr.SymName(rs), r.Xadd) + return false + } + rs = label + } + symdynid := ldr.SymDynid(rs) + if symdynid < 0 { + ldr.Errorf(s, "reloc %d (%s) to non-coff symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs)) + return false + } + + switch rt { + default: + return false + + case objabi.R_DWARFSECREF: + out.Write32(uint32(sectoff)) + out.Write32(uint32(symdynid)) + out.Write16(ld.IMAGE_REL_ARM64_SECREL) + + case objabi.R_ADDR: + out.Write32(uint32(sectoff)) + out.Write32(uint32(symdynid)) + if r.Size == 8 { + out.Write16(ld.IMAGE_REL_ARM64_ADDR64) + } else { + out.Write16(ld.IMAGE_REL_ARM64_ADDR32) + } + + case objabi.R_PEIMAGEOFF: + out.Write16(ld.IMAGE_REL_ARM64_ADDR32NB) + + case objabi.R_ADDRARM64: + // Note: r.Xadd has been taken care of below, in archreloc. + out.Write32(uint32(sectoff)) + out.Write32(uint32(symdynid)) + out.Write16(ld.IMAGE_REL_ARM64_PAGEBASE_REL21) + + out.Write32(uint32(sectoff + 4)) + out.Write32(uint32(symdynid)) + out.Write16(ld.IMAGE_REL_ARM64_PAGEOFFSET_12A) + + case objabi.R_ARM64_PCREL_LDST8, + objabi.R_ARM64_PCREL_LDST16, + objabi.R_ARM64_PCREL_LDST32, + objabi.R_ARM64_PCREL_LDST64: + // Note: r.Xadd has been taken care of below, in archreloc. 
+ out.Write32(uint32(sectoff)) + out.Write32(uint32(symdynid)) + out.Write16(ld.IMAGE_REL_ARM64_PAGEBASE_REL21) + + out.Write32(uint32(sectoff + 4)) + out.Write32(uint32(symdynid)) + out.Write16(ld.IMAGE_REL_ARM64_PAGEOFFSET_12L) + + case objabi.R_CALLARM64: + // Note: r.Xadd has been taken care of above, by using a label pointing into the middle of the function. + out.Write32(uint32(sectoff)) + out.Write32(uint32(symdynid)) + out.Write16(ld.IMAGE_REL_ARM64_BRANCH26) + } + + return true +} + +func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) (int64, int, bool) { + const noExtReloc = 0 + const isOk = true + + rs := r.Sym() + + if target.IsExternal() { + nExtReloc := 0 + switch rt := r.Type(); rt { + default: + case objabi.R_ARM64_GOTPCREL, + objabi.R_ARM64_PCREL_LDST8, + objabi.R_ARM64_PCREL_LDST16, + objabi.R_ARM64_PCREL_LDST32, + objabi.R_ARM64_PCREL_LDST64, + objabi.R_ADDRARM64: + + // set up addend for eventual relocation via outer symbol. + rs, off := ld.FoldSubSymbolOffset(ldr, rs) + xadd := r.Add() + off + rst := ldr.SymType(rs) + if rst != sym.SHOSTOBJ && rst != sym.SDYNIMPORT && ldr.SymSect(rs) == nil { + ldr.Errorf(s, "missing section for %s", ldr.SymName(rs)) + } + + nExtReloc = 2 // need two ELF/Mach-O relocations. see elfreloc1/machoreloc1 + if target.IsDarwin() && xadd != 0 { + nExtReloc = 4 // need another two relocations for non-zero addend + } + + if target.IsWindows() { + var o0, o1 uint32 + if target.IsBigEndian() { + o0 = uint32(val >> 32) + o1 = uint32(val) + } else { + o0 = uint32(val) + o1 = uint32(val >> 32) + } + + // The first instruction (ADRP) has a 21-bit immediate field, + // and the second (ADD or LD/ST) has a 12-bit immediate field. + // The first instruction is only for high bits, but to get the carry bits right we have + // to put the full addend, including the bottom 12 bits again. + // That limits the distance of any addend to only 21 bits. 
+ // But we assume that ADRP's top bit will be interpreted as a sign bit, + // so we only use 20 bits. + // pereloc takes care of introducing new symbol labels + // every megabyte for longer relocations. + xadd := uint32(xadd) + o0 |= (xadd&3)<<29 | (xadd&0xffffc)<<3 + switch rt { + case objabi.R_ARM64_PCREL_LDST8, objabi.R_ADDRARM64: + o1 |= (xadd & 0xfff) << 10 + case objabi.R_ARM64_PCREL_LDST16: + if xadd&0x1 != 0 { + ldr.Errorf(s, "offset for 16-bit load/store has unaligned value %d", xadd&0xfff) + } + o1 |= ((xadd & 0xfff) >> 1) << 10 + case objabi.R_ARM64_PCREL_LDST32: + if xadd&0x3 != 0 { + ldr.Errorf(s, "offset for 32-bit load/store has unaligned value %d", xadd&0xfff) + } + o1 |= ((xadd & 0xfff) >> 2) << 10 + case objabi.R_ARM64_PCREL_LDST64: + if xadd&0x7 != 0 { + ldr.Errorf(s, "offset for 64-bit load/store has unaligned value %d", xadd&0xfff) + } + o1 |= ((xadd & 0xfff) >> 3) << 10 + } + + if target.IsBigEndian() { + val = int64(o0)<<32 | int64(o1) + } else { + val = int64(o1)<<32 | int64(o0) + } + } + + return val, nExtReloc, isOk + + case objabi.R_CALLARM64: + nExtReloc = 1 + return val, nExtReloc, isOk + + case objabi.R_ARM64_TLS_LE: + nExtReloc = 1 + return val, nExtReloc, isOk + + case objabi.R_ARM64_TLS_IE: + nExtReloc = 2 // need two ELF relocations. 
see elfreloc1 + return val, nExtReloc, isOk + + case objabi.R_ADDR: + if target.IsWindows() && r.Add() != 0 { + if r.Siz() == 8 { + val = r.Add() + } else if target.IsBigEndian() { + val = int64(uint32(val)) | int64(r.Add())<<32 + } else { + val = val>>32<<32 | int64(uint32(r.Add())) + } + return val, 1, true + } + } + } + + switch rt := r.Type(); rt { + case objabi.R_ADDRARM64, + objabi.R_ARM64_PCREL_LDST8, + objabi.R_ARM64_PCREL_LDST16, + objabi.R_ARM64_PCREL_LDST32, + objabi.R_ARM64_PCREL_LDST64: + t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff) + if t >= 1<<32 || t < -1<<32 { + ldr.Errorf(s, "program too large, address relocation distance = %d", t) + } + + var o0, o1 uint32 + + if target.IsBigEndian() { + o0 = uint32(val >> 32) + o1 = uint32(val) + } else { + o0 = uint32(val) + o1 = uint32(val >> 32) + } + + o0 |= (uint32((t>>12)&3) << 29) | (uint32((t>>12>>2)&0x7ffff) << 5) + switch rt { + case objabi.R_ARM64_PCREL_LDST8, objabi.R_ADDRARM64: + o1 |= uint32(t&0xfff) << 10 + case objabi.R_ARM64_PCREL_LDST16: + if t&0x1 != 0 { + ldr.Errorf(s, "offset for 16-bit load/store has unaligned value %d", t&0xfff) + } + o1 |= (uint32(t&0xfff) >> 1) << 10 + case objabi.R_ARM64_PCREL_LDST32: + if t&0x3 != 0 { + ldr.Errorf(s, "offset for 32-bit load/store has unaligned value %d", t&0xfff) + } + o1 |= (uint32(t&0xfff) >> 2) << 10 + case objabi.R_ARM64_PCREL_LDST64: + if t&0x7 != 0 { + ldr.Errorf(s, "offset for 64-bit load/store has unaligned value %d", t&0xfff) + } + o1 |= (uint32(t&0xfff) >> 3) << 10 + } + + // when laid out, the instruction order must always be o1, o2. + if target.IsBigEndian() { + return int64(o0)<<32 | int64(o1), noExtReloc, true + } + return int64(o1)<<32 | int64(o0), noExtReloc, true + + case objabi.R_ARM64_TLS_LE: + if target.IsDarwin() { + ldr.Errorf(s, "TLS reloc on unsupported OS %v", target.HeadType) + } + // The TCB is two pointers. This is not documented anywhere, but is + // de facto part of the ABI. 
+ v := ldr.SymValue(rs) + int64(2*target.Arch.PtrSize) + if v < 0 || v >= 32678 { + ldr.Errorf(s, "TLS offset out of range %d", v) + } + return val | (v << 5), noExtReloc, true + + case objabi.R_ARM64_TLS_IE: + if target.IsPIE() && target.IsElf() { + // We are linking the final executable, so we + // can optimize any TLS IE relocation to LE. + + if !target.IsLinux() { + ldr.Errorf(s, "TLS reloc on unsupported OS %v", target.HeadType) + } + + // The TCB is two pointers. This is not documented anywhere, but is + // de facto part of the ABI. + v := ldr.SymAddr(rs) + int64(2*target.Arch.PtrSize) + r.Add() + if v < 0 || v >= 32678 { + ldr.Errorf(s, "TLS offset out of range %d", v) + } + + var o0, o1 uint32 + if target.IsBigEndian() { + o0 = uint32(val >> 32) + o1 = uint32(val) + } else { + o0 = uint32(val) + o1 = uint32(val >> 32) + } + + // R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 + // turn ADRP to MOVZ + o0 = 0xd2a00000 | uint32(o0&0x1f) | (uint32((v>>16)&0xffff) << 5) + // R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC + // turn LD64 to MOVK + if v&3 != 0 { + ldr.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", v) + } + o1 = 0xf2800000 | uint32(o1&0x1f) | (uint32(v&0xffff) << 5) + + // when laid out, the instruction order must always be o0, o1. 
+ if target.IsBigEndian() { + return int64(o0)<<32 | int64(o1), noExtReloc, isOk + } + return int64(o1)<<32 | int64(o0), noExtReloc, isOk + } else { + log.Fatalf("cannot handle R_ARM64_TLS_IE (sym %s) when linking internally", ldr.SymName(s)) + } + + case objabi.R_CALLARM64: + var t int64 + if ldr.SymType(rs) == sym.SDYNIMPORT { + t = (ldr.SymAddr(syms.PLT) + r.Add()) - (ldr.SymValue(s) + int64(r.Off())) + } else { + t = (ldr.SymAddr(rs) + r.Add()) - (ldr.SymValue(s) + int64(r.Off())) + } + if t >= 1<<27 || t < -1<<27 { + ldr.Errorf(s, "program too large, call relocation distance = %d", t) + } + return val | ((t >> 2) & 0x03ffffff), noExtReloc, true + + case objabi.R_ARM64_GOT: + if (val>>24)&0x9f == 0x90 { + // R_AARCH64_ADR_GOT_PAGE + // patch instruction: adrp + t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff) + if t >= 1<<32 || t < -1<<32 { + ldr.Errorf(s, "program too large, address relocation distance = %d", t) + } + var o0 uint32 + o0 |= (uint32((t>>12)&3) << 29) | (uint32((t>>12>>2)&0x7ffff) << 5) + return val | int64(o0), noExtReloc, isOk + } else if val>>24 == 0xf9 { + // R_AARCH64_LD64_GOT_LO12_NC + // patch instruction: ldr + t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff) + if t&7 != 0 { + ldr.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_LD64_GOT_LO12_NC", t) + } + var o1 uint32 + o1 |= uint32(t&0xfff) << (10 - 3) + return val | int64(uint64(o1)), noExtReloc, isOk + } else { + ldr.Errorf(s, "unsupported instruction for %x R_GOTARM64", val) + } + + case objabi.R_ARM64_PCREL: + if (val>>24)&0x9f == 0x90 { + // R_AARCH64_ADR_PREL_PG_HI21 + // patch instruction: adrp + t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff) + if t >= 1<<32 || t < -1<<32 { + ldr.Errorf(s, "program too large, address relocation distance = %d", t) + } + o0 := (uint32((t>>12)&3) << 29) | (uint32((t>>12>>2)&0x7ffff) << 5) + return val | int64(o0), noExtReloc, isOk + } else if 
 (val>>24)&0x9f == 0x91 {
+ // ELF R_AARCH64_ADD_ABS_LO12_NC or Mach-O ARM64_RELOC_PAGEOFF12
+ // patch instruction: add
+ t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
+ o1 := uint32(t&0xfff) << 10
+ return val | int64(o1), noExtReloc, isOk
+ } else if (val>>24)&0x3b == 0x39 {
+ // Mach-O ARM64_RELOC_PAGEOFF12
+ // patch ldr/str(b/h/w/d/q) (integer or vector) instructions, which have different scaling factors.
+ // Mach-O uses same relocation type for them.
+ shift := uint32(val) >> 30
+ if shift == 0 && (val>>20)&0x048 == 0x048 { // 128-bit vector load
+ shift = 4
+ }
+ t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
+ if t&(1<<shift-1) != 0 {
+ ldr.Errorf(s, "invalid address: %x for relocation scale: %d", t, shift)
+ }
+ o1 := (uint32(t&0xfff) >> shift) << 10
+ return val | int64(o1), noExtReloc, isOk
+ } else {
+ ldr.Errorf(s, "unsupported instruction for %x R_ARM64_PCREL", val)
+ }
+
+ case objabi.R_ARM64_LDST8:
+ t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
+ o0 := uint32(t&0xfff) << 10
+ return val | int64(o0), noExtReloc, true
+
+ case objabi.R_ARM64_LDST16:
+ t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
+ if t&1 != 0 {
+ ldr.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_LDST16_ABS_LO12_NC", t)
+ }
+ o0 := (uint32(t&0xfff) >> 1) << 10
+ return val | int64(o0), noExtReloc, true
+
+ case objabi.R_ARM64_LDST32:
+ t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
+ if t&3 != 0 {
+ ldr.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_LDST32_ABS_LO12_NC", t)
+ }
+ o0 := (uint32(t&0xfff) >> 2) << 10
+ return val | int64(o0), noExtReloc, true
+
+ case objabi.R_ARM64_LDST64:
+ t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
+ if t&7 != 0 {
+ ldr.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_LDST64_ABS_LO12_NC", t)
+ }
+ o0 := (uint32(t&0xfff) >> 3) << 10
+ return val | int64(o0), noExtReloc, true
+
+ case objabi.R_ARM64_LDST128:
+ t := ldr.SymAddr(rs) + 
r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff) + if t&15 != 0 { + ldr.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_LDST128_ABS_LO12_NC", t) + } + o0 := (uint32(t&0xfff) >> 4) << 10 + return val | int64(o0), noExtReloc, true + } + + return val, 0, false +} + +func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { + log.Fatalf("unexpected relocation variant") + return -1 +} + +func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (loader.ExtReloc, bool) { + switch rt := r.Type(); rt { + case objabi.R_ARM64_GOTPCREL, + objabi.R_ARM64_PCREL_LDST8, + objabi.R_ARM64_PCREL_LDST16, + objabi.R_ARM64_PCREL_LDST32, + objabi.R_ARM64_PCREL_LDST64, + objabi.R_ADDRARM64: + rr := ld.ExtrelocViaOuterSym(ldr, r, s) + return rr, true + case objabi.R_CALLARM64, + objabi.R_ARM64_TLS_LE, + objabi.R_ARM64_TLS_IE: + return ld.ExtrelocSimple(ldr, r), true + } + return loader.ExtReloc{}, false +} + +func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) { + if plt.Size() == 0 { + // stp x16, x30, [sp, #-16]! 
+ // identifying information
+ plt.AddUint32(ctxt.Arch, 0xa9bf7bf0)
+
+ // the following two instructions (adrp + ldr) load *got[2] into x17
+ // adrp x16, &got[0]
+ plt.AddSymRef(ctxt.Arch, gotplt.Sym(), 16, objabi.R_ARM64_GOT, 4)
+ plt.SetUint32(ctxt.Arch, plt.Size()-4, 0x90000010)
+
+ // <imm> is the offset value of &got[2] to &got[0], the same below
+ // ldr x17, [x16, <imm>]
+ plt.AddSymRef(ctxt.Arch, gotplt.Sym(), 16, objabi.R_ARM64_GOT, 4)
+ plt.SetUint32(ctxt.Arch, plt.Size()-4, 0xf9400211)
+
+ // add x16, x16, <imm>
+ plt.AddSymRef(ctxt.Arch, gotplt.Sym(), 16, objabi.R_ARM64_PCREL, 4)
+ plt.SetUint32(ctxt.Arch, plt.Size()-4, 0x91000210)
+
+ // br x17
+ plt.AddUint32(ctxt.Arch, 0xd61f0220)
+
+ // 3 nop for place holder
+ plt.AddUint32(ctxt.Arch, 0xd503201f)
+ plt.AddUint32(ctxt.Arch, 0xd503201f)
+ plt.AddUint32(ctxt.Arch, 0xd503201f)
+
+ // check gotplt.size == 0
+ if gotplt.Size() != 0 {
+ ctxt.Errorf(gotplt.Sym(), "got.plt is not empty at the very beginning")
+ }
+ gotplt.AddAddrPlus(ctxt.Arch, dynamic, 0)
+
+ gotplt.AddUint64(ctxt.Arch, 0)
+ gotplt.AddUint64(ctxt.Arch, 0)
+ }
+}
+
+func addpltsym(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym) {
+ if ldr.SymPlt(s) >= 0 {
+ return
+ }
+
+ ld.Adddynsym(ldr, target, syms, s)
+
+ if target.IsElf() {
+ plt := ldr.MakeSymbolUpdater(syms.PLT)
+ gotplt := ldr.MakeSymbolUpdater(syms.GOTPLT)
+ rela := ldr.MakeSymbolUpdater(syms.RelaPLT)
+ if plt.Size() == 0 {
+ panic("plt is not set up")
+ }
+
+ // adrp x16, &got.plt[0]
+ plt.AddAddrPlus4(target.Arch, gotplt.Sym(), gotplt.Size())
+ plt.SetUint32(target.Arch, plt.Size()-4, 0x90000010)
+ relocs := plt.Relocs()
+ plt.SetRelocType(relocs.Count()-1, objabi.R_ARM64_GOT)
+
+ // <offset> is the offset value of &got.plt[n] to &got.plt[0]
+ // ldr x17, [x16, <offset>]
+ plt.AddAddrPlus4(target.Arch, gotplt.Sym(), gotplt.Size())
+ plt.SetUint32(target.Arch, plt.Size()-4, 0xf9400211)
+ relocs = plt.Relocs()
+ plt.SetRelocType(relocs.Count()-1, objabi.R_ARM64_GOT)
+
+ // add x16, x16, <offset>
+ 
plt.AddAddrPlus4(target.Arch, gotplt.Sym(), gotplt.Size())
+ plt.SetUint32(target.Arch, plt.Size()-4, 0x91000210)
+ relocs = plt.Relocs()
+ plt.SetRelocType(relocs.Count()-1, objabi.R_ARM64_PCREL)
+
+ // br x17
+ plt.AddUint32(target.Arch, 0xd61f0220)
+
+ // add to got.plt: pointer to plt[0]
+ gotplt.AddAddrPlus(target.Arch, plt.Sym(), 0)
+
+ // rela
+ rela.AddAddrPlus(target.Arch, gotplt.Sym(), gotplt.Size()-8)
+ sDynid := ldr.SymDynid(s)
+
+ rela.AddUint64(target.Arch, elf.R_INFO(uint32(sDynid), uint32(elf.R_AARCH64_JUMP_SLOT)))
+ rela.AddUint64(target.Arch, 0)
+
+ ldr.SetPlt(s, int32(plt.Size()-16))
+ } else if target.IsDarwin() {
+ ld.AddGotSym(target, ldr, syms, s, 0)
+
+ sDynid := ldr.SymDynid(s)
+ lep := ldr.MakeSymbolUpdater(syms.LinkEditPLT)
+ lep.AddUint32(target.Arch, uint32(sDynid))
+
+ plt := ldr.MakeSymbolUpdater(syms.PLT)
+ ldr.SetPlt(s, int32(plt.Size()))
+
+ // adrp x16, GOT
+ plt.AddUint32(target.Arch, 0x90000010)
+ r, _ := plt.AddRel(objabi.R_ARM64_GOT)
+ r.SetOff(int32(plt.Size() - 4))
+ r.SetSiz(4)
+ r.SetSym(syms.GOT)
+ r.SetAdd(int64(ldr.SymGot(s)))
+
+ // ldr x17, [x16, <offset>]
+ plt.AddUint32(target.Arch, 0xf9400211)
+ r, _ = plt.AddRel(objabi.R_ARM64_GOT)
+ r.SetOff(int32(plt.Size() - 4))
+ r.SetSiz(4)
+ r.SetSym(syms.GOT)
+ r.SetAdd(int64(ldr.SymGot(s)))
+
+ // br x17
+ plt.AddUint32(target.Arch, 0xd61f0220)
+ } else {
+ ldr.Errorf(s, "addpltsym: unsupported binary format")
+ }
+}
+
+const (
+ machoRelocLimit = 1 << 23
+ peRelocLimit = 1 << 20
+)
+
+func gensymlate(ctxt *ld.Link, ldr *loader.Loader) {
+ // When external linking on darwin, Mach-O relocation has only signed 24-bit
+ // addend. For large symbols, we generate "label" symbols in the middle, so
+ // that relocations can target them with smaller addends.
+ // On Windows, we only get 21 bits, again (presumably) signed. 
+ // Also, on Windows (always) and Darwin (for very large binaries), the external + // linker does't support CALL relocations with addend, so we generate "label" + // symbols for functions of which we can target the middle (Duff's devices). + if !ctxt.IsDarwin() && !ctxt.IsWindows() || !ctxt.IsExternal() { + return + } + + limit := int64(machoRelocLimit) + if ctxt.IsWindows() { + limit = peRelocLimit + } + + // addLabelSyms adds "label" symbols at s+limit, s+2*limit, etc. + addLabelSyms := func(s loader.Sym, limit, sz int64) { + v := ldr.SymValue(s) + for off := limit; off < sz; off += limit { + p := ldr.LookupOrCreateSym(offsetLabelName(ldr, s, off), ldr.SymVersion(s)) + ldr.SetAttrReachable(p, true) + ldr.SetSymValue(p, v+off) + ldr.SetSymSect(p, ldr.SymSect(s)) + if ctxt.IsDarwin() { + ld.AddMachoSym(ldr, p) + } else if ctxt.IsWindows() { + ld.AddPELabelSym(ldr, p) + } else { + panic("missing case in gensymlate") + } + // fmt.Printf("gensymlate %s %x\n", ldr.SymName(p), ldr.SymValue(p)) + } + } + + // Generate symbol names for every offset we need in duffcopy/duffzero (only 64 each). + if s := ldr.Lookup("runtime.duffcopy", sym.SymVerABIInternal); s != 0 && ldr.AttrReachable(s) { + addLabelSyms(s, 8, 8*64) + } + if s := ldr.Lookup("runtime.duffzero", sym.SymVerABIInternal); s != 0 && ldr.AttrReachable(s) { + addLabelSyms(s, 4, 4*64) + } + + if ctxt.IsDarwin() { + big := false + for _, seg := range ld.Segments { + if seg.Length >= machoRelocLimit { + big = true + break + } + } + if !big { + return // skip work if nothing big + } + } + + for s, n := loader.Sym(1), loader.Sym(ldr.NSym()); s < n; s++ { + if !ldr.AttrReachable(s) { + continue + } + t := ldr.SymType(s) + if t == sym.STEXT { + // Except for Duff's devices (handled above), we don't + // target the middle of a function. 
+ continue + } + if t >= sym.SDWARFSECT { + continue // no need to add label for DWARF symbols + } + sz := ldr.SymSize(s) + if sz <= limit { + continue + } + addLabelSyms(s, limit, sz) + } + + // Also for carrier symbols (for which SymSize is 0) + for _, ss := range ld.CarrierSymByType { + if ss.Sym != 0 && ss.Size > limit { + addLabelSyms(ss.Sym, limit, ss.Size) + } + } +} + +// offsetLabelName returns the name of the "label" symbol used for a +// relocation targeting s+off. The label symbols is used on Darwin/Windows +// when external linking, so that the addend fits in a Mach-O/PE relocation. +func offsetLabelName(ldr *loader.Loader, s loader.Sym, off int64) string { + if off>>20<<20 == off { + return fmt.Sprintf("%s+%dMB", ldr.SymExtname(s), off>>20) + } + return fmt.Sprintf("%s+%d", ldr.SymExtname(s), off) +} + +// Convert the direct jump relocation r to refer to a trampoline if the target is too far. +func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) { + relocs := ldr.Relocs(s) + r := relocs.At(ri) + const pcrel = 1 + switch r.Type() { + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_CALL26), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_JUMP26), + objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_BRANCH26*2 + pcrel: + // Host object relocations that will be turned into a PLT call. + // The PLT may be too far. Insert a trampoline for them. + fallthrough + case objabi.R_CALLARM64: + var t int64 + // ldr.SymValue(rs) == 0 indicates a cross-package jump to a function that is not yet + // laid out. Conservatively use a trampoline. This should be rare, as we lay out packages + // in dependency order. + if ldr.SymValue(rs) != 0 { + t = ldr.SymValue(rs) + r.Add() - (ldr.SymValue(s) + int64(r.Off())) + } + if t >= 1<<27 || t < -1<<27 || ldr.SymValue(rs) == 0 || (*ld.FlagDebugTramp > 1 && (ldr.SymPkg(s) == "" || ldr.SymPkg(s) != ldr.SymPkg(rs))) { + // direct call too far, need to insert trampoline. 
+ // look up existing trampolines first. if we found one within the range + // of direct call, we can reuse it. otherwise create a new one. + var tramp loader.Sym + for i := 0; ; i++ { + oName := ldr.SymName(rs) + name := oName + fmt.Sprintf("%+x-tramp%d", r.Add(), i) + tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs))) + ldr.SetAttrReachable(tramp, true) + if ldr.SymType(tramp) == sym.SDYNIMPORT { + // don't reuse trampoline defined in other module + continue + } + if oName == "runtime.deferreturn" { + ldr.SetIsDeferReturnTramp(tramp, true) + } + if ldr.SymValue(tramp) == 0 { + // either the trampoline does not exist -- we need to create one, + // or found one the address which is not assigned -- this will be + // laid down immediately after the current function. use this one. + break + } + + t = ldr.SymValue(tramp) - (ldr.SymValue(s) + int64(r.Off())) + if t >= -1<<27 && t < 1<<27 { + // found an existing trampoline that is not too far + // we can just use it + break + } + } + if ldr.SymType(tramp) == 0 { + // trampoline does not exist, create one + trampb := ldr.MakeSymbolUpdater(tramp) + ctxt.AddTramp(trampb) + if ldr.SymType(rs) == sym.SDYNIMPORT { + if r.Add() != 0 { + ctxt.Errorf(s, "nonzero addend for DYNIMPORT call: %v+%d", ldr.SymName(rs), r.Add()) + } + gentrampgot(ctxt, ldr, trampb, rs) + } else { + gentramp(ctxt, ldr, trampb, rs, r.Add()) + } + } + // modify reloc to point to tramp, which will be resolved later + sb := ldr.MakeSymbolUpdater(s) + relocs := sb.Relocs() + r := relocs.At(ri) + r.SetSym(tramp) + r.SetAdd(0) // clear the offset embedded in the instruction + } + default: + ctxt.Errorf(s, "trampoline called with non-jump reloc: %d (%s)", r.Type(), sym.RelocName(ctxt.Arch, r.Type())) + } +} + +// generate a trampoline to target+offset. 
+func gentramp(ctxt *ld.Link, ldr *loader.Loader, tramp *loader.SymbolBuilder, target loader.Sym, offset int64) { + tramp.SetSize(12) // 3 instructions + P := make([]byte, tramp.Size()) + o1 := uint32(0x90000010) // adrp x16, target + o2 := uint32(0x91000210) // add x16, pc-relative-offset + o3 := uint32(0xd61f0200) // br x16 + ctxt.Arch.ByteOrder.PutUint32(P, o1) + ctxt.Arch.ByteOrder.PutUint32(P[4:], o2) + ctxt.Arch.ByteOrder.PutUint32(P[8:], o3) + tramp.SetData(P) + + r, _ := tramp.AddRel(objabi.R_ADDRARM64) + r.SetSiz(8) + r.SetSym(target) + r.SetAdd(offset) +} + +// generate a trampoline to target+offset for a DYNIMPORT symbol via GOT. +func gentrampgot(ctxt *ld.Link, ldr *loader.Loader, tramp *loader.SymbolBuilder, target loader.Sym) { + tramp.SetSize(12) // 3 instructions + P := make([]byte, tramp.Size()) + o1 := uint32(0x90000010) // adrp x16, target@GOT + o2 := uint32(0xf9400210) // ldr x16, [x16, offset] + o3 := uint32(0xd61f0200) // br x16 + ctxt.Arch.ByteOrder.PutUint32(P, o1) + ctxt.Arch.ByteOrder.PutUint32(P[4:], o2) + ctxt.Arch.ByteOrder.PutUint32(P[8:], o3) + tramp.SetData(P) + + r, _ := tramp.AddRel(objabi.R_ARM64_GOTPCREL) + r.SetSiz(8) + r.SetSym(target) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/arm64/l.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/arm64/l.go new file mode 100644 index 0000000000000000000000000000000000000000..4aa2708df694127f2bf241e1746a39aa94ab9054 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/arm64/l.go @@ -0,0 +1,74 @@ +// Inferno utils/5l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package arm64 + +// Writing object files. + +// cmd/9l/l.h from Vita Nuova. +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +const ( + maxAlign = 32 // max data alignment + minAlign = 1 // min data alignment + funcAlign = 16 +) + +/* Used by ../internal/ld/dwarf.go */ +const ( + dwarfRegSP = 31 + dwarfRegLR = 30 +) diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/arm64/obj.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/arm64/obj.go new file mode 100644 index 0000000000000000000000000000000000000000..3d358155badbca1e07d63b55dd4f7c790c48ace1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/arm64/obj.go @@ -0,0 +1,122 @@ +// Inferno utils/5l/obj.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/obj.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package arm64 + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" +) + +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchARM64 + + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, + TrampLimit: 0x7c00000, // 26-bit signed offset * 4, leave room for PLT etc. + + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Extreloc: extreloc, + Gentext: gentext, + GenSymsLate: gensymlate, + Machoreloc1: machoreloc1, + MachorelocSize: 8, + PEreloc1: pereloc1, + Trampoline: trampoline, + + ELF: ld.ELFArch{ + Androiddynld: "/system/bin/linker64", + Linuxdynld: "/lib/ld-linux-aarch64.so.1", + LinuxdynldMusl: "/lib/ld-musl-aarch64.so.1", + + Freebsddynld: "/usr/libexec/ld-elf.so.1", + Openbsddynld: "/usr/libexec/ld.so", + Netbsddynld: "/libexec/ld.elf_so", + Dragonflydynld: "XXX", + Solarisdynld: "XXX", + + Reloc1: elfreloc1, + RelocSize: 24, + SetupPLT: elfsetupplt, + }, + } + + return arch, theArch +} + +func archinit(ctxt *ld.Link) { + switch ctxt.HeadType { + default: + ld.Exitf("unknown -H option: %v", ctxt.HeadType) + + case objabi.Hplan9: /* plan 9 */ + ld.HEADR = 32 + if *ld.FlagRound == -1 { + *ld.FlagRound = 4096 + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = ld.Rnd(4096, *ld.FlagRound) + int64(ld.HEADR) + } + + case objabi.Hlinux, /* arm64 elf */ + objabi.Hfreebsd, + objabi.Hnetbsd, + 
objabi.Hopenbsd: + ld.Elfinit(ctxt) + ld.HEADR = ld.ELFRESERVE + if *ld.FlagRound == -1 { + *ld.FlagRound = 0x10000 + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR) + } + + case objabi.Hdarwin: /* apple MACH */ + ld.HEADR = ld.INITIAL_MACHO_HEADR + if *ld.FlagRound == -1 { + *ld.FlagRound = 16384 // 16K page alignment + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = ld.Rnd(1<<32, *ld.FlagRound) + int64(ld.HEADR) + } + + case objabi.Hwindows: /* PE executable */ + // ld.HEADR, ld.FlagTextAddr, ld.FlagRound are set in ld.Peinit + return + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/benchmark/bench.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/benchmark/bench.go new file mode 100644 index 0000000000000000000000000000000000000000..96fa04ae5b164c9e03c6dcf934d18e7f16367161 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/benchmark/bench.go @@ -0,0 +1,195 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package benchmark provides a Metrics object that enables memory and CPU +// profiling for the linker. The Metrics objects can be used to mark stages +// of the code, and name the measurements during that stage. There is also +// optional GCs that can be performed at the end of each stage, so you +// can get an accurate measurement of how each stage changes live memory. +package benchmark + +import ( + "fmt" + "io" + "os" + "runtime" + "runtime/pprof" + "time" + "unicode" +) + +type Flags int + +const ( + GC = 1 << iota + NoGC Flags = 0 +) + +type Metrics struct { + gc Flags + marks []*mark + curMark *mark + filebase string + pprofFile *os.File +} + +type mark struct { + name string + startM, endM, gcM runtime.MemStats + startT, endT time.Time +} + +// New creates a new Metrics object. 
+// +// Typical usage should look like: +// +// func main() { +// filename := "" // Set to enable per-phase pprof file output. +// bench := benchmark.New(benchmark.GC, filename) +// defer bench.Report(os.Stdout) +// // etc +// bench.Start("foo") +// foo() +// bench.Start("bar") +// bar() +// } +// +// Note that a nil Metrics object won't cause any errors, so one could write +// code like: +// +// func main() { +// enableBenchmarking := flag.Bool("enable", true, "enables benchmarking") +// flag.Parse() +// var bench *benchmark.Metrics +// if *enableBenchmarking { +// bench = benchmark.New(benchmark.GC) +// } +// bench.Start("foo") +// // etc. +// } +func New(gc Flags, filebase string) *Metrics { + if gc == GC { + runtime.GC() + } + return &Metrics{gc: gc, filebase: filebase} +} + +// Report reports the metrics. +// Closes the currently Start(ed) range, and writes the report to the given io.Writer. +func (m *Metrics) Report(w io.Writer) { + if m == nil { + return + } + + m.closeMark() + + gcString := "" + if m.gc == GC { + gcString = "_GC" + } + + var totTime time.Duration + for _, curMark := range m.marks { + dur := curMark.endT.Sub(curMark.startT) + totTime += dur + fmt.Fprintf(w, "%s 1 %d ns/op", makeBenchString(curMark.name+gcString), dur.Nanoseconds()) + fmt.Fprintf(w, "\t%d B/op", curMark.endM.TotalAlloc-curMark.startM.TotalAlloc) + fmt.Fprintf(w, "\t%d allocs/op", curMark.endM.Mallocs-curMark.startM.Mallocs) + if m.gc == GC { + fmt.Fprintf(w, "\t%d live-B", curMark.gcM.HeapAlloc) + } else { + fmt.Fprintf(w, "\t%d heap-B", curMark.endM.HeapAlloc) + } + fmt.Fprintf(w, "\n") + } + fmt.Fprintf(w, "%s 1 %d ns/op\n", makeBenchString("total time"+gcString), totTime.Nanoseconds()) +} + +// Start marks the beginning of a new measurement phase. +// Once a metric is started, it continues until either a Report is issued, or another Start is called. 
+func (m *Metrics) Start(name string) { + if m == nil { + return + } + m.closeMark() + m.curMark = &mark{name: name} + // Unlikely we need to a GC here, as one was likely just done in closeMark. + if m.shouldPProf() { + f, err := os.Create(makePProfFilename(m.filebase, name, "cpuprof")) + if err != nil { + panic(err) + } + m.pprofFile = f + if err = pprof.StartCPUProfile(m.pprofFile); err != nil { + panic(err) + } + } + runtime.ReadMemStats(&m.curMark.startM) + m.curMark.startT = time.Now() +} + +func (m *Metrics) closeMark() { + if m == nil || m.curMark == nil { + return + } + m.curMark.endT = time.Now() + if m.shouldPProf() { + pprof.StopCPUProfile() + m.pprofFile.Close() + m.pprofFile = nil + } + runtime.ReadMemStats(&m.curMark.endM) + if m.gc == GC { + runtime.GC() + runtime.ReadMemStats(&m.curMark.gcM) + if m.shouldPProf() { + // Collect a profile of the live heap. Do a + // second GC to force sweep completion so we + // get a complete snapshot of the live heap at + // the end of this phase. + runtime.GC() + f, err := os.Create(makePProfFilename(m.filebase, m.curMark.name, "memprof")) + if err != nil { + panic(err) + } + err = pprof.WriteHeapProfile(f) + if err != nil { + panic(err) + } + err = f.Close() + if err != nil { + panic(err) + } + } + } + m.marks = append(m.marks, m.curMark) + m.curMark = nil +} + +// shouldPProf returns true if we should be doing pprof runs. +func (m *Metrics) shouldPProf() bool { + return m != nil && len(m.filebase) > 0 +} + +// makeBenchString makes a benchmark string consumable by Go's benchmarking tools. 
+func makeBenchString(name string) string { + needCap := true + ret := []rune("Benchmark") + for _, r := range name { + if unicode.IsSpace(r) { + needCap = true + continue + } + if needCap { + r = unicode.ToUpper(r) + needCap = false + } + ret = append(ret, r) + } + return string(ret) +} + +func makePProfFilename(filebase, name, typ string) string { + return fmt.Sprintf("%s_%s.%s", filebase, makeBenchString(name), typ) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/benchmark/bench_test.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/benchmark/bench_test.go new file mode 100644 index 0000000000000000000000000000000000000000..419dc557240d3a9c2bbad58a4ce36e2deadb685c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/benchmark/bench_test.go @@ -0,0 +1,54 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package benchmark + +import ( + "testing" +) + +func TestMakeBenchString(t *testing.T) { + tests := []struct { + have, want string + }{ + {"foo", "BenchmarkFoo"}, + {" foo ", "BenchmarkFoo"}, + {"foo bar", "BenchmarkFooBar"}, + } + for i, test := range tests { + if v := makeBenchString(test.have); test.want != v { + t.Errorf("test[%d] makeBenchString(%q) == %q, want %q", i, test.have, v, test.want) + } + } +} + +func TestPProfFlag(t *testing.T) { + tests := []struct { + name string + want bool + }{ + {"", false}, + {"foo", true}, + } + for i, test := range tests { + b := New(GC, test.name) + if v := b.shouldPProf(); test.want != v { + t.Errorf("test[%d] shouldPProf() == %v, want %v", i, v, test.want) + } + } +} + +func TestPProfNames(t *testing.T) { + want := "foo_BenchmarkTest.cpuprof" + if v := makePProfFilename("foo", "test", "cpuprof"); v != want { + t.Errorf("makePProfFilename() == %q, want %q", v, want) + } +} + +// Ensure that public APIs work with a nil Metrics object. 
+func TestNilBenchmarkObject(t *testing.T) { + var b *Metrics + b.Start("TEST") + b.Report(nil) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/dwtest/dwtest.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/dwtest/dwtest.go new file mode 100644 index 0000000000000000000000000000000000000000..3fb02ee1db2fb893eb9920cb32663395bdcf5c1c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/dwtest/dwtest.go @@ -0,0 +1,197 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dwtest + +import ( + "debug/dwarf" + "errors" + "fmt" + "os" +) + +// Helper type for supporting queries on DIEs within a DWARF +// .debug_info section. Invoke the populate() method below passing in +// a dwarf.Reader, which will read in all DIEs and keep track of +// parent/child relationships. Queries can then be made to ask for +// DIEs by name or by offset. This will hopefully reduce boilerplate +// for future test writing. + +type Examiner struct { + dies []*dwarf.Entry + idxByOffset map[dwarf.Offset]int + kids map[int][]int + parent map[int]int + byname map[string][]int +} + +// Populate the Examiner using the DIEs read from rdr. 
+func (ex *Examiner) Populate(rdr *dwarf.Reader) error { + ex.idxByOffset = make(map[dwarf.Offset]int) + ex.kids = make(map[int][]int) + ex.parent = make(map[int]int) + ex.byname = make(map[string][]int) + var nesting []int + for entry, err := rdr.Next(); entry != nil; entry, err = rdr.Next() { + if err != nil { + return err + } + if entry.Tag == 0 { + // terminator + if len(nesting) == 0 { + return errors.New("nesting stack underflow") + } + nesting = nesting[:len(nesting)-1] + continue + } + idx := len(ex.dies) + ex.dies = append(ex.dies, entry) + if _, found := ex.idxByOffset[entry.Offset]; found { + return errors.New("DIE clash on offset") + } + ex.idxByOffset[entry.Offset] = idx + if name, ok := entry.Val(dwarf.AttrName).(string); ok { + ex.byname[name] = append(ex.byname[name], idx) + } + if len(nesting) > 0 { + parent := nesting[len(nesting)-1] + ex.kids[parent] = append(ex.kids[parent], idx) + ex.parent[idx] = parent + } + if entry.Children { + nesting = append(nesting, idx) + } + } + if len(nesting) > 0 { + return errors.New("unterminated child sequence") + } + return nil +} + +func (e *Examiner) DIEs() []*dwarf.Entry { + return e.dies +} + +func indent(ilevel int) { + for i := 0; i < ilevel; i++ { + fmt.Printf(" ") + } +} + +// For debugging new tests +func (ex *Examiner) DumpEntry(idx int, dumpKids bool, ilevel int) { + if idx >= len(ex.dies) { + fmt.Fprintf(os.Stderr, "DumpEntry: bad DIE %d: index out of range\n", idx) + return + } + entry := ex.dies[idx] + indent(ilevel) + fmt.Printf("0x%x: %v\n", idx, entry.Tag) + for _, f := range entry.Field { + indent(ilevel) + fmt.Printf("at=%v val=%v\n", f.Attr, f.Val) + } + if dumpKids { + ksl := ex.kids[idx] + for _, k := range ksl { + ex.DumpEntry(k, true, ilevel+2) + } + } +} + +// Given a DIE offset, return the previously read dwarf.Entry, or nil +func (ex *Examiner) EntryFromOffset(off dwarf.Offset) *dwarf.Entry { + if idx, found := ex.idxByOffset[off]; found && idx != -1 { + return ex.entryFromIdx(idx) + } 
+ return nil +} + +// Return the ID that Examiner uses to refer to the DIE at offset off +func (ex *Examiner) IdxFromOffset(off dwarf.Offset) int { + if idx, found := ex.idxByOffset[off]; found { + return idx + } + return -1 +} + +// Return the dwarf.Entry pointer for the DIE with id 'idx' +func (ex *Examiner) entryFromIdx(idx int) *dwarf.Entry { + if idx >= len(ex.dies) || idx < 0 { + return nil + } + return ex.dies[idx] +} + +// Returns a list of child entries for a die with ID 'idx' +func (ex *Examiner) Children(idx int) []*dwarf.Entry { + sl := ex.kids[idx] + ret := make([]*dwarf.Entry, len(sl)) + for i, k := range sl { + ret[i] = ex.entryFromIdx(k) + } + return ret +} + +// Returns parent DIE for DIE 'idx', or nil if the DIE is top level +func (ex *Examiner) Parent(idx int) *dwarf.Entry { + p, found := ex.parent[idx] + if !found { + return nil + } + return ex.entryFromIdx(p) +} + +// ParentCU returns the enclosing compilation unit DIE for the DIE +// with a given index, or nil if for some reason we can't establish a +// parent. +func (ex *Examiner) ParentCU(idx int) *dwarf.Entry { + for { + parentDie := ex.Parent(idx) + if parentDie == nil { + return nil + } + if parentDie.Tag == dwarf.TagCompileUnit { + return parentDie + } + idx = ex.IdxFromOffset(parentDie.Offset) + } +} + +// FileRef takes a given DIE by index and a numeric file reference +// (presumably from a decl_file or call_file attribute), looks up the +// reference in the .debug_line file table, and returns the proper +// string for it. We need to know which DIE is making the reference +// so as to find the right compilation unit. +func (ex *Examiner) FileRef(dw *dwarf.Data, dieIdx int, fileRef int64) (string, error) { + + // Find the parent compilation unit DIE for the specified DIE. + cuDie := ex.ParentCU(dieIdx) + if cuDie == nil { + return "", fmt.Errorf("no parent CU DIE for DIE with idx %d?", dieIdx) + } + // Construct a line reader and then use it to get the file string. 
+ lr, lrerr := dw.LineReader(cuDie) + if lrerr != nil { + return "", fmt.Errorf("d.LineReader: %v", lrerr) + } + files := lr.Files() + if fileRef < 0 || int(fileRef) > len(files)-1 { + return "", fmt.Errorf("Examiner.FileRef: malformed file reference %d", fileRef) + } + return files[fileRef].Name, nil +} + +// Return a list of all DIEs with name 'name'. When searching for DIEs +// by name, keep in mind that the returned results will include child +// DIEs such as params/variables. For example, asking for all DIEs named +// "p" for even a small program will give you 400-500 entries. +func (ex *Examiner) Named(name string) []*dwarf.Entry { + sl := ex.byname[name] + ret := make([]*dwarf.Entry, len(sl)) + for i, k := range sl { + ret[i] = ex.entryFromIdx(k) + } + return ret +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/ar.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/ar.go new file mode 100644 index 0000000000000000000000000000000000000000..73c1cd3a2ce804312253d11df10b9237e3821ee6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/ar.go @@ -0,0 +1,247 @@ +// Inferno utils/include/ar.h +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/include/ar.h +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ld
+
+import (
+	"cmd/internal/bio"
+	"cmd/link/internal/loader"
+	"cmd/link/internal/sym"
+	"encoding/binary"
+	"fmt"
+	"internal/buildcfg"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+const (
+	SARMAG  = 8
+	SAR_HDR = 16 + 44
+)
+
+const (
+	ARMAG = "!<arch>\n"
+)
+
+type ArHdr struct {
+	name string
+	date string
+	uid  string
+	gid  string
+	mode string
+	size string
+	fmag string
+}
+
+// pruneUndefsForWindows trims the list "undefs" of currently
+// outstanding unresolved symbols to remove references to DLL import
+// symbols (e.g. "__imp_XXX"). In older versions of the linker, we
+// would just immediately forward references from the import sym
+// (__imp_XXX) to the DLL sym (XXX), but with newer compilers this
+// strategy falls down in certain cases. We instead now do this
+// forwarding later on as a post-processing step, meaning that
+// during the middle part of host object loading we can see a lot of
+// unresolved (SXREF) import symbols. We do not, however, want to
+// trigger the inclusion of an object from a host archive if the
+// reference is going to be eventually forwarded to the corresponding
+// SDYNIMPORT symbol, so here we strip out such refs from the undefs
+// list.
+func pruneUndefsForWindows(ldr *loader.Loader, undefs, froms []loader.Sym) ([]loader.Sym, []loader.Sym) {
+	var newundefs []loader.Sym
+	var newfroms []loader.Sym
+	for _, s := range undefs {
+		sname := ldr.SymName(s)
+		if strings.HasPrefix(sname, "__imp_") {
+			dname := sname[len("__imp_"):]
+			ds := ldr.Lookup(dname, 0)
+			if ds != 0 && ldr.SymType(ds) == sym.SDYNIMPORT {
+				// Don't try to pull things out of a host archive to
+				// satisfy this symbol.
+				continue
+			}
+		}
+		newundefs = append(newundefs, s)
+		newfroms = append(newfroms, s)
+	}
+	return newundefs, newfroms
+}
+
+// hostArchive reads an archive file holding host objects and links in
+// required objects. The general format is the same as a Go archive
+// file, but it has an armap listing symbols and the objects that
+// define them. This is used for the compiler support library
+// libgcc.a.
+func hostArchive(ctxt *Link, name string) {
+	if ctxt.Debugvlog > 1 {
+		ctxt.Logf("hostArchive(%s)\n", name)
+	}
+	f, err := bio.Open(name)
+	if err != nil {
+		if os.IsNotExist(err) {
+			// It's OK if we don't have a libgcc file at all.
+ if ctxt.Debugvlog != 0 { + ctxt.Logf("skipping libgcc file: %v\n", err) + } + return + } + Exitf("cannot open file %s: %v", name, err) + } + defer f.Close() + + var magbuf [len(ARMAG)]byte + if _, err := io.ReadFull(f, magbuf[:]); err != nil { + Exitf("file %s too short", name) + } + + if string(magbuf[:]) != ARMAG { + Exitf("%s is not an archive file", name) + } + + var arhdr ArHdr + l := nextar(f, f.Offset(), &arhdr) + if l <= 0 { + Exitf("%s missing armap", name) + } + + var armap archiveMap + if arhdr.name == "/" || arhdr.name == "/SYM64/" { + armap = readArmap(name, f, arhdr) + } else { + Exitf("%s missing armap", name) + } + + loaded := make(map[uint64]bool) + any := true + for any { + var load []uint64 + returnAllUndefs := -1 + undefs, froms := ctxt.loader.UndefinedRelocTargets(returnAllUndefs) + if buildcfg.GOOS == "windows" { + undefs, froms = pruneUndefsForWindows(ctxt.loader, undefs, froms) + } + for k, symIdx := range undefs { + sname := ctxt.loader.SymName(symIdx) + if off := armap[sname]; off != 0 && !loaded[off] { + load = append(load, off) + loaded[off] = true + if ctxt.Debugvlog > 1 { + ctxt.Logf("hostArchive(%s): selecting object at offset %x to resolve %s [%d] reference from %s [%d]\n", name, off, sname, symIdx, ctxt.loader.SymName(froms[k]), froms[k]) + } + } + } + + for _, off := range load { + l := nextar(f, int64(off), &arhdr) + if l <= 0 { + Exitf("%s missing archive entry at offset %d", name, off) + } + pname := fmt.Sprintf("%s(%s)", name, arhdr.name) + l = atolwhex(arhdr.size) + + pkname := filepath.Base(name) + if i := strings.LastIndex(pkname, ".a"); i >= 0 { + pkname = pkname[:i] + } + libar := sym.Library{Pkg: pkname} + h := ldobj(ctxt, f, &libar, l, pname, name) + if h.ld == nil { + Errorf(nil, "%s unrecognized object file at offset %d", name, off) + continue + } + f.MustSeek(h.off, 0) + h.ld(ctxt, f, h.pkg, h.length, h.pn) + if *flagCaptureHostObjs != "" { + captureHostObj(h) + } + } + + any = len(load) > 0 + } +} + +// archiveMap 
is an archive symbol map: a mapping from symbol name to +// offset within the archive file. +type archiveMap map[string]uint64 + +// readArmap reads the archive symbol map. +func readArmap(filename string, f *bio.Reader, arhdr ArHdr) archiveMap { + is64 := arhdr.name == "/SYM64/" + wordSize := 4 + if is64 { + wordSize = 8 + } + + contents := make([]byte, atolwhex(arhdr.size)) + if _, err := io.ReadFull(f, contents); err != nil { + Exitf("short read from %s", filename) + } + + var c uint64 + if is64 { + c = binary.BigEndian.Uint64(contents) + } else { + c = uint64(binary.BigEndian.Uint32(contents)) + } + contents = contents[wordSize:] + + ret := make(archiveMap) + + names := contents[c*uint64(wordSize):] + for i := uint64(0); i < c; i++ { + n := 0 + for names[n] != 0 { + n++ + } + name := string(names[:n]) + names = names[n+1:] + + // For Mach-O and PE/386 files we strip a leading + // underscore from the symbol name. + if buildcfg.GOOS == "darwin" || buildcfg.GOOS == "ios" || (buildcfg.GOOS == "windows" && buildcfg.GOARCH == "386") { + if name[0] == '_' && len(name) > 1 { + name = name[1:] + } + } + + var off uint64 + if is64 { + off = binary.BigEndian.Uint64(contents) + } else { + off = uint64(binary.BigEndian.Uint32(contents)) + } + contents = contents[wordSize:] + + ret[name] = off + } + + return ret +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/asmb.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/asmb.go new file mode 100644 index 0000000000000000000000000000000000000000..ca9a57741c03939c677664c6acf9f9138844dfbb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/asmb.go @@ -0,0 +1,215 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ld + +import ( + "cmd/internal/objabi" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "fmt" + "runtime" + "sync" +) + +// Assembling the binary is broken into two steps: +// - writing out the code/data/dwarf Segments, applying relocations on the fly +// - writing out the architecture specific pieces. +// +// This function handles the first part. +func asmb(ctxt *Link) { + // TODO(jfaller): delete me. + if thearch.Asmb != nil { + thearch.Asmb(ctxt, ctxt.loader) + return + } + + if ctxt.IsELF { + Asmbelfsetup() + } + + var wg sync.WaitGroup + f := func(ctxt *Link, out *OutBuf, start, length int64) { + pad := thearch.CodePad + if pad == nil { + pad = zeros[:] + } + CodeblkPad(ctxt, out, start, length, pad) + } + + for _, sect := range Segtext.Sections { + offset := sect.Vaddr - Segtext.Vaddr + Segtext.Fileoff + // Handle text sections with Codeblk + if sect.Name == ".text" { + writeParallel(&wg, f, ctxt, offset, sect.Vaddr, sect.Length) + } else { + writeParallel(&wg, datblk, ctxt, offset, sect.Vaddr, sect.Length) + } + } + + if Segrodata.Filelen > 0 { + writeParallel(&wg, datblk, ctxt, Segrodata.Fileoff, Segrodata.Vaddr, Segrodata.Filelen) + } + + if Segrelrodata.Filelen > 0 { + writeParallel(&wg, datblk, ctxt, Segrelrodata.Fileoff, Segrelrodata.Vaddr, Segrelrodata.Filelen) + } + + writeParallel(&wg, datblk, ctxt, Segdata.Fileoff, Segdata.Vaddr, Segdata.Filelen) + + writeParallel(&wg, dwarfblk, ctxt, Segdwarf.Fileoff, Segdwarf.Vaddr, Segdwarf.Filelen) + + if Segpdata.Filelen > 0 { + writeParallel(&wg, pdatablk, ctxt, Segpdata.Fileoff, Segpdata.Vaddr, Segpdata.Filelen) + } + if Segxdata.Filelen > 0 { + writeParallel(&wg, xdatablk, ctxt, Segxdata.Fileoff, Segxdata.Vaddr, Segxdata.Filelen) + } + + wg.Wait() +} + +// Assembling the binary is broken into two steps: +// - writing out the code/data/dwarf Segments +// - writing out the architecture specific pieces. +// +// This function handles the second part. 
+func asmb2(ctxt *Link) { + if thearch.Asmb2 != nil { + thearch.Asmb2(ctxt, ctxt.loader) + return + } + + symSize = 0 + spSize = 0 + lcSize = 0 + + switch ctxt.HeadType { + default: + panic("unknown platform") + + // Macho + case objabi.Hdarwin: + asmbMacho(ctxt) + + // Plan9 + case objabi.Hplan9: + asmbPlan9(ctxt) + + // PE + case objabi.Hwindows: + asmbPe(ctxt) + + // Xcoff + case objabi.Haix: + asmbXcoff(ctxt) + + // Elf + case objabi.Hdragonfly, + objabi.Hfreebsd, + objabi.Hlinux, + objabi.Hnetbsd, + objabi.Hopenbsd, + objabi.Hsolaris: + asmbElf(ctxt) + } + + if *FlagC { + fmt.Printf("textsize=%d\n", Segtext.Filelen) + fmt.Printf("datsize=%d\n", Segdata.Filelen) + fmt.Printf("bsssize=%d\n", Segdata.Length-Segdata.Filelen) + fmt.Printf("symsize=%d\n", symSize) + fmt.Printf("lcsize=%d\n", lcSize) + fmt.Printf("total=%d\n", Segtext.Filelen+Segdata.Length+uint64(symSize)+uint64(lcSize)) + } +} + +// writePlan9Header writes out the plan9 header at the present position in the OutBuf. +func writePlan9Header(buf *OutBuf, magic uint32, entry int64, is64Bit bool) { + if is64Bit { + magic |= 0x00008000 + } + buf.Write32b(magic) + buf.Write32b(uint32(Segtext.Filelen)) + buf.Write32b(uint32(Segdata.Filelen)) + buf.Write32b(uint32(Segdata.Length - Segdata.Filelen)) + buf.Write32b(uint32(symSize)) + if is64Bit { + buf.Write32b(uint32(entry &^ 0x80000000)) + } else { + buf.Write32b(uint32(entry)) + } + buf.Write32b(uint32(spSize)) + buf.Write32b(uint32(lcSize)) + // amd64 includes the entry at the beginning of the symbol table. + if is64Bit { + buf.Write64b(uint64(entry)) + } +} + +// asmbPlan9 assembles a plan 9 binary. 
+func asmbPlan9(ctxt *Link) { + if !*FlagS { + *FlagS = true + symo := int64(Segdata.Fileoff + Segdata.Filelen) + ctxt.Out.SeekSet(symo) + asmbPlan9Sym(ctxt) + } + ctxt.Out.SeekSet(0) + writePlan9Header(ctxt.Out, thearch.Plan9Magic, Entryvalue(ctxt), thearch.Plan9_64Bit) +} + +// sizeExtRelocs precomputes the size needed for the reloc records, +// sets the size and offset for relocation records in each section, +// and mmap the output buffer with the proper size. +func sizeExtRelocs(ctxt *Link, relsize uint32) { + if relsize == 0 { + panic("sizeExtRelocs: relocation size not set") + } + var sz int64 + for _, seg := range Segments { + for _, sect := range seg.Sections { + sect.Reloff = uint64(ctxt.Out.Offset() + sz) + sect.Rellen = uint64(relsize * sect.Relcount) + sz += int64(sect.Rellen) + } + } + filesz := ctxt.Out.Offset() + sz + err := ctxt.Out.Mmap(uint64(filesz)) + if err != nil { + Exitf("mapping output file failed: %v", err) + } +} + +// relocSectFn wraps the function writing relocations of a section +// for parallel execution. Returns the wrapped function and a wait +// group for which the caller should wait. +func relocSectFn(ctxt *Link, relocSect func(*Link, *OutBuf, *sym.Section, []loader.Sym)) (func(*Link, *sym.Section, []loader.Sym), *sync.WaitGroup) { + var fn func(ctxt *Link, sect *sym.Section, syms []loader.Sym) + var wg sync.WaitGroup + var sem chan int + if ctxt.Out.isMmapped() { + // Write sections in parallel. + sem = make(chan int, 2*runtime.GOMAXPROCS(0)) + fn = func(ctxt *Link, sect *sym.Section, syms []loader.Sym) { + wg.Add(1) + sem <- 1 + out, err := ctxt.Out.View(sect.Reloff) + if err != nil { + panic(err) + } + go func() { + relocSect(ctxt, out, sect, syms) + wg.Done() + <-sem + }() + } + } else { + // We cannot Mmap. Write sequentially. 
+ fn = func(ctxt *Link, sect *sym.Section, syms []loader.Sym) { + relocSect(ctxt, ctxt.Out, sect, syms) + } + } + return fn, &wg +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/config.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/config.go new file mode 100644 index 0000000000000000000000000000000000000000..3a186b47f715aedb67762b1ecc46e58d90f11a27 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/config.go @@ -0,0 +1,224 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "fmt" + "internal/buildcfg" + "internal/platform" +) + +// A BuildMode indicates the sort of object we are building. +// +// Possible build modes are the same as those for the -buildmode flag +// in cmd/go, and are documented in 'go help buildmode'. +type BuildMode uint8 + +const ( + BuildModeUnset BuildMode = iota + BuildModeExe + BuildModePIE + BuildModeCArchive + BuildModeCShared + BuildModeShared + BuildModePlugin +) + +// Set implements flag.Value to set the build mode based on the argument +// to the -buildmode flag. 
+func (mode *BuildMode) Set(s string) error { + switch s { + default: + return fmt.Errorf("invalid buildmode: %q", s) + case "exe": + switch buildcfg.GOOS + "/" + buildcfg.GOARCH { + case "darwin/arm64", "windows/arm", "windows/arm64": // On these platforms, everything is PIE + *mode = BuildModePIE + default: + *mode = BuildModeExe + } + case "pie": + *mode = BuildModePIE + case "c-archive": + *mode = BuildModeCArchive + case "c-shared": + *mode = BuildModeCShared + case "shared": + *mode = BuildModeShared + case "plugin": + *mode = BuildModePlugin + } + + if !platform.BuildModeSupported("gc", s, buildcfg.GOOS, buildcfg.GOARCH) { + return fmt.Errorf("buildmode %s not supported on %s/%s", s, buildcfg.GOOS, buildcfg.GOARCH) + } + + return nil +} + +func (mode BuildMode) String() string { + switch mode { + case BuildModeUnset: + return "" // avoid showing a default in usage message + case BuildModeExe: + return "exe" + case BuildModePIE: + return "pie" + case BuildModeCArchive: + return "c-archive" + case BuildModeCShared: + return "c-shared" + case BuildModeShared: + return "shared" + case BuildModePlugin: + return "plugin" + } + return fmt.Sprintf("BuildMode(%d)", uint8(mode)) +} + +// LinkMode indicates whether an external linker is used for the final link. +type LinkMode uint8 + +const ( + LinkAuto LinkMode = iota + LinkInternal + LinkExternal +) + +func (mode *LinkMode) Set(s string) error { + switch s { + default: + return fmt.Errorf("invalid linkmode: %q", s) + case "auto": + *mode = LinkAuto + case "internal": + *mode = LinkInternal + case "external": + *mode = LinkExternal + } + return nil +} + +func (mode *LinkMode) String() string { + switch *mode { + case LinkAuto: + return "auto" + case LinkInternal: + return "internal" + case LinkExternal: + return "external" + } + return fmt.Sprintf("LinkMode(%d)", uint8(*mode)) +} + +// mustLinkExternal reports whether the program being linked requires +// the external linker be used to complete the link. 
+func mustLinkExternal(ctxt *Link) (res bool, reason string) { + if ctxt.Debugvlog > 1 { + defer func() { + if res { + ctxt.Logf("external linking is forced by: %s\n", reason) + } + }() + } + + if platform.MustLinkExternal(buildcfg.GOOS, buildcfg.GOARCH, false) { + return true, fmt.Sprintf("%s/%s requires external linking", buildcfg.GOOS, buildcfg.GOARCH) + } + + if *flagMsan { + return true, "msan" + } + + if *flagAsan { + return true, "asan" + } + + if iscgo && platform.MustLinkExternal(buildcfg.GOOS, buildcfg.GOARCH, true) { + return true, buildcfg.GOARCH + " does not support internal cgo" + } + + // Some build modes require work the internal linker cannot do (yet). + switch ctxt.BuildMode { + case BuildModeCArchive: + return true, "buildmode=c-archive" + case BuildModeCShared: + return true, "buildmode=c-shared" + case BuildModePIE: + if !platform.InternalLinkPIESupported(buildcfg.GOOS, buildcfg.GOARCH) { + // Internal linking does not support TLS_IE. + return true, "buildmode=pie" + } + case BuildModePlugin: + return true, "buildmode=plugin" + case BuildModeShared: + return true, "buildmode=shared" + } + if ctxt.linkShared { + return true, "dynamically linking with a shared library" + } + + if unknownObjFormat { + return true, "some input objects have an unrecognized file format" + } + + if len(dynimportfail) > 0 { + // This error means that we were unable to generate + // the _cgo_import.go file for some packages. + // This typically means that there are some dependencies + // that the cgo tool could not figure out. + // See issue #52863. + return true, fmt.Sprintf("some packages could not be built to support internal linking (%v)", dynimportfail) + } + + return false, "" +} + +// determineLinkMode sets ctxt.LinkMode. +// +// It is called after flags are processed and inputs are processed, +// so the ctxt.LinkMode variable has an initial value from the -linkmode +// flag and the iscgo, externalobj, and unknownObjFormat variables are set. 
+func determineLinkMode(ctxt *Link) { + extNeeded, extReason := mustLinkExternal(ctxt) + via := "" + + if ctxt.LinkMode == LinkAuto { + // The environment variable GO_EXTLINK_ENABLED controls the + // default value of -linkmode. If it is not set when the + // linker is called we take the value it was set to when + // cmd/link was compiled. (See make.bash.) + switch buildcfg.Getgoextlinkenabled() { + case "0": + ctxt.LinkMode = LinkInternal + via = "via GO_EXTLINK_ENABLED " + case "1": + ctxt.LinkMode = LinkExternal + via = "via GO_EXTLINK_ENABLED " + default: + preferExternal := len(preferlinkext) != 0 + if preferExternal && ctxt.Debugvlog > 0 { + ctxt.Logf("external linking prefer list is %v\n", preferlinkext) + } + if extNeeded || (iscgo && (externalobj || preferExternal)) { + ctxt.LinkMode = LinkExternal + } else { + ctxt.LinkMode = LinkInternal + } + } + } + + switch ctxt.LinkMode { + case LinkInternal: + if extNeeded { + Exitf("internal linking requested %sbut external linking required: %s", via, extReason) + } + case LinkExternal: + switch { + case buildcfg.GOARCH == "ppc64" && buildcfg.GOOS == "linux": + Exitf("external linking not supported for %s/ppc64", buildcfg.GOOS) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/data.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/data.go new file mode 100644 index 0000000000000000000000000000000000000000..a5a0615c8dff8d4fa42a587c5b7966e3bb80baf2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/data.go @@ -0,0 +1,3118 @@ +// Derived from Inferno utils/6l/obj.c and utils/6l/span.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/obj.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/span.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package ld + +import ( + "bytes" + "cmd/internal/gcprog" + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/loadpe" + "cmd/link/internal/sym" + "compress/zlib" + "debug/elf" + "encoding/binary" + "fmt" + "log" + "os" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" +) + +// isRuntimeDepPkg reports whether pkg is the runtime package or its dependency. 
+func isRuntimeDepPkg(pkg string) bool { + switch pkg { + case "runtime", + "sync/atomic", // runtime may call to sync/atomic, due to go:linkname + "internal/abi", // used by reflectcall (and maybe more) + "internal/bytealg", // for IndexByte + "internal/chacha8rand", // for rand + "internal/cpu": // for cpu features + return true + } + return strings.HasPrefix(pkg, "runtime/internal/") && !strings.HasSuffix(pkg, "_test") +} + +// Estimate the max size needed to hold any new trampolines created for this function. This +// is used to determine when the section can be split if it becomes too large, to ensure that +// the trampolines are in the same section as the function that uses them. +func maxSizeTrampolines(ctxt *Link, ldr *loader.Loader, s loader.Sym, isTramp bool) uint64 { + // If thearch.Trampoline is nil, then trampoline support is not available on this arch. + // A trampoline does not need any dependent trampolines. + if thearch.Trampoline == nil || isTramp { + return 0 + } + + n := uint64(0) + relocs := ldr.Relocs(s) + for ri := 0; ri < relocs.Count(); ri++ { + r := relocs.At(ri) + if r.Type().IsDirectCallOrJump() { + n++ + } + } + + switch { + case ctxt.IsARM(): + return n * 20 // Trampolines in ARM range from 3 to 5 instructions. + case ctxt.IsARM64(): + return n * 12 // Trampolines in ARM64 are 3 instructions. + case ctxt.IsPPC64(): + return n * 16 // Trampolines in PPC64 are 4 instructions. + case ctxt.IsRISCV64(): + return n * 8 // Trampolines in RISCV64 are 2 instructions. + } + panic("unreachable") +} + +// Detect too-far jumps in function s, and add trampolines if necessary. +// ARM, PPC64, PPC64LE and RISCV64 support trampoline insertion for internal +// and external linking. On PPC64 and PPC64LE the text sections might be split +// but will still insert trampolines where necessary. 
func trampoline(ctxt *Link, s loader.Sym) {
	if thearch.Trampoline == nil {
		return // no need or no support of trampolines on this arch
	}

	ldr := ctxt.loader
	relocs := ldr.Relocs(s)
	for ri := 0; ri < relocs.Count(); ri++ {
		r := relocs.At(ri)
		rt := r.Type()
		// Only direct calls/jumps (and host-object relocations that become
		// PLT calls) can be out of range and need a trampoline.
		if !rt.IsDirectCallOrJump() && !isPLTCall(rt) {
			continue
		}
		rs := r.Sym()
		if !ldr.AttrReachable(rs) || ldr.SymType(rs) == sym.Sxxx {
			continue // something is wrong. skip it here and we'll emit a better error later
		}

		if ldr.SymValue(rs) == 0 && ldr.SymType(rs) != sym.SDYNIMPORT && ldr.SymType(rs) != sym.SUNDEFEXT {
			// Symbols in the same package are laid out together.
			// Except that if SymPkg(s) == "", it is a host object symbol
			// which may call an external symbol via PLT.
			if ldr.SymPkg(s) != "" && ldr.SymPkg(rs) == ldr.SymPkg(s) {
				// RISC-V is only able to reach +/-1MiB via a JAL instruction.
				// We need to generate a trampoline when an address is
				// currently unknown.
				if !ctxt.Target.IsRISCV64() {
					continue
				}
			}
			// Runtime packages are laid out together.
			if isRuntimeDepPkg(ldr.SymPkg(s)) && isRuntimeDepPkg(ldr.SymPkg(rs)) {
				continue
			}
		}
		// Let the architecture decide whether the target is too far and, if
		// so, synthesize a trampoline symbol for this relocation.
		thearch.Trampoline(ctxt, ldr, ri, rs, s)
	}
}

// whether rt is a (host object) relocation that will be turned into
// a call to PLT.
// Currently only ARM64 (ELF and Mach-O) and ARM (ELF) relocation types
// are recognized; other architectures fall through to false.
func isPLTCall(rt objabi.RelocType) bool {
	const pcrel = 1
	switch rt {
	// ARM64
	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_CALL26),
		objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_JUMP26),
		objabi.MachoRelocOffset + MACHO_ARM64_RELOC_BRANCH26*2 + pcrel:
		return true

	// ARM
	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_CALL),
		objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_PC24),
		objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_JUMP24):
		return true
	}
	// TODO: other architectures.
	return false
}

// FoldSubSymbolOffset computes the offset of symbol s to its top-level outer
// symbol. Returns the top-level symbol and the offset.
// This is used in generating external relocations.
// If s has no outer symbol, s itself is returned with offset 0.
func FoldSubSymbolOffset(ldr *loader.Loader, s loader.Sym) (loader.Sym, int64) {
	outer := ldr.OuterSym(s)
	off := int64(0)
	if outer != 0 {
		off += ldr.SymValue(s) - ldr.SymValue(outer)
		s = outer
	}
	return s, off
}

// relocsym resolve relocations in "s", updating the symbol's content
// in "P".
// The main loop walks through the list of relocations attached to "s"
// and resolves them where applicable. Relocations are often
// architecture-specific, requiring calls into the 'archreloc' and/or
// 'archrelocvariant' functions for the architecture. When external
// linking is in effect, it may not be possible to completely resolve
// the address/offset for a symbol, in which case the goal is to lay
// the groundwork for turning a given relocation into an external reloc
// (to be applied by the external linker). For more on how relocations
// work in general, see
//
//	"Linkers and Loaders", by John R. Levine (Morgan Kaufmann, 1999), ch. 7
//
// This is a performance-critical function for the linker; be careful
// to avoid introducing unnecessary allocations in the main loop.
func (st *relocSymState) relocsym(s loader.Sym, P []byte) {
	ldr := st.ldr
	relocs := ldr.Relocs(s)
	if relocs.Count() == 0 {
		return
	}
	target := st.target
	syms := st.syms
	nExtReloc := 0 // number of external relocations
	for ri := 0; ri < relocs.Count(); ri++ {
		r := relocs.At(ri)
		off := r.Off()
		siz := int32(r.Siz())
		rs := r.Sym()
		rt := r.Type()
		weak := r.Weak()
		// Sanity-check that the relocated range lies within the symbol's data.
		if off < 0 || off+siz > int32(len(P)) {
			rname := ""
			if rs != 0 {
				rname = ldr.SymName(rs)
			}
			st.err.Errorf(s, "invalid relocation %s: %d+%d not in [%d,%d)", rname, off, siz, 0, len(P))
			continue
		}
		if siz == 0 { // informational relocation - no work to do
			continue
		}

		var rst sym.SymKind
		if rs != 0 {
			rst = ldr.SymType(rs)
		}

		if rs != 0 && (rst == sym.Sxxx || rst == sym.SXREF) {
			// When putting the runtime but not main into a shared library
			// these symbols are undefined and that's OK.
			if target.IsShared() || target.IsPlugin() {
				if ldr.SymName(rs) == "main.main" || (!target.IsPlugin() && ldr.SymName(rs) == "main..inittask") {
					sb := ldr.MakeSymbolUpdater(rs)
					sb.SetType(sym.SDYNIMPORT)
				} else if strings.HasPrefix(ldr.SymName(rs), "go:info.") {
					// Skip go.info symbols. They are only needed to communicate
					// DWARF info between the compiler and linker.
					continue
				}
			} else if target.IsPPC64() && ldr.SymName(rs) == ".TOC." {
				// TOC symbol doesn't have a type but we do assign a value
				// (see the address pass) and we can resolve it.
				// TODO: give it a type.
			} else {
				st.err.errorUnresolved(ldr, s, rs)
				continue
			}
		}

		// Relocation types at/above ElfRelocOffset are host-object relocations
		// handled elsewhere (external linking); skip them here.
		if rt >= objabi.ElfRelocOffset {
			continue
		}

		// We need to be able to reference dynimport symbols when linking against
		// shared libraries, and AIX, Darwin, OpenBSD and Solaris always need it.
		if !target.IsAIX() && !target.IsDarwin() && !target.IsSolaris() && !target.IsOpenbsd() && rs != 0 && rst == sym.SDYNIMPORT && !target.IsDynlinkingGo() && !ldr.AttrSubSymbol(rs) {
			if !(target.IsPPC64() && target.IsExternal() && ldr.SymName(rs) == ".TOC.") {
				st.err.Errorf(s, "unhandled relocation for %s (type %d (%s) rtype %d (%s))", ldr.SymName(rs), rst, rst, rt, sym.RelocName(target.Arch, rt))
			}
		}
		if rs != 0 && rst != sym.STLSBSS && !weak && rt != objabi.R_METHODOFF && !ldr.AttrReachable(rs) {
			st.err.Errorf(s, "unreachable sym in relocation: %s", ldr.SymName(rs))
		}

		var rv sym.RelocVariant
		if target.IsPPC64() || target.IsS390X() {
			rv = ldr.RelocVariant(s, ri)
		}

		// TODO(mundaym): remove this special case - see issue 14218.
		if target.IsS390X() {
			switch rt {
			case objabi.R_PCRELDBL:
				rt = objabi.R_PCREL
				rv = sym.RV_390_DBL
			case objabi.R_CALL:
				rv = sym.RV_390_DBL
			}
		}

		// o is the value to be written into P[off:off+siz] below.
		var o int64
		switch rt {
		default:
			// Architecture-specific relocation: read the existing bytes and
			// let thearch.Archreloc compute the new value.
			switch siz {
			default:
				st.err.Errorf(s, "bad reloc size %#x for %s", uint32(siz), ldr.SymName(rs))
			case 1:
				o = int64(P[off])
			case 2:
				o = int64(target.Arch.ByteOrder.Uint16(P[off:]))
			case 4:
				o = int64(target.Arch.ByteOrder.Uint32(P[off:]))
			case 8:
				o = int64(target.Arch.ByteOrder.Uint64(P[off:]))
			}
			out, n, ok := thearch.Archreloc(target, ldr, syms, r, s, o)
			if target.IsExternal() {
				nExtReloc += n
			}
			if ok {
				o = out
			} else {
				st.err.Errorf(s, "unknown reloc to %v: %d (%s)", ldr.SymName(rs), rt, sym.RelocName(target.Arch, rt))
			}
		case objabi.R_TLS_LE:
			if target.IsExternal() && target.IsElf() {
				nExtReloc++
				o = 0
				if !target.IsAMD64() {
					o = r.Add()
				}
				break
			}

			if target.IsElf() && target.IsARM() {
				// On ELF ARM, the thread pointer is 8 bytes before
				// the start of the thread-local data block, so add 8
				// to the actual TLS offset (r->sym->value).
				// This 8 seems to be a fundamental constant of
				// ELF on ARM (or maybe Glibc on ARM); it is not
				// related to the fact that our own TLS storage happens
				// to take up 8 bytes.
				o = 8 + ldr.SymValue(rs)
			} else if target.IsElf() || target.IsPlan9() || target.IsDarwin() {
				o = int64(syms.Tlsoffset) + r.Add()
			} else if target.IsWindows() {
				o = r.Add()
			} else {
				log.Fatalf("unexpected R_TLS_LE relocation for %v", target.HeadType)
			}
		case objabi.R_TLS_IE:
			if target.IsExternal() && target.IsElf() {
				nExtReloc++
				o = 0
				if !target.IsAMD64() {
					o = r.Add()
				}
				if target.Is386() {
					nExtReloc++ // need two ELF relocations on 386, see ../x86/asm.go:elfreloc1
				}
				break
			}
			if target.IsPIE() && target.IsElf() {
				// We are linking the final executable, so we
				// can optimize any TLS IE relocation to LE.
				if thearch.TLSIEtoLE == nil {
					log.Fatalf("internal linking of TLS IE not supported on %v", target.Arch.Family)
				}
				thearch.TLSIEtoLE(P, int(off), int(siz))
				o = int64(syms.Tlsoffset)
			} else {
				log.Fatalf("cannot handle R_TLS_IE (sym %s) when linking internally", ldr.SymName(s))
			}
		case objabi.R_ADDR, objabi.R_PEIMAGEOFF:
			if weak && !ldr.AttrReachable(rs) {
				// Redirect it to runtime.unreachableMethod, which will throw if called.
				rs = syms.unreachableMethod
			}
			if target.IsExternal() {
				nExtReloc++

				// set up addend for eventual relocation via outer symbol.
				rs := rs
				rs, off := FoldSubSymbolOffset(ldr, rs)
				xadd := r.Add() + off
				rst := ldr.SymType(rs)
				if rst != sym.SHOSTOBJ && rst != sym.SDYNIMPORT && rst != sym.SUNDEFEXT && ldr.SymSect(rs) == nil {
					st.err.Errorf(s, "missing section for relocation target %s", ldr.SymName(rs))
				}

				o = xadd
				if target.IsElf() {
					if target.IsAMD64() {
						o = 0
					}
				} else if target.IsDarwin() {
					if ldr.SymType(s).IsDWARF() {
						// We generally use symbol-targeted relocations.
						// DWARF tools seem to only handle section-targeted relocations,
						// so generate section-targeted relocations in DWARF sections.
						// See also machoreloc1.
						o += ldr.SymValue(rs)
					}
				} else if target.IsWindows() {
					// nothing to do
				} else if target.IsAIX() {
					o = ldr.SymValue(rs) + xadd
				} else {
					st.err.Errorf(s, "unhandled pcrel relocation to %s on %v", ldr.SymName(rs), target.HeadType)
				}

				break
			}

			// On AIX, a second relocation must be done by the loader,
			// as section addresses can change once loaded.
			// The "default" symbol address is still needed by the loader so
			// the current relocation can't be skipped.
			if target.IsAIX() && rst != sym.SDYNIMPORT {
				// It's not possible to make a loader relocation in a
				// symbol which is not inside .data section.
				// FIXME: It should be forbidden to have R_ADDR from a
				// symbol which isn't in .data. However, as .text has the
				// same address once loaded, this is possible.
				if ldr.SymSect(s).Seg == &Segdata {
					Xcoffadddynrel(target, ldr, syms, s, r, ri)
				}
			}

			o = ldr.SymValue(rs) + r.Add()
			if rt == objabi.R_PEIMAGEOFF {
				// The R_PEIMAGEOFF offset is a RVA, so subtract
				// the base address for the executable.
				o -= PEBASE
			}

			// On amd64, 4-byte offsets will be sign-extended, so it is impossible to
			// access more than 2GB of static data; fail at link time is better than
			// fail at runtime. See https://golang.org/issue/7980.
			// Instead of special casing only amd64, we treat this as an error on all
			// 64-bit architectures so as to be future-proof.
			if int32(o) < 0 && target.Arch.PtrSize > 4 && siz == 4 {
				st.err.Errorf(s, "non-pc-relative relocation address for %s is too big: %#x (%#x + %#x)", ldr.SymName(rs), uint64(o), ldr.SymValue(rs), r.Add())
				errorexit()
			}
		case objabi.R_DWARFSECREF:
			if ldr.SymSect(rs) == nil {
				st.err.Errorf(s, "missing DWARF section for relocation target %s", ldr.SymName(rs))
			}

			if target.IsExternal() {
				// On most platforms, the external linker needs to adjust DWARF references
				// as it combines DWARF sections. However, on Darwin, dsymutil does the
				// DWARF linking, and it understands how to follow section offsets.
				// Leaving in the relocation records confuses it (see
				// https://golang.org/issue/22068) so drop them for Darwin.
				if !target.IsDarwin() {
					nExtReloc++
				}

				xadd := r.Add() + ldr.SymValue(rs) - int64(ldr.SymSect(rs).Vaddr)

				o = xadd
				if target.IsElf() && target.IsAMD64() {
					o = 0
				}
				break
			}
			o = ldr.SymValue(rs) + r.Add() - int64(ldr.SymSect(rs).Vaddr)
		case objabi.R_METHODOFF:
			if !ldr.AttrReachable(rs) {
				// Set it to a sentinel value. The runtime knows this is not pointing to
				// anything valid.
				o = -1
				break
			}
			fallthrough
		case objabi.R_ADDROFF:
			if weak && !ldr.AttrReachable(rs) {
				continue
			}
			sect := ldr.SymSect(rs)
			if sect == nil {
				if rst == sym.SDYNIMPORT {
					st.err.Errorf(s, "cannot target DYNIMPORT sym in section-relative reloc: %s", ldr.SymName(rs))
				} else if rst == sym.SUNDEFEXT {
					st.err.Errorf(s, "undefined symbol in relocation: %s", ldr.SymName(rs))
				} else {
					st.err.Errorf(s, "missing section for relocation target %s", ldr.SymName(rs))
				}
				continue
			}

			// The method offset tables using this relocation expect the offset to be relative
			// to the start of the first text section, even if there are multiple.
			if sect.Name == ".text" {
				o = ldr.SymValue(rs) - int64(Segtext.Sections[0].Vaddr) + r.Add()
			} else {
				o = ldr.SymValue(rs) - int64(ldr.SymSect(rs).Vaddr) + r.Add()
			}

		case objabi.R_ADDRCUOFF:
			// debug_range and debug_loc elements use this relocation type to get an
			// offset from the start of the compile unit.
			o = ldr.SymValue(rs) + r.Add() - ldr.SymValue(loader.Sym(ldr.SymUnit(rs).Textp[0]))

		// r.Sym() can be 0 when CALL $(constant) is transformed from absolute PC to relative PC call.
		case objabi.R_GOTPCREL:
			if target.IsDynlinkingGo() && target.IsDarwin() && rs != 0 {
				nExtReloc++
				o = r.Add()
				break
			}
			if target.Is386() && target.IsExternal() && target.IsELF {
				nExtReloc++ // need two ELF relocations on 386, see ../x86/asm.go:elfreloc1
			}
			fallthrough
		case objabi.R_CALL, objabi.R_PCREL:
			if target.IsExternal() && rs != 0 && rst == sym.SUNDEFEXT {
				// pass through to the external linker.
				nExtReloc++
				o = 0
				break
			}
			if target.IsExternal() && rs != 0 && (ldr.SymSect(rs) != ldr.SymSect(s) || rt == objabi.R_GOTPCREL) {
				nExtReloc++

				// set up addend for eventual relocation via outer symbol.
				rs := rs
				rs, off := FoldSubSymbolOffset(ldr, rs)
				xadd := r.Add() + off - int64(siz) // relative to address after the relocated chunk
				rst := ldr.SymType(rs)
				if rst != sym.SHOSTOBJ && rst != sym.SDYNIMPORT && ldr.SymSect(rs) == nil {
					st.err.Errorf(s, "missing section for relocation target %s", ldr.SymName(rs))
				}

				o = xadd
				if target.IsElf() {
					if target.IsAMD64() {
						o = 0
					}
				} else if target.IsDarwin() {
					if rt == objabi.R_CALL {
						if target.IsExternal() && rst == sym.SDYNIMPORT {
							if target.IsAMD64() {
								// AMD64 dynamic relocations are relative to the end of the relocation.
								o += int64(siz)
							}
						} else {
							if rst != sym.SHOSTOBJ {
								o += int64(uint64(ldr.SymValue(rs)) - ldr.SymSect(rs).Vaddr)
							}
							o -= int64(off) // relative to section offset, not symbol
						}
					} else {
						o += int64(siz)
					}
				} else if target.IsWindows() && target.IsAMD64() { // only amd64 needs PCREL
					// PE/COFF's PC32 relocation uses the address after the relocated
					// bytes as the base. Compensate by skewing the addend.
					o += int64(siz)
				} else {
					st.err.Errorf(s, "unhandled pcrel relocation to %s on %v", ldr.SymName(rs), target.HeadType)
				}

				break
			}

			o = 0
			if rs != 0 {
				o = ldr.SymValue(rs)
			}

			o += r.Add() - (ldr.SymValue(s) + int64(off) + int64(siz))
		case objabi.R_SIZE:
			o = ldr.SymSize(rs) + r.Add()

		case objabi.R_XCOFFREF:
			if !target.IsAIX() {
				st.err.Errorf(s, "find XCOFF R_REF on non-XCOFF files")
			}
			if !target.IsExternal() {
				st.err.Errorf(s, "find XCOFF R_REF with internal linking")
			}
			nExtReloc++
			continue

		case objabi.R_DWARFFILEREF:
			// We don't renumber files in dwarf.go:writelines anymore.
			continue

		case objabi.R_CONST:
			o = r.Add()

		case objabi.R_GOTOFF:
			o = ldr.SymValue(rs) + r.Add() - ldr.SymValue(syms.GOT)
		}

		if target.IsPPC64() || target.IsS390X() {
			if rv != sym.RV_NONE {
				o = thearch.Archrelocvariant(target, ldr, r, rv, s, o, P)
			}
		}

		// Write the computed value back into the symbol's data, with
		// overflow checks for the 2- and 4-byte cases.
		switch siz {
		default:
			st.err.Errorf(s, "bad reloc size %#x for %s", uint32(siz), ldr.SymName(rs))
		case 1:
			P[off] = byte(int8(o))
		case 2:
			if (rt == objabi.R_PCREL || rt == objabi.R_CALL) && o != int64(int16(o)) {
				st.err.Errorf(s, "pc-relative relocation address for %s is too big: %#x", ldr.SymName(rs), o)
			} else if o != int64(int16(o)) && o != int64(uint16(o)) {
				st.err.Errorf(s, "non-pc-relative relocation address for %s is too big: %#x", ldr.SymName(rs), uint64(o))
			}
			target.Arch.ByteOrder.PutUint16(P[off:], uint16(o))
		case 4:
			if (rt == objabi.R_PCREL || rt == objabi.R_CALL) && o != int64(int32(o)) {
				st.err.Errorf(s, "pc-relative relocation address for %s is too big: %#x", ldr.SymName(rs), o)
			} else if o != int64(int32(o)) && o != int64(uint32(o)) {
				st.err.Errorf(s, "non-pc-relative relocation address for %s is too big: %#x", ldr.SymName(rs), uint64(o))
			}
			target.Arch.ByteOrder.PutUint32(P[off:], uint32(o))
		case 8:
			target.Arch.ByteOrder.PutUint64(P[off:], uint64(o))
		}
	}
	if target.IsExternal() {
		// We'll stream out the external relocations in asmb2 (e.g. elfrelocsect)
		// and we only need the count here.
		atomic.AddUint32(&ldr.SymSect(s).Relcount, uint32(nExtReloc))
	}
}

// Convert a Go relocation to an external relocation.
// extreloc reports the external relocation (if any) that r should become,
// returning it and true; (zero, false) means no external relocation is needed.
func extreloc(ctxt *Link, ldr *loader.Loader, s loader.Sym, r loader.Reloc) (loader.ExtReloc, bool) {
	var rr loader.ExtReloc
	target := &ctxt.Target
	siz := int32(r.Siz())
	if siz == 0 { // informational relocation - no work to do
		return rr, false
	}

	rt := r.Type()
	if rt >= objabi.ElfRelocOffset {
		return rr, false
	}
	rr.Type = rt
	rr.Size = uint8(siz)

	// TODO(mundaym): remove this special case - see issue 14218.
	if target.IsS390X() {
		switch rt {
		case objabi.R_PCRELDBL:
			rt = objabi.R_PCREL
		}
	}

	switch rt {
	default:
		return thearch.Extreloc(target, ldr, r, s)

	case objabi.R_TLS_LE, objabi.R_TLS_IE:
		if target.IsElf() {
			rs := r.Sym()
			rr.Xsym = rs
			if rr.Xsym == 0 {
				rr.Xsym = ctxt.Tlsg
			}
			rr.Xadd = r.Add()
			break
		}
		return rr, false

	case objabi.R_ADDR, objabi.R_PEIMAGEOFF:
		// set up addend for eventual relocation via outer symbol.
		rs := r.Sym()
		if r.Weak() && !ldr.AttrReachable(rs) {
			rs = ctxt.ArchSyms.unreachableMethod
		}
		rs, off := FoldSubSymbolOffset(ldr, rs)
		rr.Xadd = r.Add() + off
		rr.Xsym = rs

	case objabi.R_DWARFSECREF:
		// On most platforms, the external linker needs to adjust DWARF references
		// as it combines DWARF sections. However, on Darwin, dsymutil does the
		// DWARF linking, and it understands how to follow section offsets.
		// Leaving in the relocation records confuses it (see
		// https://golang.org/issue/22068) so drop them for Darwin.
		if target.IsDarwin() {
			return rr, false
		}
		rs := r.Sym()
		rr.Xsym = loader.Sym(ldr.SymSect(rs).Sym)
		rr.Xadd = r.Add() + ldr.SymValue(rs) - int64(ldr.SymSect(rs).Vaddr)

	// r.Sym() can be 0 when CALL $(constant) is transformed from absolute PC to relative PC call.
	case objabi.R_GOTPCREL, objabi.R_CALL, objabi.R_PCREL:
		rs := r.Sym()
		if rt == objabi.R_GOTPCREL && target.IsDynlinkingGo() && target.IsDarwin() && rs != 0 {
			rr.Xadd = r.Add()
			rr.Xadd -= int64(siz) // relative to address after the relocated chunk
			rr.Xsym = rs
			break
		}
		if rs != 0 && ldr.SymType(rs) == sym.SUNDEFEXT {
			// pass through to the external linker.
			rr.Xadd = 0
			if target.IsElf() {
				rr.Xadd -= int64(siz)
			}
			rr.Xsym = rs
			break
		}
		if rs != 0 && (ldr.SymSect(rs) != ldr.SymSect(s) || rt == objabi.R_GOTPCREL) {
			// set up addend for eventual relocation via outer symbol.
			rs := rs
			rs, off := FoldSubSymbolOffset(ldr, rs)
			rr.Xadd = r.Add() + off
			rr.Xadd -= int64(siz) // relative to address after the relocated chunk
			rr.Xsym = rs
			break
		}
		return rr, false

	case objabi.R_XCOFFREF:
		return ExtrelocSimple(ldr, r), true

	// These reloc types don't need external relocations.
	case objabi.R_ADDROFF, objabi.R_METHODOFF, objabi.R_ADDRCUOFF,
		objabi.R_SIZE, objabi.R_CONST, objabi.R_GOTOFF:
		return rr, false
	}
	return rr, true
}

// ExtrelocSimple creates a simple external relocation from r, with the same
// symbol and addend.
func ExtrelocSimple(ldr *loader.Loader, r loader.Reloc) loader.ExtReloc {
	var rr loader.ExtReloc
	rs := r.Sym()
	rr.Xsym = rs
	rr.Xadd = r.Add()
	rr.Type = r.Type()
	rr.Size = r.Siz()
	return rr
}

// ExtrelocViaOuterSym creates an external relocation from r targeting the
// outer symbol and folding the subsymbol's offset into the addend.
func ExtrelocViaOuterSym(ldr *loader.Loader, r loader.Reloc, s loader.Sym) loader.ExtReloc {
	// set up addend for eventual relocation via outer symbol.
	var rr loader.ExtReloc
	rs := r.Sym()
	rs, off := FoldSubSymbolOffset(ldr, rs)
	rr.Xadd = r.Add() + off
	rst := ldr.SymType(rs)
	if rst != sym.SHOSTOBJ && rst != sym.SDYNIMPORT && rst != sym.SUNDEFEXT && ldr.SymSect(rs) == nil {
		ldr.Errorf(s, "missing section for %s", ldr.SymName(rs))
	}
	rr.Xsym = rs
	rr.Type = r.Type()
	rr.Size = r.Siz()
	return rr
}

// relocSymState hold state information needed when making a series of
// successive calls to relocsym(). The items here are invariant
// (meaning that they are set up once initially and then don't change
// during the execution of relocsym), with the exception of a slice
// used to facilitate batch allocation of external relocations. Calls
// to relocsym happen in parallel; the assumption is that each
// parallel thread will have its own state object.
type relocSymState struct {
	target *Target
	ldr    *loader.Loader
	err    *ErrorReporter
	syms   *ArchSyms
}

// makeRelocSymState creates a relocSymState container object to
// pass to relocsym(). If relocsym() calls happen in parallel,
// each parallel thread should have its own state object.
func (ctxt *Link) makeRelocSymState() *relocSymState {
	return &relocSymState{
		target: &ctxt.Target,
		ldr:    ctxt.loader,
		err:    &ctxt.ErrorReporter,
		syms:   &ctxt.ArchSyms,
	}
}

// windynrelocsym examines a text symbol 's' and looks for relocations
// from it that correspond to references to symbols defined in DLLs,
// then fixes up those relocations as needed. A reference to a symbol
// XYZ from some DLL will fall into one of two categories: an indirect
// ref via "__imp_XYZ", or a direct ref to "XYZ". Here's an example of
// an indirect ref (this is an excerpt from objdump -ldr):
//
//	1c1: 48 89 c6                 movq    %rax, %rsi
//	1c4: ff 15 00 00 00 00        callq   *(%rip)
//	00000000000001c6:  IMAGE_REL_AMD64_REL32        __imp__errno
//
// In the assembly above, the code loads up the value of __imp_errno
// and then does an indirect call to that value.
//
// Here is what a direct reference might look like:
//
//	137: e9 20 06 00 00           jmp    0x75c
//	13c: e8 00 00 00 00           callq  0x141
//	000000000000013d:  IMAGE_REL_AMD64_REL32        _errno
//
// The assembly below dispenses with the import symbol and just makes
// a direct call to _errno.
//
// The code below handles indirect refs by redirecting the target of
// the relocation from "__imp_XYZ" to "XYZ" (since the latter symbol
// is what the Windows loader is expected to resolve). For direct refs
// the call is redirected to a stub, where the stub first loads the
// symbol and then direct an indirect call to that value.
//
// Note that for a given symbol (as above) it is perfectly legal to
// have both direct and indirect references.
func windynrelocsym(ctxt *Link, rel *loader.SymbolBuilder, s loader.Sym) error {
	var su *loader.SymbolBuilder
	relocs := ctxt.loader.Relocs(s)
	for ri := 0; ri < relocs.Count(); ri++ {
		r := relocs.At(ri)
		if r.IsMarker() {
			continue // skip marker relocations
		}
		targ := r.Sym()
		if targ == 0 {
			continue
		}
		if !ctxt.loader.AttrReachable(targ) {
			if r.Weak() {
				continue
			}
			return fmt.Errorf("dynamic relocation to unreachable symbol %s",
				ctxt.loader.SymName(targ))
		}
		// Indirect reference (__imp_X): redirect to the underlying dynimport.
		tgot := ctxt.loader.SymGot(targ)
		if tgot == loadpe.RedirectToDynImportGotToken {

			// Consistency check: name should be __imp_X
			sname := ctxt.loader.SymName(targ)
			if !strings.HasPrefix(sname, "__imp_") {
				return fmt.Errorf("internal error in windynrelocsym: redirect GOT token applied to non-import symbol %s", sname)
			}

			// Locate underlying symbol (which originally had type
			// SDYNIMPORT but has since been retyped to SWINDOWS).
			ds, err := loadpe.LookupBaseFromImport(targ, ctxt.loader, ctxt.Arch)
			if err != nil {
				return err
			}
			dstyp := ctxt.loader.SymType(ds)
			if dstyp != sym.SWINDOWS {
				return fmt.Errorf("internal error in windynrelocsym: underlying sym for %q has wrong type %s", sname, dstyp.String())
			}

			// Redirect relocation to the dynimport.
			r.SetSym(ds)
			continue
		}

		// Direct reference: route the call through a JMP stub appended to rel.
		tplt := ctxt.loader.SymPlt(targ)
		if tplt == loadpe.CreateImportStubPltToken {

			// Consistency check: don't want to see both PLT and GOT tokens.
			if tgot != -1 {
				return fmt.Errorf("internal error in windynrelocsym: invalid GOT setting %d for reloc to %s", tgot, ctxt.loader.SymName(targ))
			}

			// make dynimport JMP table for PE object files.
			tplt := int32(rel.Size())
			ctxt.loader.SetPlt(targ, tplt)

			if su == nil {
				su = ctxt.loader.MakeSymbolUpdater(s)
			}
			r.SetSym(rel.Sym())
			r.SetAdd(int64(tplt))

			// jmp *addr
			switch ctxt.Arch.Family {
			default:
				return fmt.Errorf("internal error in windynrelocsym: unsupported arch %v", ctxt.Arch.Family)
			case sys.I386:
				rel.AddUint8(0xff)
				rel.AddUint8(0x25)
				rel.AddAddrPlus(ctxt.Arch, targ, 0)
				rel.AddUint8(0x90)
				rel.AddUint8(0x90)
			case sys.AMD64:
				rel.AddUint8(0xff)
				rel.AddUint8(0x24)
				rel.AddUint8(0x25)
				rel.AddAddrPlus4(ctxt.Arch, targ, 0)
				rel.AddUint8(0x90)
			}
		} else if tplt >= 0 {
			// Stub already exists for this target; just point the
			// relocation at it.
			if su == nil {
				su = ctxt.loader.MakeSymbolUpdater(s)
			}
			r.SetSym(rel.Sym())
			r.SetAdd(int64(tplt))
		}
	}
	return nil
}

// windynrelocsyms generates jump table to C library functions that will be
// added later. windynrelocsyms writes the table into .rel symbol.
func (ctxt *Link) windynrelocsyms() {
	// Only relevant for internally-linked cgo binaries on Windows.
	if !(ctxt.IsWindows() && iscgo && ctxt.IsInternal()) {
		return
	}

	rel := ctxt.loader.CreateSymForUpdate(".rel", 0)
	rel.SetType(sym.STEXT)

	for _, s := range ctxt.Textp {
		if err := windynrelocsym(ctxt, rel, s); err != nil {
			ctxt.Errorf(s, "%v", err)
		}
	}

	ctxt.Textp = append(ctxt.Textp, rel.Sym())
}

// dynrelocsym processes the relocations of s that require entries in the
// dynamic linking tables, delegating to thearch.Adddynrel.
func dynrelocsym(ctxt *Link, s loader.Sym) {
	target := &ctxt.Target
	ldr := ctxt.loader
	syms := &ctxt.ArchSyms
	relocs := ldr.Relocs(s)
	for ri := 0; ri < relocs.Count(); ri++ {
		r := relocs.At(ri)
		if r.IsMarker() {
			continue // skip marker relocations
		}
		rSym := r.Sym()
		if r.Weak() && !ldr.AttrReachable(rSym) {
			continue
		}
		if ctxt.BuildMode == BuildModePIE && ctxt.LinkMode == LinkInternal {
			// It's expected that some relocations will be done
			// later by relocsym (R_TLS_LE, R_ADDROFF), so
			// don't worry if Adddynrel returns false.
			thearch.Adddynrel(target, ldr, syms, s, r, ri)
			continue
		}

		if rSym != 0 && ldr.SymType(rSym) == sym.SDYNIMPORT || r.Type() >= objabi.ElfRelocOffset {
			if rSym != 0 && !ldr.AttrReachable(rSym) {
				ctxt.Errorf(s, "dynamic relocation to unreachable symbol %s", ldr.SymName(rSym))
			}
			if !thearch.Adddynrel(target, ldr, syms, s, r, ri) {
				ctxt.Errorf(s, "unsupported dynamic relocation for symbol %s (type=%d (%s) stype=%d (%s))", ldr.SymName(rSym), r.Type(), sym.RelocName(ctxt.Arch, r.Type()), ldr.SymType(rSym), ldr.SymType(rSym))
			}
		}
	}
}

// dynreloc runs dynrelocsym over all text and data symbols, then emits the
// ELF dynamic hash table when targeting ELF.
func (state *dodataState) dynreloc(ctxt *Link) {
	if ctxt.HeadType == objabi.Hwindows {
		return
	}
	// -d suppresses dynamic loader format, so we may as well not
	// compute these sections or mark their symbols as reachable.
	if *FlagD {
		return
	}

	for _, s := range ctxt.Textp {
		dynrelocsym(ctxt, s)
	}
	for _, syms := range state.data {
		for _, s := range syms {
			dynrelocsym(ctxt, s)
		}
	}
	if ctxt.IsELF {
		elfdynhash(ctxt)
	}
}

// CodeblkPad writes the text symbols to out, padding gaps with pad.
func CodeblkPad(ctxt *Link, out *OutBuf, addr int64, size int64, pad []byte) {
	writeBlocks(ctxt, out, ctxt.outSem, ctxt.loader, ctxt.Textp, addr, size, pad)
}

const blockSize = 1 << 20 // 1MB chunks written at a time.

// writeBlocks writes a specified chunk of symbols to the output buffer. It
// breaks the write up into ≥blockSize chunks to write them out, and schedules
// as many goroutines as necessary to accomplish this task. This call then
// blocks, waiting on the writes to complete. Note that we use the sem parameter
// to limit the number of concurrent writes taking place.
func writeBlocks(ctxt *Link, out *OutBuf, sem chan int, ldr *loader.Loader, syms []loader.Sym, addr, size int64, pad []byte) {
	// Skip over symbols below the start address (and sub-symbols, which are
	// written as part of their container).
	for i, s := range syms {
		if ldr.SymValue(s) >= addr && !ldr.AttrSubSymbol(s) {
			syms = syms[i:]
			break
		}
	}

	var wg sync.WaitGroup
	max, lastAddr, written := int64(blockSize), addr+size, int64(0)
	for addr < lastAddr {
		// Find the last symbol we'd write.
		idx := -1
		for i, s := range syms {
			if ldr.AttrSubSymbol(s) {
				continue
			}

			// If the next symbol's size would put us out of bounds on the total length,
			// stop looking.
			end := ldr.SymValue(s) + ldr.SymSize(s)
			if end > lastAddr {
				break
			}

			// We're gonna write this symbol.
			idx = i

			// If we cross over the max size, we've got enough symbols.
			if end > addr+max {
				break
			}
		}

		// If we didn't find any symbols to write, we're done here.
		if idx < 0 {
			break
		}

		// Compute the length to write, including padding.
		// We need to write to the end address (lastAddr), or the next symbol's
		// start address, whichever comes first. If there is no more symbols,
		// just write to lastAddr. This ensures we don't leave holes between the
		// blocks or at the end.
		length := int64(0)
		if idx+1 < len(syms) {
			// Find the next top-level symbol.
			// Skip over sub symbols so we won't split a container symbol
			// into two blocks.
			next := syms[idx+1]
			for ldr.AttrSubSymbol(next) {
				idx++
				next = syms[idx+1]
			}
			length = ldr.SymValue(next) - addr
		}
		if length == 0 || length > lastAddr-addr {
			length = lastAddr - addr
		}

		// Start the block output operator.
		if o, err := out.View(uint64(out.Offset() + written)); err == nil {
			sem <- 1 // acquire a slot; released by the goroutine when done
			wg.Add(1)
			go func(o *OutBuf, ldr *loader.Loader, syms []loader.Sym, addr, size int64, pad []byte) {
				writeBlock(ctxt, o, ldr, syms, addr, size, pad)
				wg.Done()
				<-sem
			}(o, ldr, syms, addr, length, pad)
		} else { // output not mmaped, don't parallelize.
			writeBlock(ctxt, out, ldr, syms, addr, length, pad)
		}

		// Prepare for the next loop.
		if idx != -1 {
			syms = syms[idx+1:]
		}
		written += length
		addr += length
	}
	wg.Wait()
}

// writeBlock writes the symbols in syms that fall in [addr, addr+size) to out,
// applying relocations via relocsym and padding any gaps with pad.
func writeBlock(ctxt *Link, out *OutBuf, ldr *loader.Loader, syms []loader.Sym, addr, size int64, pad []byte) {

	st := ctxt.makeRelocSymState()

	// This doesn't distinguish the memory size from the file
	// size, and it lays out the file based on Symbol.Value, which
	// is the virtual address. DWARF compression changes file sizes,
	// so dwarfcompress will fix this up later if necessary.
	eaddr := addr + size
	for _, s := range syms {
		if ldr.AttrSubSymbol(s) {
			continue
		}
		val := ldr.SymValue(s)
		if val >= eaddr {
			break
		}
		if val < addr {
			ldr.Errorf(s, "phase error: addr=%#x but val=%#x sym=%s type=%v sect=%v sect.addr=%#x", addr, val, ldr.SymName(s), ldr.SymType(s), ldr.SymSect(s).Name, ldr.SymSect(s).Vaddr)
			errorexit()
		}
		// Pad up to the symbol's start address.
		if addr < val {
			out.WriteStringPad("", int(val-addr), pad)
			addr = val
		}
		P := out.WriteSym(ldr, s)
		st.relocsym(s, P)
		if ldr.IsGeneratedSym(s) {
			f := ctxt.generatorSyms[s]
			f(ctxt, s)
		}
		addr += int64(len(P))
		siz := ldr.SymSize(s)
		// Pad out to the symbol's declared size if its data was shorter.
		if addr < val+siz {
			out.WriteStringPad("", int(val+siz-addr), pad)
			addr = val + siz
		}
		if addr != val+siz {
			ldr.Errorf(s, "phase error: addr=%#x value+size=%#x", addr, val+siz)
			errorexit()
		}
		if val+siz >= eaddr {
			break
		}
	}

	// Pad any remaining space at the end of the block.
	if addr < eaddr {
		out.WriteStringPad("", int(eaddr-addr), pad)
	}
}

// writeFn is the signature shared by the per-section block writers
// (codeblk/datblk/dwarfblk/...) scheduled by writeParallel.
type writeFn func(*Link, *OutBuf, int64, int64)

// writeParallel handles scheduling parallel execution of data write functions.
func writeParallel(wg *sync.WaitGroup, fn writeFn, ctxt *Link, seek, vaddr, length uint64) {
	if out, err := ctxt.Out.View(seek); err != nil {
		// Output is not mmapped: fall back to a synchronous seek+write.
		ctxt.Out.SeekSet(int64(seek))
		fn(ctxt, ctxt.Out, int64(vaddr), int64(length))
	} else {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fn(ctxt, out, int64(vaddr), int64(length))
		}()
	}
}

// datblk writes the data segment symbols covering [addr, addr+size) to out.
func datblk(ctxt *Link, out *OutBuf, addr, size int64) {
	writeDatblkToOutBuf(ctxt, out, addr, size)
}

// Used only on Wasm for now.
func DatblkBytes(ctxt *Link, addr int64, size int64) []byte {
	// Render the data block into an in-memory OutBuf and return its bytes.
	buf := make([]byte, size)
	out := &OutBuf{heap: buf}
	writeDatblkToOutBuf(ctxt, out, addr, size)
	return buf
}

// writeDatblkToOutBuf writes the data symbols in [addr, addr+size) to out,
// zero-padding gaps.
func writeDatblkToOutBuf(ctxt *Link, out *OutBuf, addr int64, size int64) {
	writeBlocks(ctxt, out, ctxt.outSem, ctxt.loader, ctxt.datap, addr, size, zeros[:])
}

// dwarfblk writes the DWARF section symbols in [addr, addr+size) to out.
func dwarfblk(ctxt *Link, out *OutBuf, addr int64, size int64) {
	// Concatenate the section symbol lists into a single list to pass
	// to writeBlocks.
	//
	// NB: ideally we would do a separate writeBlocks call for each
	// section, but this would run the risk of undoing any file offset
	// adjustments made during layout.
	n := 0
	for i := range dwarfp {
		n += len(dwarfp[i].syms)
	}
	syms := make([]loader.Sym, 0, n)
	for i := range dwarfp {
		syms = append(syms, dwarfp[i].syms...)
	}
	writeBlocks(ctxt, out, ctxt.outSem, ctxt.loader, syms, addr, size, zeros[:])
}

// pdatablk writes the SEH .pdata symbols in [addr, addr+size) to out.
func pdatablk(ctxt *Link, out *OutBuf, addr int64, size int64) {
	writeBlocks(ctxt, out, ctxt.outSem, ctxt.loader, sehp.pdata, addr, size, zeros[:])
}

// xdatablk writes the SEH .xdata symbols in [addr, addr+size) to out.
func xdatablk(ctxt *Link, out *OutBuf, addr int64, size int64) {
	writeBlocks(ctxt, out, ctxt.outSem, ctxt.loader, sehp.xdata, addr, size, zeros[:])
}

var covCounterDataStartOff, covCounterDataLen uint64

// zeros is a shared source of zero padding bytes for the block writers.
var zeros [512]byte

var (
	// strdata maps a -X-qualified symbol name to its replacement value;
	// strnames records insertion order for deterministic processing.
	strdata  = make(map[string]string)
	strnames []string
)

// addstrdata1 parses a single -X flag argument of the form
// importpath.name=value and records it in strdata/strnames.
func addstrdata1(ctxt *Link, arg string) {
	eq := strings.Index(arg, "=")
	dot := strings.LastIndex(arg[:eq+1], ".")
	if eq < 0 || dot < 0 {
		Exitf("-X flag requires argument of the form importpath.name=value")
	}
	pkg := arg[:dot]
	if ctxt.BuildMode == BuildModePlugin && pkg == "main" {
		pkg = *flagPluginPath
	}
	pkg = objabi.PathToPrefix(pkg)
	name := pkg + arg[dot:eq]
	value := arg[eq+1:]
	if _, ok := strdata[name]; !ok {
		strnames = append(strnames, name)
	}
	strdata[name] = value
}

// addstrdata sets the initial value of the string variable name to value.
func addstrdata(arch *sys.Arch, l *loader.Loader, name, value string) {
	s := l.Lookup(name, 0)
	if s == 0 {
		return
	}
	if goType := l.SymGoType(s); goType == 0 {
		return
	} else if typeName := l.SymName(goType); typeName != "type:string" {
		Errorf(nil, "%s: cannot set with -X: not a var of type string (%s)", name, typeName)
		return
	}
	if !l.AttrReachable(s) {
		return // don't bother setting unreachable variable
	}
	bld := l.MakeSymbolUpdater(s)
	// The variable is about to get initialized data, so it can no longer
	// live in BSS.
	if bld.Type() == sym.SBSS {
		bld.SetType(sym.SDATA)
	}

	// Create a read-only carrier symbol holding the replacement string's
	// bytes; the variable itself becomes a (pointer, length) header
	// pointing at it.
	p := fmt.Sprintf("%s.str", name)
	sbld := l.CreateSymForUpdate(p, 0)
	sbld.Addstring(value)
	sbld.SetType(sym.SRODATA)

	// Don't reset the variable's size. String variable usually has size of
	// 2*PtrSize, but in ASAN build it can be larger due to red zone.
	// (See issue 56175.)
	bld.SetData(make([]byte, arch.PtrSize*2))
	bld.SetReadOnly(false)
	bld.ResetRelocs()
	bld.SetAddrPlus(arch, 0, sbld.Sym(), 0)
	bld.SetUint(arch, int64(arch.PtrSize), uint64(len(value)))
}

// dostrdata applies every recorded -X assignment, in the order the names
// were first seen on the command line.
func (ctxt *Link) dostrdata() {
	for _, name := range strnames {
		addstrdata(ctxt.Arch, ctxt.loader, name, strdata[name])
	}
}

// addgostring adds str, as a Go string value, to s. symname is the name of the
// symbol used to define the string data and must be unique per linked object.
func addgostring(ctxt *Link, ldr *loader.Loader, s *loader.SymbolBuilder, symname, str string) {
	sdata := ldr.CreateSymForUpdate(symname, 0)
	if sdata.Type() != sym.Sxxx {
		ctxt.Errorf(s.Sym(), "duplicate symname in addgostring: %s", symname)
	}
	sdata.SetLocal(true)
	sdata.SetType(sym.SRODATA)
	sdata.SetSize(int64(len(str)))
	sdata.SetData([]byte(str))
	// A Go string header is (data pointer, length).
	s.AddAddr(ctxt.Arch, sdata.Sym())
	s.AddUint(ctxt.Arch, uint64(len(str)))
}

// addinitarrdata creates a .init_array entry (an address-sized pointer
// symbol) referring to s.
func addinitarrdata(ctxt *Link, ldr *loader.Loader, s loader.Sym) {
	p := ldr.SymName(s) + ".ptr"
	sp := ldr.CreateSymForUpdate(p, 0)
	sp.SetType(sym.SINITARR)
	sp.SetSize(0)
	sp.SetDuplicateOK(true)
	sp.AddAddr(ctxt.Arch, s)
}

// symalign returns the required alignment for the given symbol s.
func symalign(ldr *loader.Loader, s loader.Sym) int32 {
	min := int32(thearch.Minalign)
	align := ldr.SymAlign(s)
	if align >= min {
		return align
	} else if align != 0 {
		return min
	}
	// No alignment recorded: derive one from the symbol size, capped at
	// the architecture maximum, and cache it on the symbol.
	align = int32(thearch.Maxalign)
	ssz := ldr.SymSize(s)
	for int64(align) > ssz && align > min {
		align >>= 1
	}
	ldr.SetSymAlign(s, align)
	return align
}

// aligndatsize rounds datsize up to the alignment required by symbol s.
func aligndatsize(state *dodataState, datsize int64, s loader.Sym) int64 {
	return Rnd(datsize, int64(symalign(state.ctxt.loader, s)))
}

const debugGCProg = false

// GCProg writes a garbage-collection pointer program describing a data or
// bss section into a linker symbol.
type GCProg struct {
	ctxt *Link
	sym  *loader.SymbolBuilder
	w    gcprog.Writer
}

// Init prepares p to emit a GC program into the symbol with the given name.
func (p *GCProg) Init(ctxt *Link, name string) {
	p.ctxt = ctxt
	p.sym = ctxt.loader.CreateSymForUpdate(name, 0)
	p.w.Init(p.writeByte())
	if debugGCProg {
		fmt.Fprintf(os.Stderr, "ld: start GCProg %s\n", name)
		p.w.Debug(os.Stderr)
	}
}

// writeByte returns a sink that appends each program byte to p.sym.
func (p *GCProg) writeByte() func(x byte) {
	return func(x byte) {
		p.sym.AddUint8(x)
	}
}

// End finishes the program for a section of the given byte size.
func (p *GCProg) End(size int64) {
	p.w.ZeroUntil(size / int64(p.ctxt.Arch.PtrSize))
	p.w.End()
	if debugGCProg {
		fmt.Fprintf(os.Stderr, "ld: end GCProg\n")
	}
}

// AddSym records the pointer layout of symbol s in the program.
func (p *GCProg) AddSym(s loader.Sym) {
	ldr := p.ctxt.loader
	typ := ldr.SymGoType(s)

	// Things without pointers should be in sym.SNOPTRDATA or sym.SNOPTRBSS;
	// everything we see should have pointers and should therefore have a type.
	if typ == 0 {
		switch ldr.SymName(s) {
		case "runtime.data", "runtime.edata", "runtime.bss", "runtime.ebss":
			// Ignore special symbols that are sometimes laid out
			// as real symbols. See comment about dyld on darwin in
			// the address function.
			return
		}
		p.ctxt.Errorf(p.sym.Sym(), "missing Go type information for global symbol %s: size %d", ldr.SymName(s), ldr.SymSize(s))
		return
	}

	ptrsize := int64(p.ctxt.Arch.PtrSize)
	typData := ldr.Data(typ)
	nptr := decodetypePtrdata(p.ctxt.Arch, typData) / ptrsize

	if debugGCProg {
		fmt.Fprintf(os.Stderr, "gcprog sym: %s at %d (ptr=%d+%d)\n", ldr.SymName(s), ldr.SymValue(s), ldr.SymValue(s)/ptrsize, nptr)
	}

	sval := ldr.SymValue(s)
	if decodetypeUsegcprog(p.ctxt.Arch, typData) == 0 {
		// Copy pointers from mask into program.
		mask := decodetypeGcmask(p.ctxt, typ)
		for i := int64(0); i < nptr; i++ {
			if (mask[i/8]>>uint(i%8))&1 != 0 {
				p.w.Ptr(sval/ptrsize + i)
			}
		}
		return
	}

	// Copy program.
	prog := decodetypeGcprog(p.ctxt, typ)
	p.w.ZeroUntil(sval / ptrsize)
	p.w.Append(prog[4:], nptr)
}

// cutoff is the maximum data section size permitted by the linker
// (see issue #9862).
const cutoff = 2e9 // 2 GB (or so; looks better in errors than 2^31)

// check accumulated size of data sections
func (state *dodataState) checkdatsize(symn sym.SymKind) {
	if state.datsize > cutoff {
		Errorf(nil, "too much data, last section %v (%d, over %v bytes)", symn, state.datsize, cutoff)
	}
}

// checkSectSize reports an error if sect exceeds the linker's section
// size limit.
func checkSectSize(sect *sym.Section) {
	// TODO: consider using 4 GB size limit for DWARF sections, and
	// make sure we generate unsigned offset in relocations and check
	// for overflow.
	if sect.Length > cutoff {
		Errorf(nil, "too much data in section %s (%d, over %v bytes)", sect.Name, sect.Length, cutoff)
	}
}

// fixZeroSizedSymbols gives a few special symbols with zero size some space.
func fixZeroSizedSymbols(ctxt *Link) {
	// The values in moduledata are filled out by relocations
	// pointing to the addresses of these special symbols.
	// Typically these symbols have no size and are not laid
	// out with their matching section.
	//
	// However on darwin, dyld will find the special symbol
	// in the first loaded module, even though it is local.
	//
	// (A hypothesis, formed without looking in the dyld sources:
	// these special symbols have no size, so their address
	// matches a real symbol. The dynamic linker assumes we
	// want the normal symbol with the same address and finds
	// it in the other module.)
	//
	// To work around this we lay out the symbols whose
	// addresses are vital for multi-module programs to work
	// as normal symbols, and give them a little size.
	//
	// On AIX, as all DATA sections are merged together, ld might not put
	// these symbols at the beginning of their respective section if there
	// aren't real symbols, their alignment might not match the
	// first symbol alignment. Therefore, they are explicitly put at the
	// beginning of their section with the same alignment.
	if !(ctxt.DynlinkingGo() && ctxt.HeadType == objabi.Hdarwin) && !(ctxt.HeadType == objabi.Haix && ctxt.LinkMode == LinkExternal) {
		return
	}

	ldr := ctxt.loader
	bss := ldr.CreateSymForUpdate("runtime.bss", 0)
	bss.SetSize(8)
	ldr.SetAttrSpecial(bss.Sym(), false)

	ebss := ldr.CreateSymForUpdate("runtime.ebss", 0)
	ldr.SetAttrSpecial(ebss.Sym(), false)

	data := ldr.CreateSymForUpdate("runtime.data", 0)
	data.SetSize(8)
	ldr.SetAttrSpecial(data.Sym(), false)

	edata := ldr.CreateSymForUpdate("runtime.edata", 0)
	ldr.SetAttrSpecial(edata.Sym(), false)

	if ctxt.HeadType == objabi.Haix {
		// XCOFFTOC symbols are part of .data section.
		edata.SetType(sym.SXCOFFTOC)
	}

	noptrbss := ldr.CreateSymForUpdate("runtime.noptrbss", 0)
	noptrbss.SetSize(8)
	ldr.SetAttrSpecial(noptrbss.Sym(), false)

	enoptrbss := ldr.CreateSymForUpdate("runtime.enoptrbss", 0)
	ldr.SetAttrSpecial(enoptrbss.Sym(), false)

	noptrdata := ldr.CreateSymForUpdate("runtime.noptrdata", 0)
	noptrdata.SetSize(8)
	ldr.SetAttrSpecial(noptrdata.Sym(), false)

	enoptrdata := ldr.CreateSymForUpdate("runtime.enoptrdata", 0)
	ldr.SetAttrSpecial(enoptrdata.Sym(), false)

	types := ldr.CreateSymForUpdate("runtime.types", 0)
	types.SetType(sym.STYPE)
	types.SetSize(8)
	ldr.SetAttrSpecial(types.Sym(), false)

	etypes := ldr.CreateSymForUpdate("runtime.etypes", 0)
	etypes.SetType(sym.SFUNCTAB)
	ldr.SetAttrSpecial(etypes.Sym(), false)

	if ctxt.HeadType == objabi.Haix {
		rodata := ldr.CreateSymForUpdate("runtime.rodata", 0)
		rodata.SetType(sym.SSTRING)
		rodata.SetSize(8)
		ldr.SetAttrSpecial(rodata.Sym(), false)

		erodata := ldr.CreateSymForUpdate("runtime.erodata", 0)
		ldr.SetAttrSpecial(erodata.Sym(), false)
	}
}

// makeRelroForSharedLib creates a section of readonly data if necessary.
func (state *dodataState) makeRelroForSharedLib(target *Link) {
	if !target.UseRelro() {
		return
	}

	// "read only" data with relocations needs to go in its own section
	// when building a shared library. We do this by boosting objects of
	// type SXXX with relocations to type SXXXRELRO.
	ldr := target.loader
	for _, symnro := range sym.ReadOnly {
		symnrelro := sym.RelROMap[symnro]

		ro := []loader.Sym{}
		relro := state.data[symnrelro]

		for _, s := range state.data[symnro] {
			relocs := ldr.Relocs(s)
			isRelro := relocs.Count() > 0
			switch state.symType(s) {
			case sym.STYPE, sym.STYPERELRO, sym.SGOFUNCRELRO:
				// Symbols are not sorted yet, so it is possible
				// that an Outer symbol has been changed to a
				// relro Type before it reaches here.
				isRelro = true
			case sym.SFUNCTAB:
				if ldr.SymName(s) == "runtime.etypes" {
					// runtime.etypes must be at the end of
					// the relro data.
					isRelro = true
				}
			case sym.SGOFUNC:
				// The only SGOFUNC symbols that contain relocations are .stkobj,
				// and their relocations are of type objabi.R_ADDROFF,
				// which always get resolved during linking.
				isRelro = false
			}
			if isRelro {
				state.setSymType(s, symnrelro)
				// Keep a sub-symbol and its outer symbol in the
				// same (relro) bucket.
				if outer := ldr.OuterSym(s); outer != 0 {
					state.setSymType(outer, symnrelro)
				}
				relro = append(relro, s)
			} else {
				ro = append(ro, s)
			}
		}

		// Check that we haven't made two symbols with the same .Outer into
		// different types (because references two symbols with non-nil Outer
		// become references to the outer symbol + offset it's vital that the
		// symbol and the outer end up in the same section).
		for _, s := range relro {
			if outer := ldr.OuterSym(s); outer != 0 {
				st := state.symType(s)
				ost := state.symType(outer)
				if st != ost {
					state.ctxt.Errorf(s, "inconsistent types for symbol and its Outer %s (%v != %v)",
						ldr.SymName(outer), st, ost)
				}
			}
		}

		state.data[symnro] = ro
		state.data[symnrelro] = relro
	}
}

// dodataState holds bits of state information needed by dodata() and the
// various helpers it calls. The lifetime of these items should not extend
// past the end of dodata().
type dodataState struct {
	// Link context
	ctxt *Link
	// Data symbols bucketed by type.
	data [sym.SXREF][]loader.Sym
	// Max alignment for each flavor of data symbol.
	dataMaxAlign [sym.SXREF]int32
	// Overridden sym type
	symGroupType []sym.SymKind
	// Current data size so far.
	datsize int64
}

// A note on symType/setSymType below:
//
// In the legacy linker, the types of symbols (notably data symbols) are
// changed during the symtab() phase so as to insure that similar symbols
// are bucketed together, then their types are changed back again during
// dodata. Symbol to section assignment also plays tricks along these lines
// in the case where a relro segment is needed.
//
// The value returned from setType() below reflects the effects of
// any overrides made by symtab and/or dodata.

// symType returns the (possibly overridden) type of 's'.
func (state *dodataState) symType(s loader.Sym) sym.SymKind {
	if int(s) < len(state.symGroupType) {
		if override := state.symGroupType[s]; override != 0 {
			return override
		}
	}
	return state.ctxt.loader.SymType(s)
}

// setSymType sets a new override type for 's'.
func (state *dodataState) setSymType(s loader.Sym, kind sym.SymKind) {
	if s == 0 {
		panic("bad")
	}
	// Record an override if the side table covers this symbol index;
	// otherwise fall back to mutating the symbol directly.
	if int(s) < len(state.symGroupType) {
		state.symGroupType[s] = kind
	} else {
		su := state.ctxt.loader.MakeSymbolUpdater(s)
		su.SetType(kind)
	}
}

// dodata collects data symbols, sorts them, and assigns them to sections
// in the data, rodata, relro, DWARF, and SEH segments.
func (ctxt *Link) dodata(symGroupType []sym.SymKind) {

	// Give zero-sized symbols space if necessary.
	fixZeroSizedSymbols(ctxt)

	// Collect data symbols by type into data.
	state := dodataState{ctxt: ctxt, symGroupType: symGroupType}
	ldr := ctxt.loader
	for s := loader.Sym(1); s < loader.Sym(ldr.NSym()); s++ {
		if !ldr.AttrReachable(s) || ldr.AttrSpecial(s) || ldr.AttrSubSymbol(s) ||
			!ldr.TopLevelSym(s) {
			continue
		}

		st := state.symType(s)

		if st <= sym.STEXT || st >= sym.SXREF {
			continue
		}
		state.data[st] = append(state.data[st], s)

		// Similarly with checking the onlist attr.
		if ldr.AttrOnList(s) {
			log.Fatalf("symbol %s listed multiple times", ldr.SymName(s))
		}
		ldr.SetAttrOnList(s, true)
	}

	// Now that we have the data symbols, but before we start
	// to assign addresses, record all the necessary
	// dynamic relocations. These will grow the relocation
	// symbol, which is itself data.
	//
	// On darwin, we need the symbol table numbers for dynreloc.
	if ctxt.HeadType == objabi.Hdarwin {
		machosymorder(ctxt)
	}
	state.dynreloc(ctxt)

	// Move any RO data with relocations to a separate section.
	state.makeRelroForSharedLib(ctxt)

	// Set alignment for the symbol with the largest known index,
	// so as to trigger allocation of the loader's internal
	// alignment array. This will avoid data races in the parallel
	// section below.
	lastSym := loader.Sym(ldr.NSym() - 1)
	ldr.SetSymAlign(lastSym, ldr.SymAlign(lastSym))

	// Sort symbols.
	var wg sync.WaitGroup
	for symn := range state.data {
		symn := sym.SymKind(symn)
		wg.Add(1)
		go func() {
			state.data[symn], state.dataMaxAlign[symn] = state.dodataSect(ctxt, symn, state.data[symn])
			wg.Done()
		}()
	}
	wg.Wait()

	if ctxt.IsELF {
		// Make .rela and .rela.plt contiguous, the ELF ABI requires this
		// and Solaris actually cares.
		syms := state.data[sym.SELFROSECT]
		reli, plti := -1, -1
		for i, s := range syms {
			switch ldr.SymName(s) {
			case ".rel.plt", ".rela.plt":
				plti = i
			case ".rel", ".rela":
				reli = i
			}
		}
		if reli >= 0 && plti >= 0 && plti != reli+1 {
			var first, second int
			if plti > reli {
				first, second = reli, plti
			} else {
				first, second = plti, reli
			}
			rel, plt := syms[reli], syms[plti]
			copy(syms[first+2:], syms[first+1:second])
			syms[first+0] = rel
			syms[first+1] = plt

			// Make sure alignment doesn't introduce a gap.
			// Setting the alignment explicitly prevents
			// symalign from basing it on the size and
			// getting it wrong.
			ldr.SetSymAlign(rel, int32(ctxt.Arch.RegSize))
			ldr.SetSymAlign(plt, int32(ctxt.Arch.RegSize))
		}
		state.data[sym.SELFROSECT] = syms
	}

	if ctxt.HeadType == objabi.Haix && ctxt.LinkMode == LinkExternal {
		// These symbols must have the same alignment as their section.
		// Otherwise, ld might change the layout of Go sections.
		ldr.SetSymAlign(ldr.Lookup("runtime.data", 0), state.dataMaxAlign[sym.SDATA])
		ldr.SetSymAlign(ldr.Lookup("runtime.bss", 0), state.dataMaxAlign[sym.SBSS])
	}

	// Create *sym.Section objects and assign symbols to sections for
	// data/rodata (and related) symbols.
	state.allocateDataSections(ctxt)

	state.allocateSEHSections(ctxt)

	// Create *sym.Section objects and assign symbols to sections for
	// DWARF symbols.
	state.allocateDwarfSections(ctxt)

	/* number the sections */
	n := int16(1)

	for _, sect := range Segtext.Sections {
		sect.Extnum = n
		n++
	}
	for _, sect := range Segrodata.Sections {
		sect.Extnum = n
		n++
	}
	for _, sect := range Segrelrodata.Sections {
		sect.Extnum = n
		n++
	}
	for _, sect := range Segdata.Sections {
		sect.Extnum = n
		n++
	}
	for _, sect := range Segdwarf.Sections {
		sect.Extnum = n
		n++
	}
	for _, sect := range Segpdata.Sections {
		sect.Extnum = n
		n++
	}
	for _, sect := range Segxdata.Sections {
		sect.Extnum = n
		n++
	}
}

// allocateDataSectionForSym creates a new sym.Section into which a
// single symbol will be placed. Here "seg" is the segment into which
// the section will go, "s" is the symbol to be placed into the new
// section, and "rwx" contains permissions for the section.
func (state *dodataState) allocateDataSectionForSym(seg *sym.Segment, s loader.Sym, rwx int) *sym.Section {
	ldr := state.ctxt.loader
	sname := ldr.SymName(s)
	// "go:" is not a valid section-name prefix on all platforms; rewrite
	// it to a dotted form.
	if strings.HasPrefix(sname, "go:") {
		sname = ".go." + sname[len("go:"):]
	}
	sect := addsection(ldr, state.ctxt.Arch, seg, sname, rwx)
	sect.Align = symalign(ldr, s)
	state.datsize = Rnd(state.datsize, int64(sect.Align))
	sect.Vaddr = uint64(state.datsize)
	return sect
}

// allocateNamedDataSection creates a new sym.Section for a category
// of data symbols. Here "seg" is the segment into which the section
// will go, "sName" is the name to give to the section, "types" is a
// range of symbol types to be put into the section, and "rwx"
// contains permissions for the section.
+func (state *dodataState) allocateNamedDataSection(seg *sym.Segment, sName string, types []sym.SymKind, rwx int) *sym.Section { + sect := addsection(state.ctxt.loader, state.ctxt.Arch, seg, sName, rwx) + if len(types) == 0 { + sect.Align = 1 + } else if len(types) == 1 { + sect.Align = state.dataMaxAlign[types[0]] + } else { + for _, symn := range types { + align := state.dataMaxAlign[symn] + if sect.Align < align { + sect.Align = align + } + } + } + state.datsize = Rnd(state.datsize, int64(sect.Align)) + sect.Vaddr = uint64(state.datsize) + return sect +} + +// assignDsymsToSection assigns a collection of data symbols to a +// newly created section. "sect" is the section into which to place +// the symbols, "syms" holds the list of symbols to assign, +// "forceType" (if non-zero) contains a new sym type to apply to each +// sym during the assignment, and "aligner" is a hook to call to +// handle alignment during the assignment process. +func (state *dodataState) assignDsymsToSection(sect *sym.Section, syms []loader.Sym, forceType sym.SymKind, aligner func(state *dodataState, datsize int64, s loader.Sym) int64) { + ldr := state.ctxt.loader + for _, s := range syms { + state.datsize = aligner(state, state.datsize, s) + ldr.SetSymSect(s, sect) + if forceType != sym.Sxxx { + state.setSymType(s, forceType) + } + ldr.SetSymValue(s, int64(uint64(state.datsize)-sect.Vaddr)) + state.datsize += ldr.SymSize(s) + } + sect.Length = uint64(state.datsize) - sect.Vaddr +} + +func (state *dodataState) assignToSection(sect *sym.Section, symn sym.SymKind, forceType sym.SymKind) { + state.assignDsymsToSection(sect, state.data[symn], forceType, aligndatsize) + state.checkdatsize(symn) +} + +// allocateSingleSymSections walks through the bucketed data symbols +// with type 'symn', creates a new section for each sym, and assigns +// the sym to a newly created section. Section name is set from the +// symbol name. 
"Seg" is the segment into which to place the new +// section, "forceType" is the new sym.SymKind to assign to the symbol +// within the section, and "rwx" holds section permissions. +func (state *dodataState) allocateSingleSymSections(seg *sym.Segment, symn sym.SymKind, forceType sym.SymKind, rwx int) { + ldr := state.ctxt.loader + for _, s := range state.data[symn] { + sect := state.allocateDataSectionForSym(seg, s, rwx) + ldr.SetSymSect(s, sect) + state.setSymType(s, forceType) + ldr.SetSymValue(s, int64(uint64(state.datsize)-sect.Vaddr)) + state.datsize += ldr.SymSize(s) + sect.Length = uint64(state.datsize) - sect.Vaddr + } + state.checkdatsize(symn) +} + +// allocateNamedSectionAndAssignSyms creates a new section with the +// specified name, then walks through the bucketed data symbols with +// type 'symn' and assigns each of them to this new section. "Seg" is +// the segment into which to place the new section, "secName" is the +// name to give to the new section, "forceType" (if non-zero) contains +// a new sym type to apply to each sym during the assignment, and +// "rwx" holds section permissions. +func (state *dodataState) allocateNamedSectionAndAssignSyms(seg *sym.Segment, secName string, symn sym.SymKind, forceType sym.SymKind, rwx int) *sym.Section { + + sect := state.allocateNamedDataSection(seg, secName, []sym.SymKind{symn}, rwx) + state.assignDsymsToSection(sect, state.data[symn], forceType, aligndatsize) + return sect +} + +// allocateDataSections allocates sym.Section objects for data/rodata +// (and related) symbols, and then assigns symbols to those sections. +func (state *dodataState) allocateDataSections(ctxt *Link) { + // Allocate sections. + // Data is processed before segtext, because we need + // to see all symbols in the .data and .bss sections in order + // to generate garbage collection information. + + // Writable data sections that do not need any specialized handling. 
+ writable := []sym.SymKind{ + sym.SBUILDINFO, + sym.SELFSECT, + sym.SMACHO, + sym.SMACHOGOT, + sym.SWINDOWS, + } + for _, symn := range writable { + state.allocateSingleSymSections(&Segdata, symn, sym.SDATA, 06) + } + ldr := ctxt.loader + + // .got + if len(state.data[sym.SELFGOT]) > 0 { + state.allocateNamedSectionAndAssignSyms(&Segdata, ".got", sym.SELFGOT, sym.SDATA, 06) + } + + /* pointer-free data */ + sect := state.allocateNamedSectionAndAssignSyms(&Segdata, ".noptrdata", sym.SNOPTRDATA, sym.SDATA, 06) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.noptrdata", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.enoptrdata", 0), sect) + + hasinitarr := ctxt.linkShared + + /* shared library initializer */ + switch ctxt.BuildMode { + case BuildModeCArchive, BuildModeCShared, BuildModeShared, BuildModePlugin: + hasinitarr = true + } + + if ctxt.HeadType == objabi.Haix { + if len(state.data[sym.SINITARR]) > 0 { + Errorf(nil, "XCOFF format doesn't allow .init_array section") + } + } + + if hasinitarr && len(state.data[sym.SINITARR]) > 0 { + state.allocateNamedSectionAndAssignSyms(&Segdata, ".init_array", sym.SINITARR, sym.Sxxx, 06) + } + + /* data */ + sect = state.allocateNamedSectionAndAssignSyms(&Segdata, ".data", sym.SDATA, sym.SDATA, 06) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.data", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.edata", 0), sect) + dataGcEnd := state.datsize - int64(sect.Vaddr) + + // On AIX, TOC entries must be the last of .data + // These aren't part of gc as they won't change during the runtime. 
+ state.assignToSection(sect, sym.SXCOFFTOC, sym.SDATA) + state.checkdatsize(sym.SDATA) + sect.Length = uint64(state.datsize) - sect.Vaddr + + /* bss */ + sect = state.allocateNamedSectionAndAssignSyms(&Segdata, ".bss", sym.SBSS, sym.Sxxx, 06) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.bss", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.ebss", 0), sect) + bssGcEnd := state.datsize - int64(sect.Vaddr) + + // Emit gcdata for bss symbols now that symbol values have been assigned. + gcsToEmit := []struct { + symName string + symKind sym.SymKind + gcEnd int64 + }{ + {"runtime.gcdata", sym.SDATA, dataGcEnd}, + {"runtime.gcbss", sym.SBSS, bssGcEnd}, + } + for _, g := range gcsToEmit { + var gc GCProg + gc.Init(ctxt, g.symName) + for _, s := range state.data[g.symKind] { + gc.AddSym(s) + } + gc.End(g.gcEnd) + } + + /* pointer-free bss */ + sect = state.allocateNamedSectionAndAssignSyms(&Segdata, ".noptrbss", sym.SNOPTRBSS, sym.Sxxx, 06) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.noptrbss", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.enoptrbss", 0), sect) + + // Code coverage counters are assigned to the .noptrbss section. + // We assign them in a separate pass so that they stay aggregated + // together in a single blob (coverage runtime depends on this). + covCounterDataStartOff = sect.Length + state.assignToSection(sect, sym.SCOVERAGE_COUNTER, sym.SNOPTRBSS) + covCounterDataLen = sect.Length - covCounterDataStartOff + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.covctrs", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.ecovctrs", 0), sect) + + // Coverage instrumentation counters for libfuzzer. 
+ if len(state.data[sym.SLIBFUZZER_8BIT_COUNTER]) > 0 { + sect := state.allocateNamedSectionAndAssignSyms(&Segdata, ".go.fuzzcntrs", sym.SLIBFUZZER_8BIT_COUNTER, sym.Sxxx, 06) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.__start___sancov_cntrs", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.__stop___sancov_cntrs", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("internal/fuzz._counters", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("internal/fuzz._ecounters", 0), sect) + } + + // Assign runtime.end to the last section of data segment. + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.end", 0), Segdata.Sections[len(Segdata.Sections)-1]) + + if len(state.data[sym.STLSBSS]) > 0 { + var sect *sym.Section + // FIXME: not clear why it is sometimes necessary to suppress .tbss section creation. + if (ctxt.IsELF || ctxt.HeadType == objabi.Haix) && (ctxt.LinkMode == LinkExternal || !*FlagD) { + sect = addsection(ldr, ctxt.Arch, &Segdata, ".tbss", 06) + sect.Align = int32(ctxt.Arch.PtrSize) + // FIXME: why does this need to be set to zero? + sect.Vaddr = 0 + } + state.datsize = 0 + + for _, s := range state.data[sym.STLSBSS] { + state.datsize = aligndatsize(state, state.datsize, s) + if sect != nil { + ldr.SetSymSect(s, sect) + } + ldr.SetSymValue(s, state.datsize) + state.datsize += ldr.SymSize(s) + } + state.checkdatsize(sym.STLSBSS) + + if sect != nil { + sect.Length = uint64(state.datsize) + } + } + + /* + * We finished data, begin read-only data. + * Not all systems support a separate read-only non-executable data section. + * ELF and Windows PE systems do. + * OS X and Plan 9 do not. + * And if we're using external linking mode, the point is moot, + * since it's not our decision; that code expects the sections in + * segtext. 
+ */ + var segro *sym.Segment + if ctxt.IsELF && ctxt.LinkMode == LinkInternal { + segro = &Segrodata + } else if ctxt.HeadType == objabi.Hwindows { + segro = &Segrodata + } else { + segro = &Segtext + } + + state.datsize = 0 + + /* read-only executable ELF, Mach-O sections */ + if len(state.data[sym.STEXT]) != 0 { + culprit := ldr.SymName(state.data[sym.STEXT][0]) + Errorf(nil, "dodata found an sym.STEXT symbol: %s", culprit) + } + state.allocateSingleSymSections(&Segtext, sym.SELFRXSECT, sym.SRODATA, 05) + state.allocateSingleSymSections(&Segtext, sym.SMACHOPLT, sym.SRODATA, 05) + + /* read-only data */ + sect = state.allocateNamedDataSection(segro, ".rodata", sym.ReadOnly, 04) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.rodata", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.erodata", 0), sect) + if !ctxt.UseRelro() { + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.types", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.etypes", 0), sect) + } + for _, symn := range sym.ReadOnly { + symnStartValue := state.datsize + if len(state.data[symn]) != 0 { + symnStartValue = aligndatsize(state, symnStartValue, state.data[symn][0]) + } + state.assignToSection(sect, symn, sym.SRODATA) + setCarrierSize(symn, state.datsize-symnStartValue) + if ctxt.HeadType == objabi.Haix { + // Read-only symbols might be wrapped inside their outer + // symbol. + // XCOFF symbol table needs to know the size of + // these outer symbols. + xcoffUpdateOuterSize(ctxt, state.datsize-symnStartValue, symn) + } + } + + /* read-only ELF, Mach-O sections */ + state.allocateSingleSymSections(segro, sym.SELFROSECT, sym.SRODATA, 04) + + // There is some data that are conceptually read-only but are written to by + // relocations. On GNU systems, we can arrange for the dynamic linker to + // mprotect sections after relocations are applied by giving them write + // permissions in the object file and calling them ".data.rel.ro.FOO". 
We + // divide the .rodata section between actual .rodata and .data.rel.ro.rodata, + // but for the other sections that this applies to, we just write a read-only + // .FOO section or a read-write .data.rel.ro.FOO section depending on the + // situation. + // TODO(mwhudson): It would make sense to do this more widely, but it makes + // the system linker segfault on darwin. + const relroPerm = 06 + const fallbackPerm = 04 + relroSecPerm := fallbackPerm + genrelrosecname := func(suffix string) string { + if suffix == "" { + return ".rodata" + } + return suffix + } + seg := segro + + if ctxt.UseRelro() { + segrelro := &Segrelrodata + if ctxt.LinkMode == LinkExternal && !ctxt.IsAIX() && !ctxt.IsDarwin() { + // Using a separate segment with an external + // linker results in some programs moving + // their data sections unexpectedly, which + // corrupts the moduledata. So we use the + // rodata segment and let the external linker + // sort out a rel.ro segment. + segrelro = segro + } else { + // Reset datsize for new segment. + state.datsize = 0 + } + + if !ctxt.IsDarwin() { // We don't need the special names on darwin. + genrelrosecname = func(suffix string) string { + return ".data.rel.ro" + suffix + } + } + + relroReadOnly := []sym.SymKind{} + for _, symnro := range sym.ReadOnly { + symn := sym.RelROMap[symnro] + relroReadOnly = append(relroReadOnly, symn) + } + seg = segrelro + relroSecPerm = relroPerm + + /* data only written by relocations */ + sect = state.allocateNamedDataSection(segrelro, genrelrosecname(""), relroReadOnly, relroSecPerm) + + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.types", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.etypes", 0), sect) + + for i, symnro := range sym.ReadOnly { + if i == 0 && symnro == sym.STYPE && ctxt.HeadType != objabi.Haix { + // Skip forward so that no type + // reference uses a zero offset. + // This is unlikely but possible in small + // programs with no other read-only data. 
+ state.datsize++ + } + + symn := sym.RelROMap[symnro] + symnStartValue := state.datsize + if len(state.data[symn]) != 0 { + symnStartValue = aligndatsize(state, symnStartValue, state.data[symn][0]) + } + + for _, s := range state.data[symn] { + outer := ldr.OuterSym(s) + if s != 0 && ldr.SymSect(outer) != nil && ldr.SymSect(outer) != sect { + ctxt.Errorf(s, "s.Outer (%s) in different section from s, %s != %s", ldr.SymName(outer), ldr.SymSect(outer).Name, sect.Name) + } + } + state.assignToSection(sect, symn, sym.SRODATA) + setCarrierSize(symn, state.datsize-symnStartValue) + if ctxt.HeadType == objabi.Haix { + // Read-only symbols might be wrapped inside their outer + // symbol. + // XCOFF symbol table needs to know the size of + // these outer symbols. + xcoffUpdateOuterSize(ctxt, state.datsize-symnStartValue, symn) + } + } + + sect.Length = uint64(state.datsize) - sect.Vaddr + } + + /* typelink */ + sect = state.allocateNamedDataSection(seg, genrelrosecname(".typelink"), []sym.SymKind{sym.STYPELINK}, relroSecPerm) + + typelink := ldr.CreateSymForUpdate("runtime.typelink", 0) + ldr.SetSymSect(typelink.Sym(), sect) + typelink.SetType(sym.SRODATA) + state.datsize += typelink.Size() + state.checkdatsize(sym.STYPELINK) + sect.Length = uint64(state.datsize) - sect.Vaddr + + /* itablink */ + sect = state.allocateNamedDataSection(seg, genrelrosecname(".itablink"), []sym.SymKind{sym.SITABLINK}, relroSecPerm) + + itablink := ldr.CreateSymForUpdate("runtime.itablink", 0) + ldr.SetSymSect(itablink.Sym(), sect) + itablink.SetType(sym.SRODATA) + state.datsize += itablink.Size() + state.checkdatsize(sym.SITABLINK) + sect.Length = uint64(state.datsize) - sect.Vaddr + + /* gosymtab */ + sect = state.allocateNamedSectionAndAssignSyms(seg, genrelrosecname(".gosymtab"), sym.SSYMTAB, sym.SRODATA, relroSecPerm) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.symtab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.esymtab", 0), sect) + + /* gopclntab */ + sect = 
state.allocateNamedSectionAndAssignSyms(seg, genrelrosecname(".gopclntab"), sym.SPCLNTAB, sym.SRODATA, relroSecPerm) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pclntab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pcheader", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.funcnametab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.cutab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.filetab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pctab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.functab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.epclntab", 0), sect) + setCarrierSize(sym.SPCLNTAB, int64(sect.Length)) + if ctxt.HeadType == objabi.Haix { + xcoffUpdateOuterSize(ctxt, int64(sect.Length), sym.SPCLNTAB) + } + + // 6g uses 4-byte relocation offsets, so the entire segment must fit in 32 bits. + if state.datsize != int64(uint32(state.datsize)) { + Errorf(nil, "read-only data segment too large: %d", state.datsize) + } + + siz := 0 + for symn := sym.SELFRXSECT; symn < sym.SXREF; symn++ { + siz += len(state.data[symn]) + } + ctxt.datap = make([]loader.Sym, 0, siz) + for symn := sym.SELFRXSECT; symn < sym.SXREF; symn++ { + ctxt.datap = append(ctxt.datap, state.data[symn]...) + } +} + +// allocateDwarfSections allocates sym.Section objects for DWARF +// symbols, and assigns symbols to sections. +func (state *dodataState) allocateDwarfSections(ctxt *Link) { + + alignOne := func(state *dodataState, datsize int64, s loader.Sym) int64 { return datsize } + + ldr := ctxt.loader + for i := 0; i < len(dwarfp); i++ { + // First the section symbol. 
		// (Continuation of allocateDwarfSections' per-DWARF-section loop.)
		// First the section symbol.
		s := dwarfp[i].secSym()
		sect := state.allocateNamedDataSection(&Segdwarf, ldr.SymName(s), []sym.SymKind{}, 04)
		ldr.SetSymSect(s, sect)
		sect.Sym = sym.LoaderSym(s)
		// Remember the original type before it is rewritten to SRODATA,
		// so the AIX .debug_loc bookkeeping below can still see it.
		curType := ldr.SymType(s)
		state.setSymType(s, sym.SRODATA)
		ldr.SetSymValue(s, int64(uint64(state.datsize)-sect.Vaddr))
		state.datsize += ldr.SymSize(s)

		// Then any sub-symbols for the section symbol.
		subSyms := dwarfp[i].subSyms()
		state.assignDsymsToSection(sect, subSyms, sym.SRODATA, alignOne)

		for j := 0; j < len(subSyms); j++ {
			s := subSyms[j]
			if ctxt.HeadType == objabi.Haix && curType == sym.SDWARFLOC {
				// Update the size of .debug_loc for this symbol's
				// package.
				addDwsectCUSize(".debug_loc", ldr.SymPkg(s), uint64(ldr.SymSize(s)))
			}
		}
		sect.Length = uint64(state.datsize) - sect.Vaddr
		checkSectSize(sect)
	}
}

// allocateSEHSections allocates a sym.Section object for SEH
// symbols, and assigns symbols to sections: .pdata into Segpdata
// and .xdata into Segxdata, each only when non-empty.
func (state *dodataState) allocateSEHSections(ctxt *Link) {
	if len(sehp.pdata) > 0 {
		sect := state.allocateNamedDataSection(&Segpdata, ".pdata", []sym.SymKind{}, 04)
		state.assignDsymsToSection(sect, sehp.pdata, sym.SRODATA, aligndatsize)
		state.checkdatsize(sym.SSEHSECT)
	}
	if len(sehp.xdata) > 0 {
		sect := state.allocateNamedDataSection(&Segxdata, ".xdata", []sym.SymKind{}, 04)
		state.assignDsymsToSection(sect, sehp.xdata, sym.SRODATA, aligndatsize)
		state.checkdatsize(sym.SSEHSECT)
	}
}

// symNameSize caches a symbol's sorting keys for dodataSect:
// its size, value, and — only when name-sorting is required
// (the .got/.toc interleave case) — its name.
type symNameSize struct {
	name string
	sz   int64
	val  int64
	sym  loader.Sym
}

// dodataSect orders the symbols of one data section and computes the
// section's maximum alignment.
func (state *dodataState) dodataSect(ctxt *Link, symn sym.SymKind, syms []loader.Sym) (result []loader.Sym, maxAlign int32) {
	var head, tail, zerobase loader.Sym
	ldr := ctxt.loader
	sl := make([]symNameSize, len(syms))

	// For ppc64, we want to interleave the .got and .toc sections
	// from input files.
Both are type sym.SELFGOT, so in that case + // we skip size comparison and do the name comparison instead + // (conveniently, .got sorts before .toc). + checkSize := symn != sym.SELFGOT + + for k, s := range syms { + ss := ldr.SymSize(s) + sl[k] = symNameSize{sz: ss, sym: s} + if !checkSize { + sl[k].name = ldr.SymName(s) + } + ds := int64(len(ldr.Data(s))) + switch { + case ss < ds: + ctxt.Errorf(s, "initialize bounds (%d < %d)", ss, ds) + case ss < 0: + ctxt.Errorf(s, "negative size (%d bytes)", ss) + case ss > cutoff: + ctxt.Errorf(s, "symbol too large (%d bytes)", ss) + } + + // If the usually-special section-marker symbols are being laid + // out as regular symbols, put them either at the beginning or + // end of their section. + if (ctxt.DynlinkingGo() && ctxt.HeadType == objabi.Hdarwin) || (ctxt.HeadType == objabi.Haix && ctxt.LinkMode == LinkExternal) { + switch ldr.SymName(s) { + case "runtime.text", "runtime.bss", "runtime.data", "runtime.types", "runtime.rodata", + "runtime.noptrdata", "runtime.noptrbss": + head = s + continue + case "runtime.etext", "runtime.ebss", "runtime.edata", "runtime.etypes", "runtime.erodata", + "runtime.enoptrdata", "runtime.enoptrbss": + tail = s + continue + } + } + } + zerobase = ldr.Lookup("runtime.zerobase", 0) + + // Perform the sort. + if symn != sym.SPCLNTAB { + sort.Slice(sl, func(i, j int) bool { + si, sj := sl[i].sym, sl[j].sym + isz, jsz := sl[i].sz, sl[j].sz + switch { + case si == head, sj == tail: + return true + case sj == head, si == tail: + return false + // put zerobase right after all the zero-sized symbols, + // so zero-sized symbols have the same address as zerobase. 
			case si == zerobase:
				return jsz != 0 // zerobase < nonzero-sized
			case sj == zerobase:
				return isz == 0 // 0-sized < zerobase
			}
			if checkSize {
				if isz != jsz {
					return isz < jsz
				}
			} else {
				// Name comparison path: used only for the .got/.toc
				// interleave case (checkSize == false, see above).
				iname := sl[i].name
				jname := sl[j].name
				if iname != jname {
					return iname < jname
				}
			}
			// Tie-break on symbol index for a deterministic order.
			return si < sj
		})
	} else {
		// PCLNTAB was built internally, and already has the proper order.
	}

	// Set alignment, construct result.
	syms = syms[:0]
	for k := range sl {
		s := sl[k].sym
		if s != head && s != tail {
			align := symalign(ldr, s)
			if maxAlign < align {
				maxAlign = align
			}
		}
		syms = append(syms, s)
	}

	return syms, maxAlign
}

// Add buildid to beginning of text segment, on non-ELF systems.
// Non-ELF binary formats are not always flexible enough to
// give us a place to put the Go build ID. On those systems, we put it
// at the very beginning of the text segment.
// This “header” is read by cmd/go.
func (ctxt *Link) textbuildid() {
	if ctxt.IsELF || *flagBuildid == "" {
		return
	}

	ldr := ctxt.loader
	s := ldr.CreateSymForUpdate("go:buildid", 0)
	// The \xff is invalid UTF-8, meant to make it less likely
	// to find one of these accidentally.
	data := "\xff Go build ID: " + strconv.Quote(*flagBuildid) + "\n \xff"
	s.SetType(sym.STEXT)
	s.SetData([]byte(data))
	s.SetSize(int64(len(data)))

	// Prepend the build-id symbol to Textp so it is laid out first:
	// grow by one, shift everything right, then install at index 0.
	ctxt.Textp = append(ctxt.Textp, 0)
	copy(ctxt.Textp[1:], ctxt.Textp)
	ctxt.Textp[0] = s.Sym()
}

// buildinfo emits the go:buildinfo symbol consumed by `go version`
// and package debug/buildinfo.
func (ctxt *Link) buildinfo() {
	// Write the buildinfo symbol, which go version looks for.
	// The code reading this data is in package debug/buildinfo.
	ldr := ctxt.loader
	s := ldr.CreateSymForUpdate("go:buildinfo", 0)
	s.SetType(sym.SBUILDINFO)
	s.SetAlign(16)
	// The \xff is invalid UTF-8, meant to make it less likely
	// to find one of these accidentally.
	const prefix = "\xff Go buildinf:" // 14 bytes, plus 2 data bytes filled in below
	data := make([]byte, 32)
	copy(data, prefix)
	data[len(prefix)] = byte(ctxt.Arch.PtrSize)
	// Byte 15: endianness flag (0 little, 1 big), then bit 1 set to
	// signal the new pointer-free format below.
	data[len(prefix)+1] = 0
	if ctxt.Arch.ByteOrder == binary.BigEndian {
		data[len(prefix)+1] = 1
	}
	data[len(prefix)+1] |= 2 // signals new pointer-free format
	data = appendString(data, strdata["runtime.buildVersion"])
	data = appendString(data, strdata["runtime.modinfo"])
	// MacOS linker gets very upset if the size is not a multiple of alignment.
	for len(data)%16 != 0 {
		data = append(data, 0)
	}
	s.SetData(data)
	s.SetSize(int64(len(data)))

	// Add reference to go:buildinfo from the rodata section,
	// so that external linking with -Wl,--gc-sections does not
	// delete the build info.
	sr := ldr.CreateSymForUpdate("go:buildinfo.ref", 0)
	sr.SetType(sym.SRODATA)
	sr.SetAlign(int32(ctxt.Arch.PtrSize))
	sr.AddAddr(ctxt.Arch, s.Sym())
}

// appendString appends s to data, prefixed by its varint-encoded length.
func appendString(data []byte, s string) []byte {
	var v [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(v[:], uint64(len(s)))
	data = append(data, v[:n]...)
	data = append(data, s...)
	return data
}

// textaddress assigns virtual addresses (PCs) to symbols in the text segment.
func (ctxt *Link) textaddress() {
	addsection(ctxt.loader, ctxt.Arch, &Segtext, ".text", 05)

	// Assign PCs in text segment.
	// Could parallelize, by assigning to text
	// and then letting threads copy down, but probably not worth it.
	sect := Segtext.Sections[0]

	sect.Align = int32(Funcalign)

	ldr := ctxt.loader

	text := ctxt.xdefine("runtime.text", sym.STEXT, 0)
	etext := ctxt.xdefine("runtime.etext", sym.STEXT, 0)
	ldr.SetSymSect(text, sect)
	if ctxt.IsAIX() && ctxt.IsExternal() {
		// Setting runtime.text has a real symbol prevents ld to
		// change its base address resulting in wrong offsets for
		// reflect methods.
+ u := ldr.MakeSymbolUpdater(text) + u.SetAlign(sect.Align) + u.SetSize(8) + } + + if (ctxt.DynlinkingGo() && ctxt.IsDarwin()) || (ctxt.IsAIX() && ctxt.IsExternal()) { + ldr.SetSymSect(etext, sect) + ctxt.Textp = append(ctxt.Textp, etext, 0) + copy(ctxt.Textp[1:], ctxt.Textp) + ctxt.Textp[0] = text + } + + start := uint64(Rnd(*FlagTextAddr, int64(Funcalign))) + va := start + n := 1 + sect.Vaddr = va + + limit := thearch.TrampLimit + if limit == 0 { + limit = 1 << 63 // unlimited + } + if *FlagDebugTextSize != 0 { + limit = uint64(*FlagDebugTextSize) + } + if *FlagDebugTramp > 1 { + limit = 1 // debug mode, force generating trampolines for everything + } + + if ctxt.IsAIX() && ctxt.IsExternal() { + // On AIX, normally we won't generate direct calls to external symbols, + // except in one test, cmd/go/testdata/script/link_syso_issue33139.txt. + // That test doesn't make much sense, and I'm not sure it ever works. + // Just generate trampoline for now (which will turn a direct call to + // an indirect call, which at least builds). + limit = 1 + } + + // First pass: assign addresses assuming the program is small and will + // not require trampoline generation. + big := false + for _, s := range ctxt.Textp { + sect, n, va = assignAddress(ctxt, sect, n, s, va, false, big) + if va-start >= limit { + big = true + break + } + } + + // Second pass: only if it is too big, insert trampolines for too-far + // jumps and targets with unknown addresses. + if big { + // reset addresses + for _, s := range ctxt.Textp { + if s != text { + resetAddress(ctxt, s) + } + } + va = start + + ntramps := 0 + var curPkg string + for i, s := range ctxt.Textp { + // When we find the first symbol in a package, perform a + // single iteration that assigns temporary addresses to all + // of the text in the same package, using the maximum possible + // number of trampolines. This allows for better decisions to + // be made regarding reachability and the need for trampolines. 
+ if symPkg := ldr.SymPkg(s); symPkg != "" && curPkg != symPkg { + curPkg = symPkg + vaTmp := va + for j := i; j < len(ctxt.Textp); j++ { + curSym := ctxt.Textp[j] + if symPkg := ldr.SymPkg(curSym); symPkg == "" || curPkg != symPkg { + break + } + // We do not pass big to assignAddress here, as this + // can result in side effects such as section splitting. + sect, n, vaTmp = assignAddress(ctxt, sect, n, curSym, vaTmp, false, false) + vaTmp += maxSizeTrampolines(ctxt, ldr, curSym, false) + } + } + + // Reset address for current symbol. + if s != text { + resetAddress(ctxt, s) + } + + // Assign actual address for current symbol. + sect, n, va = assignAddress(ctxt, sect, n, s, va, false, big) + + // Resolve jumps, adding trampolines if they are needed. + trampoline(ctxt, s) + + // lay down trampolines after each function + for ; ntramps < len(ctxt.tramps); ntramps++ { + tramp := ctxt.tramps[ntramps] + if ctxt.IsAIX() && strings.HasPrefix(ldr.SymName(tramp), "runtime.text.") { + // Already set in assignAddress + continue + } + sect, n, va = assignAddress(ctxt, sect, n, tramp, va, true, big) + } + } + + // merge tramps into Textp, keeping Textp in address order + if ntramps != 0 { + newtextp := make([]loader.Sym, 0, len(ctxt.Textp)+ntramps) + i := 0 + for _, s := range ctxt.Textp { + for ; i < ntramps && ldr.SymValue(ctxt.tramps[i]) < ldr.SymValue(s); i++ { + newtextp = append(newtextp, ctxt.tramps[i]) + } + newtextp = append(newtextp, s) + } + newtextp = append(newtextp, ctxt.tramps[i:ntramps]...) + + ctxt.Textp = newtextp + } + } + + // Add MinLC size after etext, so it won't collide with the next symbol + // (which may confuse some symbolizer). + sect.Length = va - sect.Vaddr + uint64(ctxt.Arch.MinLC) + ldr.SetSymSect(etext, sect) + if ldr.SymValue(etext) == 0 { + // Set the address of the start/end symbols, if not already + // (i.e. not darwin+dynlink or AIX+external, see above). 
+ ldr.SetSymValue(etext, int64(va)) + ldr.SetSymValue(text, int64(Segtext.Sections[0].Vaddr)) + } +} + +// assigns address for a text symbol, returns (possibly new) section, its number, and the address. +func assignAddress(ctxt *Link, sect *sym.Section, n int, s loader.Sym, va uint64, isTramp, big bool) (*sym.Section, int, uint64) { + ldr := ctxt.loader + if thearch.AssignAddress != nil { + return thearch.AssignAddress(ldr, sect, n, s, va, isTramp) + } + + ldr.SetSymSect(s, sect) + if ldr.AttrSubSymbol(s) { + return sect, n, va + } + + align := ldr.SymAlign(s) + if align == 0 { + align = int32(Funcalign) + } + va = uint64(Rnd(int64(va), int64(align))) + if sect.Align < align { + sect.Align = align + } + + funcsize := uint64(MINFUNC) // spacing required for findfunctab + if ldr.SymSize(s) > MINFUNC { + funcsize = uint64(ldr.SymSize(s)) + } + + // If we need to split text sections, and this function doesn't fit in the current + // section, then create a new one. + // + // Only break at outermost syms. + if big && splitTextSections(ctxt) && ldr.OuterSym(s) == 0 { + // For debugging purposes, allow text size limit to be cranked down, + // so as to stress test the code that handles multiple text sections. + var textSizelimit uint64 = thearch.TrampLimit + if *FlagDebugTextSize != 0 { + textSizelimit = uint64(*FlagDebugTextSize) + } + + // Sanity check: make sure the limit is larger than any + // individual text symbol. + if funcsize > textSizelimit { + panic(fmt.Sprintf("error: text size limit %d less than text symbol %s size of %d", textSizelimit, ldr.SymName(s), funcsize)) + } + + if va-sect.Vaddr+funcsize+maxSizeTrampolines(ctxt, ldr, s, isTramp) > textSizelimit { + sectAlign := int32(thearch.Funcalign) + if ctxt.IsPPC64() { + // Align the next text section to the worst case function alignment likely + // to be encountered when processing function symbols. 
The start address + // is rounded against the final alignment of the text section later on in + // (*Link).address. This may happen due to usage of PCALIGN directives + // larger than Funcalign, or usage of ISA 3.1 prefixed instructions + // (see ISA 3.1 Book I 1.9). + const ppc64maxFuncalign = 64 + sectAlign = ppc64maxFuncalign + va = uint64(Rnd(int64(va), ppc64maxFuncalign)) + } + + // Set the length for the previous text section + sect.Length = va - sect.Vaddr + + // Create new section, set the starting Vaddr + sect = addsection(ctxt.loader, ctxt.Arch, &Segtext, ".text", 05) + + sect.Vaddr = va + sect.Align = sectAlign + ldr.SetSymSect(s, sect) + + // Create a symbol for the start of the secondary text sections + ntext := ldr.CreateSymForUpdate(fmt.Sprintf("runtime.text.%d", n), 0) + ntext.SetSect(sect) + if ctxt.IsAIX() { + // runtime.text.X must be a real symbol on AIX. + // Assign its address directly in order to be the + // first symbol of this new section. + ntext.SetType(sym.STEXT) + ntext.SetSize(int64(MINFUNC)) + ntext.SetOnList(true) + ntext.SetAlign(sectAlign) + ctxt.tramps = append(ctxt.tramps, ntext.Sym()) + + ntext.SetValue(int64(va)) + va += uint64(ntext.Size()) + + if align := ldr.SymAlign(s); align != 0 { + va = uint64(Rnd(int64(va), int64(align))) + } else { + va = uint64(Rnd(int64(va), int64(Funcalign))) + } + } + n++ + } + } + + ldr.SetSymValue(s, 0) + for sub := s; sub != 0; sub = ldr.SubSym(sub) { + ldr.SetSymValue(sub, ldr.SymValue(sub)+int64(va)) + if ctxt.Debugvlog > 2 { + fmt.Println("assign text address:", ldr.SymName(sub), ldr.SymValue(sub)) + } + } + + va += funcsize + + return sect, n, va +} + +func resetAddress(ctxt *Link, s loader.Sym) { + ldr := ctxt.loader + if ldr.OuterSym(s) != 0 { + return + } + oldv := ldr.SymValue(s) + for sub := s; sub != 0; sub = ldr.SubSym(sub) { + ldr.SetSymValue(sub, ldr.SymValue(sub)-oldv) + } +} + +// Return whether we may need to split text sections. 
+// +// On PPC64x, when external linking, a text section should not be +// larger than 2^25 bytes due to the size of call target offset field +// in the 'bl' instruction. Splitting into smaller text sections +// smaller than this limit allows the system linker to modify the long +// calls appropriately. The limit allows for the space needed for +// tables inserted by the linker. +// +// The same applies to Darwin/ARM64, with 2^27 byte threshold. +// +// Similarly for ARM, we split sections (at 2^25 bytes) to avoid +// inconsistencies between the Go linker's reachability calculations +// (e.g. will direct call from X to Y need a trampoline) and similar +// machinery in the external linker; see #58425 for more on the +// history here. +func splitTextSections(ctxt *Link) bool { + return (ctxt.IsARM() || ctxt.IsPPC64() || (ctxt.IsARM64() && ctxt.IsDarwin())) && ctxt.IsExternal() +} + +// On Wasm, we reserve 4096 bytes for zero page, then 8192 bytes for wasm_exec.js +// to store command line args and environment variables. +// Data sections starts from at least address 12288. +// Keep in sync with wasm_exec.js. +const wasmMinDataAddr = 4096 + 8192 + +// address assigns virtual addresses to all segments and sections and +// returns all segments in file order. +func (ctxt *Link) address() []*sym.Segment { + var order []*sym.Segment // Layout order + + va := uint64(*FlagTextAddr) + order = append(order, &Segtext) + Segtext.Rwx = 05 + Segtext.Vaddr = va + for i, s := range Segtext.Sections { + va = uint64(Rnd(int64(va), int64(s.Align))) + s.Vaddr = va + va += s.Length + + if ctxt.IsWasm() && i == 0 && va < wasmMinDataAddr { + va = wasmMinDataAddr + } + } + + Segtext.Length = va - uint64(*FlagTextAddr) + + if len(Segrodata.Sections) > 0 { + // align to page boundary so as not to mix + // rodata and executable text. 
+ // + // Note: gold or GNU ld will reduce the size of the executable + // file by arranging for the relro segment to end at a page + // boundary, and overlap the end of the text segment with the + // start of the relro segment in the file. The PT_LOAD segments + // will be such that the last page of the text segment will be + // mapped twice, once r-x and once starting out rw- and, after + // relocation processing, changed to r--. + // + // Ideally the last page of the text segment would not be + // writable even for this short period. + va = uint64(Rnd(int64(va), *FlagRound)) + + order = append(order, &Segrodata) + Segrodata.Rwx = 04 + Segrodata.Vaddr = va + for _, s := range Segrodata.Sections { + va = uint64(Rnd(int64(va), int64(s.Align))) + s.Vaddr = va + va += s.Length + } + + Segrodata.Length = va - Segrodata.Vaddr + } + if len(Segrelrodata.Sections) > 0 { + // align to page boundary so as not to mix + // rodata, rel-ro data, and executable text. + va = uint64(Rnd(int64(va), *FlagRound)) + if ctxt.HeadType == objabi.Haix { + // Relro data are inside data segment on AIX. + va += uint64(XCOFFDATABASE) - uint64(XCOFFTEXTBASE) + } + + order = append(order, &Segrelrodata) + Segrelrodata.Rwx = 06 + Segrelrodata.Vaddr = va + for _, s := range Segrelrodata.Sections { + va = uint64(Rnd(int64(va), int64(s.Align))) + s.Vaddr = va + va += s.Length + } + + Segrelrodata.Length = va - Segrelrodata.Vaddr + } + + va = uint64(Rnd(int64(va), *FlagRound)) + if ctxt.HeadType == objabi.Haix && len(Segrelrodata.Sections) == 0 { + // Data sections are moved to an unreachable segment + // to ensure that they are position-independent. + // Already done if relro sections exist. 
+ va += uint64(XCOFFDATABASE) - uint64(XCOFFTEXTBASE) + } + order = append(order, &Segdata) + Segdata.Rwx = 06 + Segdata.Vaddr = va + var data *sym.Section + var noptr *sym.Section + var bss *sym.Section + var noptrbss *sym.Section + var fuzzCounters *sym.Section + for i, s := range Segdata.Sections { + if (ctxt.IsELF || ctxt.HeadType == objabi.Haix) && s.Name == ".tbss" { + continue + } + vlen := int64(s.Length) + if i+1 < len(Segdata.Sections) && !((ctxt.IsELF || ctxt.HeadType == objabi.Haix) && Segdata.Sections[i+1].Name == ".tbss") { + vlen = int64(Segdata.Sections[i+1].Vaddr - s.Vaddr) + } + s.Vaddr = va + va += uint64(vlen) + Segdata.Length = va - Segdata.Vaddr + switch s.Name { + case ".data": + data = s + case ".noptrdata": + noptr = s + case ".bss": + bss = s + case ".noptrbss": + noptrbss = s + case ".go.fuzzcntrs": + fuzzCounters = s + } + } + + // Assign Segdata's Filelen omitting the BSS. We do this here + // simply because right now we know where the BSS starts. + Segdata.Filelen = bss.Vaddr - Segdata.Vaddr + + if len(Segpdata.Sections) > 0 { + va = uint64(Rnd(int64(va), *FlagRound)) + order = append(order, &Segpdata) + Segpdata.Rwx = 04 + Segpdata.Vaddr = va + // Segpdata.Sections is intended to contain just one section. + // Loop through the slice anyway for consistency. + for _, s := range Segpdata.Sections { + va = uint64(Rnd(int64(va), int64(s.Align))) + s.Vaddr = va + va += s.Length + } + Segpdata.Length = va - Segpdata.Vaddr + } + + if len(Segxdata.Sections) > 0 { + va = uint64(Rnd(int64(va), *FlagRound)) + order = append(order, &Segxdata) + Segxdata.Rwx = 04 + Segxdata.Vaddr = va + // Segxdata.Sections is intended to contain just one section. + // Loop through the slice anyway for consistency. 
+ for _, s := range Segxdata.Sections { + va = uint64(Rnd(int64(va), int64(s.Align))) + s.Vaddr = va + va += s.Length + } + Segxdata.Length = va - Segxdata.Vaddr + } + + va = uint64(Rnd(int64(va), *FlagRound)) + order = append(order, &Segdwarf) + Segdwarf.Rwx = 06 + Segdwarf.Vaddr = va + for i, s := range Segdwarf.Sections { + vlen := int64(s.Length) + if i+1 < len(Segdwarf.Sections) { + vlen = int64(Segdwarf.Sections[i+1].Vaddr - s.Vaddr) + } + s.Vaddr = va + va += uint64(vlen) + if ctxt.HeadType == objabi.Hwindows { + va = uint64(Rnd(int64(va), PEFILEALIGN)) + } + Segdwarf.Length = va - Segdwarf.Vaddr + } + + ldr := ctxt.loader + var ( + rodata = ldr.SymSect(ldr.LookupOrCreateSym("runtime.rodata", 0)) + symtab = ldr.SymSect(ldr.LookupOrCreateSym("runtime.symtab", 0)) + pclntab = ldr.SymSect(ldr.LookupOrCreateSym("runtime.pclntab", 0)) + types = ldr.SymSect(ldr.LookupOrCreateSym("runtime.types", 0)) + ) + + for _, s := range ctxt.datap { + if sect := ldr.SymSect(s); sect != nil { + ldr.AddToSymValue(s, int64(sect.Vaddr)) + } + v := ldr.SymValue(s) + for sub := ldr.SubSym(s); sub != 0; sub = ldr.SubSym(sub) { + ldr.AddToSymValue(sub, v) + } + } + + for _, si := range dwarfp { + for _, s := range si.syms { + if sect := ldr.SymSect(s); sect != nil { + ldr.AddToSymValue(s, int64(sect.Vaddr)) + } + sub := ldr.SubSym(s) + if sub != 0 { + panic(fmt.Sprintf("unexpected sub-sym for %s %s", ldr.SymName(s), ldr.SymType(s).String())) + } + v := ldr.SymValue(s) + for ; sub != 0; sub = ldr.SubSym(sub) { + ldr.AddToSymValue(s, v) + } + } + } + + for _, s := range sehp.pdata { + if sect := ldr.SymSect(s); sect != nil { + ldr.AddToSymValue(s, int64(sect.Vaddr)) + } + } + for _, s := range sehp.xdata { + if sect := ldr.SymSect(s); sect != nil { + ldr.AddToSymValue(s, int64(sect.Vaddr)) + } + } + + if ctxt.BuildMode == BuildModeShared { + s := ldr.LookupOrCreateSym("go:link.abihashbytes", 0) + sect := ldr.SymSect(ldr.LookupOrCreateSym(".note.go.abihash", 0)) + ldr.SetSymSect(s, 
sect) + ldr.SetSymValue(s, int64(sect.Vaddr+16)) + } + + // If there are multiple text sections, create runtime.text.n for + // their section Vaddr, using n for index + n := 1 + for _, sect := range Segtext.Sections[1:] { + if sect.Name != ".text" { + break + } + symname := fmt.Sprintf("runtime.text.%d", n) + if ctxt.HeadType != objabi.Haix || ctxt.LinkMode != LinkExternal { + // Addresses are already set on AIX with external linker + // because these symbols are part of their sections. + ctxt.xdefine(symname, sym.STEXT, int64(sect.Vaddr)) + } + n++ + } + + ctxt.xdefine("runtime.rodata", sym.SRODATA, int64(rodata.Vaddr)) + ctxt.xdefine("runtime.erodata", sym.SRODATA, int64(rodata.Vaddr+rodata.Length)) + ctxt.xdefine("runtime.types", sym.SRODATA, int64(types.Vaddr)) + ctxt.xdefine("runtime.etypes", sym.SRODATA, int64(types.Vaddr+types.Length)) + + s := ldr.Lookup("runtime.gcdata", 0) + ldr.SetAttrLocal(s, true) + ctxt.xdefine("runtime.egcdata", sym.SRODATA, ldr.SymAddr(s)+ldr.SymSize(s)) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.egcdata", 0), ldr.SymSect(s)) + + s = ldr.LookupOrCreateSym("runtime.gcbss", 0) + ldr.SetAttrLocal(s, true) + ctxt.xdefine("runtime.egcbss", sym.SRODATA, ldr.SymAddr(s)+ldr.SymSize(s)) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.egcbss", 0), ldr.SymSect(s)) + + ctxt.xdefine("runtime.symtab", sym.SRODATA, int64(symtab.Vaddr)) + ctxt.xdefine("runtime.esymtab", sym.SRODATA, int64(symtab.Vaddr+symtab.Length)) + ctxt.xdefine("runtime.pclntab", sym.SRODATA, int64(pclntab.Vaddr)) + ctxt.defineInternal("runtime.pcheader", sym.SRODATA) + ctxt.defineInternal("runtime.funcnametab", sym.SRODATA) + ctxt.defineInternal("runtime.cutab", sym.SRODATA) + ctxt.defineInternal("runtime.filetab", sym.SRODATA) + ctxt.defineInternal("runtime.pctab", sym.SRODATA) + ctxt.defineInternal("runtime.functab", sym.SRODATA) + ctxt.xdefine("runtime.epclntab", sym.SRODATA, int64(pclntab.Vaddr+pclntab.Length)) + ctxt.xdefine("runtime.noptrdata", sym.SNOPTRDATA, 
int64(noptr.Vaddr)) + ctxt.xdefine("runtime.enoptrdata", sym.SNOPTRDATA, int64(noptr.Vaddr+noptr.Length)) + ctxt.xdefine("runtime.bss", sym.SBSS, int64(bss.Vaddr)) + ctxt.xdefine("runtime.ebss", sym.SBSS, int64(bss.Vaddr+bss.Length)) + ctxt.xdefine("runtime.data", sym.SDATA, int64(data.Vaddr)) + ctxt.xdefine("runtime.edata", sym.SDATA, int64(data.Vaddr+data.Length)) + ctxt.xdefine("runtime.noptrbss", sym.SNOPTRBSS, int64(noptrbss.Vaddr)) + ctxt.xdefine("runtime.enoptrbss", sym.SNOPTRBSS, int64(noptrbss.Vaddr+noptrbss.Length)) + ctxt.xdefine("runtime.covctrs", sym.SCOVERAGE_COUNTER, int64(noptrbss.Vaddr+covCounterDataStartOff)) + ctxt.xdefine("runtime.ecovctrs", sym.SCOVERAGE_COUNTER, int64(noptrbss.Vaddr+covCounterDataStartOff+covCounterDataLen)) + ctxt.xdefine("runtime.end", sym.SBSS, int64(Segdata.Vaddr+Segdata.Length)) + + if fuzzCounters != nil { + ctxt.xdefine("runtime.__start___sancov_cntrs", sym.SLIBFUZZER_8BIT_COUNTER, int64(fuzzCounters.Vaddr)) + ctxt.xdefine("runtime.__stop___sancov_cntrs", sym.SLIBFUZZER_8BIT_COUNTER, int64(fuzzCounters.Vaddr+fuzzCounters.Length)) + ctxt.xdefine("internal/fuzz._counters", sym.SLIBFUZZER_8BIT_COUNTER, int64(fuzzCounters.Vaddr)) + ctxt.xdefine("internal/fuzz._ecounters", sym.SLIBFUZZER_8BIT_COUNTER, int64(fuzzCounters.Vaddr+fuzzCounters.Length)) + } + + if ctxt.IsSolaris() { + // On Solaris, in the runtime it sets the external names of the + // end symbols. Unset them and define separate symbols, so we + // keep both. 
+ etext := ldr.Lookup("runtime.etext", 0) + edata := ldr.Lookup("runtime.edata", 0) + end := ldr.Lookup("runtime.end", 0) + ldr.SetSymExtname(etext, "runtime.etext") + ldr.SetSymExtname(edata, "runtime.edata") + ldr.SetSymExtname(end, "runtime.end") + ctxt.xdefine("_etext", ldr.SymType(etext), ldr.SymValue(etext)) + ctxt.xdefine("_edata", ldr.SymType(edata), ldr.SymValue(edata)) + ctxt.xdefine("_end", ldr.SymType(end), ldr.SymValue(end)) + ldr.SetSymSect(ldr.Lookup("_etext", 0), ldr.SymSect(etext)) + ldr.SetSymSect(ldr.Lookup("_edata", 0), ldr.SymSect(edata)) + ldr.SetSymSect(ldr.Lookup("_end", 0), ldr.SymSect(end)) + } + + if ctxt.IsPPC64() && ctxt.IsElf() { + // Resolve .TOC. symbols for all objects. Only one TOC region is supported. If a + // GOT section is present, compute it as suggested by the ELFv2 ABI. Otherwise, + // choose a similar offset from the start of the data segment. + tocAddr := int64(Segdata.Vaddr) + 0x8000 + if gotAddr := ldr.SymValue(ctxt.GOT); gotAddr != 0 { + tocAddr = gotAddr + 0x8000 + } + for i := range ctxt.DotTOC { + if i >= sym.SymVerABICount && i < sym.SymVerStatic { // these versions are not used currently + continue + } + if toc := ldr.Lookup(".TOC.", i); toc != 0 { + ldr.SetSymValue(toc, tocAddr) + } + } + } + + return order +} + +// layout assigns file offsets and lengths to the segments in order. +// Returns the file size containing all the segments. +func (ctxt *Link) layout(order []*sym.Segment) uint64 { + var prev *sym.Segment + for _, seg := range order { + if prev == nil { + seg.Fileoff = uint64(HEADR) + } else { + switch ctxt.HeadType { + default: + // Assuming the previous segment was + // aligned, the following rounding + // should ensure that this segment's + // VA ≡ Fileoff mod FlagRound. 
			seg.Fileoff = uint64(Rnd(int64(prev.Fileoff+prev.Filelen), *FlagRound))
			// Verify the invariant promised in the comment above:
			// VA ≡ Fileoff (mod FlagRound).
			if seg.Vaddr%uint64(*FlagRound) != seg.Fileoff%uint64(*FlagRound) {
				Exitf("bad segment rounding (Vaddr=%#x Fileoff=%#x FlagRound=%#x)", seg.Vaddr, seg.Fileoff, *FlagRound)
			}
		case objabi.Hwindows:
			seg.Fileoff = prev.Fileoff + uint64(Rnd(int64(prev.Filelen), PEFILEALIGN))
		case objabi.Hplan9:
			seg.Fileoff = prev.Fileoff + prev.Filelen
		}
	}
	if seg != &Segdata {
		// Link.address already set Segdata.Filelen to
		// account for BSS.
		seg.Filelen = seg.Length
	}
	prev = seg
}
// File size is the end of the last segment on disk.
return prev.Fileoff + prev.Filelen
}

// AddTramp adds a trampoline with symbol s (to be laid down after the
// current function). The symbol is marked text/reachable/on-list and
// queued on ctxt.tramps for textaddress to place.
func (ctxt *Link) AddTramp(s *loader.SymbolBuilder) {
	s.SetType(sym.STEXT)
	s.SetReachable(true)
	s.SetOnList(true)
	ctxt.tramps = append(ctxt.tramps, s.Sym())
	if *FlagDebugTramp > 0 && ctxt.Debugvlog > 0 {
		ctxt.Logf("trampoline %s inserted\n", s.Name())
	}
}

// compressSyms compresses syms and returns the contents of the
// compressed section. If the section would get larger, it returns nil.
+func compressSyms(ctxt *Link, syms []loader.Sym) []byte { + ldr := ctxt.loader + var total int64 + for _, sym := range syms { + total += ldr.SymSize(sym) + } + + var buf bytes.Buffer + if ctxt.IsELF { + switch ctxt.Arch.PtrSize { + case 8: + binary.Write(&buf, ctxt.Arch.ByteOrder, elf.Chdr64{ + Type: uint32(elf.COMPRESS_ZLIB), + Size: uint64(total), + Addralign: uint64(ctxt.Arch.Alignment), + }) + case 4: + binary.Write(&buf, ctxt.Arch.ByteOrder, elf.Chdr32{ + Type: uint32(elf.COMPRESS_ZLIB), + Size: uint32(total), + Addralign: uint32(ctxt.Arch.Alignment), + }) + default: + log.Fatalf("can't compress header size:%d", ctxt.Arch.PtrSize) + } + } else { + buf.Write([]byte("ZLIB")) + var sizeBytes [8]byte + binary.BigEndian.PutUint64(sizeBytes[:], uint64(total)) + buf.Write(sizeBytes[:]) + } + + var relocbuf []byte // temporary buffer for applying relocations + + // Using zlib.BestSpeed achieves very nearly the same + // compression levels of zlib.DefaultCompression, but takes + // substantially less time. This is important because DWARF + // compression can be a significant fraction of link time. + z, err := zlib.NewWriterLevel(&buf, zlib.BestSpeed) + if err != nil { + log.Fatalf("NewWriterLevel failed: %s", err) + } + st := ctxt.makeRelocSymState() + for _, s := range syms { + // Symbol data may be read-only. Apply relocations in a + // temporary buffer, and immediately write it out. + P := ldr.Data(s) + relocs := ldr.Relocs(s) + if relocs.Count() != 0 { + relocbuf = append(relocbuf[:0], P...) 
+ P = relocbuf + st.relocsym(s, P) + } + if _, err := z.Write(P); err != nil { + log.Fatalf("compression failed: %s", err) + } + for i := ldr.SymSize(s) - int64(len(P)); i > 0; { + b := zeros[:] + if i < int64(len(b)) { + b = b[:i] + } + n, err := z.Write(b) + if err != nil { + log.Fatalf("compression failed: %s", err) + } + i -= int64(n) + } + } + if err := z.Close(); err != nil { + log.Fatalf("compression failed: %s", err) + } + if int64(buf.Len()) >= total { + // Compression didn't save any space. + return nil + } + return buf.Bytes() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/data_test.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/data_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2c22cfeb014de0f9caee765d63677b0b0369b0d3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/data_test.go @@ -0,0 +1,92 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loader" + "internal/buildcfg" + "testing" +) + +func setUpContext(arch *sys.Arch, iself bool, ht objabi.HeadType, bm, lm string) *Link { + ctxt := linknew(arch) + ctxt.HeadType = ht + er := loader.ErrorReporter{} + ctxt.loader = loader.NewLoader(0, &er) + ctxt.BuildMode.Set(bm) + ctxt.LinkMode.Set(lm) + ctxt.IsELF = iself + ctxt.mustSetHeadType() + ctxt.setArchSyms() + return ctxt +} + +// Make sure the addgotsym properly increases the symbols. 
+func TestAddGotSym(t *testing.T) { + tests := []struct { + arch *sys.Arch + ht objabi.HeadType + bm, lm string + rel string + relsize int + gotsize int + }{ + { + arch: sys.Arch386, + ht: objabi.Hlinux, + bm: "pie", + lm: "internal", + rel: ".rel", + relsize: 2 * sys.Arch386.PtrSize, + gotsize: sys.Arch386.PtrSize, + }, + { + arch: sys.ArchAMD64, + ht: objabi.Hlinux, + bm: "pie", + lm: "internal", + rel: ".rela", + relsize: 3 * sys.ArchAMD64.PtrSize, + gotsize: sys.ArchAMD64.PtrSize, + }, + { + arch: sys.ArchAMD64, + ht: objabi.Hdarwin, + bm: "pie", + lm: "external", + gotsize: sys.ArchAMD64.PtrSize, + }, + } + + // Save the architecture as we're going to set it on each test run. + origArch := buildcfg.GOARCH + defer func() { + buildcfg.GOARCH = origArch + }() + + for i, test := range tests { + iself := len(test.rel) != 0 + buildcfg.GOARCH = test.arch.Name + ctxt := setUpContext(test.arch, iself, test.ht, test.bm, test.lm) + foo := ctxt.loader.CreateSymForUpdate("foo", 0) + ctxt.loader.CreateExtSym("bar", 0) + AddGotSym(&ctxt.Target, ctxt.loader, &ctxt.ArchSyms, foo.Sym(), 0) + + if iself { + rel := ctxt.loader.Lookup(test.rel, 0) + if rel == 0 { + t.Fatalf("[%d] could not find symbol: %q", i, test.rel) + } + if s := ctxt.loader.SymSize(rel); s != int64(test.relsize) { + t.Fatalf("[%d] expected ldr.Size(%q) == %v, got %v", i, test.rel, test.relsize, s) + } + } + if s := ctxt.loader.SymSize(ctxt.loader.Lookup(".got", 0)); s != int64(test.gotsize) { + t.Fatalf(`[%d] expected ldr.Size(".got") == %v, got %v`, i, test.gotsize, s) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/deadcode.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/deadcode.go new file mode 100644 index 0000000000000000000000000000000000000000..70b4a7ca30327d752a735b655fc2419a8af2a12b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/deadcode.go @@ -0,0 +1,561 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "cmd/internal/goobj" + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "fmt" + "internal/buildcfg" + "strings" + "unicode" +) + +var _ = fmt.Print + +type deadcodePass struct { + ctxt *Link + ldr *loader.Loader + wq heap // work queue, using min-heap for better locality + + ifaceMethod map[methodsig]bool // methods called from reached interface call sites + genericIfaceMethod map[string]bool // names of methods called from reached generic interface call sites + markableMethods []methodref // methods of reached types + reflectSeen bool // whether we have seen a reflect method call + dynlink bool + + methodsigstmp []methodsig // scratch buffer for decoding method signatures + pkginits []loader.Sym + mapinitnoop loader.Sym +} + +func (d *deadcodePass) init() { + d.ldr.InitReachable() + d.ifaceMethod = make(map[methodsig]bool) + d.genericIfaceMethod = make(map[string]bool) + if buildcfg.Experiment.FieldTrack { + d.ldr.Reachparent = make([]loader.Sym, d.ldr.NSym()) + } + d.dynlink = d.ctxt.DynlinkingGo() + + if d.ctxt.BuildMode == BuildModeShared { + // Mark all symbols defined in this library as reachable when + // building a shared library. + n := d.ldr.NDef() + for i := 1; i < n; i++ { + s := loader.Sym(i) + d.mark(s, 0) + } + d.mark(d.ctxt.mainInittasks, 0) + return + } + + var names []string + + // In a normal binary, start at main.main and the init + // functions and mark what is reachable from there. + if d.ctxt.linkShared && (d.ctxt.BuildMode == BuildModeExe || d.ctxt.BuildMode == BuildModePIE) { + names = append(names, "main.main", "main..inittask") + } else { + // The external linker refers main symbol directly. 
+ if d.ctxt.LinkMode == LinkExternal && (d.ctxt.BuildMode == BuildModeExe || d.ctxt.BuildMode == BuildModePIE) { + if d.ctxt.HeadType == objabi.Hwindows && d.ctxt.Arch.Family == sys.I386 { + *flagEntrySymbol = "_main" + } else { + *flagEntrySymbol = "main" + } + } + names = append(names, *flagEntrySymbol) + } + // runtime.unreachableMethod is a function that will throw if called. + // We redirect unreachable methods to it. + names = append(names, "runtime.unreachableMethod") + if d.ctxt.BuildMode == BuildModePlugin { + names = append(names, objabi.PathToPrefix(*flagPluginPath)+"..inittask", objabi.PathToPrefix(*flagPluginPath)+".main", "go:plugin.tabs") + + // We don't keep the go.plugin.exports symbol, + // but we do keep the symbols it refers to. + exportsIdx := d.ldr.Lookup("go:plugin.exports", 0) + if exportsIdx != 0 { + relocs := d.ldr.Relocs(exportsIdx) + for i := 0; i < relocs.Count(); i++ { + d.mark(relocs.At(i).Sym(), 0) + } + } + } + + if d.ctxt.Debugvlog > 1 { + d.ctxt.Logf("deadcode start names: %v\n", names) + } + + for _, name := range names { + // Mark symbol as a data/ABI0 symbol. + d.mark(d.ldr.Lookup(name, 0), 0) + if abiInternalVer != 0 { + // Also mark any Go functions (internal ABI). + d.mark(d.ldr.Lookup(name, abiInternalVer), 0) + } + } + + // All dynamic exports are roots. + for _, s := range d.ctxt.dynexp { + if d.ctxt.Debugvlog > 1 { + d.ctxt.Logf("deadcode start dynexp: %s<%d>\n", d.ldr.SymName(s), d.ldr.SymVersion(s)) + } + d.mark(s, 0) + } + + d.mapinitnoop = d.ldr.Lookup("runtime.mapinitnoop", abiInternalVer) + if d.mapinitnoop == 0 { + panic("could not look up runtime.mapinitnoop") + } + if d.ctxt.mainInittasks != 0 { + d.mark(d.ctxt.mainInittasks, 0) + } +} + +func (d *deadcodePass) flood() { + var methods []methodref + for !d.wq.empty() { + symIdx := d.wq.pop() + + // Methods may be called via reflection. Give up on static analysis, + // and mark all exported methods of all reachable types as reachable. 
+ d.reflectSeen = d.reflectSeen || d.ldr.IsReflectMethod(symIdx) + + isgotype := d.ldr.IsGoType(symIdx) + relocs := d.ldr.Relocs(symIdx) + var usedInIface bool + + if isgotype { + if d.dynlink { + // When dynamic linking, a type may be passed across DSO + // boundary and get converted to interface at the other side. + d.ldr.SetAttrUsedInIface(symIdx, true) + } + usedInIface = d.ldr.AttrUsedInIface(symIdx) + } + + methods = methods[:0] + for i := 0; i < relocs.Count(); i++ { + r := relocs.At(i) + if r.Weak() { + convertWeakToStrong := false + // When build with "-linkshared", we can't tell if the + // interface method in itab will be used or not. + // Ignore the weak attribute. + if d.ctxt.linkShared && d.ldr.IsItab(symIdx) { + convertWeakToStrong = true + } + // If the program uses plugins, we can no longer treat + // relocs from pkg init functions to outlined map init + // fragments as weak, since doing so can cause package + // init clashes between the main program and the + // plugin. See #62430 for more details. + if d.ctxt.canUsePlugins && r.Type().IsDirectCall() { + convertWeakToStrong = true + } + if !convertWeakToStrong { + // skip this reloc + continue + } + } + t := r.Type() + switch t { + case objabi.R_METHODOFF: + if i+2 >= relocs.Count() { + panic("expect three consecutive R_METHODOFF relocs") + } + if usedInIface { + methods = append(methods, methodref{src: symIdx, r: i}) + // The method descriptor is itself a type descriptor, and + // it can be used to reach other types, e.g. by using + // reflect.Type.Method(i).Type.In(j). We need to traverse + // its child types with UsedInIface set. (See also the + // comment below.) + rs := r.Sym() + if !d.ldr.AttrUsedInIface(rs) { + d.ldr.SetAttrUsedInIface(rs, true) + if d.ldr.AttrReachable(rs) { + d.ldr.SetAttrReachable(rs, false) + d.mark(rs, symIdx) + } + } + } + i += 2 + continue + case objabi.R_USETYPE: + // type symbol used for DWARF. 
we need to load the symbol but it may not + // be otherwise reachable in the program. + // do nothing for now as we still load all type symbols. + continue + case objabi.R_USEIFACE: + // R_USEIFACE is a marker relocation that tells the linker the type is + // converted to an interface, i.e. should have UsedInIface set. See the + // comment below for why we need to unset the Reachable bit and re-mark it. + rs := r.Sym() + if d.ldr.IsItab(rs) { + // This relocation can also point at an itab, in which case it + // means "the _type field of that itab". + rs = decodeItabType(d.ldr, d.ctxt.Arch, rs) + } + if !d.ldr.IsGoType(rs) && !d.ctxt.linkShared { + panic(fmt.Sprintf("R_USEIFACE in %s references %s which is not a type or itab", d.ldr.SymName(symIdx), d.ldr.SymName(rs))) + } + if !d.ldr.AttrUsedInIface(rs) { + d.ldr.SetAttrUsedInIface(rs, true) + if d.ldr.AttrReachable(rs) { + d.ldr.SetAttrReachable(rs, false) + d.mark(rs, symIdx) + } + } + continue + case objabi.R_USEIFACEMETHOD: + // R_USEIFACEMETHOD is a marker relocation that marks an interface + // method as used. + rs := r.Sym() + if d.ctxt.linkShared && (d.ldr.SymType(rs) == sym.SDYNIMPORT || d.ldr.SymType(rs) == sym.Sxxx) { + // Don't decode symbol from shared library (we'll mark all exported methods anyway). + // We check for both SDYNIMPORT and Sxxx because name-mangled symbols haven't + // been resolved at this point. + continue + } + m := d.decodeIfaceMethod(d.ldr, d.ctxt.Arch, rs, r.Add()) + if d.ctxt.Debugvlog > 1 { + d.ctxt.Logf("reached iface method: %v\n", m) + } + d.ifaceMethod[m] = true + continue + case objabi.R_USENAMEDMETHOD: + name := d.decodeGenericIfaceMethod(d.ldr, r.Sym()) + if d.ctxt.Debugvlog > 1 { + d.ctxt.Logf("reached generic iface method: %s\n", name) + } + d.genericIfaceMethod[name] = true + continue // don't mark referenced symbol - it is not needed in the final binary. 
+ case objabi.R_INITORDER: + // inittasks has already run, so any R_INITORDER links are now + // superfluous - the only live inittask records are those which are + // in a scheduled list somewhere (e.g. runtime.moduledata.inittasks). + continue + } + rs := r.Sym() + if isgotype && usedInIface && d.ldr.IsGoType(rs) && !d.ldr.AttrUsedInIface(rs) { + // If a type is converted to an interface, it is possible to obtain an + // interface with a "child" type of it using reflection (e.g. obtain an + // interface of T from []chan T). We need to traverse its "child" types + // with UsedInIface attribute set. + // When visiting the child type (chan T in the example above), it will + // have UsedInIface set, so it in turn will mark and (re)visit its children + // (e.g. T above). + // We unset the reachable bit here, so if the child type is already visited, + // it will be visited again. + // Note that a type symbol can be visited at most twice, one without + // UsedInIface and one with. So termination is still guaranteed. + d.ldr.SetAttrUsedInIface(rs, true) + d.ldr.SetAttrReachable(rs, false) + } + d.mark(rs, symIdx) + } + naux := d.ldr.NAux(symIdx) + for i := 0; i < naux; i++ { + a := d.ldr.Aux(symIdx, i) + if a.Type() == goobj.AuxGotype { + // A symbol being reachable doesn't imply we need its + // type descriptor. Don't mark it. + continue + } + d.mark(a.Sym(), symIdx) + } + // Record sym if package init func (here naux != 0 is a cheap way + // to check first if it is a function symbol). + if naux != 0 && d.ldr.IsPkgInit(symIdx) { + + d.pkginits = append(d.pkginits, symIdx) + } + // Some host object symbols have an outer object, which acts like a + // "carrier" symbol, or it holds all the symbols for a particular + // section. We need to mark all "referenced" symbols from that carrier, + // so we make sure we're pulling in all outer symbols, and their sub + // symbols. This is not ideal, and these carrier/section symbols could + // be removed. 
+ if d.ldr.IsExternal(symIdx) { + d.mark(d.ldr.OuterSym(symIdx), symIdx) + d.mark(d.ldr.SubSym(symIdx), symIdx) + } + + if len(methods) != 0 { + if !isgotype { + panic("method found on non-type symbol") + } + // Decode runtime type information for type methods + // to help work out which methods can be called + // dynamically via interfaces. + methodsigs := d.decodetypeMethods(d.ldr, d.ctxt.Arch, symIdx, &relocs) + if len(methods) != len(methodsigs) { + panic(fmt.Sprintf("%q has %d method relocations for %d methods", d.ldr.SymName(symIdx), len(methods), len(methodsigs))) + } + for i, m := range methodsigs { + methods[i].m = m + if d.ctxt.Debugvlog > 1 { + d.ctxt.Logf("markable method: %v of sym %v %s\n", m, symIdx, d.ldr.SymName(symIdx)) + } + } + d.markableMethods = append(d.markableMethods, methods...) + } + } +} + +// mapinitcleanup walks all pkg init functions and looks for weak relocations +// to mapinit symbols that are no longer reachable. It rewrites +// the relocs to target a new no-op routine in the runtime. 
+func (d *deadcodePass) mapinitcleanup() { + for _, idx := range d.pkginits { + relocs := d.ldr.Relocs(idx) + var su *loader.SymbolBuilder + for i := 0; i < relocs.Count(); i++ { + r := relocs.At(i) + rs := r.Sym() + if r.Weak() && r.Type().IsDirectCall() && !d.ldr.AttrReachable(rs) { + // double check to make sure target is indeed map.init + rsn := d.ldr.SymName(rs) + if !strings.Contains(rsn, "map.init") { + panic(fmt.Sprintf("internal error: expected map.init sym for weak call reloc, got %s -> %s", d.ldr.SymName(idx), rsn)) + } + d.ldr.SetAttrReachable(d.mapinitnoop, true) + if d.ctxt.Debugvlog > 1 { + d.ctxt.Logf("deadcode: %s rewrite %s ref to %s\n", + d.ldr.SymName(idx), rsn, + d.ldr.SymName(d.mapinitnoop)) + } + if su == nil { + su = d.ldr.MakeSymbolUpdater(idx) + } + su.SetRelocSym(i, d.mapinitnoop) + } + } + } +} + +func (d *deadcodePass) mark(symIdx, parent loader.Sym) { + if symIdx != 0 && !d.ldr.AttrReachable(symIdx) { + d.wq.push(symIdx) + d.ldr.SetAttrReachable(symIdx, true) + if buildcfg.Experiment.FieldTrack && d.ldr.Reachparent[symIdx] == 0 { + d.ldr.Reachparent[symIdx] = parent + } + if *flagDumpDep { + to := d.ldr.SymName(symIdx) + if to != "" { + to = d.dumpDepAddFlags(to, symIdx) + from := "_" + if parent != 0 { + from = d.ldr.SymName(parent) + from = d.dumpDepAddFlags(from, parent) + } + fmt.Printf("%s -> %s\n", from, to) + } + } + } +} + +func (d *deadcodePass) dumpDepAddFlags(name string, symIdx loader.Sym) string { + var flags strings.Builder + if d.ldr.AttrUsedInIface(symIdx) { + flags.WriteString("") + } + if d.ldr.IsReflectMethod(symIdx) { + flags.WriteString("") + } + if flags.Len() > 0 { + return name + " " + flags.String() + } + return name +} + +func (d *deadcodePass) markMethod(m methodref) { + relocs := d.ldr.Relocs(m.src) + d.mark(relocs.At(m.r).Sym(), m.src) + d.mark(relocs.At(m.r+1).Sym(), m.src) + d.mark(relocs.At(m.r+2).Sym(), m.src) +} + +// deadcode marks all reachable symbols. 
+// +// The basis of the dead code elimination is a flood fill of symbols, +// following their relocations, beginning at *flagEntrySymbol. +// +// This flood fill is wrapped in logic for pruning unused methods. +// All methods are mentioned by relocations on their receiver's *rtype. +// These relocations are specially defined as R_METHODOFF by the compiler +// so we can detect and manipulated them here. +// +// There are three ways a method of a reachable type can be invoked: +// +// 1. direct call +// 2. through a reachable interface type +// 3. reflect.Value.Method (or MethodByName), or reflect.Type.Method +// (or MethodByName) +// +// The first case is handled by the flood fill, a directly called method +// is marked as reachable. +// +// The second case is handled by decomposing all reachable interface +// types into method signatures. Each encountered method is compared +// against the interface method signatures, if it matches it is marked +// as reachable. This is extremely conservative, but easy and correct. +// +// The third case is handled by looking for functions that compiler flagged +// as REFLECTMETHOD. REFLECTMETHOD on a function F means that F does a method +// lookup with reflection, but the compiler was not able to statically determine +// the method name. +// +// All functions that call reflect.Value.Method or reflect.Type.Method are REFLECTMETHODs. +// Functions that call reflect.Value.MethodByName or reflect.Type.MethodByName with +// a non-constant argument are REFLECTMETHODs, too. If we find a REFLECTMETHOD, +// we give up on static analysis, and mark all exported methods of all reachable +// types as reachable. +// +// If the argument to MethodByName is a compile-time constant, the compiler +// emits a relocation with the method name. Matching methods are kept in all +// reachable types. +// +// Any unreached text symbols are removed from ctxt.Textp. 
+func deadcode(ctxt *Link) {
+	ldr := ctxt.loader
+	d := deadcodePass{ctxt: ctxt, ldr: ldr}
+	d.init()
+	d.flood()
+
+	if ctxt.DynlinkingGo() {
+		// Exported methods may satisfy interfaces we don't know
+		// about yet when dynamically linking.
+		d.reflectSeen = true
+	}
+
+	// Iterate to a fixed point: each flood pass may discover new
+	// interface types whose method signatures unlock more methods.
+	for {
+		// Mark all methods that could satisfy a discovered
+		// interface as reachable. We recheck old marked interfaces
+		// as new types (with new methods) may have been discovered
+		// in the last pass.
+		rem := d.markableMethods[:0]
+		for _, m := range d.markableMethods {
+			if (d.reflectSeen && (m.isExported() || d.dynlink)) || d.ifaceMethod[m.m] || d.genericIfaceMethod[m.m.name] {
+				d.markMethod(m)
+			} else {
+				rem = append(rem, m)
+			}
+		}
+		d.markableMethods = rem
+
+		if d.wq.empty() {
+			// No new work was discovered. Done.
+			break
+		}
+		d.flood()
+	}
+	if *flagPruneWeakMap {
+		d.mapinitcleanup()
+	}
+}
+
+// methodsig is a typed method signature (name + type).
+type methodsig struct {
+	name string
+	typ  loader.Sym // type descriptor symbol of the function
+}
+
+// methodref holds the relocations from a receiver type symbol to its
+// method. There are three relocations, one for each of the fields in
+// the reflect.method struct: mtyp, ifn, and tfn.
+type methodref struct {
+	m   methodsig
+	src loader.Sym // receiver type symbol
+	r   int        // the index of R_METHODOFF relocations
+}
+
+// isExported reports whether the method's name starts with an
+// upper-case rune. The range loop executes at most once (first rune
+// only); an empty name is a fatal internal error.
+func (m methodref) isExported() bool {
+	for _, r := range m.m.name {
+		return unicode.IsUpper(r)
+	}
+	panic("methodref has no signature")
+}
+
+// decodeMethodSig decodes an array of method signature information.
+// Each element of the array is size bytes. The first 4 bytes is a
+// nameOff for the method name, and the next 4 bytes is a typeOff for
+// the function type.
+//
+// Conveniently this is the layout of both runtime.method and runtime.imethod.
+func (d *deadcodePass) decodeMethodSig(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, relocs *loader.Relocs, off, size, count int) []methodsig {
+	// Reuse the pass-level scratch buffer, growing it only when the
+	// requested count exceeds its capacity.
+	if cap(d.methodsigstmp) < count {
+		d.methodsigstmp = append(d.methodsigstmp[:0], make([]methodsig, count)...)
+	}
+	var methods = d.methodsigstmp[:count]
+	for i := 0; i < count; i++ {
+		// nameOff at off, typeOff at off+4 (see the layout note in the
+		// function's doc comment).
+		methods[i].name = decodetypeName(ldr, symIdx, relocs, off)
+		methods[i].typ = decodeRelocSym(ldr, symIdx, relocs, int32(off+4))
+		off += size
+	}
+	return methods
+}
+
+// Decode the method of interface type symbol symIdx at offset off.
+func (d *deadcodePass) decodeIfaceMethod(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, off int64) methodsig {
+	p := ldr.Data(symIdx)
+	if p == nil {
+		panic(fmt.Sprintf("missing symbol %q", ldr.SymName(symIdx)))
+	}
+	if decodetypeKind(arch, p)&kindMask != kindInterface {
+		panic(fmt.Sprintf("symbol %q is not an interface", ldr.SymName(symIdx)))
+	}
+	relocs := ldr.Relocs(symIdx)
+	var m methodsig
+	m.name = decodetypeName(ldr, symIdx, &relocs, int(off))
+	m.typ = decodeRelocSym(ldr, symIdx, &relocs, int32(off+4))
+	return m
+}
+
+// Decode the method name stored in symbol symIdx. The symbol should contain just the bytes of a method name.
+func (d *deadcodePass) decodeGenericIfaceMethod(ldr *loader.Loader, symIdx loader.Sym) string {
+	return ldr.DataString(symIdx)
+}
+
+// decodetypeMethods decodes the method signatures attached to type
+// symbol symIdx. The kind switch skips past the kind-specific payload
+// that follows the common rtype header to reach the uncommon section.
+func (d *deadcodePass) decodetypeMethods(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, relocs *loader.Relocs) []methodsig {
+	p := ldr.Data(symIdx)
+	if !decodetypeHasUncommon(arch, p) {
+		panic(fmt.Sprintf("no methods on %q", ldr.SymName(symIdx)))
+	}
+	off := commonsize(arch) // reflect.rtype
+	switch decodetypeKind(arch, p) & kindMask {
+	case kindStruct: // reflect.structType
+		off += 4 * arch.PtrSize
+	case kindPtr: // reflect.ptrType
+		off += arch.PtrSize
+	case kindFunc: // reflect.funcType
+		off += arch.PtrSize // 4 bytes, pointer aligned
+	case kindSlice: // reflect.sliceType
+		off += arch.PtrSize
+	case kindArray: // reflect.arrayType
+		off += 3 * arch.PtrSize
+	case kindChan: // reflect.chanType
+		off += 2 * arch.PtrSize
+	case kindMap: // reflect.mapType
+		off += 4*arch.PtrSize + 8
+	case kindInterface: // reflect.interfaceType
+		off += 3 * arch.PtrSize
+	default:
+		// just Sizeof(rtype)
+	}
+
+	// Method count and method-array offset read from the uncommon
+	// section header at off.
+	mcount := int(decodeInuxi(arch, p[off+4:], 2))
+	moff := int(decodeInuxi(arch, p[off+4+2+2:], 4))
+	off += moff // offset to array of reflect.method values
+	const sizeofMethod = 4 * 4 // sizeof reflect.method in program
+	return d.decodeMethodSig(ldr, arch, symIdx, relocs, off, sizeofMethod, mcount)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/deadcode_test.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/deadcode_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..6962a4eba01195247ff3138ddec1c23d855d684a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/deadcode_test.go
@@ -0,0 +1,59 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ld
+
+import (
+	"bytes"
+	"internal/testenv"
+	"path/filepath"
+	"testing"
+)
+
+// TestDeadcode builds each testdata/deadcode program with -dumpdep and
+// checks which symbols appear in (pos) or are absent from (neg) the
+// dependency dump.
+func TestDeadcode(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+	t.Parallel()
+
+	tmpdir := t.TempDir()
+
+	tests := []struct {
+		src      string
+		pos, neg []string // positive and negative patterns
+	}{
+		{"reflectcall", nil, []string{"main.T.M"}},
+		{"typedesc", nil, []string{"type:main.T"}},
+		{"ifacemethod", nil, []string{"main.T.M"}},
+		{"ifacemethod2", []string{"main.T.M"}, nil},
+		{"ifacemethod3", []string{"main.S.M"}, nil},
+		{"ifacemethod4", nil, []string{"main.T.M"}},
+		{"ifacemethod5", []string{"main.S.M"}, nil},
+		{"ifacemethod6", []string{"main.S.M"}, []string{"main.S.N"}},
+		{"structof_funcof", []string{"main.S.M"}, []string{"main.S.N"}},
+		{"globalmap", []string{"main.small", "main.effect"},
+			[]string{"main.large"}},
+	}
+	for _, test := range tests {
+		test := test // capture range variable for the parallel subtest closure
+		t.Run(test.src, func(t *testing.T) {
+			t.Parallel()
+			src := filepath.Join("testdata", "deadcode", test.src+".go")
+			exe := filepath.Join(tmpdir, test.src+".exe")
+			cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-dumpdep", "-o", exe, src)
+			out, err := cmd.CombinedOutput()
+			if err != nil {
+				t.Fatalf("%v: %v:\n%s", cmd.Args, err, out)
+			}
+			// Match "pattern\n" so a symbol name is only counted when it
+			// terminates a dump line, not as a prefix of a longer name.
+			for _, pos := range test.pos {
+				if !bytes.Contains(out, []byte(pos+"\n")) {
+					t.Errorf("%s should be reachable. Output:\n%s", pos, out)
+				}
+			}
+			for _, neg := range test.neg {
+				if bytes.Contains(out, []byte(neg+"\n")) {
+					t.Errorf("%s should not be reachable. Output:\n%s", neg, out)
+				}
+			}
+		})
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/decodesym.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/decodesym.go
new file mode 100644
index 0000000000000000000000000000000000000000..05da11ec1e993c92260bdb2d7fbc92bcf25b0c74
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/decodesym.go
@@ -0,0 +1,308 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" + "encoding/binary" + "internal/abi" + "log" +) + +// Decoding the type.* symbols. This has to be in sync with +// ../../runtime/type.go, or more specifically, with what +// cmd/compile/internal/reflectdata/reflect.go stuffs in these. + +func decodeInuxi(arch *sys.Arch, p []byte, sz int) uint64 { + switch sz { + case 2: + return uint64(arch.ByteOrder.Uint16(p)) + case 4: + return uint64(arch.ByteOrder.Uint32(p)) + case 8: + return arch.ByteOrder.Uint64(p) + default: + Exitf("dwarf: decode inuxi %d", sz) + panic("unreachable") + } +} + +func commonsize(arch *sys.Arch) int { return abi.CommonSize(arch.PtrSize) } // runtime._type +func structfieldSize(arch *sys.Arch) int { return abi.StructFieldSize(arch.PtrSize) } // runtime.structfield +func uncommonSize(arch *sys.Arch) int { return int(abi.UncommonSize()) } // runtime.uncommontype + +// Type.commonType.kind +func decodetypeKind(arch *sys.Arch, p []byte) uint8 { + return p[2*arch.PtrSize+7] & objabi.KindMask // 0x13 / 0x1f +} + +// Type.commonType.kind +func decodetypeUsegcprog(arch *sys.Arch, p []byte) uint8 { + return p[2*arch.PtrSize+7] & objabi.KindGCProg // 0x13 / 0x1f +} + +// Type.commonType.size +func decodetypeSize(arch *sys.Arch, p []byte) int64 { + return int64(decodeInuxi(arch, p, arch.PtrSize)) // 0x8 / 0x10 +} + +// Type.commonType.ptrdata +func decodetypePtrdata(arch *sys.Arch, p []byte) int64 { + return int64(decodeInuxi(arch, p[arch.PtrSize:], arch.PtrSize)) // 0x8 / 0x10 +} + +// Type.commonType.tflag +func decodetypeHasUncommon(arch *sys.Arch, p []byte) bool { + return abi.TFlag(p[abi.TFlagOff(arch.PtrSize)])&abi.TFlagUncommon != 0 +} + +// Type.FuncType.dotdotdot +func decodetypeFuncDotdotdot(arch *sys.Arch, p []byte) bool { + return 
uint16(decodeInuxi(arch, p[commonsize(arch)+2:], 2))&(1<<15) != 0 +} + +// Type.FuncType.inCount +func decodetypeFuncInCount(arch *sys.Arch, p []byte) int { + return int(decodeInuxi(arch, p[commonsize(arch):], 2)) +} + +func decodetypeFuncOutCount(arch *sys.Arch, p []byte) int { + return int(uint16(decodeInuxi(arch, p[commonsize(arch)+2:], 2)) & (1<<15 - 1)) +} + +// InterfaceType.methods.length +func decodetypeIfaceMethodCount(arch *sys.Arch, p []byte) int64 { + return int64(decodeInuxi(arch, p[commonsize(arch)+2*arch.PtrSize:], arch.PtrSize)) +} + +// Matches runtime/typekind.go and reflect.Kind. +const ( + kindArray = 17 + kindChan = 18 + kindFunc = 19 + kindInterface = 20 + kindMap = 21 + kindPtr = 22 + kindSlice = 23 + kindStruct = 25 + kindMask = (1 << 5) - 1 +) + +func decodeReloc(ldr *loader.Loader, symIdx loader.Sym, relocs *loader.Relocs, off int32) loader.Reloc { + for j := 0; j < relocs.Count(); j++ { + rel := relocs.At(j) + if rel.Off() == off { + return rel + } + } + return loader.Reloc{} +} + +func decodeRelocSym(ldr *loader.Loader, symIdx loader.Sym, relocs *loader.Relocs, off int32) loader.Sym { + return decodeReloc(ldr, symIdx, relocs, off).Sym() +} + +// decodetypeName decodes the name from a reflect.name. 
+func decodetypeName(ldr *loader.Loader, symIdx loader.Sym, relocs *loader.Relocs, off int) string { + r := decodeRelocSym(ldr, symIdx, relocs, int32(off)) + if r == 0 { + return "" + } + + data := ldr.DataString(r) + n := 1 + binary.MaxVarintLen64 + if len(data) < n { + n = len(data) + } + nameLen, nameLenLen := binary.Uvarint([]byte(data[1:n])) + return data[1+nameLenLen : 1+nameLenLen+int(nameLen)] +} + +func decodetypeNameEmbedded(ldr *loader.Loader, symIdx loader.Sym, relocs *loader.Relocs, off int) bool { + r := decodeRelocSym(ldr, symIdx, relocs, int32(off)) + if r == 0 { + return false + } + data := ldr.Data(r) + return data[0]&(1<<3) != 0 +} + +func decodetypeFuncInType(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, relocs *loader.Relocs, i int) loader.Sym { + uadd := commonsize(arch) + 4 + if arch.PtrSize == 8 { + uadd += 4 + } + if decodetypeHasUncommon(arch, ldr.Data(symIdx)) { + uadd += uncommonSize(arch) + } + return decodeRelocSym(ldr, symIdx, relocs, int32(uadd+i*arch.PtrSize)) +} + +func decodetypeFuncOutType(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, relocs *loader.Relocs, i int) loader.Sym { + return decodetypeFuncInType(ldr, arch, symIdx, relocs, i+decodetypeFuncInCount(arch, ldr.Data(symIdx))) +} + +func decodetypeArrayElem(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym { + relocs := ldr.Relocs(symIdx) + return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))) // 0x1c / 0x30 +} + +func decodetypeArrayLen(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) int64 { + data := ldr.Data(symIdx) + return int64(decodeInuxi(arch, data[commonsize(arch)+2*arch.PtrSize:], arch.PtrSize)) +} + +func decodetypeChanElem(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym { + relocs := ldr.Relocs(symIdx) + return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))) // 0x1c / 0x30 +} + +func decodetypeMapKey(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym { + relocs 
:= ldr.Relocs(symIdx) + return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))) // 0x1c / 0x30 +} + +func decodetypeMapValue(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym { + relocs := ldr.Relocs(symIdx) + return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))+int32(arch.PtrSize)) // 0x20 / 0x38 +} + +func decodetypePtrElem(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym { + relocs := ldr.Relocs(symIdx) + return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))) // 0x1c / 0x30 +} + +func decodetypeStructFieldCount(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) int { + data := ldr.Data(symIdx) + return int(decodeInuxi(arch, data[commonsize(arch)+2*arch.PtrSize:], arch.PtrSize)) +} + +func decodetypeStructFieldArrayOff(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) int { + data := ldr.Data(symIdx) + off := commonsize(arch) + 4*arch.PtrSize + if decodetypeHasUncommon(arch, data) { + off += uncommonSize(arch) + } + off += i * structfieldSize(arch) + return off +} + +func decodetypeStructFieldName(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) string { + off := decodetypeStructFieldArrayOff(ldr, arch, symIdx, i) + relocs := ldr.Relocs(symIdx) + return decodetypeName(ldr, symIdx, &relocs, off) +} + +func decodetypeStructFieldType(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) loader.Sym { + off := decodetypeStructFieldArrayOff(ldr, arch, symIdx, i) + relocs := ldr.Relocs(symIdx) + return decodeRelocSym(ldr, symIdx, &relocs, int32(off+arch.PtrSize)) +} + +func decodetypeStructFieldOffset(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) int64 { + off := decodetypeStructFieldArrayOff(ldr, arch, symIdx, i) + data := ldr.Data(symIdx) + return int64(decodeInuxi(arch, data[off+2*arch.PtrSize:], arch.PtrSize)) +} + +func decodetypeStructFieldEmbedded(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) bool { + off := 
decodetypeStructFieldArrayOff(ldr, arch, symIdx, i) + relocs := ldr.Relocs(symIdx) + return decodetypeNameEmbedded(ldr, symIdx, &relocs, off) +} + +// decodetypeStr returns the contents of an rtype's str field (a nameOff). +func decodetypeStr(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) string { + relocs := ldr.Relocs(symIdx) + str := decodetypeName(ldr, symIdx, &relocs, 4*arch.PtrSize+8) + data := ldr.Data(symIdx) + if data[abi.TFlagOff(arch.PtrSize)]&byte(abi.TFlagExtraStar) != 0 { + return str[1:] + } + return str +} + +func decodetypeGcmask(ctxt *Link, s loader.Sym) []byte { + if ctxt.loader.SymType(s) == sym.SDYNIMPORT { + symData := ctxt.loader.Data(s) + addr := decodetypeGcprogShlib(ctxt, symData) + ptrdata := decodetypePtrdata(ctxt.Arch, symData) + sect := findShlibSection(ctxt, ctxt.loader.SymPkg(s), addr) + if sect != nil { + bits := ptrdata / int64(ctxt.Arch.PtrSize) + r := make([]byte, (bits+7)/8) + // ldshlibsyms avoids closing the ELF file so sect.ReadAt works. + // If we remove this read (and the ones in decodetypeGcprog), we + // can close the file. + _, err := sect.ReadAt(r, int64(addr-sect.Addr)) + if err != nil { + log.Fatal(err) + } + return r + } + Exitf("cannot find gcmask for %s", ctxt.loader.SymName(s)) + return nil + } + relocs := ctxt.loader.Relocs(s) + mask := decodeRelocSym(ctxt.loader, s, &relocs, 2*int32(ctxt.Arch.PtrSize)+8+1*int32(ctxt.Arch.PtrSize)) + return ctxt.loader.Data(mask) +} + +// Type.commonType.gc +func decodetypeGcprog(ctxt *Link, s loader.Sym) []byte { + if ctxt.loader.SymType(s) == sym.SDYNIMPORT { + symData := ctxt.loader.Data(s) + addr := decodetypeGcprogShlib(ctxt, symData) + sect := findShlibSection(ctxt, ctxt.loader.SymPkg(s), addr) + if sect != nil { + // A gcprog is a 4-byte uint32 indicating length, followed by + // the actual program. 
+ progsize := make([]byte, 4) + _, err := sect.ReadAt(progsize, int64(addr-sect.Addr)) + if err != nil { + log.Fatal(err) + } + progbytes := make([]byte, ctxt.Arch.ByteOrder.Uint32(progsize)) + _, err = sect.ReadAt(progbytes, int64(addr-sect.Addr+4)) + if err != nil { + log.Fatal(err) + } + return append(progsize, progbytes...) + } + Exitf("cannot find gcmask for %s", ctxt.loader.SymName(s)) + return nil + } + relocs := ctxt.loader.Relocs(s) + rs := decodeRelocSym(ctxt.loader, s, &relocs, 2*int32(ctxt.Arch.PtrSize)+8+1*int32(ctxt.Arch.PtrSize)) + return ctxt.loader.Data(rs) +} + +// Find the elf.Section of a given shared library that contains a given address. +func findShlibSection(ctxt *Link, path string, addr uint64) *elf.Section { + for _, shlib := range ctxt.Shlibs { + if shlib.Path == path { + for _, sect := range shlib.File.Sections[1:] { // skip the NULL section + if sect.Addr <= addr && addr < sect.Addr+sect.Size { + return sect + } + } + } + } + return nil +} + +func decodetypeGcprogShlib(ctxt *Link, data []byte) uint64 { + return decodeInuxi(ctxt.Arch, data[2*int32(ctxt.Arch.PtrSize)+8+1*int32(ctxt.Arch.PtrSize):], ctxt.Arch.PtrSize) +} + +// decodeItabType returns the itab._type field from an itab. +func decodeItabType(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym { + relocs := ldr.Relocs(symIdx) + return decodeRelocSym(ldr, symIdx, &relocs, int32(arch.PtrSize)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/dwarf.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/dwarf.go new file mode 100644 index 0000000000000000000000000000000000000000..17f2803ebfb23dbc7e0922ff2b602a8f2d40b56e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/dwarf.go @@ -0,0 +1,2299 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// TODO/NICETOHAVE: +// - eliminate DW_CLS_ if not used +// - package info in compilation units +// - assign types to their packages +// - gdb uses c syntax, meaning clumsy quoting is needed for go identifiers. eg +// ptype struct '[]uint8' and qualifiers need to be quoted away +// - file:line info for variables +// - make strings a typedef so prettyprinters can see the underlying string type + +package ld + +import ( + "cmd/internal/dwarf" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "fmt" + "internal/abi" + "internal/buildcfg" + "log" + "path" + "runtime" + "sort" + "strings" + "sync" +) + +// dwctxt is a wrapper intended to satisfy the method set of +// dwarf.Context, so that functions like dwarf.PutAttrs will work with +// DIEs that use loader.Sym as opposed to *sym.Symbol. It is also +// being used as a place to store tables/maps that are useful as part +// of type conversion (this is just a convenience; it would be easy to +// split these things out into another type if need be). +type dwctxt struct { + linkctxt *Link + ldr *loader.Loader + arch *sys.Arch + + // This maps type name string (e.g. "uintptr") to loader symbol for + // the DWARF DIE for that type (e.g. "go:info.type.uintptr") + tmap map[string]loader.Sym + + // This maps loader symbol for the DWARF DIE symbol generated for + // a type (e.g. "go:info.uintptr") to the type symbol itself + // ("type:uintptr"). + // FIXME: try converting this map (and the next one) to a single + // array indexed by loader.Sym -- this may perform better. + rtmap map[loader.Sym]loader.Sym + + // This maps Go type symbol (e.g. "type:XXX") to loader symbol for + // the typedef DIE for that type (e.g. 
"go:info.XXX..def") + tdmap map[loader.Sym]loader.Sym + + // Cache these type symbols, so as to avoid repeatedly looking them up + typeRuntimeEface loader.Sym + typeRuntimeIface loader.Sym + uintptrInfoSym loader.Sym + + // Used at various points in that parallel portion of DWARF gen to + // protect against conflicting updates to globals (such as "gdbscript") + dwmu *sync.Mutex +} + +// dwSym wraps a loader.Sym; this type is meant to obey the interface +// rules for dwarf.Sym from the cmd/internal/dwarf package. DwDie and +// DwAttr objects contain references to symbols via this type. +type dwSym loader.Sym + +func (c dwctxt) PtrSize() int { + return c.arch.PtrSize +} + +func (c dwctxt) Size(s dwarf.Sym) int64 { + return int64(len(c.ldr.Data(loader.Sym(s.(dwSym))))) +} + +func (c dwctxt) AddInt(s dwarf.Sym, size int, i int64) { + ds := loader.Sym(s.(dwSym)) + dsu := c.ldr.MakeSymbolUpdater(ds) + dsu.AddUintXX(c.arch, uint64(i), size) +} + +func (c dwctxt) AddBytes(s dwarf.Sym, b []byte) { + ds := loader.Sym(s.(dwSym)) + dsu := c.ldr.MakeSymbolUpdater(ds) + dsu.AddBytes(b) +} + +func (c dwctxt) AddString(s dwarf.Sym, v string) { + ds := loader.Sym(s.(dwSym)) + dsu := c.ldr.MakeSymbolUpdater(ds) + dsu.Addstring(v) +} + +func (c dwctxt) AddAddress(s dwarf.Sym, data interface{}, value int64) { + ds := loader.Sym(s.(dwSym)) + dsu := c.ldr.MakeSymbolUpdater(ds) + if value != 0 { + value -= dsu.Value() + } + tgtds := loader.Sym(data.(dwSym)) + dsu.AddAddrPlus(c.arch, tgtds, value) +} + +func (c dwctxt) AddCURelativeAddress(s dwarf.Sym, data interface{}, value int64) { + ds := loader.Sym(s.(dwSym)) + dsu := c.ldr.MakeSymbolUpdater(ds) + if value != 0 { + value -= dsu.Value() + } + tgtds := loader.Sym(data.(dwSym)) + dsu.AddCURelativeAddrPlus(c.arch, tgtds, value) +} + +func (c dwctxt) AddSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64) { + ds := loader.Sym(s.(dwSym)) + dsu := c.ldr.MakeSymbolUpdater(ds) + tds := loader.Sym(t.(dwSym)) + switch size { + 
default: + c.linkctxt.Errorf(ds, "invalid size %d in adddwarfref\n", size) + case c.arch.PtrSize, 4: + } + dsu.AddSymRef(c.arch, tds, ofs, objabi.R_ADDROFF, size) +} + +func (c dwctxt) AddDWARFAddrSectionOffset(s dwarf.Sym, t interface{}, ofs int64) { + size := 4 + if isDwarf64(c.linkctxt) { + size = 8 + } + ds := loader.Sym(s.(dwSym)) + dsu := c.ldr.MakeSymbolUpdater(ds) + tds := loader.Sym(t.(dwSym)) + switch size { + default: + c.linkctxt.Errorf(ds, "invalid size %d in adddwarfref\n", size) + case c.arch.PtrSize, 4: + } + dsu.AddSymRef(c.arch, tds, ofs, objabi.R_DWARFSECREF, size) +} + +func (c dwctxt) Logf(format string, args ...interface{}) { + c.linkctxt.Logf(format, args...) +} + +// At the moment these interfaces are only used in the compiler. + +func (c dwctxt) CurrentOffset(s dwarf.Sym) int64 { + panic("should be used only in the compiler") +} + +func (c dwctxt) RecordDclReference(s dwarf.Sym, t dwarf.Sym, dclIdx int, inlIndex int) { + panic("should be used only in the compiler") +} + +func (c dwctxt) RecordChildDieOffsets(s dwarf.Sym, vars []*dwarf.Var, offsets []int32) { + panic("should be used only in the compiler") +} + +func isDwarf64(ctxt *Link) bool { + return ctxt.HeadType == objabi.Haix +} + +// https://sourceware.org/gdb/onlinedocs/gdb/dotdebug_005fgdb_005fscripts-section.html +// Each entry inside .debug_gdb_scripts section begins with a non-null prefix +// byte that specifies the kind of entry. The following entries are supported: +const ( + GdbScriptPythonFileId = 1 + GdbScriptSchemeFileId = 3 + GdbScriptPythonTextId = 4 + GdbScriptSchemeTextId = 6 +) + +var gdbscript string + +// dwarfSecInfo holds information about a DWARF output section, +// specifically a section symbol and a list of symbols contained in +// that section. On the syms list, the first symbol will always be the +// section symbol, then any remaining symbols (if any) will be +// sub-symbols in that section. 
Note that for some sections (eg: +// .debug_abbrev), the section symbol is all there is (all content is +// contained in it). For other sections (eg: .debug_info), the section +// symbol is empty and all the content is in the sub-symbols. Finally +// there are some sections (eg: .debug_ranges) where it is a mix (both +// the section symbol and the sub-symbols have content) +type dwarfSecInfo struct { + syms []loader.Sym +} + +// secSym returns the section symbol for the section. +func (dsi *dwarfSecInfo) secSym() loader.Sym { + if len(dsi.syms) == 0 { + return 0 + } + return dsi.syms[0] +} + +// subSyms returns a list of sub-symbols for the section. +func (dsi *dwarfSecInfo) subSyms() []loader.Sym { + if len(dsi.syms) == 0 { + return []loader.Sym{} + } + return dsi.syms[1:] +} + +// dwarfp stores the collected DWARF symbols created during +// dwarf generation. +var dwarfp []dwarfSecInfo + +func (d *dwctxt) writeabbrev() dwarfSecInfo { + abrvs := d.ldr.CreateSymForUpdate(".debug_abbrev", 0) + abrvs.SetType(sym.SDWARFSECT) + abrvs.AddBytes(dwarf.GetAbbrev()) + return dwarfSecInfo{syms: []loader.Sym{abrvs.Sym()}} +} + +var dwtypes dwarf.DWDie + +// newattr attaches a new attribute to the specified DIE. +// +// FIXME: at the moment attributes are stored in a linked list in a +// fairly space-inefficient way -- it might be better to instead look +// up all attrs in a single large table, then store indices into the +// table in the DIE. This would allow us to common up storage for +// attributes that are shared by many DIEs (ex: byte size of N). +func newattr(die *dwarf.DWDie, attr uint16, cls int, value int64, data interface{}) { + a := new(dwarf.DWAttr) + a.Link = die.Attr + die.Attr = a + a.Atr = attr + a.Cls = uint8(cls) + a.Value = value + a.Data = data +} + +// Each DIE (except the root ones) has at least 1 attribute: its +// name. getattr moves the desired one to the front so +// frequently searched ones are found faster. 
+func getattr(die *dwarf.DWDie, attr uint16) *dwarf.DWAttr { + if die.Attr.Atr == attr { + return die.Attr + } + + a := die.Attr + b := a.Link + for b != nil { + if b.Atr == attr { + a.Link = b.Link + b.Link = die.Attr + die.Attr = b + return b + } + + a = b + b = b.Link + } + + return nil +} + +// Every DIE manufactured by the linker has at least an AT_name +// attribute (but it will only be written out if it is listed in the abbrev). +// The compiler does create nameless DWARF DIEs (ex: concrete subprogram +// instance). +// FIXME: it would be more efficient to bulk-allocate DIEs. +func (d *dwctxt) newdie(parent *dwarf.DWDie, abbrev int, name string) *dwarf.DWDie { + die := new(dwarf.DWDie) + die.Abbrev = abbrev + die.Link = parent.Child + parent.Child = die + + newattr(die, dwarf.DW_AT_name, dwarf.DW_CLS_STRING, int64(len(name)), name) + + // Sanity check: all DIEs created in the linker should be named. + if name == "" { + panic("nameless DWARF DIE") + } + + var st sym.SymKind + switch abbrev { + case dwarf.DW_ABRV_FUNCTYPEPARAM, dwarf.DW_ABRV_DOTDOTDOT, dwarf.DW_ABRV_STRUCTFIELD, dwarf.DW_ABRV_ARRAYRANGE: + // There are no relocations against these dies, and their names + // are not unique, so don't create a symbol. + return die + case dwarf.DW_ABRV_COMPUNIT, dwarf.DW_ABRV_COMPUNIT_TEXTLESS: + // Avoid collisions with "real" symbol names. + name = fmt.Sprintf(".pkg.%s.%d", name, len(d.linkctxt.compUnits)) + st = sym.SDWARFCUINFO + case dwarf.DW_ABRV_VARIABLE: + st = sym.SDWARFVAR + default: + // Everything else is assigned a type of SDWARFTYPE. Note that + // this also includes loose ends such as STRUCT_FIELD.
+ st = sym.SDWARFTYPE + } + ds := d.ldr.LookupOrCreateSym(dwarf.InfoPrefix+name, 0) + dsu := d.ldr.MakeSymbolUpdater(ds) + dsu.SetType(st) + d.ldr.SetAttrNotInSymbolTable(ds, true) + d.ldr.SetAttrReachable(ds, true) + die.Sym = dwSym(ds) + if abbrev >= dwarf.DW_ABRV_NULLTYPE && abbrev <= dwarf.DW_ABRV_TYPEDECL { + d.tmap[name] = ds + } + + return die +} + +func walktypedef(die *dwarf.DWDie) *dwarf.DWDie { + if die == nil { + return nil + } + // Resolve typedef if present. + if die.Abbrev == dwarf.DW_ABRV_TYPEDECL { + for attr := die.Attr; attr != nil; attr = attr.Link { + if attr.Atr == dwarf.DW_AT_type && attr.Cls == dwarf.DW_CLS_REFERENCE && attr.Data != nil { + return attr.Data.(*dwarf.DWDie) + } + } + } + + return die +} + +func (d *dwctxt) walksymtypedef(symIdx loader.Sym) loader.Sym { + + // We're being given the loader symbol for the type DIE, e.g. + // "go:info.type.uintptr". Map that first to the type symbol (e.g. + // "type:uintptr") and then to the typedef DIE for the type. + // FIXME: this seems clunky, maybe there is a better way to do this. + + if ts, ok := d.rtmap[symIdx]; ok { + if def, ok := d.tdmap[ts]; ok { + return def + } + d.linkctxt.Errorf(ts, "internal error: no entry for sym %d in tdmap\n", ts) + return 0 + } + d.linkctxt.Errorf(symIdx, "internal error: no entry for sym %d in rtmap\n", symIdx) + return 0 +} + +// Find child by AT_name using hashtable if available or linear scan +// if not. +func findchild(die *dwarf.DWDie, name string) *dwarf.DWDie { + var prev *dwarf.DWDie + for ; die != prev; prev, die = die, walktypedef(die) { + for a := die.Child; a != nil; a = a.Link { + if name == getattr(a, dwarf.DW_AT_name).Data { + return a + } + } + continue + } + return nil +} + +// find looks up the loader symbol for the DWARF DIE generated for the +// type with the specified name. 
+func (d *dwctxt) find(name string) loader.Sym { + return d.tmap[name] +} + +func (d *dwctxt) mustFind(name string) loader.Sym { + r := d.find(name) + if r == 0 { + Exitf("dwarf find: cannot find %s", name) + } + return r +} + +func (d *dwctxt) adddwarfref(sb *loader.SymbolBuilder, t loader.Sym, size int) { + switch size { + default: + d.linkctxt.Errorf(sb.Sym(), "invalid size %d in adddwarfref\n", size) + case d.arch.PtrSize, 4: + } + sb.AddSymRef(d.arch, t, 0, objabi.R_DWARFSECREF, size) +} + +func (d *dwctxt) newrefattr(die *dwarf.DWDie, attr uint16, ref loader.Sym) { + if ref == 0 { + return + } + newattr(die, attr, dwarf.DW_CLS_REFERENCE, 0, dwSym(ref)) +} + +func (d *dwctxt) dtolsym(s dwarf.Sym) loader.Sym { + if s == nil { + return 0 + } + dws := loader.Sym(s.(dwSym)) + return dws +} + +func (d *dwctxt) putdie(syms []loader.Sym, die *dwarf.DWDie) []loader.Sym { + s := d.dtolsym(die.Sym) + if s == 0 { + s = syms[len(syms)-1] + } else { + syms = append(syms, s) + } + sDwsym := dwSym(s) + dwarf.Uleb128put(d, sDwsym, int64(die.Abbrev)) + dwarf.PutAttrs(d, sDwsym, die.Abbrev, die.Attr) + if dwarf.HasChildren(die) { + for die := die.Child; die != nil; die = die.Link { + syms = d.putdie(syms, die) + } + dsu := d.ldr.MakeSymbolUpdater(syms[len(syms)-1]) + dsu.AddUint8(0) + } + return syms +} + +func reverselist(list **dwarf.DWDie) { + curr := *list + var prev *dwarf.DWDie + for curr != nil { + next := curr.Link + curr.Link = prev + prev = curr + curr = next + } + + *list = prev +} + +func reversetree(list **dwarf.DWDie) { + reverselist(list) + for die := *list; die != nil; die = die.Link { + if dwarf.HasChildren(die) { + reversetree(&die.Child) + } + } +} + +func newmemberoffsetattr(die *dwarf.DWDie, offs int32) { + newattr(die, dwarf.DW_AT_data_member_location, dwarf.DW_CLS_CONSTANT, int64(offs), nil) +} + +func (d *dwctxt) lookupOrDiag(n string) loader.Sym { + symIdx := d.ldr.Lookup(n, 0) + if symIdx == 0 { + Exitf("dwarf: missing type: %s", n) + } + if 
len(d.ldr.Data(symIdx)) == 0 { + Exitf("dwarf: missing type (no data): %s", n) + } + + return symIdx +} + +func (d *dwctxt) dotypedef(parent *dwarf.DWDie, name string, def *dwarf.DWDie) *dwarf.DWDie { + // Only emit typedefs for real names. + if strings.HasPrefix(name, "map[") { + return nil + } + if strings.HasPrefix(name, "struct {") { + return nil + } + // cmd/compile uses "noalg.struct {...}" as type name when hash and eq algorithm generation of + // this struct type is suppressed. + if strings.HasPrefix(name, "noalg.struct {") { + return nil + } + if strings.HasPrefix(name, "chan ") { + return nil + } + if name[0] == '[' || name[0] == '*' { + return nil + } + if def == nil { + Errorf(nil, "dwarf: bad def in dotypedef") + } + + // Create a new loader symbol for the typedef. We no longer + // do lookups of typedef symbols by name, so this is going + // to be an anonymous symbol (we want this for perf reasons). + tds := d.ldr.CreateExtSym("", 0) + tdsu := d.ldr.MakeSymbolUpdater(tds) + tdsu.SetType(sym.SDWARFTYPE) + def.Sym = dwSym(tds) + d.ldr.SetAttrNotInSymbolTable(tds, true) + d.ldr.SetAttrReachable(tds, true) + + // The typedef entry must be created after the def, + // so that future lookups will find the typedef instead + // of the real definition. This hooks the typedef into any + // circular definition loops, so that gdb can understand them. + die := d.newdie(parent, dwarf.DW_ABRV_TYPEDECL, name) + + d.newrefattr(die, dwarf.DW_AT_type, tds) + + return die +} + +// Define gotype, for composite ones recurse into constituents. +func (d *dwctxt) defgotype(gotype loader.Sym) loader.Sym { + if gotype == 0 { + return d.mustFind("<unspecified>") + } + + // If we already have a tdmap entry for the gotype, return it.
+ if ds, ok := d.tdmap[gotype]; ok { + return ds + } + + sn := d.ldr.SymName(gotype) + if !strings.HasPrefix(sn, "type:") { + d.linkctxt.Errorf(gotype, "dwarf: type name doesn't start with \"type:\"") + return d.mustFind("<unspecified>") + } + name := sn[5:] // could also decode from Type.string + + sdie := d.find(name) + if sdie != 0 { + return sdie + } + + gtdwSym := d.newtype(gotype) + d.tdmap[gotype] = loader.Sym(gtdwSym.Sym.(dwSym)) + return loader.Sym(gtdwSym.Sym.(dwSym)) +} + +func (d *dwctxt) newtype(gotype loader.Sym) *dwarf.DWDie { + sn := d.ldr.SymName(gotype) + name := sn[5:] // could also decode from Type.string + tdata := d.ldr.Data(gotype) + if len(tdata) == 0 { + d.linkctxt.Errorf(gotype, "missing type") + } + kind := decodetypeKind(d.arch, tdata) + bytesize := decodetypeSize(d.arch, tdata) + + var die, typedefdie *dwarf.DWDie + switch kind { + case objabi.KindBool: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_BASETYPE, name) + newattr(die, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_boolean, 0) + newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) + + case objabi.KindInt, + objabi.KindInt8, + objabi.KindInt16, + objabi.KindInt32, + objabi.KindInt64: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_BASETYPE, name) + newattr(die, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_signed, 0) + newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) + + case objabi.KindUint, + objabi.KindUint8, + objabi.KindUint16, + objabi.KindUint32, + objabi.KindUint64, + objabi.KindUintptr: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_BASETYPE, name) + newattr(die, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_unsigned, 0) + newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) + + case objabi.KindFloat32, + objabi.KindFloat64: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_BASETYPE, name) + newattr(die, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_float, 0) + newattr(die, dwarf.DW_AT_byte_size,
dwarf.DW_CLS_CONSTANT, bytesize, 0) + + case objabi.KindComplex64, + objabi.KindComplex128: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_BASETYPE, name) + newattr(die, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_complex_float, 0) + newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) + + case objabi.KindArray: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_ARRAYTYPE, name) + typedefdie = d.dotypedef(&dwtypes, name, die) + newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) + s := decodetypeArrayElem(d.ldr, d.arch, gotype) + d.newrefattr(die, dwarf.DW_AT_type, d.defgotype(s)) + fld := d.newdie(die, dwarf.DW_ABRV_ARRAYRANGE, "range") + + // use actual length not upper bound; correct for 0-length arrays. + newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, decodetypeArrayLen(d.ldr, d.arch, gotype), 0) + + d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym) + + case objabi.KindChan: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_CHANTYPE, name) + s := decodetypeChanElem(d.ldr, d.arch, gotype) + d.newrefattr(die, dwarf.DW_AT_go_elem, d.defgotype(s)) + // Save elem type for synthesizechantypes. We could synthesize here + // but that would change the order of DIEs we output. + d.newrefattr(die, dwarf.DW_AT_type, s) + + case objabi.KindFunc: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_FUNCTYPE, name) + newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) + typedefdie = d.dotypedef(&dwtypes, name, die) + data := d.ldr.Data(gotype) + // FIXME: add caching or reuse reloc slice. 
+ relocs := d.ldr.Relocs(gotype) + nfields := decodetypeFuncInCount(d.arch, data) + for i := 0; i < nfields; i++ { + s := decodetypeFuncInType(d.ldr, d.arch, gotype, &relocs, i) + sn := d.ldr.SymName(s) + fld := d.newdie(die, dwarf.DW_ABRV_FUNCTYPEPARAM, sn[5:]) + d.newrefattr(fld, dwarf.DW_AT_type, d.defgotype(s)) + } + + if decodetypeFuncDotdotdot(d.arch, data) { + d.newdie(die, dwarf.DW_ABRV_DOTDOTDOT, "...") + } + nfields = decodetypeFuncOutCount(d.arch, data) + for i := 0; i < nfields; i++ { + s := decodetypeFuncOutType(d.ldr, d.arch, gotype, &relocs, i) + sn := d.ldr.SymName(s) + fld := d.newdie(die, dwarf.DW_ABRV_FUNCTYPEPARAM, sn[5:]) + d.newrefattr(fld, dwarf.DW_AT_type, d.defptrto(d.defgotype(s))) + } + + case objabi.KindInterface: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_IFACETYPE, name) + typedefdie = d.dotypedef(&dwtypes, name, die) + data := d.ldr.Data(gotype) + nfields := int(decodetypeIfaceMethodCount(d.arch, data)) + var s loader.Sym + if nfields == 0 { + s = d.typeRuntimeEface + } else { + s = d.typeRuntimeIface + } + d.newrefattr(die, dwarf.DW_AT_type, d.defgotype(s)) + + case objabi.KindMap: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_MAPTYPE, name) + s := decodetypeMapKey(d.ldr, d.arch, gotype) + d.newrefattr(die, dwarf.DW_AT_go_key, d.defgotype(s)) + s = decodetypeMapValue(d.ldr, d.arch, gotype) + d.newrefattr(die, dwarf.DW_AT_go_elem, d.defgotype(s)) + // Save gotype for use in synthesizemaptypes. We could synthesize here, + // but that would change the order of the DIEs. 
+ d.newrefattr(die, dwarf.DW_AT_type, gotype) + + case objabi.KindPtr: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_PTRTYPE, name) + typedefdie = d.dotypedef(&dwtypes, name, die) + s := decodetypePtrElem(d.ldr, d.arch, gotype) + d.newrefattr(die, dwarf.DW_AT_type, d.defgotype(s)) + + case objabi.KindSlice: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_SLICETYPE, name) + typedefdie = d.dotypedef(&dwtypes, name, die) + newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) + s := decodetypeArrayElem(d.ldr, d.arch, gotype) + elem := d.defgotype(s) + d.newrefattr(die, dwarf.DW_AT_go_elem, elem) + + case objabi.KindString: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_STRINGTYPE, name) + newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) + + case objabi.KindStruct: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_STRUCTTYPE, name) + typedefdie = d.dotypedef(&dwtypes, name, die) + newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) + nfields := decodetypeStructFieldCount(d.ldr, d.arch, gotype) + for i := 0; i < nfields; i++ { + f := decodetypeStructFieldName(d.ldr, d.arch, gotype, i) + s := decodetypeStructFieldType(d.ldr, d.arch, gotype, i) + if f == "" { + sn := d.ldr.SymName(s) + f = sn[5:] // skip "type:" + } + fld := d.newdie(die, dwarf.DW_ABRV_STRUCTFIELD, f) + d.newrefattr(fld, dwarf.DW_AT_type, d.defgotype(s)) + offset := decodetypeStructFieldOffset(d.ldr, d.arch, gotype, i) + newmemberoffsetattr(fld, int32(offset)) + if decodetypeStructFieldEmbedded(d.ldr, d.arch, gotype, i) { + newattr(fld, dwarf.DW_AT_go_embedded_field, dwarf.DW_CLS_FLAG, 1, 0) + } + } + + case objabi.KindUnsafePointer: + die = d.newdie(&dwtypes, dwarf.DW_ABRV_BARE_PTRTYPE, name) + + default: + d.linkctxt.Errorf(gotype, "dwarf: definition of unknown kind %d", kind) + die = d.newdie(&dwtypes, dwarf.DW_ABRV_TYPEDECL, name) + d.newrefattr(die, dwarf.DW_AT_type, d.mustFind("<unspecified>")) + } + + newattr(die, dwarf.DW_AT_go_kind, dwarf.DW_CLS_CONSTANT, int64(kind), 0) + +
if d.ldr.AttrReachable(gotype) { + newattr(die, dwarf.DW_AT_go_runtime_type, dwarf.DW_CLS_GO_TYPEREF, 0, dwSym(gotype)) + } + + // Sanity check. + if _, ok := d.rtmap[gotype]; ok { + log.Fatalf("internal error: rtmap entry already installed\n") + } + + ds := loader.Sym(die.Sym.(dwSym)) + if typedefdie != nil { + ds = loader.Sym(typedefdie.Sym.(dwSym)) + } + d.rtmap[ds] = gotype + + if _, ok := prototypedies[sn]; ok { + prototypedies[sn] = die + } + + if typedefdie != nil { + return typedefdie + } + return die +} + +func (d *dwctxt) nameFromDIESym(dwtypeDIESym loader.Sym) string { + sn := d.ldr.SymName(dwtypeDIESym) + return sn[len(dwarf.InfoPrefix):] +} + +func (d *dwctxt) defptrto(dwtype loader.Sym) loader.Sym { + + // FIXME: it would be nice if the compiler attached an aux symbol + // ref from the element type to the pointer type -- it would be + // more efficient to do it this way as opposed to via name lookups. + + ptrname := "*" + d.nameFromDIESym(dwtype) + if die := d.find(ptrname); die != 0 { + return die + } + + pdie := d.newdie(&dwtypes, dwarf.DW_ABRV_PTRTYPE, ptrname) + d.newrefattr(pdie, dwarf.DW_AT_type, dwtype) + + // The DWARF info synthesizes pointer types that don't exist at the + // language level, like *hash<...> and *bucket<...>, and the data + // pointers of slices. Link to the ones we can find. + gts := d.ldr.Lookup("type:"+ptrname, 0) + if gts != 0 && d.ldr.AttrReachable(gts) { + newattr(pdie, dwarf.DW_AT_go_kind, dwarf.DW_CLS_CONSTANT, int64(objabi.KindPtr), 0) + newattr(pdie, dwarf.DW_AT_go_runtime_type, dwarf.DW_CLS_GO_TYPEREF, 0, dwSym(gts)) + } + + if gts != 0 { + ds := loader.Sym(pdie.Sym.(dwSym)) + d.rtmap[ds] = gts + d.tdmap[gts] = ds + } + + return d.dtolsym(pdie.Sym) +} + +// Copies src's children into dst. Copies attributes by value. +// DWAttr.data is copied as pointer only. If except is one of +// the top-level children, it will not be copied. 
+func (d *dwctxt) copychildrenexcept(ctxt *Link, dst *dwarf.DWDie, src *dwarf.DWDie, except *dwarf.DWDie) { + for src = src.Child; src != nil; src = src.Link { + if src == except { + continue + } + c := d.newdie(dst, src.Abbrev, getattr(src, dwarf.DW_AT_name).Data.(string)) + for a := src.Attr; a != nil; a = a.Link { + newattr(c, a.Atr, int(a.Cls), a.Value, a.Data) + } + d.copychildrenexcept(ctxt, c, src, nil) + } + + reverselist(&dst.Child) +} + +func (d *dwctxt) copychildren(ctxt *Link, dst *dwarf.DWDie, src *dwarf.DWDie) { + d.copychildrenexcept(ctxt, dst, src, nil) +} + +// Search children (assumed to have TAG_member) for the one named +// field and set its AT_type to dwtype +func (d *dwctxt) substitutetype(structdie *dwarf.DWDie, field string, dwtype loader.Sym) { + child := findchild(structdie, field) + if child == nil { + Exitf("dwarf substitutetype: %s does not have member %s", + getattr(structdie, dwarf.DW_AT_name).Data, field) + return + } + + a := getattr(child, dwarf.DW_AT_type) + if a != nil { + a.Data = dwSym(dwtype) + } else { + d.newrefattr(child, dwarf.DW_AT_type, dwtype) + } +} + +func (d *dwctxt) findprotodie(ctxt *Link, name string) *dwarf.DWDie { + die, ok := prototypedies[name] + if ok && die == nil { + d.defgotype(d.lookupOrDiag(name)) + die = prototypedies[name] + } + if die == nil { + log.Fatalf("internal error: DIE generation failed for %s\n", name) + } + return die +} + +func (d *dwctxt) synthesizestringtypes(ctxt *Link, die *dwarf.DWDie) { + prototype := walktypedef(d.findprotodie(ctxt, "type:runtime.stringStructDWARF")) + if prototype == nil { + return + } + + for ; die != nil; die = die.Link { + if die.Abbrev != dwarf.DW_ABRV_STRINGTYPE { + continue + } + d.copychildren(ctxt, die, prototype) + } +} + +func (d *dwctxt) synthesizeslicetypes(ctxt *Link, die *dwarf.DWDie) { + prototype := walktypedef(d.findprotodie(ctxt, "type:runtime.slice")) + if prototype == nil { + return + } + + for ; die != nil; die = die.Link { + if die.Abbrev != 
dwarf.DW_ABRV_SLICETYPE { + continue + } + d.copychildren(ctxt, die, prototype) + elem := loader.Sym(getattr(die, dwarf.DW_AT_go_elem).Data.(dwSym)) + d.substitutetype(die, "array", d.defptrto(elem)) + } +} + +func mkinternaltypename(base string, arg1 string, arg2 string) string { + if arg2 == "" { + return fmt.Sprintf("%s<%s>", base, arg1) + } + return fmt.Sprintf("%s<%s,%s>", base, arg1, arg2) +} + +// synthesizemaptypes is way too closely married to runtime/hashmap.c +const ( + MaxKeySize = abi.MapMaxKeyBytes + MaxValSize = abi.MapMaxElemBytes + BucketSize = abi.MapBucketCount +) + +func (d *dwctxt) mkinternaltype(ctxt *Link, abbrev int, typename, keyname, valname string, f func(*dwarf.DWDie)) loader.Sym { + name := mkinternaltypename(typename, keyname, valname) + symname := dwarf.InfoPrefix + name + s := d.ldr.Lookup(symname, 0) + if s != 0 && d.ldr.SymType(s) == sym.SDWARFTYPE { + return s + } + die := d.newdie(&dwtypes, abbrev, name) + f(die) + return d.dtolsym(die.Sym) +} + +func (d *dwctxt) synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) { + hash := walktypedef(d.findprotodie(ctxt, "type:runtime.hmap")) + bucket := walktypedef(d.findprotodie(ctxt, "type:runtime.bmap")) + + if hash == nil { + return + } + + for ; die != nil; die = die.Link { + if die.Abbrev != dwarf.DW_ABRV_MAPTYPE { + continue + } + gotype := loader.Sym(getattr(die, dwarf.DW_AT_type).Data.(dwSym)) + keytype := decodetypeMapKey(d.ldr, d.arch, gotype) + valtype := decodetypeMapValue(d.ldr, d.arch, gotype) + keydata := d.ldr.Data(keytype) + valdata := d.ldr.Data(valtype) + keysize, valsize := decodetypeSize(d.arch, keydata), decodetypeSize(d.arch, valdata) + keytype, valtype = d.walksymtypedef(d.defgotype(keytype)), d.walksymtypedef(d.defgotype(valtype)) + + // compute size info like hashmap.c does. 
+ indirectKey, indirectVal := false, false + if keysize > MaxKeySize { + keysize = int64(d.arch.PtrSize) + indirectKey = true + } + if valsize > MaxValSize { + valsize = int64(d.arch.PtrSize) + indirectVal = true + } + + // Construct type to represent an array of BucketSize keys + keyname := d.nameFromDIESym(keytype) + dwhks := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]key", keyname, "", func(dwhk *dwarf.DWDie) { + newattr(dwhk, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, BucketSize*keysize, 0) + t := keytype + if indirectKey { + t = d.defptrto(keytype) + } + d.newrefattr(dwhk, dwarf.DW_AT_type, t) + fld := d.newdie(dwhk, dwarf.DW_ABRV_ARRAYRANGE, "size") + newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, BucketSize, 0) + d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym) + }) + + // Construct type to represent an array of BucketSize values + valname := d.nameFromDIESym(valtype) + dwhvs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]val", valname, "", func(dwhv *dwarf.DWDie) { + newattr(dwhv, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, BucketSize*valsize, 0) + t := valtype + if indirectVal { + t = d.defptrto(valtype) + } + d.newrefattr(dwhv, dwarf.DW_AT_type, t) + fld := d.newdie(dwhv, dwarf.DW_ABRV_ARRAYRANGE, "size") + newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, BucketSize, 0) + d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym) + }) + + // Construct bucket + dwhbs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "bucket", keyname, valname, func(dwhb *dwarf.DWDie) { + // Copy over all fields except the field "data" from the generic + // bucket. "data" will be replaced with keys/values below. 
+ d.copychildrenexcept(ctxt, dwhb, bucket, findchild(bucket, "data")) + + fld := d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "keys") + d.newrefattr(fld, dwarf.DW_AT_type, dwhks) + newmemberoffsetattr(fld, BucketSize) + fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "values") + d.newrefattr(fld, dwarf.DW_AT_type, dwhvs) + newmemberoffsetattr(fld, BucketSize+BucketSize*int32(keysize)) + fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "overflow") + d.newrefattr(fld, dwarf.DW_AT_type, d.defptrto(d.dtolsym(dwhb.Sym))) + newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize))) + if d.arch.RegSize > d.arch.PtrSize { + fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "pad") + d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym) + newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize))+int32(d.arch.PtrSize)) + } + + newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, BucketSize+BucketSize*keysize+BucketSize*valsize+int64(d.arch.RegSize), 0) + }) + + // Construct hash + dwhs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "hash", keyname, valname, func(dwh *dwarf.DWDie) { + d.copychildren(ctxt, dwh, hash) + d.substitutetype(dwh, "buckets", d.defptrto(dwhbs)) + d.substitutetype(dwh, "oldbuckets", d.defptrto(dwhbs)) + newattr(dwh, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, getattr(hash, dwarf.DW_AT_byte_size).Value, nil) + }) + + // make map type a pointer to hash + d.newrefattr(die, dwarf.DW_AT_type, d.defptrto(dwhs)) + } +} + +func (d *dwctxt) synthesizechantypes(ctxt *Link, die *dwarf.DWDie) { + sudog := walktypedef(d.findprotodie(ctxt, "type:runtime.sudog")) + waitq := walktypedef(d.findprotodie(ctxt, "type:runtime.waitq")) + hchan := walktypedef(d.findprotodie(ctxt, "type:runtime.hchan")) + if sudog == nil || waitq == nil || hchan == nil { + return + } + + sudogsize := int(getattr(sudog, dwarf.DW_AT_byte_size).Value) + + for ; die != nil; die = die.Link { + if die.Abbrev != dwarf.DW_ABRV_CHANTYPE { + continue + } 
+ elemgotype := loader.Sym(getattr(die, dwarf.DW_AT_type).Data.(dwSym)) + tname := d.ldr.SymName(elemgotype) + elemname := tname[5:] + elemtype := d.walksymtypedef(d.defgotype(d.lookupOrDiag(tname))) + + // sudog + dwss := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "sudog", elemname, "", func(dws *dwarf.DWDie) { + d.copychildren(ctxt, dws, sudog) + d.substitutetype(dws, "elem", d.defptrto(elemtype)) + newattr(dws, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, int64(sudogsize), nil) + }) + + // waitq + dwws := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "waitq", elemname, "", func(dww *dwarf.DWDie) { + + d.copychildren(ctxt, dww, waitq) + d.substitutetype(dww, "first", d.defptrto(dwss)) + d.substitutetype(dww, "last", d.defptrto(dwss)) + newattr(dww, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, getattr(waitq, dwarf.DW_AT_byte_size).Value, nil) + }) + + // hchan + dwhs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "hchan", elemname, "", func(dwh *dwarf.DWDie) { + d.copychildren(ctxt, dwh, hchan) + d.substitutetype(dwh, "recvq", dwws) + d.substitutetype(dwh, "sendq", dwws) + newattr(dwh, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, getattr(hchan, dwarf.DW_AT_byte_size).Value, nil) + }) + + d.newrefattr(die, dwarf.DW_AT_type, d.defptrto(dwhs)) + } +} + +// createUnitLength creates the initial length field with value v and update +// offset of unit_length if needed. +func (d *dwctxt) createUnitLength(su *loader.SymbolBuilder, v uint64) { + if isDwarf64(d.linkctxt) { + su.AddUint32(d.arch, 0xFFFFFFFF) + } + d.addDwarfAddrField(su, v) +} + +// addDwarfAddrField adds a DWARF field in DWARF 64bits or 32bits. +func (d *dwctxt) addDwarfAddrField(sb *loader.SymbolBuilder, v uint64) { + if isDwarf64(d.linkctxt) { + sb.AddUint(d.arch, v) + } else { + sb.AddUint32(d.arch, uint32(v)) + } +} + +// addDwarfAddrRef adds a DWARF pointer in DWARF 64bits or 32bits. 
func (d *dwctxt) addDwarfAddrRef(sb *loader.SymbolBuilder, t loader.Sym) {
	if isDwarf64(d.linkctxt) {
		d.adddwarfref(sb, t, 8)
	} else {
		d.adddwarfref(sb, t, 4)
	}
}

// calcCompUnitRanges calculates the PC ranges of the compilation units.
func (d *dwctxt) calcCompUnitRanges() {
	var prevUnit *sym.CompilationUnit
	for _, s := range d.linkctxt.Textp {
		sym := loader.Sym(s)

		fi := d.ldr.FuncInfo(sym)
		if !fi.Valid() {
			continue
		}

		// Skip linker-created functions (ex: runtime.addmoduledata), since they
		// don't have DWARF to begin with.
		unit := d.ldr.SymUnit(sym)
		if unit == nil {
			continue
		}

		// Update PC ranges.
		//
		// We don't simply compare the end of the previous
		// symbol with the start of the next because there's
		// often a little padding between them. Instead, we
		// only create boundaries between symbols from
		// different units.
		sval := d.ldr.SymValue(sym)
		u0val := d.ldr.SymValue(loader.Sym(unit.Textp[0]))
		if prevUnit != unit {
			unit.PCs = append(unit.PCs, dwarf.Range{Start: sval - u0val})
			prevUnit = unit
		}
		unit.PCs[len(unit.PCs)-1].End = sval - u0val + int64(len(d.ldr.Data(sym)))
	}
}

// movetomodule appends the children of parent (e.g. the linker-synthesized
// type DIEs in dwtypes) to the end of the runtime compilation unit's DIE
// list, so they are emitted as part of that unit.
func movetomodule(ctxt *Link, parent *dwarf.DWDie) {
	die := ctxt.runtimeCU.DWInfo.Child
	if die == nil {
		ctxt.runtimeCU.DWInfo.Child = parent.Child
		return
	}
	for die.Link != nil {
		die = die.Link
	}
	die.Link = parent.Child
}

/*
 * Generate a sequence of opcodes that is as short as possible.
 * See section 6.2.5
 */
const (
	LINE_BASE   = -4
	LINE_RANGE  = 10
	PC_RANGE    = (255 - OPCODE_BASE) / LINE_RANGE
	OPCODE_BASE = 11
)

/*
 * Walk prog table, emit line program and build DIE tree.
 */

func getCompilationDir() string {
	// OSX requires this be set to something, but it's not easy to choose
	// a value. Linking takes place in a temporary directory, so there's
	// no point including it here. Paths in the file table are usually
	// absolute, in which case debuggers will ignore this value. -trimpath
	// produces relative paths, but we don't know where they start, so
	// all we can do here is try not to make things worse.
	return "."
}

// importInfoSymbol marks the DWARF info symbol dsym as reachable (and
// excluded from the symbol table) and walks its R_DWARFSECREF relocations
// to generate DIEs for any Go types it references that have not been
// processed yet.
func (d *dwctxt) importInfoSymbol(dsym loader.Sym) {
	d.ldr.SetAttrReachable(dsym, true)
	d.ldr.SetAttrNotInSymbolTable(dsym, true)
	dst := d.ldr.SymType(dsym)
	if dst != sym.SDWARFCONST && dst != sym.SDWARFABSFCN {
		log.Fatalf("error: DWARF info sym %d/%s with incorrect type %s", dsym, d.ldr.SymName(dsym), d.ldr.SymType(dsym).String())
	}
	relocs := d.ldr.Relocs(dsym)
	for i := 0; i < relocs.Count(); i++ {
		r := relocs.At(i)
		if r.Type() != objabi.R_DWARFSECREF {
			continue
		}
		rsym := r.Sym()
		// If there is an entry for the symbol in our rtmap, then it
		// means we've processed the type already, and can skip this one.
		if _, ok := d.rtmap[rsym]; ok {
			// type already generated
			continue
		}
		// FIXME: is there a way we could avoid materializing the
		// symbol name here?
		sn := d.ldr.SymName(rsym)
		tn := sn[len(dwarf.InfoPrefix):]
		ts := d.ldr.Lookup("type:"+tn, 0)
		d.defgotype(ts)
	}
}

// expandFile canonicalizes a file-table entry: it strips the compiler's
// file-symbol prefix and then expands a leading GOROOT marker.
func expandFile(fname string) string {
	fname = strings.TrimPrefix(fname, src.FileSymPrefix)
	return expandGoroot(fname)
}

// writeDirFileTables emits the portion of the DWARF line table
// prologue containing the include directories and file names,
// described in section 6.2.4 of the DWARF 4 standard. It walks the
// filepaths for the unit to discover any common directories, which
// are emitted to the directory table first, then the file table is
// emitted after that.
func (d *dwctxt) writeDirFileTables(unit *sym.CompilationUnit, lsu *loader.SymbolBuilder) {
	type fileDir struct {
		base string
		dir  int
	}
	dirNums := make(map[string]int)
	dirs := []string{""}
	files := []fileDir{}

	// Preprocess files to collect directories. This assumes that the
	// file table is already de-duped.
	for i, name := range unit.FileTable {
		name := expandFile(name)
		if len(name) == 0 {
			// Can't have empty filenames, and having a unique
			// filename is quite useful for debugging.
			name = fmt.Sprintf("_%d", i)
		}
		// Note the use of "path" here and not "filepath". The compiler
		// hard-codes to use "/" in DWARF paths (even for Windows), so we
		// want to maintain that here.
		file := path.Base(name)
		dir := path.Dir(name)
		dirIdx, ok := dirNums[dir]
		if !ok && dir != "." {
			dirIdx = len(dirNums) + 1
			dirNums[dir] = dirIdx
			dirs = append(dirs, dir)
		}
		files = append(files, fileDir{base: file, dir: dirIdx})

		// We can't use something that may be dead-code
		// eliminated from a binary here. proc.go contains
		// main and the scheduler, so it's not going anywhere.
		if i := strings.Index(name, "runtime/proc.go"); i >= 0 && unit.Lib.Pkg == "runtime" {
			d.dwmu.Lock()
			if gdbscript == "" {
				k := strings.Index(name, "runtime/proc.go")
				gdbscript = name[:k] + "runtime/runtime-gdb.py"
			}
			d.dwmu.Unlock()
		}
	}

	// Emit directory section. This is a series of nul terminated
	// strings, followed by a single zero byte.
	lsDwsym := dwSym(lsu.Sym())
	for k := 1; k < len(dirs); k++ {
		d.AddString(lsDwsym, dirs[k])
	}
	lsu.AddUint8(0) // terminator

	// Emit file section.
	for k := 0; k < len(files); k++ {
		d.AddString(lsDwsym, files[k].base)
		dwarf.Uleb128put(d, lsDwsym, int64(files[k].dir))
		lsu.AddUint8(0) // mtime
		lsu.AddUint8(0) // length
	}
	lsu.AddUint8(0) // terminator
}

// writelines collects up and chains together the symbols needed to
// form the DWARF line table for the specified compilation unit,
// returning a list of symbols.
// The returned list will include an
// initial symbol containing the line table header and prologue (with
// file table), then a series of compiler-emitted line table symbols
// (one per live function), and finally an epilog symbol containing an
// end-of-sequence operator. The prologue and epilog symbols are passed
// in (having been created earlier); here we add content to them.
func (d *dwctxt) writelines(unit *sym.CompilationUnit, lineProlog loader.Sym) []loader.Sym {
	is_stmt := uint8(1) // initially = recommended default_is_stmt = 1, tracks is_stmt toggles.

	unitstart := int64(-1)
	headerstart := int64(-1)
	headerend := int64(-1)

	syms := make([]loader.Sym, 0, len(unit.Textp)+2)
	syms = append(syms, lineProlog)
	lsu := d.ldr.MakeSymbolUpdater(lineProlog)
	lsDwsym := dwSym(lineProlog)
	newattr(unit.DWInfo, dwarf.DW_AT_stmt_list, dwarf.DW_CLS_PTR, 0, lsDwsym)

	// Write .debug_line Line Number Program Header (sec 6.2.4)
	// Fields marked with (*) must be changed for 64-bit dwarf
	unitLengthOffset := lsu.Size()
	d.createUnitLength(lsu, 0) // unit_length (*), filled in at end
	unitstart = lsu.Size()
	lsu.AddUint16(d.arch, 2) // dwarf version (appendix F) -- version 3 is incompatible w/ XCode 9.0's dsymutil, latest supported on OSX 10.12 as of 2018-05
	headerLengthOffset := lsu.Size()
	d.addDwarfAddrField(lsu, 0) // header_length (*), filled in at end
	headerstart = lsu.Size()

	// cpos == unitstart + 4 + 2 + 4
	lsu.AddUint8(1)                // minimum_instruction_length
	lsu.AddUint8(is_stmt)          // default_is_stmt
	lsu.AddUint8(LINE_BASE & 0xFF) // line_base
	lsu.AddUint8(LINE_RANGE)       // line_range
	lsu.AddUint8(OPCODE_BASE)      // opcode_base
	lsu.AddUint8(0)                // standard_opcode_lengths[1]
	lsu.AddUint8(1)                // standard_opcode_lengths[2]
	lsu.AddUint8(1)                // standard_opcode_lengths[3]
	lsu.AddUint8(1)                // standard_opcode_lengths[4]
	lsu.AddUint8(1)                // standard_opcode_lengths[5]
	lsu.AddUint8(0)                // standard_opcode_lengths[6]
	lsu.AddUint8(0)                // standard_opcode_lengths[7]
	lsu.AddUint8(0)                // standard_opcode_lengths[8]
	lsu.AddUint8(1)                // standard_opcode_lengths[9]
	lsu.AddUint8(0)                // standard_opcode_lengths[10]

	// Call helper to emit dir and file sections.
	d.writeDirFileTables(unit, lsu)

	// capture length at end of file names.
	headerend = lsu.Size()
	unitlen := lsu.Size() - unitstart

	// Output the state machine for each function remaining.
	for _, s := range unit.Textp {
		fnSym := loader.Sym(s)
		_, _, _, lines := d.ldr.GetFuncDwarfAuxSyms(fnSym)

		// Chain the line symbol onto the list.
		if lines != 0 {
			syms = append(syms, lines)
			unitlen += int64(len(d.ldr.Data(lines)))
		}
	}

	if d.linkctxt.HeadType == objabi.Haix {
		addDwsectCUSize(".debug_line", unit.Lib.Pkg, uint64(unitlen))
	}

	if isDwarf64(d.linkctxt) {
		lsu.SetUint(d.arch, unitLengthOffset+4, uint64(unitlen)) // +4 because of 0xFFFFFFFF
		lsu.SetUint(d.arch, headerLengthOffset, uint64(headerend-headerstart))
	} else {
		lsu.SetUint32(d.arch, unitLengthOffset, uint32(unitlen))
		lsu.SetUint32(d.arch, headerLengthOffset, uint32(headerend-headerstart))
	}

	return syms
}

// writepcranges generates the DW_AT_ranges table for compilation unit
// "unit", and returns a collection of ranges symbols (one for the
// compilation unit DIE itself and the remainder from functions in the unit).
func (d *dwctxt) writepcranges(unit *sym.CompilationUnit, base loader.Sym, pcs []dwarf.Range, rangeProlog loader.Sym) []loader.Sym {

	syms := make([]loader.Sym, 0, len(unit.RangeSyms)+1)
	syms = append(syms, rangeProlog)
	rsu := d.ldr.MakeSymbolUpdater(rangeProlog)
	rDwSym := dwSym(rangeProlog)

	// Create PC ranges for the compilation unit DIE.
	newattr(unit.DWInfo, dwarf.DW_AT_ranges, dwarf.DW_CLS_PTR, rsu.Size(), rDwSym)
	newattr(unit.DWInfo, dwarf.DW_AT_low_pc, dwarf.DW_CLS_ADDRESS, 0, dwSym(base))
	dwarf.PutBasedRanges(d, rDwSym, pcs)

	// Collect up the ranges for functions in the unit.
	rsize := uint64(rsu.Size())
	for _, ls := range unit.RangeSyms {
		s := loader.Sym(ls)
		syms = append(syms, s)
		rsize += uint64(d.ldr.SymSize(s))
	}

	if d.linkctxt.HeadType == objabi.Haix {
		addDwsectCUSize(".debug_ranges", unit.Lib.Pkg, rsize)
	}

	return syms
}

/*
 * Emit .debug_frame
 */
const (
	dataAlignmentFactor = -4
)

// appendPCDeltaCFA appends per-PC CFA deltas to b and returns the final slice.
func appendPCDeltaCFA(arch *sys.Arch, b []byte, deltapc, cfa int64) []byte {
	b = append(b, dwarf.DW_CFA_def_cfa_offset_sf)
	b = dwarf.AppendSleb128(b, cfa/dataAlignmentFactor)

	switch {
	case deltapc < 0x40:
		b = append(b, uint8(dwarf.DW_CFA_advance_loc+deltapc))
	case deltapc < 0x100:
		b = append(b, dwarf.DW_CFA_advance_loc1)
		b = append(b, uint8(deltapc))
	case deltapc < 0x10000:
		b = append(b, dwarf.DW_CFA_advance_loc2, 0, 0)
		arch.ByteOrder.PutUint16(b[len(b)-2:], uint16(deltapc))
	default:
		b = append(b, dwarf.DW_CFA_advance_loc4, 0, 0, 0, 0)
		arch.ByteOrder.PutUint32(b[len(b)-4:], uint32(deltapc))
	}
	return b
}

// writeframes emits the .debug_frame section into symbol fs: a single
// CIE (whose shape depends on whether the target has a link register),
// followed by one FDE per valid text symbol, with the per-PC CFA deltas
// derived from each function's pcsp table.
func (d *dwctxt) writeframes(fs loader.Sym) dwarfSecInfo {
	fsd := dwSym(fs)
	fsu := d.ldr.MakeSymbolUpdater(fs)
	fsu.SetType(sym.SDWARFSECT)
	isdw64 := isDwarf64(d.linkctxt)
	haslr := d.linkctxt.Arch.HasLR

	// Length field is 4 bytes on Dwarf32 and 12 bytes on Dwarf64
	lengthFieldSize := int64(4)
	if isdw64 {
		lengthFieldSize += 8
	}

	// Emit the CIE, Section 6.4.1
	cieReserve := uint32(16)
	if haslr {
		cieReserve = 32
	}
	if isdw64 {
		cieReserve += 4 // 4 bytes added for cid
	}
	d.createUnitLength(fsu, uint64(cieReserve))         // initial length, must be multiple of thearch.ptrsize
	d.addDwarfAddrField(fsu, ^uint64(0))                // cid
	fsu.AddUint8(3)                                     // dwarf version (appendix F)
	fsu.AddUint8(0)                                     // augmentation ""
	dwarf.Uleb128put(d, fsd, 1)                         // code_alignment_factor
	dwarf.Sleb128put(d, fsd, dataAlignmentFactor)       // all CFI offset calculations include multiplication with this factor
	dwarf.Uleb128put(d, fsd, int64(thearch.Dwarfreglr)) // return_address_register

	fsu.AddUint8(dwarf.DW_CFA_def_cfa)                  // Set the current frame address..
	dwarf.Uleb128put(d, fsd, int64(thearch.Dwarfregsp)) // ...to use the value in the platform's SP register (defined in l.go)...
	if haslr {
		dwarf.Uleb128put(d, fsd, int64(0)) // ...plus a 0 offset.

		fsu.AddUint8(dwarf.DW_CFA_same_value) // The platform's link register is unchanged during the prologue.
		dwarf.Uleb128put(d, fsd, int64(thearch.Dwarfreglr))

		fsu.AddUint8(dwarf.DW_CFA_val_offset)               // The previous value...
		dwarf.Uleb128put(d, fsd, int64(thearch.Dwarfregsp)) // ...of the platform's SP register...
		dwarf.Uleb128put(d, fsd, int64(0))                  // ...is CFA+0.
	} else {
		dwarf.Uleb128put(d, fsd, int64(d.arch.PtrSize)) // ...plus the word size (because the call instruction implicitly adds one word to the frame).

		fsu.AddUint8(dwarf.DW_CFA_offset_extended)                           // The previous value...
		dwarf.Uleb128put(d, fsd, int64(thearch.Dwarfreglr))                  // ...of the return address...
		dwarf.Uleb128put(d, fsd, int64(-d.arch.PtrSize)/dataAlignmentFactor) // ...is saved at [CFA - (PtrSize/4)].
	}

	pad := int64(cieReserve) + lengthFieldSize - int64(len(d.ldr.Data(fs)))

	if pad < 0 {
		Exitf("dwarf: cieReserve too small by %d bytes.", -pad)
	}

	internalExec := d.linkctxt.BuildMode == BuildModeExe && d.linkctxt.IsInternal()
	addAddrPlus := loader.GenAddAddrPlusFunc(internalExec)

	fsu.AddBytes(zeros[:pad])

	var deltaBuf []byte
	pcsp := obj.NewPCIter(uint32(d.arch.MinLC))
	for _, s := range d.linkctxt.Textp {
		fn := loader.Sym(s)
		fi := d.ldr.FuncInfo(fn)
		if !fi.Valid() {
			continue
		}
		fpcsp := d.ldr.Pcsp(s)

		// Emit a FDE, Section 6.4.1.
		// First build the section contents into a byte buffer.
		deltaBuf = deltaBuf[:0]
		if haslr && fi.TopFrame() {
			// Mark the link register as having an undefined value.
			// This stops call stack unwinders progressing any further.
			// TODO: similar mark on non-LR architectures.
			deltaBuf = append(deltaBuf, dwarf.DW_CFA_undefined)
			deltaBuf = dwarf.AppendUleb128(deltaBuf, uint64(thearch.Dwarfreglr))
		}

		for pcsp.Init(d.linkctxt.loader.Data(fpcsp)); !pcsp.Done; pcsp.Next() {
			nextpc := pcsp.NextPC

			// pciterinit goes up to the end of the function,
			// but DWARF expects us to stop just before the end.
			if int64(nextpc) == int64(len(d.ldr.Data(fn))) {
				nextpc--
				if nextpc < pcsp.PC {
					continue
				}
			}

			spdelta := int64(pcsp.Value)
			if !haslr {
				// Return address has been pushed onto stack.
				spdelta += int64(d.arch.PtrSize)
			}

			if haslr && !fi.TopFrame() {
				// TODO(bryanpkc): This is imprecise. In general, the instruction
				// that stores the return address to the stack frame is not the
				// same one that allocates the frame.
				if pcsp.Value > 0 {
					// The return address is preserved at (CFA-frame_size)
					// after a stack frame has been allocated.
					deltaBuf = append(deltaBuf, dwarf.DW_CFA_offset_extended_sf)
					deltaBuf = dwarf.AppendUleb128(deltaBuf, uint64(thearch.Dwarfreglr))
					deltaBuf = dwarf.AppendSleb128(deltaBuf, -spdelta/dataAlignmentFactor)
				} else {
					// The return address is restored into the link register
					// when a stack frame has been de-allocated.
					deltaBuf = append(deltaBuf, dwarf.DW_CFA_same_value)
					deltaBuf = dwarf.AppendUleb128(deltaBuf, uint64(thearch.Dwarfreglr))
				}
			}

			deltaBuf = appendPCDeltaCFA(d.arch, deltaBuf, int64(nextpc)-int64(pcsp.PC), spdelta)
		}
		pad := int(Rnd(int64(len(deltaBuf)), int64(d.arch.PtrSize))) - len(deltaBuf)
		deltaBuf = append(deltaBuf, zeros[:pad]...)

		// Emit the FDE header, Section 6.4.1.
		// 4 bytes: length, must be multiple of thearch.ptrsize
		// 4/8 bytes: Pointer to the CIE above, at offset 0
		// ptrsize: initial location
		// ptrsize: address range

		fdeLength := uint64(4 + 2*d.arch.PtrSize + len(deltaBuf))
		if isdw64 {
			fdeLength += 4 // 4 bytes added for CIE pointer
		}
		d.createUnitLength(fsu, fdeLength)

		if d.linkctxt.LinkMode == LinkExternal {
			d.addDwarfAddrRef(fsu, fs)
		} else {
			d.addDwarfAddrField(fsu, 0) // CIE offset
		}
		addAddrPlus(fsu, d.arch, s, 0)
		fsu.AddUintXX(d.arch, uint64(len(d.ldr.Data(fn))), d.arch.PtrSize) // address range
		fsu.AddBytes(deltaBuf)

		if d.linkctxt.HeadType == objabi.Haix {
			addDwsectCUSize(".debug_frame", d.ldr.SymPkg(fn), fdeLength+uint64(lengthFieldSize))
		}
	}

	return dwarfSecInfo{syms: []loader.Sym{fs}}
}

/*
 * Walk DWarfDebugInfoEntries, and emit .debug_info
 */

const (
	COMPUNITHEADERSIZE = 4 + 2 + 4 + 1
)

// writeUnitInfo emits the .debug_info contribution for compilation
// unit u: the CU header, the CU DIE with its attributes and children,
// the unit's function/constant/variable DIE symbols, and a terminating
// zero byte (infoEpilog); finally it patches the unit length field.
func (d *dwctxt) writeUnitInfo(u *sym.CompilationUnit, abbrevsym loader.Sym, infoEpilog loader.Sym) []loader.Sym {
	syms := []loader.Sym{}
	if len(u.Textp) == 0 && u.DWInfo.Child == nil && len(u.VarDIEs) == 0 {
		return syms
	}

	compunit := u.DWInfo
	s := d.dtolsym(compunit.Sym)
	su := d.ldr.MakeSymbolUpdater(s)

	// Write .debug_info Compilation Unit Header (sec 7.5.1)
	// Fields marked with (*) must be changed for 64-bit dwarf
	// This must match COMPUNITHEADERSIZE above.
	d.createUnitLength(su, 0) // unit_length (*), will be filled in later.
	su.AddUint16(d.arch, 4)   // dwarf version (appendix F)

	// debug_abbrev_offset (*)
	d.addDwarfAddrRef(su, abbrevsym)

	su.AddUint8(uint8(d.arch.PtrSize)) // address_size

	ds := dwSym(s)
	dwarf.Uleb128put(d, ds, int64(compunit.Abbrev))
	dwarf.PutAttrs(d, ds, compunit.Abbrev, compunit.Attr)

	// This is an under-estimate; more will be needed for type DIEs.
	cu := make([]loader.Sym, 0, len(u.AbsFnDIEs)+len(u.FuncDIEs))
	cu = append(cu, s)
	cu = append(cu, u.AbsFnDIEs...)
	cu = append(cu, u.FuncDIEs...)
	if u.Consts != 0 {
		cu = append(cu, loader.Sym(u.Consts))
	}
	cu = append(cu, u.VarDIEs...)
	var cusize int64
	for _, child := range cu {
		cusize += int64(len(d.ldr.Data(child)))
	}

	for die := compunit.Child; die != nil; die = die.Link {
		l := len(cu)
		lastSymSz := int64(len(d.ldr.Data(cu[l-1])))
		cu = d.putdie(cu, die)
		if lastSymSz != int64(len(d.ldr.Data(cu[l-1]))) {
			// putdie will sometimes append directly to the last symbol of the list
			cusize = cusize - lastSymSz + int64(len(d.ldr.Data(cu[l-1])))
		}
		for _, child := range cu[l:] {
			cusize += int64(len(d.ldr.Data(child)))
		}
	}

	culu := d.ldr.MakeSymbolUpdater(infoEpilog)
	culu.AddUint8(0) // closes compilation unit DIE
	cu = append(cu, infoEpilog)
	cusize++

	// Save size for AIX symbol table.
	if d.linkctxt.HeadType == objabi.Haix {
		addDwsectCUSize(".debug_info", d.getPkgFromCUSym(s), uint64(cusize))
	}
	if isDwarf64(d.linkctxt) {
		cusize -= 12                          // exclude the length field.
		su.SetUint(d.arch, 4, uint64(cusize)) // 4 because of 0XFFFFFFFF
	} else {
		cusize -= 4 // exclude the length field.
		su.SetUint32(d.arch, 0, uint32(cusize))
	}
	return append(syms, cu...)
}

// writegdbscript emits the .debug_gdb_scripts section referencing the
// runtime's gdb pretty-printer script (gdbscript, recorded while
// scanning file tables), when the target/link mode supports it.
func (d *dwctxt) writegdbscript() dwarfSecInfo {
	// TODO (aix): make it available
	if d.linkctxt.HeadType == objabi.Haix {
		return dwarfSecInfo{}
	}
	if d.linkctxt.LinkMode == LinkExternal && d.linkctxt.HeadType == objabi.Hwindows && d.linkctxt.BuildMode == BuildModeCArchive {
		// gcc on Windows places .debug_gdb_scripts in the wrong location, which
		// causes the program not to run. See https://golang.org/issue/20183
		// Non c-archives can avoid this issue via a linker script
		// (see fix near writeGDBLinkerScript).
		// c-archive users would need to specify the linker script manually.
		// For UX it's better not to deal with this.
		return dwarfSecInfo{}
	}
	if gdbscript == "" {
		return dwarfSecInfo{}
	}

	gs := d.ldr.CreateSymForUpdate(".debug_gdb_scripts", 0)
	gs.SetType(sym.SDWARFSECT)

	gs.AddUint8(GdbScriptPythonFileId)
	gs.Addstring(gdbscript)
	return dwarfSecInfo{syms: []loader.Sym{gs.Sym()}}
}

// FIXME: might be worth looking replacing this map with a function
// that switches based on symbol instead.

var prototypedies map[string]*dwarf.DWDie

// dwarfEnabled reports whether this link should emit DWARF, based on
// the -w flag, the target OS, and (for external linking on AIX) the
// capabilities of the external linker.
func dwarfEnabled(ctxt *Link) bool {
	if *FlagW { // disable dwarf
		return false
	}
	if ctxt.HeadType == objabi.Hplan9 || ctxt.HeadType == objabi.Hjs || ctxt.HeadType == objabi.Hwasip1 {
		return false
	}

	if ctxt.LinkMode == LinkExternal {
		switch {
		case ctxt.IsELF:
		case ctxt.HeadType == objabi.Hdarwin:
		case ctxt.HeadType == objabi.Hwindows:
		case ctxt.HeadType == objabi.Haix:
			res, err := dwarf.IsDWARFEnabledOnAIXLd(ctxt.extld())
			if err != nil {
				Exitf("%v", err)
			}
			return res
		default:
			return false
		}
	}

	return true
}

// mkBuiltinType populates the dwctxt2 sym lookup maps for the
// newly created builtin type DIE 'typeDie'.
func (d *dwctxt) mkBuiltinType(ctxt *Link, abrv int, tname string) *dwarf.DWDie {
	// create type DIE
	die := d.newdie(&dwtypes, abrv, tname)

	// Look up type symbol.
	gotype := d.lookupOrDiag("type:" + tname)

	// Map from die sym to type sym
	ds := loader.Sym(die.Sym.(dwSym))
	d.rtmap[ds] = gotype

	// Map from type to def sym
	d.tdmap[gotype] = ds

	return die
}

// dwarfVisitFunction takes a function (text) symbol and processes the
// subprogram DIE for the function and picks up any other DIEs
// (absfns, types) that it references.
func (d *dwctxt) dwarfVisitFunction(fnSym loader.Sym, unit *sym.CompilationUnit) {
	// The DWARF subprogram DIE symbol is listed as an aux sym
	// of the text (fcn) symbol, so ask the loader to retrieve it,
	// as well as the associated range symbol.
	infosym, _, rangesym, _ := d.ldr.GetFuncDwarfAuxSyms(fnSym)
	if infosym == 0 {
		return
	}
	d.ldr.SetAttrNotInSymbolTable(infosym, true)
	d.ldr.SetAttrReachable(infosym, true)
	unit.FuncDIEs = append(unit.FuncDIEs, sym.LoaderSym(infosym))
	if rangesym != 0 {
		d.ldr.SetAttrNotInSymbolTable(rangesym, true)
		d.ldr.SetAttrReachable(rangesym, true)
		unit.RangeSyms = append(unit.RangeSyms, sym.LoaderSym(rangesym))
	}

	// Walk the relocations of the subprogram DIE symbol to discover
	// references to abstract function DIEs, Go type DIES, and
	// (via R_USETYPE relocs) types that were originally assigned to
	// locals/params but were optimized away.
	drelocs := d.ldr.Relocs(infosym)
	for ri := 0; ri < drelocs.Count(); ri++ {
		r := drelocs.At(ri)
		// Look for "use type" relocs.
		if r.Type() == objabi.R_USETYPE {
			d.defgotype(r.Sym())
			continue
		}
		if r.Type() != objabi.R_DWARFSECREF {
			continue
		}

		rsym := r.Sym()
		rst := d.ldr.SymType(rsym)

		// Look for abstract function references.
		if rst == sym.SDWARFABSFCN {
			if !d.ldr.AttrOnList(rsym) {
				// abstract function
				d.ldr.SetAttrOnList(rsym, true)
				unit.AbsFnDIEs = append(unit.AbsFnDIEs, sym.LoaderSym(rsym))
				d.importInfoSymbol(rsym)
			}
			continue
		}

		// Look for type references.
		if rst != sym.SDWARFTYPE && rst != sym.Sxxx {
			continue
		}
		if _, ok := d.rtmap[rsym]; ok {
			// type already generated
			continue
		}

		rsn := d.ldr.SymName(rsym)
		tn := rsn[len(dwarf.InfoPrefix):]
		ts := d.ldr.Lookup("type:"+tn, 0)
		d.defgotype(ts)
	}
}

// dwarfGenerateDebugInfo generates debug info entries for all types,
// variables and functions in the program.
// Along with dwarfGenerateDebugSyms they are the two main entry points into
// dwarf generation: dwarfGenerateDebugInfo does all the work that should be
// done before symbol names are mangled while dwarfGenerateDebugSyms does
// all the work that can only be done after addresses have been assigned to
// text symbols.
func dwarfGenerateDebugInfo(ctxt *Link) {
	if !dwarfEnabled(ctxt) {
		return
	}

	d := &dwctxt{
		linkctxt: ctxt,
		ldr:      ctxt.loader,
		arch:     ctxt.Arch,
		tmap:     make(map[string]loader.Sym),
		tdmap:    make(map[loader.Sym]loader.Sym),
		rtmap:    make(map[loader.Sym]loader.Sym),
	}
	d.typeRuntimeEface = d.lookupOrDiag("type:runtime.eface")
	d.typeRuntimeIface = d.lookupOrDiag("type:runtime.iface")

	if ctxt.HeadType == objabi.Haix {
		// Initial map used to store package size for each DWARF section.
		dwsectCUSize = make(map[string]uint64)
	}

	// For ctxt.Diagnostic messages.
	newattr(&dwtypes, dwarf.DW_AT_name, dwarf.DW_CLS_STRING, int64(len("dwtypes")), "dwtypes")

	// Unspecified type. There are no references to this in the symbol table.
	d.newdie(&dwtypes, dwarf.DW_ABRV_NULLTYPE, "")

	// Some types that must exist to define other ones (uintptr in particular
	// is needed for array size)
	d.mkBuiltinType(ctxt, dwarf.DW_ABRV_BARE_PTRTYPE, "unsafe.Pointer")
	die := d.mkBuiltinType(ctxt, dwarf.DW_ABRV_BASETYPE, "uintptr")
	newattr(die, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_unsigned, 0)
	newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, int64(d.arch.PtrSize), 0)
	newattr(die, dwarf.DW_AT_go_kind, dwarf.DW_CLS_CONSTANT, objabi.KindUintptr, 0)
	newattr(die, dwarf.DW_AT_go_runtime_type, dwarf.DW_CLS_ADDRESS, 0, dwSym(d.lookupOrDiag("type:uintptr")))

	d.uintptrInfoSym = d.mustFind("uintptr")

	// Prototypes needed for type synthesis.
	prototypedies = map[string]*dwarf.DWDie{
		"type:runtime.stringStructDWARF": nil,
		"type:runtime.slice":             nil,
		"type:runtime.hmap":              nil,
		"type:runtime.bmap":              nil,
		"type:runtime.sudog":             nil,
		"type:runtime.waitq":             nil,
		"type:runtime.hchan":             nil,
	}

	// Needed by the prettyprinter code for interface inspection.
	for _, typ := range []string{
		"type:internal/abi.Type",
		"type:internal/abi.ArrayType",
		"type:internal/abi.ChanType",
		"type:internal/abi.FuncType",
		"type:internal/abi.MapType",
		"type:internal/abi.PtrType",
		"type:internal/abi.SliceType",
		"type:internal/abi.StructType",
		"type:internal/abi.InterfaceType",
		"type:runtime.itab",
		"type:internal/abi.Imethod"} {
		d.defgotype(d.lookupOrDiag(typ))
	}

	// fake root DIE for compile unit DIEs
	var dwroot dwarf.DWDie
	flagVariants := make(map[string]bool)

	for _, lib := range ctxt.Library {

		consts := d.ldr.Lookup(dwarf.ConstInfoPrefix+lib.Pkg, 0)
		for _, unit := range lib.Units {
			// We drop the constants into the first CU.
			if consts != 0 {
				unit.Consts = sym.LoaderSym(consts)
				d.importInfoSymbol(consts)
				consts = 0
			}
			ctxt.compUnits = append(ctxt.compUnits, unit)

			// We need at least one runtime unit.
			if unit.Lib.Pkg == "runtime" {
				ctxt.runtimeCU = unit
			}

			cuabrv := dwarf.DW_ABRV_COMPUNIT
			if len(unit.Textp) == 0 {
				cuabrv = dwarf.DW_ABRV_COMPUNIT_TEXTLESS
			}
			unit.DWInfo = d.newdie(&dwroot, cuabrv, unit.Lib.Pkg)
			newattr(unit.DWInfo, dwarf.DW_AT_language, dwarf.DW_CLS_CONSTANT, int64(dwarf.DW_LANG_Go), 0)
			// OS X linker requires compilation dir or absolute path in comp unit name to output debug info.
			compDir := getCompilationDir()
			// TODO: Make this be the actual compilation directory, not
			// the linker directory. If we move CU construction into the
			// compiler, this should happen naturally.
			newattr(unit.DWInfo, dwarf.DW_AT_comp_dir, dwarf.DW_CLS_STRING, int64(len(compDir)), compDir)

			var peData []byte
			if producerExtra := d.ldr.Lookup(dwarf.CUInfoPrefix+"producer."+unit.Lib.Pkg, 0); producerExtra != 0 {
				peData = d.ldr.Data(producerExtra)
			}
			producer := "Go cmd/compile " + buildcfg.Version
			if len(peData) > 0 {
				// We put a semicolon before the flags to clearly
				// separate them from the version, which can be long
				// and have lots of weird things in it in development
				// versions. We promise not to put a semicolon in the
				// version, so it should be safe for readers to scan
				// forward to the semicolon.
				producer += "; " + string(peData)
				flagVariants[string(peData)] = true
			} else {
				flagVariants[""] = true
			}

			newattr(unit.DWInfo, dwarf.DW_AT_producer, dwarf.DW_CLS_STRING, int64(len(producer)), producer)

			var pkgname string
			if pnSymIdx := d.ldr.Lookup(dwarf.CUInfoPrefix+"packagename."+unit.Lib.Pkg, 0); pnSymIdx != 0 {
				pnsData := d.ldr.Data(pnSymIdx)
				pkgname = string(pnsData)
			}
			newattr(unit.DWInfo, dwarf.DW_AT_go_package_name, dwarf.DW_CLS_STRING, int64(len(pkgname)), pkgname)

			// Scan all functions in this compilation unit, create
			// DIEs for all referenced types, find all referenced
			// abstract functions, visit range symbols. Note that
			// Textp has been dead-code-eliminated already.
			for _, s := range unit.Textp {
				d.dwarfVisitFunction(loader.Sym(s), unit)
			}
		}
	}

	// Fix for 31034: if the objects feeding into this link were compiled
	// with different sets of flags, then don't issue an error if
	// the -strictdups checks fail.
	if checkStrictDups > 1 && len(flagVariants) > 1 {
		checkStrictDups = 1
	}

	// Make a pass through all data symbols, looking for those
	// corresponding to reachable, Go-generated, user-visible
	// global variables. For each global of this sort, locate
	// the corresponding compiler-generated DIE symbol and tack
	// it onto the list associated with the unit.
	// Also looks for dictionary symbols and generates DIE symbols for each
	// type they reference.
	for idx := loader.Sym(1); idx < loader.Sym(d.ldr.NDef()); idx++ {
		if !d.ldr.AttrReachable(idx) ||
			d.ldr.AttrNotInSymbolTable(idx) ||
			d.ldr.SymVersion(idx) >= sym.SymVerStatic {
			continue
		}
		t := d.ldr.SymType(idx)
		switch t {
		case sym.SRODATA, sym.SDATA, sym.SNOPTRDATA, sym.STYPE, sym.SBSS, sym.SNOPTRBSS, sym.STLSBSS:
			// ok
		default:
			continue
		}
		// Skip things with no type, unless it's a dictionary
		gt := d.ldr.SymGoType(idx)
		if gt == 0 {
			if t == sym.SRODATA {
				if d.ldr.IsDict(idx) {
					// This is a dictionary, make sure that all types referenced by this dictionary are reachable
					relocs := d.ldr.Relocs(idx)
					for i := 0; i < relocs.Count(); i++ {
						reloc := relocs.At(i)
						if reloc.Type() == objabi.R_USEIFACE {
							d.defgotype(reloc.Sym())
						}
					}
				}
			}
			continue
		}
		// Skip file local symbols (this includes static tmps, stack
		// object symbols, and local symbols in assembler src files).
		if d.ldr.IsFileLocal(idx) {
			continue
		}

		// Find compiler-generated DWARF info sym for global in question,
		// and tack it onto the appropriate unit. Note that there are
		// circumstances under which we can't find the compiler-generated
		// symbol-- this typically happens as a result of compiler options
		// (e.g. compile package X with "-dwarf=0").
		varDIE := d.ldr.GetVarDwarfAuxSym(idx)
		if varDIE != 0 {
			unit := d.ldr.SymUnit(idx)
			d.defgotype(gt)
			unit.VarDIEs = append(unit.VarDIEs, sym.LoaderSym(varDIE))
		}
	}

	d.synthesizestringtypes(ctxt, dwtypes.Child)
	d.synthesizeslicetypes(ctxt, dwtypes.Child)
	d.synthesizemaptypes(ctxt, dwtypes.Child)
	d.synthesizechantypes(ctxt, dwtypes.Child)
}

// dwarfGenerateDebugSyms constructs debug_line, debug_frame, and
// debug_loc.
It also writes out the debug_info section using symbols +// generated in dwarfGenerateDebugInfo2. +func dwarfGenerateDebugSyms(ctxt *Link) { + if !dwarfEnabled(ctxt) { + return + } + d := &dwctxt{ + linkctxt: ctxt, + ldr: ctxt.loader, + arch: ctxt.Arch, + dwmu: new(sync.Mutex), + } + d.dwarfGenerateDebugSyms() +} + +// dwUnitSyms stores input and output symbols for DWARF generation +// for a given compilation unit. +type dwUnitSyms struct { + // Inputs for a given unit. + lineProlog loader.Sym + rangeProlog loader.Sym + infoEpilog loader.Sym + + // Outputs for a given unit. + linesyms []loader.Sym + infosyms []loader.Sym + locsyms []loader.Sym + rangessyms []loader.Sym +} + +// dwUnitPortion assembles the DWARF content for a given compilation +// unit: debug_info, debug_lines, debug_ranges, debug_loc (debug_frame +// is handled elsewhere). Order is important; the calls to writelines +// and writepcranges below make updates to the compilation unit DIE, +// hence they have to happen before the call to writeUnitInfo. +func (d *dwctxt) dwUnitPortion(u *sym.CompilationUnit, abbrevsym loader.Sym, us *dwUnitSyms) { + if u.DWInfo.Abbrev != dwarf.DW_ABRV_COMPUNIT_TEXTLESS { + us.linesyms = d.writelines(u, us.lineProlog) + base := loader.Sym(u.Textp[0]) + us.rangessyms = d.writepcranges(u, base, u.PCs, us.rangeProlog) + us.locsyms = d.collectUnitLocs(u) + } + us.infosyms = d.writeUnitInfo(u, abbrevsym, us.infoEpilog) +} + +func (d *dwctxt) dwarfGenerateDebugSyms() { + abbrevSec := d.writeabbrev() + dwarfp = append(dwarfp, abbrevSec) + d.calcCompUnitRanges() + sort.Sort(compilationUnitByStartPC(d.linkctxt.compUnits)) + + // newdie adds DIEs to the *beginning* of the parent's DIE list. + // Now that we're done creating DIEs, reverse the trees so DIEs + // appear in the order they were created. 
+ for _, u := range d.linkctxt.compUnits { + reversetree(&u.DWInfo.Child) + } + reversetree(&dwtypes.Child) + movetomodule(d.linkctxt, &dwtypes) + + mkSecSym := func(name string) loader.Sym { + s := d.ldr.CreateSymForUpdate(name, 0) + s.SetType(sym.SDWARFSECT) + s.SetReachable(true) + return s.Sym() + } + mkAnonSym := func(kind sym.SymKind) loader.Sym { + s := d.ldr.MakeSymbolUpdater(d.ldr.CreateExtSym("", 0)) + s.SetType(kind) + s.SetReachable(true) + return s.Sym() + } + + // Create the section symbols. + frameSym := mkSecSym(".debug_frame") + locSym := mkSecSym(".debug_loc") + lineSym := mkSecSym(".debug_line") + rangesSym := mkSecSym(".debug_ranges") + infoSym := mkSecSym(".debug_info") + + // Create the section objects + lineSec := dwarfSecInfo{syms: []loader.Sym{lineSym}} + locSec := dwarfSecInfo{syms: []loader.Sym{locSym}} + rangesSec := dwarfSecInfo{syms: []loader.Sym{rangesSym}} + frameSec := dwarfSecInfo{syms: []loader.Sym{frameSym}} + infoSec := dwarfSecInfo{syms: []loader.Sym{infoSym}} + + // Create any new symbols that will be needed during the + // parallel portion below. + ncu := len(d.linkctxt.compUnits) + unitSyms := make([]dwUnitSyms, ncu) + for i := 0; i < ncu; i++ { + us := &unitSyms[i] + us.lineProlog = mkAnonSym(sym.SDWARFLINES) + us.rangeProlog = mkAnonSym(sym.SDWARFRANGE) + us.infoEpilog = mkAnonSym(sym.SDWARFFCN) + } + + var wg sync.WaitGroup + sema := make(chan struct{}, runtime.GOMAXPROCS(0)) + + // Kick off generation of .debug_frame, since it doesn't have + // any entanglements and can be started right away. + wg.Add(1) + go func() { + sema <- struct{}{} + defer func() { + <-sema + wg.Done() + }() + frameSec = d.writeframes(frameSym) + }() + + // Create a goroutine per comp unit to handle the generation that + // unit's portion of .debug_line, .debug_loc, .debug_ranges, and + // .debug_info. 
+ wg.Add(len(d.linkctxt.compUnits)) + for i := 0; i < ncu; i++ { + go func(u *sym.CompilationUnit, us *dwUnitSyms) { + sema <- struct{}{} + defer func() { + <-sema + wg.Done() + }() + d.dwUnitPortion(u, abbrevSec.secSym(), us) + }(d.linkctxt.compUnits[i], &unitSyms[i]) + } + wg.Wait() + + markReachable := func(syms []loader.Sym) []loader.Sym { + for _, s := range syms { + d.ldr.SetAttrNotInSymbolTable(s, true) + d.ldr.SetAttrReachable(s, true) + } + return syms + } + + // Stitch together the results. + for i := 0; i < ncu; i++ { + r := &unitSyms[i] + lineSec.syms = append(lineSec.syms, markReachable(r.linesyms)...) + infoSec.syms = append(infoSec.syms, markReachable(r.infosyms)...) + locSec.syms = append(locSec.syms, markReachable(r.locsyms)...) + rangesSec.syms = append(rangesSec.syms, markReachable(r.rangessyms)...) + } + dwarfp = append(dwarfp, lineSec) + dwarfp = append(dwarfp, frameSec) + gdbScriptSec := d.writegdbscript() + if gdbScriptSec.secSym() != 0 { + dwarfp = append(dwarfp, gdbScriptSec) + } + dwarfp = append(dwarfp, infoSec) + if len(locSec.syms) > 1 { + dwarfp = append(dwarfp, locSec) + } + dwarfp = append(dwarfp, rangesSec) + + // Check to make sure we haven't listed any symbols more than once + // in the info section. This used to be done by setting and + // checking the OnList attribute in "putdie", but that strategy + // was not friendly for concurrency. 
+ seen := loader.MakeBitmap(d.ldr.NSym()) + for _, s := range infoSec.syms { + if seen.Has(s) { + log.Fatalf("symbol %s listed multiple times", d.ldr.SymName(s)) + } + seen.Set(s) + } +} + +func (d *dwctxt) collectUnitLocs(u *sym.CompilationUnit) []loader.Sym { + syms := []loader.Sym{} + for _, fn := range u.FuncDIEs { + relocs := d.ldr.Relocs(loader.Sym(fn)) + for i := 0; i < relocs.Count(); i++ { + reloc := relocs.At(i) + if reloc.Type() != objabi.R_DWARFSECREF { + continue + } + rsym := reloc.Sym() + if d.ldr.SymType(rsym) == sym.SDWARFLOC { + syms = append(syms, rsym) + // One location list entry per function, but many relocations to it. Don't duplicate. + break + } + } + } + return syms +} + +// Add DWARF section names to the section header string table, by calling add +// on each name. ELF only. +func dwarfaddshstrings(ctxt *Link, add func(string)) { + if *FlagW { // disable dwarf + return + } + + secs := []string{"abbrev", "frame", "info", "loc", "line", "gdb_scripts", "ranges"} + for _, sec := range secs { + add(".debug_" + sec) + if ctxt.IsExternal() { + add(elfRelType + ".debug_" + sec) + } + } +} + +func dwarfaddelfsectionsyms(ctxt *Link) { + if *FlagW { // disable dwarf + return + } + if ctxt.LinkMode != LinkExternal { + return + } + + ldr := ctxt.loader + for _, si := range dwarfp { + s := si.secSym() + sect := ldr.SymSect(si.secSym()) + putelfsectionsym(ctxt, ctxt.Out, s, sect.Elfsect.(*ElfShdr).shnum) + } +} + +// dwarfcompress compresses the DWARF sections. Relocations are applied +// on the fly. After this, dwarfp will contain a different (new) set of +// symbols, and sections may have been replaced. +func dwarfcompress(ctxt *Link) { + // compressedSect is a helper type for parallelizing compression. 
+	type compressedSect struct {
+		index      int          // position in dwarfp, so results can be reordered
+		compressed []byte       // compressed payload, or nil if compression didn't help
+		syms       []loader.Sym // the original (uncompressed) section symbols
+	}
+
+	supported := ctxt.IsELF || ctxt.IsWindows() || ctxt.IsDarwin()
+	if !ctxt.compressDWARF || !supported || ctxt.IsExternal() {
+		return
+	}
+
+	// Compress the sections in parallel, one goroutine per section;
+	// results come back on resChannel in arbitrary order.
+	var compressedCount int
+	resChannel := make(chan compressedSect)
+	for i := range dwarfp {
+		go func(resIndex int, syms []loader.Sym) {
+			resChannel <- compressedSect{resIndex, compressSyms(ctxt, syms), syms}
+		}(compressedCount, dwarfp[i].syms)
+		compressedCount++
+	}
+	res := make([]compressedSect, compressedCount)
+	for ; compressedCount > 0; compressedCount-- {
+		r := <-resChannel
+		res[r.index] = r
+	}
+
+	// Rebuild dwarfp and the Segdwarf section list from the results.
+	ldr := ctxt.loader
+	var newDwarfp []dwarfSecInfo
+	Segdwarf.Sections = Segdwarf.Sections[:0]
+	for _, z := range res {
+		s := z.syms[0]
+		if z.compressed == nil {
+			// Compression didn't help.
+			ds := dwarfSecInfo{syms: z.syms}
+			newDwarfp = append(newDwarfp, ds)
+			Segdwarf.Sections = append(Segdwarf.Sections, ldr.SymSect(s))
+		} else {
+			// ELF compressed sections keep their name (compression is
+			// flagged in the section header); elsewhere use .zdebug_*.
+			var compressedSegName string
+			if ctxt.IsELF {
+				compressedSegName = ldr.SymSect(s).Name
+			} else {
+				compressedSegName = ".zdebug_" + ldr.SymSect(s).Name[len(".debug_"):]
+			}
+			sect := addsection(ctxt.loader, ctxt.Arch, &Segdwarf, compressedSegName, 04)
+			sect.Align = int32(ctxt.Arch.Alignment)
+			sect.Length = uint64(len(z.compressed))
+			sect.Compressed = true
+			newSym := ldr.MakeSymbolBuilder(compressedSegName)
+			// NOTE(review): this marks the old symbol s reachable, yet s is
+			// unmarked and freed in the loop just below; presumably
+			// newSym.Sym() was the intended target — confirm against the
+			// loader's reachability requirements for new builder symbols.
+			ldr.SetAttrReachable(s, true)
+			newSym.SetData(z.compressed)
+			newSym.SetSize(int64(len(z.compressed)))
+			ldr.SetSymSect(newSym.Sym(), sect)
+			ds := dwarfSecInfo{syms: []loader.Sym{newSym.Sym()}}
+			newDwarfp = append(newDwarfp, ds)
+
+			// compressed symbols are no longer needed.
+			for _, s := range z.syms {
+				ldr.SetAttrReachable(s, false)
+				ldr.FreeSym(s)
+			}
+		}
+	}
+	dwarfp = newDwarfp
+
+	// Re-compute the locations of the compressed DWARF symbols
+	// and sections, since the layout of these within the file is
+	// based on Section.Vaddr and Symbol.Value.
+	pos := Segdwarf.Vaddr
+	var prevSect *sym.Section
+	for _, si := range dwarfp {
+		for _, s := range si.syms {
+			ldr.SetSymValue(s, int64(pos))
+			sect := ldr.SymSect(s)
+			if sect != prevSect {
+				sect.Vaddr = uint64(pos)
+				prevSect = sect
+			}
+			if ldr.SubSym(s) != 0 {
+				log.Fatalf("%s: unexpected sub-symbols", ldr.SymName(s))
+			}
+			pos += uint64(ldr.SymSize(s))
+			if ctxt.IsWindows() {
+				// PE requires file-alignment padding between symbols.
+				pos = uint64(Rnd(int64(pos), PEFILEALIGN))
+			}
+		}
+	}
+	Segdwarf.Length = pos - Segdwarf.Vaddr
+}
+
+// compilationUnitByStartPC sorts compilation units by starting PC;
+// textless units sort after text-bearing ones, ordered by package path.
+type compilationUnitByStartPC []*sym.CompilationUnit
+
+func (v compilationUnitByStartPC) Len() int      { return len(v) }
+func (v compilationUnitByStartPC) Swap(i, j int) { v[i], v[j] = v[j], v[i] }
+
+func (v compilationUnitByStartPC) Less(i, j int) bool {
+	switch {
+	case len(v[i].Textp) == 0 && len(v[j].Textp) == 0:
+		return v[i].Lib.Pkg < v[j].Lib.Pkg
+	case len(v[i].Textp) != 0 && len(v[j].Textp) == 0:
+		return true
+	case len(v[i].Textp) == 0 && len(v[j].Textp) != 0:
+		return false
+	default:
+		return v[i].PCs[0].Start < v[j].PCs[0].Start
+	}
+}
+
+// getPkgFromCUSym returns the package name for the compilation unit
+// represented by s.
+// The prefix dwarf.InfoPrefix+".pkg." needs to be removed in order to get
+// the package name.
+func (d *dwctxt) getPkgFromCUSym(s loader.Sym) string {
+	return strings.TrimPrefix(d.ldr.SymName(s), dwarf.InfoPrefix+".pkg.")
+}
+
+// On AIX, the symbol table needs to know where are the compilation units parts
+// for a specific package in each .dw section.
+// dwsectCUSize map will save the size of a compilation unit for
+// the corresponding .dw section.
+// This size can later be retrieved with the index "sectionName.pkgName".
+var dwsectCUSizeMu sync.Mutex
+var dwsectCUSize map[string]uint64
+
+// getDwsectCUSize retrieves the corresponding package size inside the current section.
+func getDwsectCUSize(sname string, pkgname string) uint64 { + return dwsectCUSize[sname+"."+pkgname] +} + +func addDwsectCUSize(sname string, pkgname string, size uint64) { + dwsectCUSizeMu.Lock() + defer dwsectCUSizeMu.Unlock() + dwsectCUSize[sname+"."+pkgname] += size +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/dwarf_test.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/dwarf_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e431427249db071a850a136afc3307458448b981 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/dwarf_test.go @@ -0,0 +1,2073 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "debug/dwarf" + "debug/pe" + "fmt" + "internal/platform" + "internal/testenv" + "io" + "os" + "path/filepath" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "testing" + + intdwarf "cmd/internal/dwarf" + objfilepkg "cmd/internal/objfile" // renamed to avoid conflict with objfile function + "cmd/link/internal/dwtest" +) + +func mustHaveDWARF(t testing.TB) { + if !platform.ExecutableHasDWARF(runtime.GOOS, runtime.GOARCH) { + t.Helper() + t.Skipf("skipping on %s/%s: no DWARF symbol table in executables", runtime.GOOS, runtime.GOARCH) + } +} + +const ( + DefaultOpt = "-gcflags=" + NoOpt = "-gcflags=-l -N" + OptInl4 = "-gcflags=-l=4" + OptAllInl4 = "-gcflags=all=-l=4" +) + +func TestRuntimeTypesPresent(t *testing.T) { + t.Parallel() + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + + dir := t.TempDir() + + f := gobuild(t, dir, `package main; func main() { }`, NoOpt) + defer f.Close() + + dwarf, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + want := map[string]bool{ + "internal/abi.Type": true, + "internal/abi.ArrayType": true, + "internal/abi.ChanType": true, + "internal/abi.FuncType": true, + 
"internal/abi.MapType": true, + "internal/abi.PtrType": true, + "internal/abi.SliceType": true, + "internal/abi.StructType": true, + "internal/abi.InterfaceType": true, + "runtime.itab": true, + } + + found := findTypes(t, dwarf, want) + if len(found) != len(want) { + t.Errorf("found %v, want %v", found, want) + } +} + +func findTypes(t *testing.T, dw *dwarf.Data, want map[string]bool) (found map[string]bool) { + found = make(map[string]bool) + rdr := dw.Reader() + for entry, err := rdr.Next(); entry != nil; entry, err = rdr.Next() { + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + switch entry.Tag { + case dwarf.TagTypedef: + if name, ok := entry.Val(dwarf.AttrName).(string); ok && want[name] { + found[name] = true + } + } + } + return +} + +type builtFile struct { + *objfilepkg.File + path string +} + +func gobuild(t *testing.T, dir string, testfile string, gcflags string) *builtFile { + src := filepath.Join(dir, "test.go") + dst := filepath.Join(dir, "out.exe") + + if err := os.WriteFile(src, []byte(testfile), 0666); err != nil { + t.Fatal(err) + } + + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", gcflags, "-o", dst, src) + b, err := cmd.CombinedOutput() + if len(b) != 0 { + t.Logf("## build output:\n%s", b) + } + if err != nil { + t.Fatalf("build error: %v", err) + } + + f, err := objfilepkg.Open(dst) + if err != nil { + t.Fatal(err) + } + return &builtFile{f, dst} +} + +// Similar to gobuild() above, but uses a main package instead of a test.go file. 
+ +func gobuildTestdata(t *testing.T, tdir string, pkgDir string, gcflags string) *builtFile { + dst := filepath.Join(tdir, "out.exe") + + // Run a build with an updated GOPATH + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", gcflags, "-o", dst) + cmd.Dir = pkgDir + if b, err := cmd.CombinedOutput(); err != nil { + t.Logf("build: %s\n", b) + t.Fatalf("build error: %v", err) + } + + f, err := objfilepkg.Open(dst) + if err != nil { + t.Fatal(err) + } + return &builtFile{f, dst} +} + +// Helper to build a snippet of source for examination with dwtest.Examiner. +func gobuildAndExamine(t *testing.T, source string, gcflags string) (*dwarf.Data, *dwtest.Examiner) { + dir := t.TempDir() + + f := gobuild(t, dir, source, gcflags) + defer f.Close() + + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF in program %q: %v", source, err) + } + + rdr := d.Reader() + ex := &dwtest.Examiner{} + if err := ex.Populate(rdr); err != nil { + t.Fatalf("error populating DWARF examiner for program %q: %v", source, err) + } + + return d, ex +} + +func findSubprogramDIE(t *testing.T, ex *dwtest.Examiner, sym string) *dwarf.Entry { + dies := ex.Named(sym) + if len(dies) == 0 { + t.Fatalf("unable to locate DIE for %s", sym) + } + if len(dies) != 1 { + t.Fatalf("more than one %s DIE: %+v", sym, dies) + } + die := dies[0] + + // Vet the DIE. 
+ if die.Tag != dwarf.TagSubprogram { + t.Fatalf("unexpected tag %v on %s DIE", die.Tag, sym) + } + + return die +} + +func TestEmbeddedStructMarker(t *testing.T) { + t.Parallel() + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + + const prog = ` +package main + +import "fmt" + +type Foo struct { v int } +type Bar struct { + Foo + name string +} +type Baz struct { + *Foo + name string +} + +func main() { + bar := Bar{ Foo: Foo{v: 123}, name: "onetwothree"} + baz := Baz{ Foo: &bar.Foo, name: "123" } + fmt.Println(bar, baz) +}` + + want := map[string]map[string]bool{ + "main.Foo": {"v": false}, + "main.Bar": {"Foo": true, "name": false}, + "main.Baz": {"Foo": true, "name": false}, + } + + dir := t.TempDir() + + f := gobuild(t, dir, prog, NoOpt) + + defer f.Close() + + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + rdr := d.Reader() + for entry, err := rdr.Next(); entry != nil; entry, err = rdr.Next() { + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + switch entry.Tag { + case dwarf.TagStructType: + name, ok := entry.Val(dwarf.AttrName).(string) + if !ok { + continue + } + wantMembers := want[name] + if wantMembers == nil { + continue + } + gotMembers, err := findMembers(rdr) + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + if !reflect.DeepEqual(gotMembers, wantMembers) { + t.Errorf("type %v: got map[member]embedded = %+v, want %+v", name, wantMembers, gotMembers) + } + delete(want, name) + } + } + if len(want) != 0 { + t.Errorf("failed to check all expected types: missing types = %+v", want) + } +} + +func findMembers(rdr *dwarf.Reader) (map[string]bool, error) { + memberEmbedded := map[string]bool{} + // TODO(hyangah): define in debug/dwarf package + const goEmbeddedStruct = dwarf.Attr(intdwarf.DW_AT_go_embedded_field) + for entry, err := rdr.Next(); entry != nil; entry, err = rdr.Next() { + if err != nil { + return nil, err + } + switch entry.Tag { + case dwarf.TagMember: + name := 
entry.Val(dwarf.AttrName).(string) + embedded := entry.Val(goEmbeddedStruct).(bool) + memberEmbedded[name] = embedded + case 0: + return memberEmbedded, nil + } + } + return memberEmbedded, nil +} + +func TestSizes(t *testing.T) { + mustHaveDWARF(t) + + // External linking may bring in C symbols with unknown size. Skip. + testenv.MustInternalLink(t, false) + + t.Parallel() + + // DWARF sizes should never be -1. + // See issue #21097 + const prog = ` +package main +var x func() +var y [4]func() +func main() { + x = nil + y[0] = nil +} +` + dir := t.TempDir() + + f := gobuild(t, dir, prog, NoOpt) + defer f.Close() + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + rdr := d.Reader() + for entry, err := rdr.Next(); entry != nil; entry, err = rdr.Next() { + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + switch entry.Tag { + case dwarf.TagArrayType, dwarf.TagPointerType, dwarf.TagStructType, dwarf.TagBaseType, dwarf.TagSubroutineType, dwarf.TagTypedef: + default: + continue + } + typ, err := d.Type(entry.Offset) + if err != nil { + t.Fatalf("can't read type: %v", err) + } + if typ.Size() < 0 { + t.Errorf("subzero size %s %s %T", typ, entry.Tag, typ) + } + } +} + +func TestFieldOverlap(t *testing.T) { + mustHaveDWARF(t) + t.Parallel() + + // This test grew out of issue 21094, where specific sudog DWARF types + // had elem fields set to values instead of pointers. 
+ const prog = ` +package main + +var c chan string + +func main() { + c <- "foo" +} +` + dir := t.TempDir() + + f := gobuild(t, dir, prog, NoOpt) + defer f.Close() + + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + rdr := d.Reader() + for entry, err := rdr.Next(); entry != nil; entry, err = rdr.Next() { + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + if entry.Tag != dwarf.TagStructType { + continue + } + typ, err := d.Type(entry.Offset) + if err != nil { + t.Fatalf("can't read type: %v", err) + } + s := typ.(*dwarf.StructType) + for i := 0; i < len(s.Field); i++ { + end := s.Field[i].ByteOffset + s.Field[i].Type.Size() + var limit int64 + if i == len(s.Field)-1 { + limit = s.Size() + } else { + limit = s.Field[i+1].ByteOffset + } + if end > limit { + name := entry.Val(dwarf.AttrName).(string) + t.Fatalf("field %s.%s overlaps next field", name, s.Field[i].Name) + } + } + } +} + +func TestSubprogramDeclFileLine(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + mustHaveDWARF(t) + + const prog = `package main +%s +func main() {} +` + tests := []struct { + name string + prog string + file string + line int64 + }{ + { + name: "normal", + prog: fmt.Sprintf(prog, ""), + file: "test.go", + line: 3, + }, + { + name: "line-directive", + prog: fmt.Sprintf(prog, "//line /foobar.go:200"), + file: "foobar.go", + line: 200, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + d, ex := gobuildAndExamine(t, tc.prog, NoOpt) + + maindie := findSubprogramDIE(t, ex, "main.main") + + mainIdx := ex.IdxFromOffset(maindie.Offset) + + fileIdx, fileIdxOK := maindie.Val(dwarf.AttrDeclFile).(int64) + if !fileIdxOK { + t.Errorf("missing or invalid DW_AT_decl_file for main") + } + file, err := ex.FileRef(d, mainIdx, fileIdx) + if err != nil { + t.Fatalf("FileRef: %v", err) + } + base := filepath.Base(file) + if base != tc.file { + t.Errorf("DW_AT_decl_file for main is 
%v, want %v", base, tc.file) + } + + line, lineOK := maindie.Val(dwarf.AttrDeclLine).(int64) + if !lineOK { + t.Errorf("missing or invalid DW_AT_decl_line for main") + } + if line != tc.line { + t.Errorf("DW_AT_decl_line for main is %v, want %d", line, tc.line) + } + }) + } +} + +func TestVarDeclLine(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + mustHaveDWARF(t) + + const prog = `package main +%s +func main() { + + var i int + i = i +} +` + tests := []struct { + name string + prog string + line int64 + }{ + { + name: "normal", + prog: fmt.Sprintf(prog, ""), + line: 5, + }, + { + name: "line-directive", + prog: fmt.Sprintf(prog, "//line /foobar.go:200"), + line: 202, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + _, ex := gobuildAndExamine(t, tc.prog, NoOpt) + + maindie := findSubprogramDIE(t, ex, "main.main") + + mainIdx := ex.IdxFromOffset(maindie.Offset) + childDies := ex.Children(mainIdx) + var iEntry *dwarf.Entry + for _, child := range childDies { + if child.Tag == dwarf.TagVariable && child.Val(dwarf.AttrName).(string) == "i" { + iEntry = child + break + } + } + if iEntry == nil { + t.Fatalf("didn't find DW_TAG_variable for i in main.main") + } + + // Verify line/file attributes. + line, lineOK := iEntry.Val(dwarf.AttrDeclLine).(int64) + if !lineOK { + t.Errorf("missing or invalid DW_AT_decl_line for i") + } + if line != tc.line { + t.Errorf("DW_AT_decl_line for i is %v, want %d", line, tc.line) + } + }) + } +} + +// TestInlinedRoutineCallFileLine tests the call file and line records for an +// inlined subroutine. 
+func TestInlinedRoutineCallFileLine(t *testing.T) { + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + + t.Parallel() + + const prog = ` +package main + +var G int + +//go:noinline +func notinlined() int { + return 42 +} + +func inlined() int { + return notinlined() +} + +%s +func main() { + x := inlined() + G = x +} +` + tests := []struct { + name string + prog string + file string // basename + line int64 + }{ + { + name: "normal", + prog: fmt.Sprintf(prog, ""), + file: "test.go", + line: 17, + }, + { + name: "line-directive", + prog: fmt.Sprintf(prog, "//line /foobar.go:200"), + file: "foobar.go", + line: 201, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + // Note: this is a build with "-l=4", as opposed to "-l -N". The + // test is intended to verify DWARF that is only generated when + // the inliner is active. We're only going to look at the DWARF for + // main.main, however, hence we build with "-gcflags=-l=4" as opposed + // to "-gcflags=all=-l=4". + d, ex := gobuildAndExamine(t, tc.prog, OptInl4) + + maindie := findSubprogramDIE(t, ex, "main.main") + + // Walk main's children and pick out the inlined subroutines + mainIdx := ex.IdxFromOffset(maindie.Offset) + childDies := ex.Children(mainIdx) + found := false + for _, child := range childDies { + if child.Tag != dwarf.TagInlinedSubroutine { + continue + } + + // Found an inlined subroutine. + if found { + t.Fatalf("Found multiple inlined subroutines, expect only one") + } + found = true + + // Locate abstract origin. + ooff, originOK := child.Val(dwarf.AttrAbstractOrigin).(dwarf.Offset) + if !originOK { + t.Fatalf("no abstract origin attr for inlined subroutine at offset %v", child.Offset) + } + originDIE := ex.EntryFromOffset(ooff) + if originDIE == nil { + t.Fatalf("can't locate origin DIE at off %v", ooff) + } + + // Name should check out. 
+ name, ok := originDIE.Val(dwarf.AttrName).(string) + if !ok { + t.Fatalf("no name attr for inlined subroutine at offset %v", child.Offset) + } + if name != "main.inlined" { + t.Fatalf("expected inlined routine %s got %s", "main.cand", name) + } + + // Verify that the call_file attribute for the inlined + // instance is ok. In this case it should match the file + // for the main routine. To do this we need to locate the + // compilation unit DIE that encloses what we're looking + // at; this can be done with the examiner. + cf, cfOK := child.Val(dwarf.AttrCallFile).(int64) + if !cfOK { + t.Fatalf("no call_file attr for inlined subroutine at offset %v", child.Offset) + } + file, err := ex.FileRef(d, mainIdx, cf) + if err != nil { + t.Errorf("FileRef: %v", err) + continue + } + base := filepath.Base(file) + if base != tc.file { + t.Errorf("bad call_file attribute, found '%s', want '%s'", + file, tc.file) + } + + // Verify that the call_line attribute for the inlined + // instance is ok. + cl, clOK := child.Val(dwarf.AttrCallLine).(int64) + if !clOK { + t.Fatalf("no call_line attr for inlined subroutine at offset %v", child.Offset) + } + if cl != tc.line { + t.Errorf("bad call_line attribute, found %d, want %d", cl, tc.line) + } + } + if !found { + t.Fatalf("not enough inlined subroutines found in main.main") + } + }) + } +} + +// TestInlinedRoutineArgsVars tests the argument and variable records for an inlined subroutine. +func TestInlinedRoutineArgsVars(t *testing.T) { + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + + t.Parallel() + + const prog = ` +package main + +var G int + +func noinline(x int) int { + defer func() { G += x }() + return x +} + +func cand(x, y int) int { + return noinline(x+y) ^ (y - x) +} + +func main() { + x := cand(G*G,G|7%G) + G = x +} +` + // Note: this is a build with "-l=4", as opposed to "-l -N". The + // test is intended to verify DWARF that is only generated when + // the inliner is active. 
We're only going to look at the DWARF for + // main.main, however, hence we build with "-gcflags=-l=4" as opposed + // to "-gcflags=all=-l=4". + _, ex := gobuildAndExamine(t, prog, OptInl4) + + maindie := findSubprogramDIE(t, ex, "main.main") + + // Walk main's children and pick out the inlined subroutines + mainIdx := ex.IdxFromOffset(maindie.Offset) + childDies := ex.Children(mainIdx) + found := false + for _, child := range childDies { + if child.Tag != dwarf.TagInlinedSubroutine { + continue + } + + // Found an inlined subroutine. + if found { + t.Fatalf("Found multiple inlined subroutines, expect only one") + } + found = true + + // Locate abstract origin. + ooff, originOK := child.Val(dwarf.AttrAbstractOrigin).(dwarf.Offset) + if !originOK { + t.Fatalf("no abstract origin attr for inlined subroutine at offset %v", child.Offset) + } + originDIE := ex.EntryFromOffset(ooff) + if originDIE == nil { + t.Fatalf("can't locate origin DIE at off %v", ooff) + } + + // Name should check out. + name, ok := originDIE.Val(dwarf.AttrName).(string) + if !ok { + t.Fatalf("no name attr for inlined subroutine at offset %v", child.Offset) + } + if name != "main.cand" { + t.Fatalf("expected inlined routine %s got %s", "main.cand", name) + } + + // Walk the children of the abstract subroutine. We expect + // to see child variables there, even if (perhaps due to + // optimization) there are no references to them from the + // inlined subroutine DIE. 
+ absFcnIdx := ex.IdxFromOffset(ooff) + absFcnChildDies := ex.Children(absFcnIdx) + if len(absFcnChildDies) != 2 { + t.Fatalf("expected abstract function: expected 2 children, got %d children", len(absFcnChildDies)) + } + formalCount := 0 + for _, absChild := range absFcnChildDies { + if absChild.Tag == dwarf.TagFormalParameter { + formalCount += 1 + continue + } + t.Fatalf("abstract function child DIE: expected formal, got %v", absChild.Tag) + } + if formalCount != 2 { + t.Fatalf("abstract function DIE: expected 2 formals, got %d", formalCount) + } + + omap := make(map[dwarf.Offset]bool) + + // Walk the child variables of the inlined routine. Each + // of them should have a distinct abstract origin-- if two + // vars point to the same origin things are definitely broken. + inlIdx := ex.IdxFromOffset(child.Offset) + inlChildDies := ex.Children(inlIdx) + for _, k := range inlChildDies { + ooff, originOK := k.Val(dwarf.AttrAbstractOrigin).(dwarf.Offset) + if !originOK { + t.Fatalf("no abstract origin attr for child of inlined subroutine at offset %v", k.Offset) + } + if _, found := omap[ooff]; found { + t.Fatalf("duplicate abstract origin at child of inlined subroutine at offset %v", k.Offset) + } + omap[ooff] = true + } + } + if !found { + t.Fatalf("not enough inlined subroutines found in main.main") + } +} + +func abstractOriginSanity(t *testing.T, pkgDir string, flags string) { + t.Parallel() + + dir := t.TempDir() + + // Build with inlining, to exercise DWARF inlining support. + f := gobuildTestdata(t, dir, filepath.Join(pkgDir, "main"), flags) + defer f.Close() + + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + rdr := d.Reader() + ex := dwtest.Examiner{} + if err := ex.Populate(rdr); err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + // Make a pass through all DIEs looking for abstract origin + // references. + abscount := 0 + for i, die := range ex.DIEs() { + // Does it have an abstract origin? 
+ ooff, originOK := die.Val(dwarf.AttrAbstractOrigin).(dwarf.Offset) + if !originOK { + continue + } + + // All abstract origin references should be resolvable. + abscount += 1 + originDIE := ex.EntryFromOffset(ooff) + if originDIE == nil { + ex.DumpEntry(i, false, 0) + t.Fatalf("unresolved abstract origin ref in DIE at offset 0x%x\n", die.Offset) + } + + // Suppose that DIE X has parameter/variable children {K1, + // K2, ... KN}. If X has an abstract origin of A, then for + // each KJ, the abstract origin of KJ should be a child of A. + // Note that this same rule doesn't hold for non-variable DIEs. + pidx := ex.IdxFromOffset(die.Offset) + if pidx < 0 { + t.Fatalf("can't locate DIE id") + } + kids := ex.Children(pidx) + for _, kid := range kids { + if kid.Tag != dwarf.TagVariable && + kid.Tag != dwarf.TagFormalParameter { + continue + } + kooff, originOK := kid.Val(dwarf.AttrAbstractOrigin).(dwarf.Offset) + if !originOK { + continue + } + childOriginDIE := ex.EntryFromOffset(kooff) + if childOriginDIE == nil { + ex.DumpEntry(i, false, 0) + t.Fatalf("unresolved abstract origin ref in DIE at offset %x", kid.Offset) + } + coidx := ex.IdxFromOffset(childOriginDIE.Offset) + childOriginParent := ex.Parent(coidx) + if childOriginParent != originDIE { + ex.DumpEntry(i, false, 0) + t.Fatalf("unexpected parent of abstract origin DIE at offset %v", childOriginDIE.Offset) + } + } + } + if abscount == 0 { + t.Fatalf("no abstract origin refs found, something is wrong") + } +} + +func TestAbstractOriginSanity(t *testing.T) { + testenv.MustHaveGoBuild(t) + + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + mustHaveDWARF(t) + + if wd, err := os.Getwd(); err == nil { + gopathdir := filepath.Join(wd, "testdata", "httptest") + abstractOriginSanity(t, gopathdir, OptAllInl4) + } else { + t.Fatalf("os.Getwd() failed %v", err) + } +} + +func TestAbstractOriginSanityIssue25459(t *testing.T) { + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + if runtime.GOARCH != 
"amd64" && runtime.GOARCH != "386" { + t.Skip("skipping on not-amd64 not-386; location lists not supported") + } + + if wd, err := os.Getwd(); err == nil { + gopathdir := filepath.Join(wd, "testdata", "issue25459") + abstractOriginSanity(t, gopathdir, DefaultOpt) + } else { + t.Fatalf("os.Getwd() failed %v", err) + } +} + +func TestAbstractOriginSanityIssue26237(t *testing.T) { + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + if wd, err := os.Getwd(); err == nil { + gopathdir := filepath.Join(wd, "testdata", "issue26237") + abstractOriginSanity(t, gopathdir, DefaultOpt) + } else { + t.Fatalf("os.Getwd() failed %v", err) + } +} + +func TestRuntimeTypeAttrInternal(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustInternalLink(t, false) + + mustHaveDWARF(t) + + testRuntimeTypeAttr(t, "-ldflags=-linkmode=internal") +} + +// External linking requires a host linker (https://golang.org/src/cmd/cgo/doc.go l.732) +func TestRuntimeTypeAttrExternal(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + + mustHaveDWARF(t) + + // Explicitly test external linking, for dsymutil compatibility on Darwin. 
+ if runtime.GOARCH == "ppc64" { + t.Skip("-linkmode=external not supported on ppc64") + } + + testRuntimeTypeAttr(t, "-ldflags=-linkmode=external") +} + +func testRuntimeTypeAttr(t *testing.T, flags string) { + t.Parallel() + + const prog = ` +package main + +import "unsafe" + +type X struct{ _ int } + +func main() { + var x interface{} = &X{} + p := *(*uintptr)(unsafe.Pointer(&x)) + print(p) +} +` + dir := t.TempDir() + + f := gobuild(t, dir, prog, flags) + defer f.Close() + + out, err := testenv.Command(t, f.path).CombinedOutput() + if err != nil { + t.Fatalf("could not run test program: %v", err) + } + addr, err := strconv.ParseUint(string(out), 10, 64) + if err != nil { + t.Fatalf("could not parse type address from program output %q: %v", out, err) + } + + symbols, err := f.Symbols() + if err != nil { + t.Fatalf("error reading symbols: %v", err) + } + var types *objfilepkg.Sym + for _, sym := range symbols { + if sym.Name == "runtime.types" { + types = &sym + break + } + } + if types == nil { + t.Fatal("couldn't find runtime.types in symbols") + } + + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + rdr := d.Reader() + ex := dwtest.Examiner{} + if err := ex.Populate(rdr); err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + dies := ex.Named("*main.X") + if len(dies) != 1 { + t.Fatalf("wanted 1 DIE named *main.X, found %v", len(dies)) + } + rtAttr := dies[0].Val(intdwarf.DW_AT_go_runtime_type) + if rtAttr == nil { + t.Fatalf("*main.X DIE had no runtime type attr. 
DIE: %v", dies[0]) + } + + if platform.DefaultPIE(runtime.GOOS, runtime.GOARCH, false) { + return // everything is PIE, addresses are relocated + } + if rtAttr.(uint64)+types.Addr != addr { + t.Errorf("DWARF type offset was %#x+%#x, but test program said %#x", rtAttr.(uint64), types.Addr, addr) + } +} + +func TestIssue27614(t *testing.T) { + // Type references in debug_info should always use the DW_TAG_typedef_type + // for the type, when that's generated. + + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + + t.Parallel() + + dir := t.TempDir() + + const prog = `package main + +import "fmt" + +type astruct struct { + X int +} + +type bstruct struct { + X float32 +} + +var globalptr *astruct +var globalvar astruct +var bvar0, bvar1, bvar2 bstruct + +func main() { + fmt.Println(globalptr, globalvar, bvar0, bvar1, bvar2) +} +` + + f := gobuild(t, dir, prog, NoOpt) + + defer f.Close() + + data, err := f.DWARF() + if err != nil { + t.Fatal(err) + } + + rdr := data.Reader() + + var astructTypeDIE, bstructTypeDIE, ptrastructTypeDIE *dwarf.Entry + var globalptrDIE, globalvarDIE *dwarf.Entry + var bvarDIE [3]*dwarf.Entry + + for { + e, err := rdr.Next() + if err != nil { + t.Fatal(err) + } + if e == nil { + break + } + + name, _ := e.Val(dwarf.AttrName).(string) + + switch e.Tag { + case dwarf.TagTypedef: + switch name { + case "main.astruct": + astructTypeDIE = e + case "main.bstruct": + bstructTypeDIE = e + } + case dwarf.TagPointerType: + if name == "*main.astruct" { + ptrastructTypeDIE = e + } + case dwarf.TagVariable: + switch name { + case "main.globalptr": + globalptrDIE = e + case "main.globalvar": + globalvarDIE = e + default: + const bvarprefix = "main.bvar" + if strings.HasPrefix(name, bvarprefix) { + i, _ := strconv.Atoi(name[len(bvarprefix):]) + bvarDIE[i] = e + } + } + } + } + + typedieof := func(e *dwarf.Entry) dwarf.Offset { + return e.Val(dwarf.AttrType).(dwarf.Offset) + } + + if off := typedieof(ptrastructTypeDIE); off != astructTypeDIE.Offset { + 
t.Errorf("type attribute of *main.astruct references %#x, not main.astruct DIE at %#x\n", off, astructTypeDIE.Offset) + } + + if off := typedieof(globalptrDIE); off != ptrastructTypeDIE.Offset { + t.Errorf("type attribute of main.globalptr references %#x, not *main.astruct DIE at %#x\n", off, ptrastructTypeDIE.Offset) + } + + if off := typedieof(globalvarDIE); off != astructTypeDIE.Offset { + t.Errorf("type attribute of main.globalvar1 references %#x, not main.astruct DIE at %#x\n", off, astructTypeDIE.Offset) + } + + for i := range bvarDIE { + if off := typedieof(bvarDIE[i]); off != bstructTypeDIE.Offset { + t.Errorf("type attribute of main.bvar%d references %#x, not main.bstruct DIE at %#x\n", i, off, bstructTypeDIE.Offset) + } + } +} + +func TestStaticTmp(t *testing.T) { + // Checks that statictmp variables do not appear in debug_info or the + // symbol table. + // Also checks that statictmp variables do not collide with user defined + // variables (issue #25113) + + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + + t.Parallel() + + dir := t.TempDir() + + const prog = `package main + +var stmp_0 string +var a []int + +func init() { + a = []int{ 7 } +} + +func main() { + println(a[0]) +} +` + + f := gobuild(t, dir, prog, NoOpt) + + defer f.Close() + + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + rdr := d.Reader() + for { + e, err := rdr.Next() + if err != nil { + t.Fatal(err) + } + if e == nil { + break + } + if e.Tag != dwarf.TagVariable { + continue + } + name, ok := e.Val(dwarf.AttrName).(string) + if !ok { + continue + } + if strings.Contains(name, "stmp") { + t.Errorf("statictmp variable found in debug_info: %s at %x", name, e.Offset) + } + } + + // When external linking, we put all symbols in the symbol table (so the + // external linker can find them). Skip the symbol table check. + // TODO: maybe there is some way to tell the external linker not to put + // those symbols in the executable's symbol table? 
Prefix the symbol name + // with "." or "L" to pretend it is a label? + if !testenv.CanInternalLink(false) { + return + } + + syms, err := f.Symbols() + if err != nil { + t.Fatalf("error reading symbols: %v", err) + } + for _, sym := range syms { + if strings.Contains(sym.Name, "stmp") { + t.Errorf("statictmp variable found in symbol table: %s", sym.Name) + } + } +} + +func TestPackageNameAttr(t *testing.T) { + const dwarfAttrGoPackageName = dwarf.Attr(0x2905) + const dwarfGoLanguage = 22 + + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + + t.Parallel() + + dir := t.TempDir() + + const prog = "package main\nfunc main() {\nprintln(\"hello world\")\n}\n" + + f := gobuild(t, dir, prog, NoOpt) + + defer f.Close() + + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + rdr := d.Reader() + runtimeUnitSeen := false + for { + e, err := rdr.Next() + if err != nil { + t.Fatal(err) + } + if e == nil { + break + } + if e.Tag != dwarf.TagCompileUnit { + continue + } + if lang, _ := e.Val(dwarf.AttrLanguage).(int64); lang != dwarfGoLanguage { + continue + } + + pn, ok := e.Val(dwarfAttrGoPackageName).(string) + if !ok { + name, _ := e.Val(dwarf.AttrName).(string) + t.Errorf("found compile unit without package name: %s", name) + + } + if pn == "" { + name, _ := e.Val(dwarf.AttrName).(string) + t.Errorf("found compile unit with empty package name: %s", name) + } else { + if pn == "runtime" { + runtimeUnitSeen = true + } + } + } + + // Something is wrong if there's no runtime compilation unit. + if !runtimeUnitSeen { + t.Errorf("no package name for runtime unit") + } +} + +func TestMachoIssue32233(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + + if runtime.GOOS != "darwin" { + t.Skip("skipping; test only interesting on darwin") + } + + tmpdir := t.TempDir() + + wd, err := os.Getwd() + if err != nil { + t.Fatalf("where am I? 
%v", err) + } + pdir := filepath.Join(wd, "testdata", "issue32233", "main") + f := gobuildTestdata(t, tmpdir, pdir, DefaultOpt) + f.Close() +} + +func TestWindowsIssue36495(t *testing.T) { + testenv.MustHaveGoBuild(t) + if runtime.GOOS != "windows" { + t.Skip("skipping: test only on windows") + } + + dir := t.TempDir() + + prog := ` +package main + +import "fmt" + +func main() { + fmt.Println("Hello World") +}` + f := gobuild(t, dir, prog, NoOpt) + defer f.Close() + exe, err := pe.Open(f.path) + if err != nil { + t.Fatalf("error opening pe file: %v", err) + } + defer exe.Close() + dw, err := exe.DWARF() + if err != nil { + t.Fatalf("error parsing DWARF: %v", err) + } + rdr := dw.Reader() + for { + e, err := rdr.Next() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + if e == nil { + break + } + if e.Tag != dwarf.TagCompileUnit { + continue + } + lnrdr, err := dw.LineReader(e) + if err != nil { + t.Fatalf("error creating DWARF line reader: %v", err) + } + if lnrdr != nil { + var lne dwarf.LineEntry + for { + err := lnrdr.Next(&lne) + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("error reading next DWARF line: %v", err) + } + if strings.Contains(lne.File.Name, `\`) { + t.Errorf("filename should not contain backslash: %v", lne.File.Name) + } + } + } + rdr.SkipChildren() + } +} + +func TestIssue38192(t *testing.T) { + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + + t.Parallel() + + // Build a test program that contains a translation unit whose + // text (from am assembly source) contains only a single instruction. + tmpdir := t.TempDir() + wd, err := os.Getwd() + if err != nil { + t.Fatalf("where am I? %v", err) + } + pdir := filepath.Join(wd, "testdata", "issue38192") + f := gobuildTestdata(t, tmpdir, pdir, DefaultOpt) + defer f.Close() + + // Open the resulting binary and examine the DWARF it contains. 
+ // Look for the function of interest ("main.singleInstruction") + // and verify that the line table has an entry not just for the + // single instruction but also a dummy instruction following it, + // so as to test that whoever is emitting the DWARF doesn't + // emit an end-sequence op immediately after the last instruction + // in the translation unit. + // + // NB: another way to write this test would have been to run the + // resulting executable under GDB, set a breakpoint in + // "main.singleInstruction", then verify that GDB displays the + // correct line/file information. Given the headache and flakiness + // associated with GDB-based tests these days, a direct read of + // the line table seems more desirable. + rows := []dwarf.LineEntry{} + dw, err := f.DWARF() + if err != nil { + t.Fatalf("error parsing DWARF: %v", err) + } + rdr := dw.Reader() + for { + e, err := rdr.Next() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + if e == nil { + break + } + if e.Tag != dwarf.TagCompileUnit { + continue + } + // NB: there can be multiple compile units named "main". + name := e.Val(dwarf.AttrName).(string) + if name != "main" { + continue + } + lnrdr, err := dw.LineReader(e) + if err != nil { + t.Fatalf("error creating DWARF line reader: %v", err) + } + if lnrdr != nil { + var lne dwarf.LineEntry + for { + err := lnrdr.Next(&lne) + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("error reading next DWARF line: %v", err) + } + if !strings.HasSuffix(lne.File.Name, "ld/testdata/issue38192/oneline.s") { + continue + } + rows = append(rows, lne) + } + } + rdr.SkipChildren() + } + f.Close() + + // Make sure that: + // - main.singleInstruction appears in the line table + // - more than one PC value appears the line table for + // that compilation unit. 
+ // - at least one row has the correct line number (8) + pcs := make(map[uint64]bool) + line8seen := false + for _, r := range rows { + pcs[r.Address] = true + if r.Line == 8 { + line8seen = true + } + } + failed := false + if len(pcs) < 2 { + failed = true + t.Errorf("not enough line table rows for main.singleInstruction (got %d, wanted > 1", len(pcs)) + } + if !line8seen { + failed = true + t.Errorf("line table does not contain correct line for main.singleInstruction") + } + if !failed { + return + } + for i, r := range rows { + t.Logf("row %d: A=%x F=%s L=%d\n", i, r.Address, r.File.Name, r.Line) + } +} + +func TestIssue39757(t *testing.T) { + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + + t.Parallel() + + // In this bug the DWARF line table contents for the last couple of + // instructions in a function were incorrect (bad file/line). This + // test verifies that all of the line table rows for a function + // of interest have the same file (no "autogenerated"). + // + // Note: the function in this test was written with an eye towards + // ensuring that there are no inlined routines from other packages + // (which could introduce other source files into the DWARF); it's + // possible that at some point things could evolve in the + // compiler/runtime in ways that aren't happening now, so this + // might be something to check for if it does start failing. + + tmpdir := t.TempDir() + + wd, err := os.Getwd() + if err != nil { + t.Fatalf("where am I? %v", err) + } + pdir := filepath.Join(wd, "testdata", "issue39757") + f := gobuildTestdata(t, tmpdir, pdir, DefaultOpt) + defer f.Close() + + syms, err := f.Symbols() + if err != nil { + t.Fatal(err) + } + + var addr uint64 + for _, sym := range syms { + if sym.Name == "main.main" { + addr = sym.Addr + break + } + } + if addr == 0 { + t.Fatal("cannot find main.main in symbols") + } + + // Open the resulting binary and examine the DWARF it contains. 
+ // Look for the function of interest ("main.main") + // and verify that all line table entries show the same source + // file. + dw, err := f.DWARF() + if err != nil { + t.Fatalf("error parsing DWARF: %v", err) + } + rdr := dw.Reader() + ex := &dwtest.Examiner{} + if err := ex.Populate(rdr); err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + maindie := findSubprogramDIE(t, ex, "main.main") + + // Collect the start/end PC for main.main + lowpc := maindie.Val(dwarf.AttrLowpc).(uint64) + highpc := maindie.Val(dwarf.AttrHighpc).(uint64) + + // Now read the line table for the 'main' compilation unit. + mainIdx := ex.IdxFromOffset(maindie.Offset) + cuentry := ex.Parent(mainIdx) + if cuentry == nil { + t.Fatalf("main.main DIE appears orphaned") + } + lnrdr, lerr := dw.LineReader(cuentry) + if lerr != nil { + t.Fatalf("error creating DWARF line reader: %v", err) + } + if lnrdr == nil { + t.Fatalf("no line table for main.main compilation unit") + } + rows := []dwarf.LineEntry{} + mainrows := 0 + var lne dwarf.LineEntry + for { + err := lnrdr.Next(&lne) + if err == io.EOF { + break + } + rows = append(rows, lne) + if err != nil { + t.Fatalf("error reading next DWARF line: %v", err) + } + if lne.Address < lowpc || lne.Address > highpc { + continue + } + if !strings.HasSuffix(lne.File.Name, "issue39757main.go") { + t.Errorf("found row with file=%s (not issue39757main.go)", lne.File.Name) + } + mainrows++ + } + f.Close() + + // Make sure we saw a few rows. + if mainrows < 3 { + t.Errorf("not enough line table rows for main.main (got %d, wanted > 3", mainrows) + for i, r := range rows { + t.Logf("row %d: A=%x F=%s L=%d\n", i, r.Address, r.File.Name, r.Line) + } + } +} + +func TestIssue42484(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustInternalLink(t, false) // Avoid spurious failures from external linkers. 
+ + mustHaveDWARF(t) + + t.Parallel() + + tmpdir, err := os.MkdirTemp("", "TestIssue42484") + if err != nil { + t.Fatalf("could not create directory: %v", err) + } + defer os.RemoveAll(tmpdir) + wd, err := os.Getwd() + if err != nil { + t.Fatalf("where am I? %v", err) + } + pdir := filepath.Join(wd, "testdata", "issue42484") + f := gobuildTestdata(t, tmpdir, pdir, NoOpt) + + var lastAddr uint64 + var lastFile string + var lastLine int + + dw, err := f.DWARF() + if err != nil { + t.Fatalf("error parsing DWARF: %v", err) + } + rdr := dw.Reader() + for { + e, err := rdr.Next() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + if e == nil { + break + } + if e.Tag != dwarf.TagCompileUnit { + continue + } + lnrdr, err := dw.LineReader(e) + if err != nil { + t.Fatalf("error creating DWARF line reader: %v", err) + } + if lnrdr != nil { + var lne dwarf.LineEntry + for { + err := lnrdr.Next(&lne) + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("error reading next DWARF line: %v", err) + } + if lne.EndSequence { + continue + } + if lne.Address == lastAddr && (lne.File.Name != lastFile || lne.Line != lastLine) { + t.Errorf("address %#x is assigned to both %s:%d and %s:%d", lastAddr, lastFile, lastLine, lne.File.Name, lne.Line) + } + lastAddr = lne.Address + lastFile = lne.File.Name + lastLine = lne.Line + } + } + rdr.SkipChildren() + } + f.Close() +} + +// processParams examines the formal parameter children of subprogram +// DIE "die" using the explorer "ex" and returns a string that +// captures the name, order, and classification of the subprogram's +// input and output parameters. 
For example, for the go function +// +// func foo(i1 int, f1 float64) (string, bool) { +// +// this function would return a string something like +// +// i1:0:1 f1:1:1 ~r0:2:2 ~r1:3:2 +// +// where each chunk above is of the form NAME:ORDER:INOUTCLASSIFICATION +func processParams(die *dwarf.Entry, ex *dwtest.Examiner) string { + // Values in the returned map are of the form : + // where order is the order within the child DIE list of the + // param, and is an integer: + // + // -1: varparm attr not found + // 1: varparm found with value false + // 2: varparm found with value true + // + foundParams := make(map[string]string) + + // Walk the subprogram DIE's children looking for params. + pIdx := ex.IdxFromOffset(die.Offset) + childDies := ex.Children(pIdx) + idx := 0 + for _, child := range childDies { + if child.Tag == dwarf.TagFormalParameter { + // NB: a setting of DW_AT_variable_parameter indicates + // that the param in question is an output parameter; we + // want to see this attribute set to TRUE for all Go + // return params. It would be OK to have it missing for + // input parameters, but for the moment we verify that the + // attr is present but set to false. + st := -1 + if vp, ok := child.Val(dwarf.AttrVarParam).(bool); ok { + if vp { + st = 2 + } else { + st = 1 + } + } + if name, ok := child.Val(dwarf.AttrName).(string); ok { + foundParams[name] = fmt.Sprintf("%d:%d", idx, st) + idx++ + } + } + } + + found := make([]string, 0, len(foundParams)) + for k, v := range foundParams { + found = append(found, fmt.Sprintf("%s:%s", k, v)) + } + sort.Strings(found) + + return fmt.Sprintf("%+v", found) +} + +func TestOutputParamAbbrevAndAttr(t *testing.T) { + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + t.Parallel() + + // This test verifies that the compiler is selecting the correct + // DWARF abbreviation for output parameters, and that the + // variable parameter attribute is correct for in-params and + // out-params. 
+ + const prog = ` +package main + +//go:noinline +func ABC(c1, c2, c3 int, d1, d2, d3, d4 string, f1, f2, f3 float32, g1 [1024]int) (r1 int, r2 int, r3 [1024]int, r4 byte, r5 string, r6 float32) { + g1[0] = 6 + r1, r2, r3, r4, r5, r6 = c3, c2+c1, g1, 'a', d1+d2+d3+d4, f1+f2+f3 + return +} + +func main() { + a := [1024]int{} + v1, v2, v3, v4, v5, v6 := ABC(1, 2, 3, "a", "b", "c", "d", 1.0, 2.0, 1.0, a) + println(v1, v2, v3[0], v4, v5, v6) +} +` + _, ex := gobuildAndExamine(t, prog, NoOpt) + + abcdie := findSubprogramDIE(t, ex, "main.ABC") + + // Call a helper to collect param info. + found := processParams(abcdie, ex) + + // Make sure we see all of the expected params in the proper + // order, that they have the varparam attr, and the varparam is + // set for the returns. + expected := "[c1:0:1 c2:1:1 c3:2:1 d1:3:1 d2:4:1 d3:5:1 d4:6:1 f1:7:1 f2:8:1 f3:9:1 g1:10:1 r1:11:2 r2:12:2 r3:13:2 r4:14:2 r5:15:2 r6:16:2]" + if found != expected { + t.Errorf("param check failed, wanted:\n%s\ngot:\n%s\n", + expected, found) + } +} + +func TestDictIndex(t *testing.T) { + // Check that variables with a parametric type have a dictionary index + // attribute and that types that are only referenced through dictionaries + // have DIEs. 
+ testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + t.Parallel() + + const prog = ` +package main + +import "fmt" + +type CustomInt int + +func testfn[T any](arg T) { + var mapvar = make(map[int]T) + mapvar[0] = arg + fmt.Println(arg, mapvar) +} + +func main() { + testfn(CustomInt(3)) +} +` + + dir := t.TempDir() + f := gobuild(t, dir, prog, NoOpt) + defer f.Close() + + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + rdr := d.Reader() + found := false + for entry, err := rdr.Next(); entry != nil; entry, err = rdr.Next() { + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + name, _ := entry.Val(dwarf.AttrName).(string) + if strings.HasPrefix(name, "main.testfn") { + found = true + break + } + } + + if !found { + t.Fatalf("could not find main.testfn") + } + + offs := []dwarf.Offset{} + for entry, err := rdr.Next(); entry != nil; entry, err = rdr.Next() { + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + if entry.Tag == 0 { + break + } + name, _ := entry.Val(dwarf.AttrName).(string) + switch name { + case "arg", "mapvar": + offs = append(offs, entry.Val(dwarf.AttrType).(dwarf.Offset)) + } + } + if len(offs) != 2 { + t.Errorf("wrong number of variables found in main.testfn %d", len(offs)) + } + for _, off := range offs { + rdr.Seek(off) + entry, err := rdr.Next() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + if _, ok := entry.Val(intdwarf.DW_AT_go_dict_index).(int64); !ok { + t.Errorf("could not find DW_AT_go_dict_index attribute offset %#x (%T)", off, entry.Val(intdwarf.DW_AT_go_dict_index)) + } + } + + rdr.Seek(0) + ex := dwtest.Examiner{} + if err := ex.Populate(rdr); err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + for _, typeName := range []string{"main.CustomInt", "map[int]main.CustomInt"} { + dies := ex.Named(typeName) + if len(dies) != 1 { + t.Errorf("wanted 1 DIE named %s, found %v", typeName, len(dies)) + } + if 
dies[0].Val(intdwarf.DW_AT_go_runtime_type).(uint64) == 0 { + t.Errorf("type %s does not have DW_AT_go_runtime_type", typeName) + } + } +} + +func TestOptimizedOutParamHandling(t *testing.T) { + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + t.Parallel() + + // This test is intended to verify that the compiler emits DWARF + // DIE entries for all input and output parameters, and that: + // + // - attributes are set correctly for output params, + // - things appear in the proper order + // - things work properly for both register-resident + // params and params passed on the stack + // - things work for both referenced and unreferenced params + // - things work for named return values un-named return vals + // + // The scenarios below don't cover all possible permutations and + // combinations, but they hit a bunch of the high points. + + const prog = ` +package main + +// First testcase. All input params in registers, all params used. + +//go:noinline +func tc1(p1, p2 int, p3 string) (int, string) { + return p1 + p2, p3 + "foo" +} + +// Second testcase. Some params in registers, some on stack. + +//go:noinline +func tc2(p1 int, p2 [128]int, p3 string) (int, string, [128]int) { + return p1 + p2[p1], p3 + "foo", [128]int{p1} +} + +// Third testcase. Named return params. + +//go:noinline +func tc3(p1 int, p2 [128]int, p3 string) (r1 int, r2 bool, r3 string, r4 [128]int) { + if p1 == 101 { + r1 = p1 + p2[p1] + r2 = p3 == "foo" + r4 = [128]int{p1} + return + } else { + return p1 - p2[p1+3], false, "bar", [128]int{p1 + 2} + } +} + +// Fourth testcase. Some thing are used, some are unused. 
+ +//go:noinline +func tc4(p1, p1un int, p2, p2un [128]int, p3, p3un string) (r1 int, r1un int, r2 bool, r3 string, r4, r4un [128]int) { + if p1 == 101 { + r1 = p1 + p2[p2[0]] + r2 = p3 == "foo" + r4 = [128]int{p1} + return + } else { + return p1, -1, true, "plex", [128]int{p1 + 2}, [128]int{-1} + } +} + +func main() { + { + r1, r2 := tc1(3, 4, "five") + println(r1, r2) + } + { + x := [128]int{9} + r1, r2, r3 := tc2(3, x, "five") + println(r1, r2, r3[0]) + } + { + x := [128]int{9} + r1, r2, r3, r4 := tc3(3, x, "five") + println(r1, r2, r3, r4[0]) + } + { + x := [128]int{3} + y := [128]int{7} + r1, r1u, r2, r3, r4, r4u := tc4(0, 1, x, y, "a", "b") + println(r1, r1u, r2, r3, r4[0], r4u[1]) + } + +} +` + _, ex := gobuildAndExamine(t, prog, DefaultOpt) + + testcases := []struct { + tag string + expected string + }{ + { + tag: "tc1", + expected: "[p1:0:1 p2:1:1 p3:2:1 ~r0:3:2 ~r1:4:2]", + }, + { + tag: "tc2", + expected: "[p1:0:1 p2:1:1 p3:2:1 ~r0:3:2 ~r1:4:2 ~r2:5:2]", + }, + { + tag: "tc3", + expected: "[p1:0:1 p2:1:1 p3:2:1 r1:3:2 r2:4:2 r3:5:2 r4:6:2]", + }, + { + tag: "tc4", + expected: "[p1:0:1 p1un:1:1 p2:2:1 p2un:3:1 p3:4:1 p3un:5:1 r1:6:2 r1un:7:2 r2:8:2 r3:9:2 r4:10:2 r4un:11:2]", + }, + } + + for _, tc := range testcases { + // Locate the proper DIE + which := fmt.Sprintf("main.%s", tc.tag) + die := findSubprogramDIE(t, ex, which) + + // Examine params for this subprogram. 
+ foundParams := processParams(die, ex) + if foundParams != tc.expected { + t.Errorf("check failed for testcase %s -- wanted:\n%s\ngot:%s\n", + tc.tag, tc.expected, foundParams) + } + } +} +func TestIssue54320(t *testing.T) { + // Check that when trampolines are used, the DWARF LPT is correctly + // emitted in the final binary + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + + t.Parallel() + + const prog = ` +package main + +import "fmt" + +func main() { + fmt.Printf("Hello world\n"); +} +` + + dir := t.TempDir() + f := gobuild(t, dir, prog, "-ldflags=-debugtramp=2") + defer f.Close() + + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + rdr := d.Reader() + found := false + var entry *dwarf.Entry + for entry, err = rdr.Next(); entry != nil; entry, err = rdr.Next() { + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + if entry.Tag != dwarf.TagCompileUnit { + continue + } + name, _ := entry.Val(dwarf.AttrName).(string) + if name == "main" { + found = true + break + } + rdr.SkipChildren() + } + + if !found { + t.Fatalf("could not find main compile unit") + } + lr, err := d.LineReader(entry) + if err != nil { + t.Fatalf("error obtaining linereader: %v", err) + } + + var le dwarf.LineEntry + found = false + for { + if err := lr.Next(&le); err != nil { + if err == io.EOF { + break + } + t.Fatalf("error reading linentry: %v", err) + } + // check LE contains an entry to test.go + if le.File == nil { + continue + } + file := filepath.Base(le.File.Name) + if file == "test.go" { + found = true + break + } + } + if !found { + t.Errorf("no LPT entries for test.go") + } +} + +const zeroSizedVarProg = ` +package main + +import ( + "fmt" +) + +func main() { + zeroSizedVariable := struct{}{} + fmt.Println(zeroSizedVariable) +} +` + +func TestZeroSizedVariable(t *testing.T) { + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + t.Parallel() + + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + // This test 
verifies that the compiler emits DIEs for zero sized variables + // (for example variables of type 'struct {}'). + // See go.dev/issues/54615. + + for _, opt := range []string{NoOpt, DefaultOpt} { + opt := opt + t.Run(opt, func(t *testing.T) { + _, ex := gobuildAndExamine(t, zeroSizedVarProg, opt) + + // Locate the main.zeroSizedVariable DIE + abcs := ex.Named("zeroSizedVariable") + if len(abcs) == 0 { + t.Fatalf("unable to locate DIE for zeroSizedVariable") + } + if len(abcs) != 1 { + t.Fatalf("more than one zeroSizedVariable DIE") + } + }) + } +} + +func TestConsistentGoKindAndRuntimeType(t *testing.T) { + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + t.Parallel() + + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + // Ensure that if we emit a "go runtime type" attr on a type DIE, + // we also include the "go kind" attribute. See issue #64231. + _, ex := gobuildAndExamine(t, zeroSizedVarProg, DefaultOpt) + + // Walk all dies. + typesChecked := 0 + failures := 0 + for _, die := range ex.DIEs() { + // For any type DIE with DW_AT_go_runtime_type set... + rtt, hasRT := die.Val(intdwarf.DW_AT_go_runtime_type).(uint64) + if !hasRT || rtt == 0 { + continue + } + typesChecked++ + // ... we want to see a meaningful DW_AT_go_kind value. + if val, ok := die.Val(intdwarf.DW_AT_go_kind).(int64); !ok || val == 0 { + failures++ + // dump DIEs for first 10 failures. 
+ if failures <= 10 { + idx := ex.IdxFromOffset(die.Offset) + t.Logf("type DIE has DW_AT_go_runtime_type but invalid DW_AT_go_kind:\n") + ex.DumpEntry(idx, false, 0) + } + t.Errorf("bad type DIE at offset %d\n", die.Offset) + } + } + if typesChecked == 0 { + t.Fatalf("something went wrong, 0 types checked") + } else { + t.Logf("%d types checked\n", typesChecked) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/elf.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/elf.go new file mode 100644 index 0000000000000000000000000000000000000000..be9e22946a38684287b7e839e59c53b391dc1735 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/elf.go @@ -0,0 +1,2450 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "cmd/internal/notsha256" + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" + "encoding/binary" + "encoding/hex" + "fmt" + "internal/buildcfg" + "os" + "path/filepath" + "runtime" + "sort" + "strings" +) + +/* + * Derived from: + * $FreeBSD: src/sys/sys/elf32.h,v 1.8.14.1 2005/12/30 22:13:58 marcel Exp $ + * $FreeBSD: src/sys/sys/elf64.h,v 1.10.14.1 2005/12/30 22:13:58 marcel Exp $ + * $FreeBSD: src/sys/sys/elf_common.h,v 1.15.8.1 2005/12/30 22:13:58 marcel Exp $ + * $FreeBSD: src/sys/alpha/include/elf.h,v 1.14 2003/09/25 01:10:22 peter Exp $ + * $FreeBSD: src/sys/amd64/include/elf.h,v 1.18 2004/08/03 08:21:48 dfr Exp $ + * $FreeBSD: src/sys/arm/include/elf.h,v 1.5.2.1 2006/06/30 21:42:52 cognet Exp $ + * $FreeBSD: src/sys/i386/include/elf.h,v 1.16 2004/08/02 19:12:17 dfr Exp $ + * $FreeBSD: src/sys/powerpc/include/elf.h,v 1.7 2004/11/02 09:47:01 ssouhlal Exp $ + * $FreeBSD: src/sys/sparc64/include/elf.h,v 1.12 2003/09/25 01:10:26 peter Exp $ + * + * Copyright (c) 1996-1998 John D. Polstra. 
All rights reserved. + * Copyright (c) 2001 David E. O'Brien + * Portions Copyright 2009 The Go Authors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +/* + * ELF definitions that are independent of architecture or word size. + */ + +/* + * Note header. The ".note" section contains an array of notes. Each + * begins with this header, aligned to a word boundary. Immediately + * following the note header is n_namesz bytes of name, padded to the + * next word boundary. Then comes n_descsz bytes of descriptor, again + * padded to a word boundary. The values of n_namesz and n_descsz do + * not include the padding. 
+ */ +type elfNote struct { + nNamesz uint32 + nDescsz uint32 + nType uint32 +} + +/* For accessing the fields of r_info. */ + +/* For constructing r_info from field values. */ + +/* + * Relocation types. + */ +const ( + ARM_MAGIC_TRAMP_NUMBER = 0x5c000003 +) + +/* + * Symbol table entries. + */ + +/* For accessing the fields of st_info. */ + +/* For constructing st_info from field values. */ + +/* For accessing the fields of st_other. */ + +/* + * ELF header. + */ +type ElfEhdr elf.Header64 + +/* + * Section header. + */ +type ElfShdr struct { + elf.Section64 + shnum elf.SectionIndex +} + +/* + * Program header. + */ +type ElfPhdr elf.ProgHeader + +/* For accessing the fields of r_info. */ + +/* For constructing r_info from field values. */ + +/* + * Symbol table entries. + */ + +/* For accessing the fields of st_info. */ + +/* For constructing st_info from field values. */ + +/* For accessing the fields of st_other. */ + +/* + * Go linker interface + */ +const ( + ELF64HDRSIZE = 64 + ELF64PHDRSIZE = 56 + ELF64SHDRSIZE = 64 + ELF64RELSIZE = 16 + ELF64RELASIZE = 24 + ELF64SYMSIZE = 24 + ELF32HDRSIZE = 52 + ELF32PHDRSIZE = 32 + ELF32SHDRSIZE = 40 + ELF32SYMSIZE = 16 + ELF32RELSIZE = 8 +) + +/* + * The interface uses the 64-bit structures always, + * to avoid code duplication. The writers know how to + * marshal a 32-bit representation from the 64-bit structure. + */ + +var elfstrdat, elfshstrdat []byte + +/* + * Total amount of space to reserve at the start of the file + * for Header, PHeaders, SHeaders, and interp. + * May waste some. + * On FreeBSD, cannot be larger than a page. + */ +const ( + ELFRESERVE = 4096 +) + +/* + * We use the 64-bit data structures on both 32- and 64-bit machines + * in order to write the code just once. The 64-bit data structure is + * written in the 32-bit format on the 32-bit machines. 
+ */ +const ( + NSECT = 400 +) + +var ( + Nelfsym = 1 + + elf64 bool + // Either ".rel" or ".rela" depending on which type of relocation the + // target platform uses. + elfRelType string + + ehdr ElfEhdr + phdr [NSECT]*ElfPhdr + shdr [NSECT]*ElfShdr + + interp string +) + +// ELFArch includes target-specific hooks for ELF targets. +// This is initialized by the target-specific Init function +// called by the linker's main function in cmd/link/main.go. +type ELFArch struct { + // TODO: Document these fields. + + Androiddynld string + Linuxdynld string + LinuxdynldMusl string + Freebsddynld string + Netbsddynld string + Openbsddynld string + Dragonflydynld string + Solarisdynld string + + Reloc1 func(*Link, *OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int, int64) bool + RelocSize uint32 // size of an ELF relocation record, must match Reloc1. + SetupPLT func(ctxt *Link, ldr *loader.Loader, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) + + // DynamicReadOnly can be set to true to make the .dynamic + // section read-only. By default it is writable. + // This is used by MIPS targets. + DynamicReadOnly bool +} + +type Elfstring struct { + s string + off int +} + +var elfstr [100]Elfstring + +var nelfstr int + +var buildinfo []byte + +/* +Initialize the global variable that describes the ELF header. It will be updated as +we write section and prog headers. 
+*/ +func Elfinit(ctxt *Link) { + ctxt.IsELF = true + + if ctxt.Arch.InFamily(sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) { + elfRelType = ".rela" + } else { + elfRelType = ".rel" + } + + switch ctxt.Arch.Family { + // 64-bit architectures + case sys.PPC64, sys.S390X: + if ctxt.Arch.ByteOrder == binary.BigEndian && ctxt.HeadType != objabi.Hopenbsd { + ehdr.Flags = 1 /* Version 1 ABI */ + } else { + ehdr.Flags = 2 /* Version 2 ABI */ + } + fallthrough + case sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.RISCV64: + if ctxt.Arch.Family == sys.MIPS64 { + ehdr.Flags = 0x20000004 /* MIPS 3 CPIC */ + } + if ctxt.Arch.Family == sys.Loong64 { + ehdr.Flags = 0x43 /* DOUBLE_FLOAT, OBJABI_V1 */ + } + if ctxt.Arch.Family == sys.RISCV64 { + ehdr.Flags = 0x4 /* RISCV Float ABI Double */ + } + elf64 = true + + ehdr.Phoff = ELF64HDRSIZE /* Must be ELF64HDRSIZE: first PHdr must follow ELF header */ + ehdr.Shoff = ELF64HDRSIZE /* Will move as we add PHeaders */ + ehdr.Ehsize = ELF64HDRSIZE /* Must be ELF64HDRSIZE */ + ehdr.Phentsize = ELF64PHDRSIZE /* Must be ELF64PHDRSIZE */ + ehdr.Shentsize = ELF64SHDRSIZE /* Must be ELF64SHDRSIZE */ + + // 32-bit architectures + case sys.ARM, sys.MIPS: + if ctxt.Arch.Family == sys.ARM { + // we use EABI on linux/arm, freebsd/arm, netbsd/arm. + if ctxt.HeadType == objabi.Hlinux || ctxt.HeadType == objabi.Hfreebsd || ctxt.HeadType == objabi.Hnetbsd { + // We set a value here that makes no indication of which + // float ABI the object uses, because this is information + // used by the dynamic linker to compare executables and + // shared libraries -- so it only matters for cgo calls, and + // the information properly comes from the object files + // produced by the host C compiler. parseArmAttributes in + // ldelf.go reads that information and updates this field as + // appropriate. 
+ ehdr.Flags = 0x5000002 // has entry point, Version5 EABI + } + } else if ctxt.Arch.Family == sys.MIPS { + ehdr.Flags = 0x50001004 /* MIPS 32 CPIC O32*/ + } + fallthrough + default: + ehdr.Phoff = ELF32HDRSIZE + /* Must be ELF32HDRSIZE: first PHdr must follow ELF header */ + ehdr.Shoff = ELF32HDRSIZE /* Will move as we add PHeaders */ + ehdr.Ehsize = ELF32HDRSIZE /* Must be ELF32HDRSIZE */ + ehdr.Phentsize = ELF32PHDRSIZE /* Must be ELF32PHDRSIZE */ + ehdr.Shentsize = ELF32SHDRSIZE /* Must be ELF32SHDRSIZE */ + } +} + +// Make sure PT_LOAD is aligned properly and +// that there is no gap, +// correct ELF loaders will do this implicitly, +// but buggy ELF loaders like the one in some +// versions of QEMU and UPX won't. +func fixElfPhdr(e *ElfPhdr) { + frag := int(e.Vaddr & (e.Align - 1)) + + e.Off -= uint64(frag) + e.Vaddr -= uint64(frag) + e.Paddr -= uint64(frag) + e.Filesz += uint64(frag) + e.Memsz += uint64(frag) +} + +func elf64phdr(out *OutBuf, e *ElfPhdr) { + if e.Type == elf.PT_LOAD { + fixElfPhdr(e) + } + + out.Write32(uint32(e.Type)) + out.Write32(uint32(e.Flags)) + out.Write64(e.Off) + out.Write64(e.Vaddr) + out.Write64(e.Paddr) + out.Write64(e.Filesz) + out.Write64(e.Memsz) + out.Write64(e.Align) +} + +func elf32phdr(out *OutBuf, e *ElfPhdr) { + if e.Type == elf.PT_LOAD { + fixElfPhdr(e) + } + + out.Write32(uint32(e.Type)) + out.Write32(uint32(e.Off)) + out.Write32(uint32(e.Vaddr)) + out.Write32(uint32(e.Paddr)) + out.Write32(uint32(e.Filesz)) + out.Write32(uint32(e.Memsz)) + out.Write32(uint32(e.Flags)) + out.Write32(uint32(e.Align)) +} + +func elf64shdr(out *OutBuf, e *ElfShdr) { + out.Write32(e.Name) + out.Write32(uint32(e.Type)) + out.Write64(uint64(e.Flags)) + out.Write64(e.Addr) + out.Write64(e.Off) + out.Write64(e.Size) + out.Write32(e.Link) + out.Write32(e.Info) + out.Write64(e.Addralign) + out.Write64(e.Entsize) +} + +func elf32shdr(out *OutBuf, e *ElfShdr) { + out.Write32(e.Name) + out.Write32(uint32(e.Type)) + out.Write32(uint32(e.Flags)) + 
out.Write32(uint32(e.Addr)) + out.Write32(uint32(e.Off)) + out.Write32(uint32(e.Size)) + out.Write32(e.Link) + out.Write32(e.Info) + out.Write32(uint32(e.Addralign)) + out.Write32(uint32(e.Entsize)) +} + +func elfwriteshdrs(out *OutBuf) uint32 { + if elf64 { + for i := 0; i < int(ehdr.Shnum); i++ { + elf64shdr(out, shdr[i]) + } + return uint32(ehdr.Shnum) * ELF64SHDRSIZE + } + + for i := 0; i < int(ehdr.Shnum); i++ { + elf32shdr(out, shdr[i]) + } + return uint32(ehdr.Shnum) * ELF32SHDRSIZE +} + +func elfsetstring(ctxt *Link, s loader.Sym, str string, off int) { + if nelfstr >= len(elfstr) { + ctxt.Errorf(s, "too many elf strings") + errorexit() + } + + elfstr[nelfstr].s = str + elfstr[nelfstr].off = off + nelfstr++ +} + +func elfwritephdrs(out *OutBuf) uint32 { + if elf64 { + for i := 0; i < int(ehdr.Phnum); i++ { + elf64phdr(out, phdr[i]) + } + return uint32(ehdr.Phnum) * ELF64PHDRSIZE + } + + for i := 0; i < int(ehdr.Phnum); i++ { + elf32phdr(out, phdr[i]) + } + return uint32(ehdr.Phnum) * ELF32PHDRSIZE +} + +func newElfPhdr() *ElfPhdr { + e := new(ElfPhdr) + if ehdr.Phnum >= NSECT { + Errorf(nil, "too many phdrs") + } else { + phdr[ehdr.Phnum] = e + ehdr.Phnum++ + } + if elf64 { + ehdr.Shoff += ELF64PHDRSIZE + } else { + ehdr.Shoff += ELF32PHDRSIZE + } + return e +} + +func newElfShdr(name int64) *ElfShdr { + e := new(ElfShdr) + e.Name = uint32(name) + e.shnum = elf.SectionIndex(ehdr.Shnum) + if ehdr.Shnum >= NSECT { + Errorf(nil, "too many shdrs") + } else { + shdr[ehdr.Shnum] = e + ehdr.Shnum++ + } + + return e +} + +func getElfEhdr() *ElfEhdr { + return &ehdr +} + +func elf64writehdr(out *OutBuf) uint32 { + out.Write(ehdr.Ident[:]) + out.Write16(uint16(ehdr.Type)) + out.Write16(uint16(ehdr.Machine)) + out.Write32(uint32(ehdr.Version)) + out.Write64(ehdr.Entry) + out.Write64(ehdr.Phoff) + out.Write64(ehdr.Shoff) + out.Write32(ehdr.Flags) + out.Write16(ehdr.Ehsize) + out.Write16(ehdr.Phentsize) + out.Write16(ehdr.Phnum) + out.Write16(ehdr.Shentsize) + 
// elfhash computes the standard ELF symbol hash of name, as given in
// the definition document for ELF64 (the classic System V hash,
// restricted to 28 bits).
func elfhash(name string) uint32 {
	var h uint32
	for _, c := range []byte(name) {
		h = h<<4 + uint32(c)
		g := h & 0xf0000000
		if g != 0 {
			h ^= g >> 24
		}
		h &= 0x0fffffff
	}
	return h
}
sh.Off = resoff - uint64(n) + sh.Size = uint64(n) + + return n +} + +func elfwriteinterp(out *OutBuf) int { + sh := elfshname(".interp") + out.SeekSet(int64(sh.Off)) + out.WriteString(interp) + out.Write8(0) + return int(sh.Size) +} + +// member of .gnu.attributes of MIPS for fpAbi +const ( + // No floating point is present in the module (default) + MIPS_FPABI_NONE = 0 + // FP code in the module uses the FP32 ABI for a 32-bit ABI + MIPS_FPABI_ANY = 1 + // FP code in the module only uses single precision ABI + MIPS_FPABI_SINGLE = 2 + // FP code in the module uses soft-float ABI + MIPS_FPABI_SOFT = 3 + // FP code in the module assumes an FPU with FR=1 and has 12 + // callee-saved doubles. Historic, no longer supported. + MIPS_FPABI_HIST = 4 + // FP code in the module uses the FPXX ABI + MIPS_FPABI_FPXX = 5 + // FP code in the module uses the FP64 ABI + MIPS_FPABI_FP64 = 6 + // FP code in the module uses the FP64A ABI + MIPS_FPABI_FP64A = 7 +) + +func elfMipsAbiFlags(sh *ElfShdr, startva uint64, resoff uint64) int { + n := 24 + sh.Addr = startva + resoff - uint64(n) + sh.Off = resoff - uint64(n) + sh.Size = uint64(n) + sh.Type = uint32(elf.SHT_MIPS_ABIFLAGS) + sh.Flags = uint64(elf.SHF_ALLOC) + + return n +} + +// Layout is given by this C definition: +// +// typedef struct +// { +// /* Version of flags structure. */ +// uint16_t version; +// /* The level of the ISA: 1-5, 32, 64. */ +// uint8_t isa_level; +// /* The revision of ISA: 0 for MIPS V and below, 1-n otherwise. */ +// uint8_t isa_rev; +// /* The size of general purpose registers. */ +// uint8_t gpr_size; +// /* The size of co-processor 1 registers. */ +// uint8_t cpr1_size; +// /* The size of co-processor 2 registers. */ +// uint8_t cpr2_size; +// /* The floating-point ABI. */ +// uint8_t fp_abi; +// /* Processor-specific extension. */ +// uint32_t isa_ext; +// /* Mask of ASEs used. */ +// uint32_t ases; +// /* Mask of general flags. 
*/ +// uint32_t flags1; +// uint32_t flags2; +// } Elf_Internal_ABIFlags_v0; +func elfWriteMipsAbiFlags(ctxt *Link) int { + sh := elfshname(".MIPS.abiflags") + ctxt.Out.SeekSet(int64(sh.Off)) + ctxt.Out.Write16(0) // version + ctxt.Out.Write8(32) // isaLevel + ctxt.Out.Write8(1) // isaRev + ctxt.Out.Write8(1) // gprSize + ctxt.Out.Write8(1) // cpr1Size + ctxt.Out.Write8(0) // cpr2Size + if buildcfg.GOMIPS == "softfloat" { + ctxt.Out.Write8(MIPS_FPABI_SOFT) // fpAbi + } else { + // Go cannot make sure non odd-number-fpr is used (ie, in load a double from memory). + // So, we mark the object is MIPS I style paired float/double register scheme, + // aka MIPS_FPABI_ANY. If we mark the object as FPXX, the kernel may use FR=1 mode, + // then we meet some problem. + // Note: MIPS_FPABI_ANY is bad naming: in fact it is MIPS I style FPR usage. + // It is not for 'ANY'. + // TODO: switch to FPXX after be sure that no odd-number-fpr is used. + ctxt.Out.Write8(MIPS_FPABI_ANY) // fpAbi + } + ctxt.Out.Write32(0) // isaExt + ctxt.Out.Write32(0) // ases + ctxt.Out.Write32(0) // flags1 + ctxt.Out.Write32(0) // flags2 + return int(sh.Size) +} + +func elfnote(sh *ElfShdr, startva uint64, resoff uint64, sizes ...int) int { + n := resoff % 4 + // if section contains multiple notes (as is the case with FreeBSD signature), + // multiple note sizes can be specified + for _, sz := range sizes { + n += 3*4 + uint64(sz) + } + + sh.Type = uint32(elf.SHT_NOTE) + sh.Flags = uint64(elf.SHF_ALLOC) + sh.Addralign = 4 + sh.Addr = startva + resoff - n + sh.Off = resoff - n + sh.Size = n - resoff%4 + + return int(n) +} + +func elfwritenotehdr(out *OutBuf, str string, namesz uint32, descsz uint32, tag uint32) *ElfShdr { + sh := elfshname(str) + + // Write Elf_Note header. 
+ out.SeekSet(int64(sh.Off)) + + out.Write32(namesz) + out.Write32(descsz) + out.Write32(tag) + + return sh +} + +// NetBSD Signature (as per sys/exec_elf.h) +const ( + ELF_NOTE_NETBSD_NAMESZ = 7 + ELF_NOTE_NETBSD_DESCSZ = 4 + ELF_NOTE_NETBSD_TAG = 1 + ELF_NOTE_NETBSD_VERSION = 700000000 /* NetBSD 7.0 */ +) + +var ELF_NOTE_NETBSD_NAME = []byte("NetBSD\x00") + +func elfnetbsdsig(sh *ElfShdr, startva uint64, resoff uint64) int { + n := int(Rnd(ELF_NOTE_NETBSD_NAMESZ, 4) + Rnd(ELF_NOTE_NETBSD_DESCSZ, 4)) + return elfnote(sh, startva, resoff, n) +} + +func elfwritenetbsdsig(out *OutBuf) int { + // Write Elf_Note header. + sh := elfwritenotehdr(out, ".note.netbsd.ident", ELF_NOTE_NETBSD_NAMESZ, ELF_NOTE_NETBSD_DESCSZ, ELF_NOTE_NETBSD_TAG) + + if sh == nil { + return 0 + } + + // Followed by NetBSD string and version. + out.Write(ELF_NOTE_NETBSD_NAME) + out.Write8(0) + out.Write32(ELF_NOTE_NETBSD_VERSION) + + return int(sh.Size) +} + +// The race detector can't handle ASLR (address space layout randomization). +// ASLR is on by default for NetBSD, so we turn the ASLR off explicitly +// using a magic elf Note when building race binaries. 
+ +func elfnetbsdpax(sh *ElfShdr, startva uint64, resoff uint64) int { + n := int(Rnd(4, 4) + Rnd(4, 4)) + return elfnote(sh, startva, resoff, n) +} + +func elfwritenetbsdpax(out *OutBuf) int { + sh := elfwritenotehdr(out, ".note.netbsd.pax", 4 /* length of PaX\x00 */, 4 /* length of flags */, 0x03 /* PaX type */) + if sh == nil { + return 0 + } + out.Write([]byte("PaX\x00")) + out.Write32(0x20) // 0x20 = Force disable ASLR + return int(sh.Size) +} + +// OpenBSD Signature +const ( + ELF_NOTE_OPENBSD_NAMESZ = 8 + ELF_NOTE_OPENBSD_DESCSZ = 4 + ELF_NOTE_OPENBSD_TAG = 1 + ELF_NOTE_OPENBSD_VERSION = 0 +) + +var ELF_NOTE_OPENBSD_NAME = []byte("OpenBSD\x00") + +func elfopenbsdsig(sh *ElfShdr, startva uint64, resoff uint64) int { + n := ELF_NOTE_OPENBSD_NAMESZ + ELF_NOTE_OPENBSD_DESCSZ + return elfnote(sh, startva, resoff, n) +} + +func elfwriteopenbsdsig(out *OutBuf) int { + // Write Elf_Note header. + sh := elfwritenotehdr(out, ".note.openbsd.ident", ELF_NOTE_OPENBSD_NAMESZ, ELF_NOTE_OPENBSD_DESCSZ, ELF_NOTE_OPENBSD_TAG) + + if sh == nil { + return 0 + } + + // Followed by OpenBSD string and version. + out.Write(ELF_NOTE_OPENBSD_NAME) + + out.Write32(ELF_NOTE_OPENBSD_VERSION) + + return int(sh.Size) +} + +// FreeBSD Signature (as per sys/elf_common.h) +const ( + ELF_NOTE_FREEBSD_NAMESZ = 8 + ELF_NOTE_FREEBSD_DESCSZ = 4 + ELF_NOTE_FREEBSD_ABI_TAG = 1 + ELF_NOTE_FREEBSD_NOINIT_TAG = 2 + ELF_NOTE_FREEBSD_FEATURE_CTL_TAG = 4 + ELF_NOTE_FREEBSD_VERSION = 1203000 // 12.3-RELEASE + ELF_NOTE_FREEBSD_FCTL_ASLR_DISABLE = 0x1 +) + +const ELF_NOTE_FREEBSD_NAME = "FreeBSD\x00" + +func elffreebsdsig(sh *ElfShdr, startva uint64, resoff uint64) int { + n := ELF_NOTE_FREEBSD_NAMESZ + ELF_NOTE_FREEBSD_DESCSZ + // FreeBSD signature section contains 3 equally sized notes + return elfnote(sh, startva, resoff, n, n, n) +} + +// elfwritefreebsdsig writes FreeBSD .note section. 
+// +// See https://www.netbsd.org/docs/kernel/elf-notes.html for the description of +// a Note element format and +// https://github.com/freebsd/freebsd-src/blob/main/sys/sys/elf_common.h#L790 +// for the FreeBSD-specific values. +func elfwritefreebsdsig(out *OutBuf) int { + sh := elfshname(".note.tag") + if sh == nil { + return 0 + } + out.SeekSet(int64(sh.Off)) + + // NT_FREEBSD_ABI_TAG + out.Write32(ELF_NOTE_FREEBSD_NAMESZ) + out.Write32(ELF_NOTE_FREEBSD_DESCSZ) + out.Write32(ELF_NOTE_FREEBSD_ABI_TAG) + out.WriteString(ELF_NOTE_FREEBSD_NAME) + out.Write32(ELF_NOTE_FREEBSD_VERSION) + + // NT_FREEBSD_NOINIT_TAG + out.Write32(ELF_NOTE_FREEBSD_NAMESZ) + out.Write32(ELF_NOTE_FREEBSD_DESCSZ) + out.Write32(ELF_NOTE_FREEBSD_NOINIT_TAG) + out.WriteString(ELF_NOTE_FREEBSD_NAME) + out.Write32(0) + + // NT_FREEBSD_FEATURE_CTL + out.Write32(ELF_NOTE_FREEBSD_NAMESZ) + out.Write32(ELF_NOTE_FREEBSD_DESCSZ) + out.Write32(ELF_NOTE_FREEBSD_FEATURE_CTL_TAG) + out.WriteString(ELF_NOTE_FREEBSD_NAME) + if *flagRace { + // The race detector can't handle ASLR, turn the ASLR off when compiling with -race. 
+ out.Write32(ELF_NOTE_FREEBSD_FCTL_ASLR_DISABLE) + } else { + out.Write32(0) + } + + return int(sh.Size) +} + +func addbuildinfo(val string) { + if val == "gobuildid" { + buildID := *flagBuildid + if buildID == "" { + Exitf("-B gobuildid requires a Go build ID supplied via -buildid") + } + + hashedBuildID := notsha256.Sum256([]byte(buildID)) + buildinfo = hashedBuildID[:20] + + return + } + + if !strings.HasPrefix(val, "0x") { + Exitf("-B argument must start with 0x: %s", val) + } + + ov := val + val = val[2:] + + const maxLen = 32 + if hex.DecodedLen(len(val)) > maxLen { + Exitf("-B option too long (max %d digits): %s", maxLen, ov) + } + + b, err := hex.DecodeString(val) + if err != nil { + if err == hex.ErrLength { + Exitf("-B argument must have even number of digits: %s", ov) + } + if inv, ok := err.(hex.InvalidByteError); ok { + Exitf("-B argument contains invalid hex digit %c: %s", byte(inv), ov) + } + Exitf("-B argument contains invalid hex: %s", ov) + } + + buildinfo = b +} + +// Build info note +const ( + ELF_NOTE_BUILDINFO_NAMESZ = 4 + ELF_NOTE_BUILDINFO_TAG = 3 +) + +var ELF_NOTE_BUILDINFO_NAME = []byte("GNU\x00") + +func elfbuildinfo(sh *ElfShdr, startva uint64, resoff uint64) int { + n := int(ELF_NOTE_BUILDINFO_NAMESZ + Rnd(int64(len(buildinfo)), 4)) + return elfnote(sh, startva, resoff, n) +} + +func elfgobuildid(sh *ElfShdr, startva uint64, resoff uint64) int { + n := len(ELF_NOTE_GO_NAME) + int(Rnd(int64(len(*flagBuildid)), 4)) + return elfnote(sh, startva, resoff, n) +} + +func elfwritebuildinfo(out *OutBuf) int { + sh := elfwritenotehdr(out, ".note.gnu.build-id", ELF_NOTE_BUILDINFO_NAMESZ, uint32(len(buildinfo)), ELF_NOTE_BUILDINFO_TAG) + if sh == nil { + return 0 + } + + out.Write(ELF_NOTE_BUILDINFO_NAME) + out.Write(buildinfo) + var zero = make([]byte, 4) + out.Write(zero[:int(Rnd(int64(len(buildinfo)), 4)-int64(len(buildinfo)))]) + + return int(sh.Size) +} + +func elfwritegobuildid(out *OutBuf) int { + sh := elfwritenotehdr(out, 
// Elfaux is one entry in the list of version strings required from a
// single shared library (a GNU verneed auxiliary record).
type Elfaux struct {
	next *Elfaux
	num  int    // version index, assigned when .gnu.version_r is built in elfdynhash
	vers string // version string, e.g. "GLIBC_2.2.5"
}

// Elflib heads the per-library list: the library file name plus the
// versions needed from it.
type Elflib struct {
	next *Elflib
	aux  *Elfaux
	file string
}

// addelflib records that version vers of library file is needed and
// returns the (possibly pre-existing) Elfaux entry for that
// file/version pair. New libraries and new versions are pushed onto
// the front of their respective lists.
func addelflib(list **Elflib, file string, vers string) *Elfaux {
	// Find the library entry, creating it if this is a new file.
	var lib *Elflib
	for l := *list; l != nil; l = l.next {
		if l.file == file {
			lib = l
			break
		}
	}
	if lib == nil {
		lib = &Elflib{next: *list, file: file}
		*list = lib
	}

	// Reuse an existing aux entry for this version if there is one.
	for aux := lib.aux; aux != nil; aux = aux.next {
		if aux.vers == vers {
			return aux
		}
	}
	aux := &Elfaux{next: lib.aux, vers: vers}
	lib.aux = aux
	return aux
}
s.AddUint64(ctxt.Arch, uint64(nsym)) + for i := 0; i < nbucket; i++ { + s.AddUint64(ctxt.Arch, uint64(buckets[i])) + } + for i := 0; i < nsym; i++ { + s.AddUint64(ctxt.Arch, uint64(chain[i])) + } + } else { + s.AddUint32(ctxt.Arch, uint32(nbucket)) + s.AddUint32(ctxt.Arch, uint32(nsym)) + for i := 0; i < nbucket; i++ { + s.AddUint32(ctxt.Arch, buckets[i]) + } + for i := 0; i < nsym; i++ { + s.AddUint32(ctxt.Arch, chain[i]) + } + } + + dynstr := ldr.CreateSymForUpdate(".dynstr", 0) + + // version symbols + gnuVersionR := ldr.CreateSymForUpdate(".gnu.version_r", 0) + s = gnuVersionR + i = 2 + nfile := 0 + for l := needlib; l != nil; l = l.next { + nfile++ + + // header + s.AddUint16(ctxt.Arch, 1) // table version + j := 0 + for x := l.aux; x != nil; x = x.next { + j++ + } + s.AddUint16(ctxt.Arch, uint16(j)) // aux count + s.AddUint32(ctxt.Arch, uint32(dynstr.Addstring(l.file))) // file string offset + s.AddUint32(ctxt.Arch, 16) // offset from header to first aux + if l.next != nil { + s.AddUint32(ctxt.Arch, 16+uint32(j)*16) // offset from this header to next + } else { + s.AddUint32(ctxt.Arch, 0) + } + + for x := l.aux; x != nil; x = x.next { + x.num = i + i++ + + // aux struct + s.AddUint32(ctxt.Arch, elfhash(x.vers)) // hash + s.AddUint16(ctxt.Arch, 0) // flags + s.AddUint16(ctxt.Arch, uint16(x.num)) // other - index we refer to this by + s.AddUint32(ctxt.Arch, uint32(dynstr.Addstring(x.vers))) // version string offset + if x.next != nil { + s.AddUint32(ctxt.Arch, 16) // offset from this aux to next + } else { + s.AddUint32(ctxt.Arch, 0) + } + } + } + + // version references + gnuVersion := ldr.CreateSymForUpdate(".gnu.version", 0) + s = gnuVersion + + for i := 0; i < nsym; i++ { + if i == 0 { + s.AddUint16(ctxt.Arch, 0) // first entry - no symbol + } else if need[i] == nil { + s.AddUint16(ctxt.Arch, 1) // global + } else { + s.AddUint16(ctxt.Arch, uint16(need[i].num)) + } + } + + s = ldr.CreateSymForUpdate(".dynamic", 0) + if ctxt.BuildMode == BuildModePIE { + // 
https://github.com/bminor/glibc/blob/895ef79e04a953cac1493863bcae29ad85657ee1/elf/elf.h#L986 + const DTFLAGS_1_PIE = 0x08000000 + Elfwritedynent(ctxt.Arch, s, elf.DT_FLAGS_1, uint64(DTFLAGS_1_PIE)) + } + elfverneed = nfile + if elfverneed != 0 { + elfWriteDynEntSym(ctxt, s, elf.DT_VERNEED, gnuVersionR.Sym()) + Elfwritedynent(ctxt.Arch, s, elf.DT_VERNEEDNUM, uint64(nfile)) + elfWriteDynEntSym(ctxt, s, elf.DT_VERSYM, gnuVersion.Sym()) + } + + sy := ldr.CreateSymForUpdate(elfRelType+".plt", 0) + if sy.Size() > 0 { + if elfRelType == ".rela" { + Elfwritedynent(ctxt.Arch, s, elf.DT_PLTREL, uint64(elf.DT_RELA)) + } else { + Elfwritedynent(ctxt.Arch, s, elf.DT_PLTREL, uint64(elf.DT_REL)) + } + elfwritedynentsymsize(ctxt, s, elf.DT_PLTRELSZ, sy.Sym()) + elfWriteDynEntSym(ctxt, s, elf.DT_JMPREL, sy.Sym()) + } + + Elfwritedynent(ctxt.Arch, s, elf.DT_NULL, 0) +} + +func elfphload(seg *sym.Segment) *ElfPhdr { + ph := newElfPhdr() + ph.Type = elf.PT_LOAD + if seg.Rwx&4 != 0 { + ph.Flags |= elf.PF_R + } + if seg.Rwx&2 != 0 { + ph.Flags |= elf.PF_W + } + if seg.Rwx&1 != 0 { + ph.Flags |= elf.PF_X + } + ph.Vaddr = seg.Vaddr + ph.Paddr = seg.Vaddr + ph.Memsz = seg.Length + ph.Off = seg.Fileoff + ph.Filesz = seg.Filelen + ph.Align = uint64(*FlagRound) + + return ph +} + +func elfphrelro(seg *sym.Segment) { + ph := newElfPhdr() + ph.Type = elf.PT_GNU_RELRO + ph.Vaddr = seg.Vaddr + ph.Paddr = seg.Vaddr + ph.Memsz = seg.Length + ph.Off = seg.Fileoff + ph.Filesz = seg.Filelen + ph.Align = uint64(*FlagRound) +} + +func elfshname(name string) *ElfShdr { + for i := 0; i < nelfstr; i++ { + if name != elfstr[i].s { + continue + } + off := elfstr[i].off + for i = 0; i < int(ehdr.Shnum); i++ { + sh := shdr[i] + if sh.Name == uint32(off) { + return sh + } + } + return newElfShdr(int64(off)) + } + Exitf("cannot find elf name %s", name) + return nil +} + +// Create an ElfShdr for the section with name. +// Create a duplicate if one already exists with that name. 
+func elfshnamedup(name string) *ElfShdr { + for i := 0; i < nelfstr; i++ { + if name == elfstr[i].s { + off := elfstr[i].off + return newElfShdr(int64(off)) + } + } + + Errorf(nil, "cannot find elf name %s", name) + errorexit() + return nil +} + +func elfshalloc(sect *sym.Section) *ElfShdr { + sh := elfshname(sect.Name) + sect.Elfsect = sh + return sh +} + +func elfshbits(linkmode LinkMode, sect *sym.Section) *ElfShdr { + var sh *ElfShdr + + if sect.Name == ".text" { + if sect.Elfsect == nil { + sect.Elfsect = elfshnamedup(sect.Name) + } + sh = sect.Elfsect.(*ElfShdr) + } else { + sh = elfshalloc(sect) + } + + // If this section has already been set up as a note, we assume type_ and + // flags are already correct, but the other fields still need filling in. + if sh.Type == uint32(elf.SHT_NOTE) { + if linkmode != LinkExternal { + // TODO(mwhudson): the approach here will work OK when + // linking internally for notes that we want to be included + // in a loadable segment (e.g. the abihash note) but not for + // notes that we do not want to be mapped (e.g. the package + // list note). The real fix is probably to define new values + // for Symbol.Type corresponding to mapped and unmapped notes + // and handle them in dodata(). 
+ Errorf(nil, "sh.Type == SHT_NOTE in elfshbits when linking internally") + } + sh.Addralign = uint64(sect.Align) + sh.Size = sect.Length + sh.Off = sect.Seg.Fileoff + sect.Vaddr - sect.Seg.Vaddr + return sh + } + if sh.Type > 0 { + return sh + } + + if sect.Vaddr < sect.Seg.Vaddr+sect.Seg.Filelen { + switch sect.Name { + case ".init_array": + sh.Type = uint32(elf.SHT_INIT_ARRAY) + default: + sh.Type = uint32(elf.SHT_PROGBITS) + } + } else { + sh.Type = uint32(elf.SHT_NOBITS) + } + sh.Flags = uint64(elf.SHF_ALLOC) + if sect.Rwx&1 != 0 { + sh.Flags |= uint64(elf.SHF_EXECINSTR) + } + if sect.Rwx&2 != 0 { + sh.Flags |= uint64(elf.SHF_WRITE) + } + if sect.Name == ".tbss" { + sh.Flags |= uint64(elf.SHF_TLS) + sh.Type = uint32(elf.SHT_NOBITS) + } + if linkmode != LinkExternal { + sh.Addr = sect.Vaddr + } + + if strings.HasPrefix(sect.Name, ".debug") || strings.HasPrefix(sect.Name, ".zdebug") { + sh.Flags = 0 + sh.Addr = 0 + if sect.Compressed { + sh.Flags |= uint64(elf.SHF_COMPRESSED) + } + } + + sh.Addralign = uint64(sect.Align) + sh.Size = sect.Length + if sect.Name != ".tbss" { + sh.Off = sect.Seg.Fileoff + sect.Vaddr - sect.Seg.Vaddr + } + + return sh +} + +func elfshreloc(arch *sys.Arch, sect *sym.Section) *ElfShdr { + // If main section is SHT_NOBITS, nothing to relocate. + // Also nothing to relocate in .shstrtab or notes. + if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen { + return nil + } + if sect.Name == ".shstrtab" || sect.Name == ".tbss" { + return nil + } + if sect.Elfsect.(*ElfShdr).Type == uint32(elf.SHT_NOTE) { + return nil + } + + typ := elf.SHT_REL + if elfRelType == ".rela" { + typ = elf.SHT_RELA + } + + sh := elfshname(elfRelType + sect.Name) + // There could be multiple text sections but each needs + // its own .rela.text. 
+ + if sect.Name == ".text" { + if sh.Info != 0 && sh.Info != uint32(sect.Elfsect.(*ElfShdr).shnum) { + sh = elfshnamedup(elfRelType + sect.Name) + } + } + + sh.Type = uint32(typ) + sh.Entsize = uint64(arch.RegSize) * 2 + if typ == elf.SHT_RELA { + sh.Entsize += uint64(arch.RegSize) + } + sh.Link = uint32(elfshname(".symtab").shnum) + sh.Info = uint32(sect.Elfsect.(*ElfShdr).shnum) + sh.Off = sect.Reloff + sh.Size = sect.Rellen + sh.Addralign = uint64(arch.RegSize) + return sh +} + +func elfrelocsect(ctxt *Link, out *OutBuf, sect *sym.Section, syms []loader.Sym) { + // If main section is SHT_NOBITS, nothing to relocate. + // Also nothing to relocate in .shstrtab. + if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen { + return + } + if sect.Name == ".shstrtab" { + return + } + + ldr := ctxt.loader + for i, s := range syms { + if !ldr.AttrReachable(s) { + panic("should never happen") + } + if uint64(ldr.SymValue(s)) >= sect.Vaddr { + syms = syms[i:] + break + } + } + + eaddr := sect.Vaddr + sect.Length + for _, s := range syms { + if !ldr.AttrReachable(s) { + continue + } + if ldr.SymValue(s) >= int64(eaddr) { + break + } + + // Compute external relocations on the go, and pass to + // ELF.Reloc1 to stream out. 
+ relocs := ldr.Relocs(s) + for ri := 0; ri < relocs.Count(); ri++ { + r := relocs.At(ri) + rr, ok := extreloc(ctxt, ldr, s, r) + if !ok { + continue + } + if rr.Xsym == 0 { + ldr.Errorf(s, "missing xsym in relocation") + continue + } + esr := ElfSymForReloc(ctxt, rr.Xsym) + if esr == 0 { + ldr.Errorf(s, "reloc %d (%s) to non-elf symbol %s (outer=%s) %d (%s)", r.Type(), sym.RelocName(ctxt.Arch, r.Type()), ldr.SymName(r.Sym()), ldr.SymName(rr.Xsym), ldr.SymType(r.Sym()), ldr.SymType(r.Sym()).String()) + } + if !ldr.AttrReachable(rr.Xsym) { + ldr.Errorf(s, "unreachable reloc %d (%s) target %v", r.Type(), sym.RelocName(ctxt.Arch, r.Type()), ldr.SymName(rr.Xsym)) + } + if !thearch.ELF.Reloc1(ctxt, out, ldr, s, rr, ri, int64(uint64(ldr.SymValue(s)+int64(r.Off()))-sect.Vaddr)) { + ldr.Errorf(s, "unsupported obj reloc %d (%s)/%d to %s", r.Type(), sym.RelocName(ctxt.Arch, r.Type()), r.Siz(), ldr.SymName(r.Sym())) + } + } + } + + // sanity check + if uint64(out.Offset()) != sect.Reloff+sect.Rellen { + panic(fmt.Sprintf("elfrelocsect: size mismatch %d != %d + %d", out.Offset(), sect.Reloff, sect.Rellen)) + } +} + +func elfEmitReloc(ctxt *Link) { + for ctxt.Out.Offset()&7 != 0 { + ctxt.Out.Write8(0) + } + + sizeExtRelocs(ctxt, thearch.ELF.RelocSize) + relocSect, wg := relocSectFn(ctxt, elfrelocsect) + + for _, sect := range Segtext.Sections { + if sect.Name == ".text" { + relocSect(ctxt, sect, ctxt.Textp) + } else { + relocSect(ctxt, sect, ctxt.datap) + } + } + + for _, sect := range Segrodata.Sections { + relocSect(ctxt, sect, ctxt.datap) + } + for _, sect := range Segrelrodata.Sections { + relocSect(ctxt, sect, ctxt.datap) + } + for _, sect := range Segdata.Sections { + relocSect(ctxt, sect, ctxt.datap) + } + for i := 0; i < len(Segdwarf.Sections); i++ { + sect := Segdwarf.Sections[i] + si := dwarfp[i] + if si.secSym() != loader.Sym(sect.Sym) || + ctxt.loader.SymSect(si.secSym()) != sect { + panic("inconsistency between dwarfp and Segdwarf") + } + relocSect(ctxt, sect, 
si.syms) + } + wg.Wait() +} + +func addgonote(ctxt *Link, sectionName string, tag uint32, desc []byte) { + ldr := ctxt.loader + s := ldr.CreateSymForUpdate(sectionName, 0) + s.SetType(sym.SELFROSECT) + // namesz + s.AddUint32(ctxt.Arch, uint32(len(ELF_NOTE_GO_NAME))) + // descsz + s.AddUint32(ctxt.Arch, uint32(len(desc))) + // tag + s.AddUint32(ctxt.Arch, tag) + // name + padding + s.AddBytes(ELF_NOTE_GO_NAME) + for len(s.Data())%4 != 0 { + s.AddUint8(0) + } + // desc + padding + s.AddBytes(desc) + for len(s.Data())%4 != 0 { + s.AddUint8(0) + } + s.SetSize(int64(len(s.Data()))) + s.SetAlign(4) +} + +func (ctxt *Link) doelf() { + ldr := ctxt.loader + + /* predefine strings we need for section headers */ + + addshstr := func(s string) int { + off := len(elfshstrdat) + elfshstrdat = append(elfshstrdat, s...) + elfshstrdat = append(elfshstrdat, 0) + return off + } + + shstrtabAddstring := func(s string) { + off := addshstr(s) + elfsetstring(ctxt, 0, s, int(off)) + } + + shstrtabAddstring("") + shstrtabAddstring(".text") + shstrtabAddstring(".noptrdata") + shstrtabAddstring(".data") + shstrtabAddstring(".bss") + shstrtabAddstring(".noptrbss") + shstrtabAddstring(".go.fuzzcntrs") + shstrtabAddstring(".go.buildinfo") + if ctxt.IsMIPS() { + shstrtabAddstring(".MIPS.abiflags") + shstrtabAddstring(".gnu.attributes") + } + + // generate .tbss section for dynamic internal linker or external + // linking, so that various binutils could correctly calculate + // PT_TLS size. See https://golang.org/issue/5200. 
+ if !*FlagD || ctxt.IsExternal() { + shstrtabAddstring(".tbss") + } + if ctxt.IsNetbsd() { + shstrtabAddstring(".note.netbsd.ident") + if *flagRace { + shstrtabAddstring(".note.netbsd.pax") + } + } + if ctxt.IsOpenbsd() { + shstrtabAddstring(".note.openbsd.ident") + } + if ctxt.IsFreebsd() { + shstrtabAddstring(".note.tag") + } + if len(buildinfo) > 0 { + shstrtabAddstring(".note.gnu.build-id") + } + if *flagBuildid != "" { + shstrtabAddstring(".note.go.buildid") + } + shstrtabAddstring(".elfdata") + shstrtabAddstring(".rodata") + // See the comment about data.rel.ro.FOO section names in data.go. + relro_prefix := "" + if ctxt.UseRelro() { + shstrtabAddstring(".data.rel.ro") + relro_prefix = ".data.rel.ro" + } + shstrtabAddstring(relro_prefix + ".typelink") + shstrtabAddstring(relro_prefix + ".itablink") + shstrtabAddstring(relro_prefix + ".gosymtab") + shstrtabAddstring(relro_prefix + ".gopclntab") + + if ctxt.IsExternal() { + *FlagD = true + + shstrtabAddstring(elfRelType + ".text") + shstrtabAddstring(elfRelType + ".rodata") + shstrtabAddstring(elfRelType + relro_prefix + ".typelink") + shstrtabAddstring(elfRelType + relro_prefix + ".itablink") + shstrtabAddstring(elfRelType + relro_prefix + ".gosymtab") + shstrtabAddstring(elfRelType + relro_prefix + ".gopclntab") + shstrtabAddstring(elfRelType + ".noptrdata") + shstrtabAddstring(elfRelType + ".data") + if ctxt.UseRelro() { + shstrtabAddstring(elfRelType + ".data.rel.ro") + } + shstrtabAddstring(elfRelType + ".go.buildinfo") + if ctxt.IsMIPS() { + shstrtabAddstring(elfRelType + ".MIPS.abiflags") + shstrtabAddstring(elfRelType + ".gnu.attributes") + } + + // add a .note.GNU-stack section to mark the stack as non-executable + shstrtabAddstring(".note.GNU-stack") + + if ctxt.IsShared() { + shstrtabAddstring(".note.go.abihash") + shstrtabAddstring(".note.go.pkg-list") + shstrtabAddstring(".note.go.deps") + } + } + + hasinitarr := ctxt.linkShared + + /* shared library initializer */ + switch ctxt.BuildMode { + case 
BuildModeCArchive, BuildModeCShared, BuildModeShared, BuildModePlugin: + hasinitarr = true + } + + if hasinitarr { + shstrtabAddstring(".init_array") + shstrtabAddstring(elfRelType + ".init_array") + } + + if !*FlagS { + shstrtabAddstring(".symtab") + shstrtabAddstring(".strtab") + } + if !*FlagW { + dwarfaddshstrings(ctxt, shstrtabAddstring) + } + + shstrtabAddstring(".shstrtab") + + if !*FlagD { /* -d suppresses dynamic loader format */ + shstrtabAddstring(".interp") + shstrtabAddstring(".hash") + shstrtabAddstring(".got") + if ctxt.IsPPC64() { + shstrtabAddstring(".glink") + } + shstrtabAddstring(".got.plt") + shstrtabAddstring(".dynamic") + shstrtabAddstring(".dynsym") + shstrtabAddstring(".dynstr") + shstrtabAddstring(elfRelType) + shstrtabAddstring(elfRelType + ".plt") + + shstrtabAddstring(".plt") + shstrtabAddstring(".gnu.version") + shstrtabAddstring(".gnu.version_r") + + /* dynamic symbol table - first entry all zeros */ + dynsym := ldr.CreateSymForUpdate(".dynsym", 0) + + dynsym.SetType(sym.SELFROSECT) + if elf64 { + dynsym.SetSize(dynsym.Size() + ELF64SYMSIZE) + } else { + dynsym.SetSize(dynsym.Size() + ELF32SYMSIZE) + } + + /* dynamic string table */ + dynstr := ldr.CreateSymForUpdate(".dynstr", 0) + + dynstr.SetType(sym.SELFROSECT) + if dynstr.Size() == 0 { + dynstr.Addstring("") + } + + /* relocation table */ + s := ldr.CreateSymForUpdate(elfRelType, 0) + s.SetType(sym.SELFROSECT) + + /* global offset table */ + got := ldr.CreateSymForUpdate(".got", 0) + got.SetType(sym.SELFGOT) // writable + + /* ppc64 glink resolver */ + if ctxt.IsPPC64() { + s := ldr.CreateSymForUpdate(".glink", 0) + s.SetType(sym.SELFRXSECT) + } + + /* hash */ + hash := ldr.CreateSymForUpdate(".hash", 0) + hash.SetType(sym.SELFROSECT) + + gotplt := ldr.CreateSymForUpdate(".got.plt", 0) + gotplt.SetType(sym.SELFSECT) // writable + + plt := ldr.CreateSymForUpdate(".plt", 0) + if ctxt.IsPPC64() { + // In the ppc64 ABI, .plt is a data section + // written by the dynamic linker. 
+ plt.SetType(sym.SELFSECT) + } else { + plt.SetType(sym.SELFRXSECT) + } + + s = ldr.CreateSymForUpdate(elfRelType+".plt", 0) + s.SetType(sym.SELFROSECT) + + s = ldr.CreateSymForUpdate(".gnu.version", 0) + s.SetType(sym.SELFROSECT) + + s = ldr.CreateSymForUpdate(".gnu.version_r", 0) + s.SetType(sym.SELFROSECT) + + /* define dynamic elf table */ + dynamic := ldr.CreateSymForUpdate(".dynamic", 0) + if thearch.ELF.DynamicReadOnly { + dynamic.SetType(sym.SELFROSECT) + } else { + dynamic.SetType(sym.SELFSECT) + } + + if ctxt.IsS390X() { + // S390X uses .got instead of .got.plt + gotplt = got + } + thearch.ELF.SetupPLT(ctxt, ctxt.loader, plt, gotplt, dynamic.Sym()) + + /* + * .dynamic table + */ + elfWriteDynEntSym(ctxt, dynamic, elf.DT_HASH, hash.Sym()) + + elfWriteDynEntSym(ctxt, dynamic, elf.DT_SYMTAB, dynsym.Sym()) + if elf64 { + Elfwritedynent(ctxt.Arch, dynamic, elf.DT_SYMENT, ELF64SYMSIZE) + } else { + Elfwritedynent(ctxt.Arch, dynamic, elf.DT_SYMENT, ELF32SYMSIZE) + } + elfWriteDynEntSym(ctxt, dynamic, elf.DT_STRTAB, dynstr.Sym()) + elfwritedynentsymsize(ctxt, dynamic, elf.DT_STRSZ, dynstr.Sym()) + if elfRelType == ".rela" { + rela := ldr.LookupOrCreateSym(".rela", 0) + elfWriteDynEntSym(ctxt, dynamic, elf.DT_RELA, rela) + elfwritedynentsymsize(ctxt, dynamic, elf.DT_RELASZ, rela) + Elfwritedynent(ctxt.Arch, dynamic, elf.DT_RELAENT, ELF64RELASIZE) + } else { + rel := ldr.LookupOrCreateSym(".rel", 0) + elfWriteDynEntSym(ctxt, dynamic, elf.DT_REL, rel) + elfwritedynentsymsize(ctxt, dynamic, elf.DT_RELSZ, rel) + Elfwritedynent(ctxt.Arch, dynamic, elf.DT_RELENT, ELF32RELSIZE) + } + + if rpath.val != "" { + Elfwritedynent(ctxt.Arch, dynamic, elf.DT_RUNPATH, uint64(dynstr.Addstring(rpath.val))) + } + + if ctxt.IsPPC64() { + elfWriteDynEntSym(ctxt, dynamic, elf.DT_PLTGOT, plt.Sym()) + } else { + elfWriteDynEntSym(ctxt, dynamic, elf.DT_PLTGOT, gotplt.Sym()) + } + + if ctxt.IsPPC64() { + Elfwritedynent(ctxt.Arch, dynamic, elf.DT_PPC64_OPT, 0) + } + + // Solaris dynamic 
linker can't handle an empty .rela.plt if + // DT_JMPREL is emitted so we have to defer generation of elf.DT_PLTREL, + // DT_PLTRELSZ, and elf.DT_JMPREL dynamic entries until after we know the + // size of .rel(a).plt section. + + Elfwritedynent(ctxt.Arch, dynamic, elf.DT_DEBUG, 0) + } + + if ctxt.IsShared() { + // The go.link.abihashbytes symbol will be pointed at the appropriate + // part of the .note.go.abihash section in data.go:func address(). + s := ldr.LookupOrCreateSym("go:link.abihashbytes", 0) + sb := ldr.MakeSymbolUpdater(s) + ldr.SetAttrLocal(s, true) + sb.SetType(sym.SRODATA) + ldr.SetAttrSpecial(s, true) + sb.SetReachable(true) + sb.SetSize(notsha256.Size) + + sort.Sort(byPkg(ctxt.Library)) + h := notsha256.New() + for _, l := range ctxt.Library { + h.Write(l.Fingerprint[:]) + } + addgonote(ctxt, ".note.go.abihash", ELF_NOTE_GOABIHASH_TAG, h.Sum([]byte{})) + addgonote(ctxt, ".note.go.pkg-list", ELF_NOTE_GOPKGLIST_TAG, pkglistfornote) + var deplist []string + for _, shlib := range ctxt.Shlibs { + deplist = append(deplist, filepath.Base(shlib.Path)) + } + addgonote(ctxt, ".note.go.deps", ELF_NOTE_GODEPS_TAG, []byte(strings.Join(deplist, "\n"))) + } + + if ctxt.LinkMode == LinkExternal && *flagBuildid != "" { + addgonote(ctxt, ".note.go.buildid", ELF_NOTE_GOBUILDID_TAG, []byte(*flagBuildid)) + } + + //type mipsGnuAttributes struct { + // version uint8 // 'A' + // length uint32 // 15 including itself + // gnu [4]byte // "gnu\0" + // tag uint8 // 1:file, 2: section, 3: symbol, 1 here + // taglen uint32 // tag length, including tag, 7 here + // tagfp uint8 // 4 + // fpAbi uint8 // see .MIPS.abiflags + //} + if ctxt.IsMIPS() { + gnuattributes := ldr.CreateSymForUpdate(".gnu.attributes", 0) + gnuattributes.SetType(sym.SELFROSECT) + gnuattributes.SetReachable(true) + gnuattributes.AddUint8('A') // version 'A' + gnuattributes.AddUint32(ctxt.Arch, 15) // length 15 including itself + gnuattributes.AddBytes([]byte("gnu\x00")) // "gnu\0" + gnuattributes.AddUint8(1) 
// 1:file, 2: section, 3: symbol, 1 here + gnuattributes.AddUint32(ctxt.Arch, 7) // tag length, including tag, 7 here + gnuattributes.AddUint8(4) // 4 for FP, 8 for MSA + if buildcfg.GOMIPS == "softfloat" { + gnuattributes.AddUint8(MIPS_FPABI_SOFT) + } else { + // Note: MIPS_FPABI_ANY is bad naming: in fact it is MIPS I style FPR usage. + // It is not for 'ANY'. + // TODO: switch to FPXX after be sure that no odd-number-fpr is used. + gnuattributes.AddUint8(MIPS_FPABI_ANY) + } + } +} + +// Do not write DT_NULL. elfdynhash will finish it. +func shsym(sh *ElfShdr, ldr *loader.Loader, s loader.Sym) { + if s == 0 { + panic("bad symbol in shsym2") + } + addr := ldr.SymValue(s) + if sh.Flags&uint64(elf.SHF_ALLOC) != 0 { + sh.Addr = uint64(addr) + } + sh.Off = uint64(datoff(ldr, s, addr)) + sh.Size = uint64(ldr.SymSize(s)) +} + +func phsh(ph *ElfPhdr, sh *ElfShdr) { + ph.Vaddr = sh.Addr + ph.Paddr = ph.Vaddr + ph.Off = sh.Off + ph.Filesz = sh.Size + ph.Memsz = sh.Size + ph.Align = sh.Addralign +} + +func Asmbelfsetup() { + /* This null SHdr must appear before all others */ + elfshname("") + + for _, sect := range Segtext.Sections { + // There could be multiple .text sections. Instead check the Elfsect + // field to determine if already has an ElfShdr and if not, create one. 
+ if sect.Name == ".text" { + if sect.Elfsect == nil { + sect.Elfsect = elfshnamedup(sect.Name) + } + } else { + elfshalloc(sect) + } + } + for _, sect := range Segrodata.Sections { + elfshalloc(sect) + } + for _, sect := range Segrelrodata.Sections { + elfshalloc(sect) + } + for _, sect := range Segdata.Sections { + elfshalloc(sect) + } + for _, sect := range Segdwarf.Sections { + elfshalloc(sect) + } +} + +func asmbElf(ctxt *Link) { + var symo int64 + symo = int64(Segdwarf.Fileoff + Segdwarf.Filelen) + symo = Rnd(symo, int64(ctxt.Arch.PtrSize)) + ctxt.Out.SeekSet(symo) + if *FlagS { + ctxt.Out.Write(elfshstrdat) + } else { + ctxt.Out.SeekSet(symo) + asmElfSym(ctxt) + ctxt.Out.Write(elfstrdat) + ctxt.Out.Write(elfshstrdat) + if ctxt.IsExternal() { + elfEmitReloc(ctxt) + } + } + ctxt.Out.SeekSet(0) + + ldr := ctxt.loader + eh := getElfEhdr() + switch ctxt.Arch.Family { + default: + Exitf("unknown architecture in asmbelf: %v", ctxt.Arch.Family) + case sys.MIPS, sys.MIPS64: + eh.Machine = uint16(elf.EM_MIPS) + case sys.Loong64: + eh.Machine = uint16(elf.EM_LOONGARCH) + case sys.ARM: + eh.Machine = uint16(elf.EM_ARM) + case sys.AMD64: + eh.Machine = uint16(elf.EM_X86_64) + case sys.ARM64: + eh.Machine = uint16(elf.EM_AARCH64) + case sys.I386: + eh.Machine = uint16(elf.EM_386) + case sys.PPC64: + eh.Machine = uint16(elf.EM_PPC64) + case sys.RISCV64: + eh.Machine = uint16(elf.EM_RISCV) + case sys.S390X: + eh.Machine = uint16(elf.EM_S390) + } + + elfreserve := int64(ELFRESERVE) + + numtext := int64(0) + for _, sect := range Segtext.Sections { + if sect.Name == ".text" { + numtext++ + } + } + + // If there are multiple text sections, extra space is needed + // in the elfreserve for the additional .text and .rela.text + // section headers. It can handle 4 extra now. Headers are + // 64 bytes. 
+ + if numtext > 4 { + elfreserve += elfreserve + numtext*64*2 + } + + startva := *FlagTextAddr - int64(HEADR) + resoff := elfreserve + + var pph *ElfPhdr + var pnote *ElfPhdr + getpnote := func() *ElfPhdr { + if pnote == nil { + pnote = newElfPhdr() + pnote.Type = elf.PT_NOTE + pnote.Flags = elf.PF_R + } + return pnote + } + if *flagRace && ctxt.IsNetbsd() { + sh := elfshname(".note.netbsd.pax") + resoff -= int64(elfnetbsdpax(sh, uint64(startva), uint64(resoff))) + phsh(getpnote(), sh) + } + if ctxt.LinkMode == LinkExternal { + /* skip program headers */ + eh.Phoff = 0 + + eh.Phentsize = 0 + + if ctxt.BuildMode == BuildModeShared { + sh := elfshname(".note.go.pkg-list") + sh.Type = uint32(elf.SHT_NOTE) + sh = elfshname(".note.go.abihash") + sh.Type = uint32(elf.SHT_NOTE) + sh.Flags = uint64(elf.SHF_ALLOC) + sh = elfshname(".note.go.deps") + sh.Type = uint32(elf.SHT_NOTE) + } + + if *flagBuildid != "" { + sh := elfshname(".note.go.buildid") + sh.Type = uint32(elf.SHT_NOTE) + sh.Flags = uint64(elf.SHF_ALLOC) + } + + goto elfobj + } + + /* program header info */ + pph = newElfPhdr() + + pph.Type = elf.PT_PHDR + pph.Flags = elf.PF_R + pph.Off = uint64(eh.Ehsize) + pph.Vaddr = uint64(*FlagTextAddr) - uint64(HEADR) + pph.Off + pph.Paddr = uint64(*FlagTextAddr) - uint64(HEADR) + pph.Off + pph.Align = uint64(*FlagRound) + + /* + * PHDR must be in a loaded segment. Adjust the text + * segment boundaries downwards to include it. 
+ */ + { + o := int64(Segtext.Vaddr - pph.Vaddr) + Segtext.Vaddr -= uint64(o) + Segtext.Length += uint64(o) + o = int64(Segtext.Fileoff - pph.Off) + Segtext.Fileoff -= uint64(o) + Segtext.Filelen += uint64(o) + } + + if !*FlagD { /* -d suppresses dynamic loader format */ + /* interpreter */ + sh := elfshname(".interp") + + sh.Type = uint32(elf.SHT_PROGBITS) + sh.Flags = uint64(elf.SHF_ALLOC) + sh.Addralign = 1 + + if interpreter == "" && buildcfg.GOOS == runtime.GOOS && buildcfg.GOARCH == runtime.GOARCH && buildcfg.GO_LDSO != "" { + interpreter = buildcfg.GO_LDSO + } + + if interpreter == "" { + switch ctxt.HeadType { + case objabi.Hlinux: + if buildcfg.GOOS == "android" { + interpreter = thearch.ELF.Androiddynld + if interpreter == "" { + Exitf("ELF interpreter not set") + } + } else { + interpreter = thearch.ELF.Linuxdynld + // If interpreter does not exist, try musl instead. + // This lets the same cmd/link binary work on + // both glibc-based and musl-based systems. + if _, err := os.Stat(interpreter); err != nil { + if musl := thearch.ELF.LinuxdynldMusl; musl != "" { + if _, err := os.Stat(musl); err == nil { + interpreter = musl + } + } + } + } + + case objabi.Hfreebsd: + interpreter = thearch.ELF.Freebsddynld + + case objabi.Hnetbsd: + interpreter = thearch.ELF.Netbsddynld + + case objabi.Hopenbsd: + interpreter = thearch.ELF.Openbsddynld + + case objabi.Hdragonfly: + interpreter = thearch.ELF.Dragonflydynld + + case objabi.Hsolaris: + interpreter = thearch.ELF.Solarisdynld + } + } + + resoff -= int64(elfinterp(sh, uint64(startva), uint64(resoff), interpreter)) + + ph := newElfPhdr() + ph.Type = elf.PT_INTERP + ph.Flags = elf.PF_R + phsh(ph, sh) + } + + if ctxt.HeadType == objabi.Hnetbsd || ctxt.HeadType == objabi.Hopenbsd || ctxt.HeadType == objabi.Hfreebsd { + var sh *ElfShdr + switch ctxt.HeadType { + case objabi.Hnetbsd: + sh = elfshname(".note.netbsd.ident") + resoff -= int64(elfnetbsdsig(sh, uint64(startva), uint64(resoff))) + + case objabi.Hopenbsd: + 
sh = elfshname(".note.openbsd.ident") + resoff -= int64(elfopenbsdsig(sh, uint64(startva), uint64(resoff))) + + case objabi.Hfreebsd: + sh = elfshname(".note.tag") + resoff -= int64(elffreebsdsig(sh, uint64(startva), uint64(resoff))) + } + // NetBSD, OpenBSD and FreeBSD require ident in an independent segment. + pnotei := newElfPhdr() + pnotei.Type = elf.PT_NOTE + pnotei.Flags = elf.PF_R + phsh(pnotei, sh) + } + + if len(buildinfo) > 0 { + sh := elfshname(".note.gnu.build-id") + resoff -= int64(elfbuildinfo(sh, uint64(startva), uint64(resoff))) + phsh(getpnote(), sh) + } + + if *flagBuildid != "" { + sh := elfshname(".note.go.buildid") + resoff -= int64(elfgobuildid(sh, uint64(startva), uint64(resoff))) + phsh(getpnote(), sh) + } + + // Additions to the reserved area must be above this line. + + elfphload(&Segtext) + if len(Segrodata.Sections) > 0 { + elfphload(&Segrodata) + } + if len(Segrelrodata.Sections) > 0 { + elfphload(&Segrelrodata) + elfphrelro(&Segrelrodata) + } + elfphload(&Segdata) + + /* Dynamic linking sections */ + if !*FlagD { + sh := elfshname(".dynsym") + sh.Type = uint32(elf.SHT_DYNSYM) + sh.Flags = uint64(elf.SHF_ALLOC) + if elf64 { + sh.Entsize = ELF64SYMSIZE + } else { + sh.Entsize = ELF32SYMSIZE + } + sh.Addralign = uint64(ctxt.Arch.RegSize) + sh.Link = uint32(elfshname(".dynstr").shnum) + + // sh.info is the index of first non-local symbol (number of local symbols) + s := ldr.Lookup(".dynsym", 0) + i := uint32(0) + for sub := s; sub != 0; sub = ldr.SubSym(sub) { + i++ + if !ldr.AttrLocal(sub) { + break + } + } + sh.Info = i + shsym(sh, ldr, s) + + sh = elfshname(".dynstr") + sh.Type = uint32(elf.SHT_STRTAB) + sh.Flags = uint64(elf.SHF_ALLOC) + sh.Addralign = 1 + shsym(sh, ldr, ldr.Lookup(".dynstr", 0)) + + if elfverneed != 0 { + sh := elfshname(".gnu.version") + sh.Type = uint32(elf.SHT_GNU_VERSYM) + sh.Flags = uint64(elf.SHF_ALLOC) + sh.Addralign = 2 + sh.Link = uint32(elfshname(".dynsym").shnum) + sh.Entsize = 2 + shsym(sh, ldr, 
ldr.Lookup(".gnu.version", 0)) + + sh = elfshname(".gnu.version_r") + sh.Type = uint32(elf.SHT_GNU_VERNEED) + sh.Flags = uint64(elf.SHF_ALLOC) + sh.Addralign = uint64(ctxt.Arch.RegSize) + sh.Info = uint32(elfverneed) + sh.Link = uint32(elfshname(".dynstr").shnum) + shsym(sh, ldr, ldr.Lookup(".gnu.version_r", 0)) + } + + if elfRelType == ".rela" { + sh := elfshname(".rela.plt") + sh.Type = uint32(elf.SHT_RELA) + sh.Flags = uint64(elf.SHF_ALLOC) + sh.Entsize = ELF64RELASIZE + sh.Addralign = uint64(ctxt.Arch.RegSize) + sh.Link = uint32(elfshname(".dynsym").shnum) + sh.Info = uint32(elfshname(".plt").shnum) + shsym(sh, ldr, ldr.Lookup(".rela.plt", 0)) + + sh = elfshname(".rela") + sh.Type = uint32(elf.SHT_RELA) + sh.Flags = uint64(elf.SHF_ALLOC) + sh.Entsize = ELF64RELASIZE + sh.Addralign = 8 + sh.Link = uint32(elfshname(".dynsym").shnum) + shsym(sh, ldr, ldr.Lookup(".rela", 0)) + } else { + sh := elfshname(".rel.plt") + sh.Type = uint32(elf.SHT_REL) + sh.Flags = uint64(elf.SHF_ALLOC) + sh.Entsize = ELF32RELSIZE + sh.Addralign = 4 + sh.Link = uint32(elfshname(".dynsym").shnum) + shsym(sh, ldr, ldr.Lookup(".rel.plt", 0)) + + sh = elfshname(".rel") + sh.Type = uint32(elf.SHT_REL) + sh.Flags = uint64(elf.SHF_ALLOC) + sh.Entsize = ELF32RELSIZE + sh.Addralign = 4 + sh.Link = uint32(elfshname(".dynsym").shnum) + shsym(sh, ldr, ldr.Lookup(".rel", 0)) + } + + if elf.Machine(eh.Machine) == elf.EM_PPC64 { + sh := elfshname(".glink") + sh.Type = uint32(elf.SHT_PROGBITS) + sh.Flags = uint64(elf.SHF_ALLOC + elf.SHF_EXECINSTR) + sh.Addralign = 4 + shsym(sh, ldr, ldr.Lookup(".glink", 0)) + } + + sh = elfshname(".plt") + sh.Type = uint32(elf.SHT_PROGBITS) + sh.Flags = uint64(elf.SHF_ALLOC + elf.SHF_EXECINSTR) + if elf.Machine(eh.Machine) == elf.EM_X86_64 { + sh.Entsize = 16 + } else if elf.Machine(eh.Machine) == elf.EM_S390 { + sh.Entsize = 32 + } else if elf.Machine(eh.Machine) == elf.EM_PPC64 { + // On ppc64, this is just a table of addresses + // filled by the dynamic linker + 
sh.Type = uint32(elf.SHT_NOBITS) + + sh.Flags = uint64(elf.SHF_ALLOC + elf.SHF_WRITE) + sh.Entsize = 8 + } else { + sh.Entsize = 4 + } + sh.Addralign = sh.Entsize + shsym(sh, ldr, ldr.Lookup(".plt", 0)) + + // On ppc64, .got comes from the input files, so don't + // create it here, and .got.plt is not used. + if elf.Machine(eh.Machine) != elf.EM_PPC64 { + sh := elfshname(".got") + sh.Type = uint32(elf.SHT_PROGBITS) + sh.Flags = uint64(elf.SHF_ALLOC + elf.SHF_WRITE) + sh.Entsize = uint64(ctxt.Arch.RegSize) + sh.Addralign = uint64(ctxt.Arch.RegSize) + shsym(sh, ldr, ldr.Lookup(".got", 0)) + + sh = elfshname(".got.plt") + sh.Type = uint32(elf.SHT_PROGBITS) + sh.Flags = uint64(elf.SHF_ALLOC + elf.SHF_WRITE) + sh.Entsize = uint64(ctxt.Arch.RegSize) + sh.Addralign = uint64(ctxt.Arch.RegSize) + shsym(sh, ldr, ldr.Lookup(".got.plt", 0)) + } + + sh = elfshname(".hash") + sh.Type = uint32(elf.SHT_HASH) + sh.Flags = uint64(elf.SHF_ALLOC) + sh.Entsize = 4 + sh.Addralign = uint64(ctxt.Arch.RegSize) + sh.Link = uint32(elfshname(".dynsym").shnum) + shsym(sh, ldr, ldr.Lookup(".hash", 0)) + + /* sh and elf.PT_DYNAMIC for .dynamic section */ + sh = elfshname(".dynamic") + + sh.Type = uint32(elf.SHT_DYNAMIC) + sh.Flags = uint64(elf.SHF_ALLOC + elf.SHF_WRITE) + sh.Entsize = 2 * uint64(ctxt.Arch.RegSize) + sh.Addralign = uint64(ctxt.Arch.RegSize) + sh.Link = uint32(elfshname(".dynstr").shnum) + shsym(sh, ldr, ldr.Lookup(".dynamic", 0)) + ph := newElfPhdr() + ph.Type = elf.PT_DYNAMIC + ph.Flags = elf.PF_R + elf.PF_W + phsh(ph, sh) + + /* + * Thread-local storage segment (really just size). 
+ */ + tlssize := uint64(0) + for _, sect := range Segdata.Sections { + if sect.Name == ".tbss" { + tlssize = sect.Length + } + } + if tlssize != 0 { + ph := newElfPhdr() + ph.Type = elf.PT_TLS + ph.Flags = elf.PF_R + ph.Memsz = tlssize + ph.Align = uint64(ctxt.Arch.RegSize) + } + } + + if ctxt.HeadType == objabi.Hlinux || ctxt.HeadType == objabi.Hfreebsd { + ph := newElfPhdr() + ph.Type = elf.PT_GNU_STACK + ph.Flags = elf.PF_W + elf.PF_R + ph.Align = uint64(ctxt.Arch.RegSize) + } else if ctxt.HeadType == objabi.Hsolaris { + ph := newElfPhdr() + ph.Type = elf.PT_SUNWSTACK + ph.Flags = elf.PF_W + elf.PF_R + } + +elfobj: + sh := elfshname(".shstrtab") + eh.Shstrndx = uint16(sh.shnum) + + if ctxt.IsMIPS() { + sh = elfshname(".MIPS.abiflags") + sh.Type = uint32(elf.SHT_MIPS_ABIFLAGS) + sh.Flags = uint64(elf.SHF_ALLOC) + sh.Addralign = 8 + resoff -= int64(elfMipsAbiFlags(sh, uint64(startva), uint64(resoff))) + + ph := newElfPhdr() + ph.Type = elf.PT_MIPS_ABIFLAGS + ph.Flags = elf.PF_R + phsh(ph, sh) + + sh = elfshname(".gnu.attributes") + sh.Type = uint32(elf.SHT_GNU_ATTRIBUTES) + sh.Addralign = 1 + ldr := ctxt.loader + shsym(sh, ldr, ldr.Lookup(".gnu.attributes", 0)) + } + + // put these sections early in the list + if !*FlagS { + elfshname(".symtab") + elfshname(".strtab") + } + elfshname(".shstrtab") + + for _, sect := range Segtext.Sections { + elfshbits(ctxt.LinkMode, sect) + } + for _, sect := range Segrodata.Sections { + elfshbits(ctxt.LinkMode, sect) + } + for _, sect := range Segrelrodata.Sections { + elfshbits(ctxt.LinkMode, sect) + } + for _, sect := range Segdata.Sections { + elfshbits(ctxt.LinkMode, sect) + } + for _, sect := range Segdwarf.Sections { + elfshbits(ctxt.LinkMode, sect) + } + + if ctxt.LinkMode == LinkExternal { + for _, sect := range Segtext.Sections { + elfshreloc(ctxt.Arch, sect) + } + for _, sect := range Segrodata.Sections { + elfshreloc(ctxt.Arch, sect) + } + for _, sect := range Segrelrodata.Sections { + elfshreloc(ctxt.Arch, sect) + } 
+ for _, sect := range Segdata.Sections { + elfshreloc(ctxt.Arch, sect) + } + for _, si := range dwarfp { + sect := ldr.SymSect(si.secSym()) + elfshreloc(ctxt.Arch, sect) + } + // add a .note.GNU-stack section to mark the stack as non-executable + sh := elfshname(".note.GNU-stack") + + sh.Type = uint32(elf.SHT_PROGBITS) + sh.Addralign = 1 + sh.Flags = 0 + } + + var shstroff uint64 + if !*FlagS { + sh := elfshname(".symtab") + sh.Type = uint32(elf.SHT_SYMTAB) + sh.Off = uint64(symo) + sh.Size = uint64(symSize) + sh.Addralign = uint64(ctxt.Arch.RegSize) + sh.Entsize = 8 + 2*uint64(ctxt.Arch.RegSize) + sh.Link = uint32(elfshname(".strtab").shnum) + sh.Info = uint32(elfglobalsymndx) + + sh = elfshname(".strtab") + sh.Type = uint32(elf.SHT_STRTAB) + sh.Off = uint64(symo) + uint64(symSize) + sh.Size = uint64(len(elfstrdat)) + sh.Addralign = 1 + shstroff = sh.Off + sh.Size + } else { + shstroff = uint64(symo) + } + + sh = elfshname(".shstrtab") + sh.Type = uint32(elf.SHT_STRTAB) + sh.Off = shstroff + sh.Size = uint64(len(elfshstrdat)) + sh.Addralign = 1 + + /* Main header */ + copy(eh.Ident[:], elf.ELFMAG) + + var osabi elf.OSABI + switch ctxt.HeadType { + case objabi.Hfreebsd: + osabi = elf.ELFOSABI_FREEBSD + case objabi.Hnetbsd: + osabi = elf.ELFOSABI_NETBSD + case objabi.Hopenbsd: + osabi = elf.ELFOSABI_OPENBSD + case objabi.Hdragonfly: + osabi = elf.ELFOSABI_NONE + } + eh.Ident[elf.EI_OSABI] = byte(osabi) + + if elf64 { + eh.Ident[elf.EI_CLASS] = byte(elf.ELFCLASS64) + } else { + eh.Ident[elf.EI_CLASS] = byte(elf.ELFCLASS32) + } + if ctxt.Arch.ByteOrder == binary.BigEndian { + eh.Ident[elf.EI_DATA] = byte(elf.ELFDATA2MSB) + } else { + eh.Ident[elf.EI_DATA] = byte(elf.ELFDATA2LSB) + } + eh.Ident[elf.EI_VERSION] = byte(elf.EV_CURRENT) + + if ctxt.LinkMode == LinkExternal { + eh.Type = uint16(elf.ET_REL) + } else if ctxt.BuildMode == BuildModePIE { + eh.Type = uint16(elf.ET_DYN) + } else { + eh.Type = uint16(elf.ET_EXEC) + } + + if ctxt.LinkMode != LinkExternal { + 
eh.Entry = uint64(Entryvalue(ctxt)) + } + + eh.Version = uint32(elf.EV_CURRENT) + + if pph != nil { + pph.Filesz = uint64(eh.Phnum) * uint64(eh.Phentsize) + pph.Memsz = pph.Filesz + } + + ctxt.Out.SeekSet(0) + a := int64(0) + a += int64(elfwritehdr(ctxt.Out)) + a += int64(elfwritephdrs(ctxt.Out)) + a += int64(elfwriteshdrs(ctxt.Out)) + if !*FlagD { + a += int64(elfwriteinterp(ctxt.Out)) + } + if ctxt.IsMIPS() { + a += int64(elfWriteMipsAbiFlags(ctxt)) + } + + if ctxt.LinkMode != LinkExternal { + if ctxt.HeadType == objabi.Hnetbsd { + a += int64(elfwritenetbsdsig(ctxt.Out)) + } + if ctxt.HeadType == objabi.Hopenbsd { + a += int64(elfwriteopenbsdsig(ctxt.Out)) + } + if ctxt.HeadType == objabi.Hfreebsd { + a += int64(elfwritefreebsdsig(ctxt.Out)) + } + if len(buildinfo) > 0 { + a += int64(elfwritebuildinfo(ctxt.Out)) + } + if *flagBuildid != "" { + a += int64(elfwritegobuildid(ctxt.Out)) + } + } + if *flagRace && ctxt.IsNetbsd() { + a += int64(elfwritenetbsdpax(ctxt.Out)) + } + + if a > elfreserve { + Errorf(nil, "ELFRESERVE too small: %d > %d with %d text sections", a, elfreserve, numtext) + } + + // Verify the amount of space allocated for the elf header is sufficient. The file offsets are + // already computed in layout, so we could spill into another section. 
+ if a > int64(HEADR) { + Errorf(nil, "HEADR too small: %d > %d with %d text sections", a, HEADR, numtext) + } +} + +func elfadddynsym(ldr *loader.Loader, target *Target, syms *ArchSyms, s loader.Sym) { + ldr.SetSymDynid(s, int32(Nelfsym)) + Nelfsym++ + d := ldr.MakeSymbolUpdater(syms.DynSym) + name := ldr.SymExtname(s) + dstru := ldr.MakeSymbolUpdater(syms.DynStr) + st := ldr.SymType(s) + cgoeStatic := ldr.AttrCgoExportStatic(s) + cgoeDynamic := ldr.AttrCgoExportDynamic(s) + cgoexp := (cgoeStatic || cgoeDynamic) + + d.AddUint32(target.Arch, uint32(dstru.Addstring(name))) + + if elf64 { + + /* type */ + var t uint8 + + if cgoexp && st == sym.STEXT { + t = elf.ST_INFO(elf.STB_GLOBAL, elf.STT_FUNC) + } else { + t = elf.ST_INFO(elf.STB_GLOBAL, elf.STT_OBJECT) + } + d.AddUint8(t) + + /* reserved */ + d.AddUint8(0) + + /* section where symbol is defined */ + if st == sym.SDYNIMPORT { + d.AddUint16(target.Arch, uint16(elf.SHN_UNDEF)) + } else { + d.AddUint16(target.Arch, 1) + } + + /* value */ + if st == sym.SDYNIMPORT { + d.AddUint64(target.Arch, 0) + } else { + d.AddAddrPlus(target.Arch, s, 0) + } + + /* size of object */ + d.AddUint64(target.Arch, uint64(len(ldr.Data(s)))) + + dil := ldr.SymDynimplib(s) + + if !cgoeDynamic && dil != "" && !seenlib[dil] { + du := ldr.MakeSymbolUpdater(syms.Dynamic) + Elfwritedynent(target.Arch, du, elf.DT_NEEDED, uint64(dstru.Addstring(dil))) + seenlib[dil] = true + } + } else { + + /* value */ + if st == sym.SDYNIMPORT { + d.AddUint32(target.Arch, 0) + } else { + d.AddAddrPlus(target.Arch, s, 0) + } + + /* size of object */ + d.AddUint32(target.Arch, uint32(len(ldr.Data(s)))) + + /* type */ + var t uint8 + + // TODO(mwhudson): presumably the behavior should actually be the same on both arm and 386. 
+ if target.Arch.Family == sys.I386 && cgoexp && st == sym.STEXT { + t = elf.ST_INFO(elf.STB_GLOBAL, elf.STT_FUNC) + } else if target.Arch.Family == sys.ARM && cgoeDynamic && st == sym.STEXT { + t = elf.ST_INFO(elf.STB_GLOBAL, elf.STT_FUNC) + } else { + t = elf.ST_INFO(elf.STB_GLOBAL, elf.STT_OBJECT) + } + d.AddUint8(t) + d.AddUint8(0) + + /* shndx */ + if st == sym.SDYNIMPORT { + d.AddUint16(target.Arch, uint16(elf.SHN_UNDEF)) + } else { + d.AddUint16(target.Arch, 1) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/elf_test.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/elf_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e535af6a1c2e6d4db0f5e23f8ab4b4091e8a94ca --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/elf_test.go @@ -0,0 +1,184 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build cgo + +package ld + +import ( + "debug/elf" + "internal/testenv" + "os" + "path/filepath" + "runtime" + "strings" + "testing" +) + +func TestDynSymShInfo(t *testing.T) { + t.Parallel() + testenv.MustHaveGoBuild(t) + dir := t.TempDir() + + const prog = ` +package main + +import "net" + +func main() { + net.Dial("", "") +} +` + src := filepath.Join(dir, "issue33358.go") + if err := os.WriteFile(src, []byte(prog), 0666); err != nil { + t.Fatal(err) + } + + binFile := filepath.Join(dir, "issue33358") + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", binFile, src) + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("%v: %v:\n%s", cmd.Args, err, out) + } + + fi, err := os.Open(binFile) + if err != nil { + t.Fatalf("failed to open built file: %v", err) + } + defer fi.Close() + + elfFile, err := elf.NewFile(fi) + if err != nil { + t.Skip("The system may not support ELF, skipped.") + } + + section := elfFile.Section(".dynsym") + if section == nil { + t.Fatal("no dynsym") + } + + symbols, err := elfFile.DynamicSymbols() + if err != nil { + t.Fatalf("failed to get dynamic symbols: %v", err) + } + + var numLocalSymbols uint32 + for i, s := range symbols { + if elf.ST_BIND(s.Info) != elf.STB_LOCAL { + numLocalSymbols = uint32(i + 1) + break + } + } + + if section.Info != numLocalSymbols { + t.Fatalf("Unexpected sh info, want greater than 0, got: %d", section.Info) + } +} + +func TestNoDuplicateNeededEntries(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + + // run this test on just a small set of platforms (no need to test it + // across the board given the nature of the test). 
+ pair := runtime.GOOS + "-" + runtime.GOARCH + switch pair { + case "linux-amd64", "linux-arm64", "freebsd-amd64", "openbsd-amd64": + default: + t.Skip("no need for test on " + pair) + } + + t.Parallel() + + dir := t.TempDir() + + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get working directory: %v", err) + } + + path := filepath.Join(dir, "x") + argv := []string{"build", "-o", path, filepath.Join(wd, "testdata", "issue39256")} + out, err := testenv.Command(t, testenv.GoToolPath(t), argv...).CombinedOutput() + if err != nil { + t.Fatalf("Build failure: %s\n%s\n", err, string(out)) + } + + f, err := elf.Open(path) + if err != nil { + t.Fatalf("Failed to open ELF file: %v", err) + } + libs, err := f.ImportedLibraries() + if err != nil { + t.Fatalf("Failed to read imported libraries: %v", err) + } + + var count int + for _, lib := range libs { + if lib == "libc.so" || strings.HasPrefix(lib, "libc.so.") { + count++ + } + } + + if got, want := count, 1; got != want { + t.Errorf("Got %d entries for `libc.so`, want %d", got, want) + } +} + +func TestShStrTabAttributesIssue62600(t *testing.T) { + t.Parallel() + testenv.MustHaveGoBuild(t) + dir := t.TempDir() + + const prog = ` +package main + +func main() { + println("whee") +} +` + src := filepath.Join(dir, "issue62600.go") + if err := os.WriteFile(src, []byte(prog), 0666); err != nil { + t.Fatal(err) + } + + binFile := filepath.Join(dir, "issue62600") + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", binFile, src) + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("%v: %v:\n%s", cmd.Args, err, out) + } + + fi, err := os.Open(binFile) + if err != nil { + t.Fatalf("failed to open built file: %v", err) + } + defer fi.Close() + + elfFile, err := elf.NewFile(fi) + if err != nil { + t.Skip("The system may not support ELF, skipped.") + } + + section := elfFile.Section(".shstrtab") + if section == nil { + t.Fatal("no .shstrtab") + } + + // The .shstrtab section should have a zero 
address, non-zero + // size, no ALLOC flag, and the offset should not fall into any of + // the segments defined by the program headers. + if section.Addr != 0 { + t.Fatalf("expected Addr == 0 for .shstrtab got %x", section.Addr) + } + if section.Size == 0 { + t.Fatal("expected nonzero Size for .shstrtab got 0") + } + if section.Flags&elf.SHF_ALLOC != 0 { + t.Fatal("expected zero alloc flag got nonzero for .shstrtab") + } + for idx, p := range elfFile.Progs { + if section.Offset >= p.Off && section.Offset < p.Off+p.Filesz { + t.Fatalf("badly formed .shstrtab, is contained in segment %d", idx) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/errors.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..b553d682d94c6f0e84dea987b0fa79648554474f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/errors.go @@ -0,0 +1,67 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "cmd/internal/obj" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "sync" +) + +type unresolvedSymKey struct { + from loader.Sym // Symbol that referenced unresolved "to" + to loader.Sym // Unresolved symbol referenced by "from" +} + +type symNameFn func(s loader.Sym) string + +// ErrorReporter is used to make error reporting thread safe. +type ErrorReporter struct { + loader.ErrorReporter + unresSyms map[unresolvedSymKey]bool + unresMutex sync.Mutex + SymName symNameFn +} + +// errorUnresolved prints unresolved symbol error for rs that is referenced from s. 
+func (reporter *ErrorReporter) errorUnresolved(ldr *loader.Loader, s, rs loader.Sym) { + reporter.unresMutex.Lock() + defer reporter.unresMutex.Unlock() + + if reporter.unresSyms == nil { + reporter.unresSyms = make(map[unresolvedSymKey]bool) + } + k := unresolvedSymKey{from: s, to: rs} + if !reporter.unresSyms[k] { + reporter.unresSyms[k] = true + name := ldr.SymName(rs) + + // Try to find symbol under another ABI. + var reqABI, haveABI obj.ABI + haveABI = ^obj.ABI(0) + reqABI, ok := sym.VersionToABI(ldr.SymVersion(rs)) + if ok { + for abi := obj.ABI(0); abi < obj.ABICount; abi++ { + v := sym.ABIToVersion(abi) + if v == -1 { + continue + } + if rs1 := ldr.Lookup(name, v); rs1 != 0 && ldr.SymType(rs1) != sym.Sxxx && ldr.SymType(rs1) != sym.SXREF { + haveABI = abi + } + } + } + + // Give a special error message for main symbol (see #24809). + if name == "main.main" { + reporter.Errorf(s, "function main is undeclared in the main package") + } else if haveABI != ^obj.ABI(0) { + reporter.Errorf(s, "relocation target %s not defined for %s (but is defined for %s)", name, reqABI, haveABI) + } else { + reporter.Errorf(s, "relocation target %s not defined", name) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/execarchive.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/execarchive.go new file mode 100644 index 0000000000000000000000000000000000000000..7a19567a6c956c5899c0300010d9845f9aa56c90 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/execarchive.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !wasm && !windows + +package ld + +import ( + "os" + "os/exec" + "path/filepath" + "syscall" +) + +const syscallExecSupported = true + +// execArchive invokes the archiver tool with syscall.Exec(), with +// the expectation that this is the last thing that takes place +// in the linking operation. +func (ctxt *Link) execArchive(argv []string) { + var err error + argv0 := argv[0] + if filepath.Base(argv0) == argv0 { + argv0, err = exec.LookPath(argv0) + if err != nil { + Exitf("cannot find %s: %v", argv[0], err) + } + } + if ctxt.Debugvlog != 0 { + ctxt.Logf("invoking archiver with syscall.Exec()\n") + } + err = syscall.Exec(argv0, argv, os.Environ()) + if err != nil { + Exitf("running %s failed: %v", argv[0], err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/execarchive_noexec.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/execarchive_noexec.go new file mode 100644 index 0000000000000000000000000000000000000000..ada3e9f74aa80ae5d663e747a81f0f70187eba06 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/execarchive_noexec.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasm || windows + +package ld + +const syscallExecSupported = false + +func (ctxt *Link) execArchive(argv []string) { + panic("should never arrive here") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/fallocate_test.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/fallocate_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d95fec788a616bf18d6ff700f622883e34717afc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/fallocate_test.go @@ -0,0 +1,65 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build darwin || (freebsd && go1.21) || linux

package ld

import (
	"errors"
	"os"
	"path/filepath"
	"syscall"
	"testing"
)

// TestFallocate exercises OutBuf's fallocate + Mmap growth path: after
// mapping the output at increasing sizes, the file size must match
// exactly and the on-disk block count must cover the requested size.
func TestFallocate(t *testing.T) {
	dir := t.TempDir()
	filename := filepath.Join(dir, "a.out")
	out := NewOutBuf(nil)
	err := out.Open(filename)
	if err != nil {
		t.Fatalf("Open file failed: %v", err)
	}
	defer out.Close()

	// Try fallocate first.
	for {
		err = out.fallocate(1 << 10)
		if errors.Is(err, errors.ErrUnsupported) || err == errNoFallocate { // The underlying file system may not support fallocate
			t.Skip("fallocate is not supported")
		}
		if err == syscall.EINTR {
			continue // interrupted by a signal; try again
		}
		if err != nil {
			t.Fatalf("fallocate failed: %v", err)
		}
		break
	}

	// Mmap 1 MiB initially, and grow to 2 and 3 MiB.
	// Check if the file size and disk usage is expected.
	for _, sz := range []int64{1 << 20, 2 << 20, 3 << 20} {
		err = out.Mmap(uint64(sz))
		if err != nil {
			t.Fatalf("Mmap failed: %v", err)
		}
		stat, err := os.Stat(filename)
		if err != nil {
			t.Fatalf("Stat failed: %v", err)
		}
		if got := stat.Size(); got != sz {
			t.Errorf("unexpected file size: got %d, want %d", got, sz)
		}
		// The number of blocks must be enough for the requested size.
		// We used to require an exact match, but it appears that
		// some file systems allocate a few extra blocks in some cases.
		// See issue #41127.
		if got, want := stat.Sys().(*syscall.Stat_t).Blocks, (sz+511)/512; got < want {
			t.Errorf("unexpected disk usage: got %d blocks, want at least %d", got, want)
		}
		out.munmap()
	}
}
+ +func ldpkg(ctxt *Link, f *bio.Reader, lib *sym.Library, length int64, filename string) { + if *flagG { + return + } + + if int64(int(length)) != length { + fmt.Fprintf(os.Stderr, "%s: too much pkg data in %s\n", os.Args[0], filename) + return + } + + bdata := make([]byte, length) + if _, err := io.ReadFull(f, bdata); err != nil { + fmt.Fprintf(os.Stderr, "%s: short pkg read %s\n", os.Args[0], filename) + return + } + data := string(bdata) + + // process header lines + for data != "" { + var line string + line, data, _ = strings.Cut(data, "\n") + if line == "main" { + lib.Main = true + } + if line == "" { + break + } + } + + // look for cgo section + p0 := strings.Index(data, "\n$$ // cgo") + var p1 int + if p0 >= 0 { + p0 += p1 + i := strings.IndexByte(data[p0+1:], '\n') + if i < 0 { + fmt.Fprintf(os.Stderr, "%s: found $$ // cgo but no newline in %s\n", os.Args[0], filename) + return + } + p0 += 1 + i + + p1 = strings.Index(data[p0:], "\n$$") + if p1 < 0 { + p1 = strings.Index(data[p0:], "\n!\n") + } + if p1 < 0 { + fmt.Fprintf(os.Stderr, "%s: cannot find end of // cgo section in %s\n", os.Args[0], filename) + return + } + p1 += p0 + loadcgo(ctxt, filename, objabi.PathToPrefix(lib.Pkg), data[p0:p1]) + } +} + +func loadcgo(ctxt *Link, file string, pkg string, p string) { + var directives [][]string + if err := json.NewDecoder(strings.NewReader(p)).Decode(&directives); err != nil { + fmt.Fprintf(os.Stderr, "%s: %s: failed decoding cgo directives: %v\n", os.Args[0], file, err) + nerrors++ + return + } + + // Record the directives. We'll process them later after Symbols are created. + ctxt.cgodata = append(ctxt.cgodata, cgodata{file, pkg, directives}) +} + +// Set symbol attributes or flags based on cgo directives. +// Any newly discovered HOSTOBJ syms are added to 'hostObjSyms'. 
// setCgoAttr applies previously queued cgo directives (see loadcgo) to
// loader symbols: dynamic/static imports, exports, the dynamic linker
// path, and extra host-linker flags. Newly discovered HOSTOBJ symbols
// are added to hostObjSyms. A malformed directive (wrong arity) falls
// out of the switch via break and is reported as invalid at the bottom
// of the loop; hard errors increment nerrors and return early.
func setCgoAttr(ctxt *Link, file string, pkg string, directives [][]string, hostObjSyms map[loader.Sym]struct{}) {
	l := ctxt.loader
	for _, f := range directives {
		switch f[0] {
		case "cgo_import_dynamic":
			if len(f) < 2 || len(f) > 4 {
				break
			}

			// f is: cgo_import_dynamic local [remote ["lib"]]
			local := f[1]
			remote := local
			if len(f) > 2 {
				remote = f[2]
			}
			lib := ""
			if len(f) > 3 {
				lib = f[3]
			}

			if *FlagD {
				fmt.Fprintf(os.Stderr, "%s: %s: cannot use dynamic imports with -d flag\n", os.Args[0], file)
				nerrors++
				return
			}

			if local == "_" && remote == "_" {
				// allow #pragma dynimport _ _ "foo.so"
				// to force a link of foo.so.
				havedynamic = 1

				if ctxt.HeadType == objabi.Hdarwin {
					machoadddynlib(lib, ctxt.LinkMode)
				} else {
					dynlib = append(dynlib, lib)
				}
				continue
			}

			// remote may carry a symbol version after '#'.
			q := ""
			if before, after, found := strings.Cut(remote, "#"); found {
				remote, q = before, after
			}
			s := l.LookupOrCreateSym(local, 0)
			st := l.SymType(s)
			// Only attach dynimport info to symbols that are still
			// undefined (or BSS/host-object placeholders).
			if st == 0 || st == sym.SXREF || st == sym.SBSS || st == sym.SNOPTRBSS || st == sym.SHOSTOBJ {
				l.SetSymDynimplib(s, lib)
				l.SetSymExtname(s, remote)
				l.SetSymDynimpvers(s, q)
				if st != sym.SHOSTOBJ {
					su := l.MakeSymbolUpdater(s)
					su.SetType(sym.SDYNIMPORT)
				} else {
					hostObjSyms[s] = struct{}{}
				}
				havedynamic = 1
				if lib != "" && ctxt.IsDarwin() {
					machoadddynlib(lib, ctxt.LinkMode)
				}
			}

			continue

		case "cgo_import_static":
			if len(f) != 2 {
				break
			}
			local := f[1]

			// Mark the symbol as supplied by a host object file.
			s := l.LookupOrCreateSym(local, 0)
			su := l.MakeSymbolUpdater(s)
			su.SetType(sym.SHOSTOBJ)
			su.SetSize(0)
			hostObjSyms[s] = struct{}{}
			continue

		case "cgo_export_static", "cgo_export_dynamic":
			if len(f) < 2 || len(f) > 4 {
				break
			}
			local := f[1]
			remote := local
			if len(f) > 2 {
				remote = f[2]
			}
			// The compiler adds a fourth argument giving
			// the definition ABI of function symbols.
			abi := obj.ABI0
			if len(f) > 3 {
				var ok bool
				abi, ok = obj.ParseABI(f[3])
				if !ok {
					fmt.Fprintf(os.Stderr, "%s: bad ABI in cgo_export directive %s\n", os.Args[0], f)
					nerrors++
					return
				}
			}

			s := l.LookupOrCreateSym(local, sym.ABIToVersion(abi))

			if l.SymType(s) == sym.SHOSTOBJ {
				hostObjSyms[s] = struct{}{}
			}

			switch ctxt.BuildMode {
			case BuildModeCShared, BuildModeCArchive, BuildModePlugin:
				// Skip exporting "main" in library build modes.
				if s == l.Lookup("main", 0) {
					continue
				}
			}

			// export overrides import, for openbsd/cgo.
			// see issue 4878.
			if l.SymDynimplib(s) != "" {
				l.SetSymDynimplib(s, "")
				l.SetSymDynimpvers(s, "")
				l.SetSymExtname(s, "")
				var su *loader.SymbolBuilder
				su = l.MakeSymbolUpdater(s)
				su.SetType(0)
			}

			if !(l.AttrCgoExportStatic(s) || l.AttrCgoExportDynamic(s)) {
				l.SetSymExtname(s, remote)
			} else if l.SymExtname(s) != remote {
				// Two export directives with different external names.
				fmt.Fprintf(os.Stderr, "%s: conflicting cgo_export directives: %s as %s and %s\n", os.Args[0], l.SymName(s), l.SymExtname(s), remote)
				nerrors++
				return
			}

			// Mark exported symbols and also add them to
			// the lists used for roots in the deadcode pass.
			if f[0] == "cgo_export_static" {
				if ctxt.LinkMode == LinkExternal && !l.AttrCgoExportStatic(s) {
					// Static cgo exports appear
					// in the exported symbol table.
					ctxt.dynexp = append(ctxt.dynexp, s)
				}
				if ctxt.LinkMode == LinkInternal {
					// For internal linking, we're
					// responsible for resolving
					// relocations from host objects.
					// Record the right Go symbol
					// version to use.
					l.AddCgoExport(s)
				}
				l.SetAttrCgoExportStatic(s, true)
			} else {
				if ctxt.LinkMode == LinkInternal && !l.AttrCgoExportDynamic(s) {
					// Dynamic cgo exports appear
					// in the exported symbol table.
					ctxt.dynexp = append(ctxt.dynexp, s)
				}
				l.SetAttrCgoExportDynamic(s, true)
			}

			continue

		case "cgo_dynamic_linker":
			if len(f) != 2 {
				break
			}

			// An explicit -I flag (flagInterpreter) wins over directives.
			if *flagInterpreter == "" {
				if interpreter != "" && interpreter != f[1] {
					fmt.Fprintf(os.Stderr, "%s: conflict dynlinker: %s and %s\n", os.Args[0], interpreter, f[1])
					nerrors++
					return
				}

				interpreter = f[1]
			}
			continue

		case "cgo_ldflag":
			if len(f) != 2 {
				break
			}
			ldflag = append(ldflag, f[1])
			continue
		}

		// Reached via the "break" arms above: directive had bad arity
		// or an unknown verb.
		fmt.Fprintf(os.Stderr, "%s: %s: invalid cgo directive: %q\n", os.Args[0], file, f)
		nerrors++
	}
	return
}
+ seenlib[name] = true + libraries[name] = lib + } else if _, ok := libraries[lib]; !ok { + libraries[lib] = lib + } + } + + libs = nil + for _, lib := range libraries { + libs = append(libs, lib) + } + sort.Strings(libs) + + return libs +} + +func dedupLibraries(ctxt *Link, libs []string) []string { + if ctxt.Target.IsOpenbsd() { + return dedupLibrariesOpenBSD(ctxt, libs) + } + return libs +} + +var seenlib = make(map[string]bool) + +func adddynlib(ctxt *Link, lib string) { + if seenlib[lib] || ctxt.LinkMode == LinkExternal { + return + } + seenlib[lib] = true + + if ctxt.IsELF { + dsu := ctxt.loader.MakeSymbolUpdater(ctxt.DynStr) + if dsu.Size() == 0 { + dsu.Addstring("") + } + du := ctxt.loader.MakeSymbolUpdater(ctxt.Dynamic) + Elfwritedynent(ctxt.Arch, du, elf.DT_NEEDED, uint64(dsu.Addstring(lib))) + } else { + Errorf(nil, "adddynlib: unsupported binary format") + } +} + +func Adddynsym(ldr *loader.Loader, target *Target, syms *ArchSyms, s loader.Sym) { + if ldr.SymDynid(s) >= 0 || target.LinkMode == LinkExternal { + return + } + + if target.IsELF { + elfadddynsym(ldr, target, syms, s) + } else if target.HeadType == objabi.Hdarwin { + ldr.Errorf(s, "adddynsym: missed symbol (Extname=%s)", ldr.SymExtname(s)) + } else if target.HeadType == objabi.Hwindows { + // already taken care of + } else { + ldr.Errorf(s, "adddynsym: unsupported binary format") + } +} + +func fieldtrack(arch *sys.Arch, l *loader.Loader) { + var buf strings.Builder + for i := loader.Sym(1); i < loader.Sym(l.NSym()); i++ { + if name := l.SymName(i); strings.HasPrefix(name, "go:track.") { + if l.AttrReachable(i) { + l.SetAttrSpecial(i, true) + l.SetAttrNotInSymbolTable(i, true) + buf.WriteString(name[9:]) + for p := l.Reachparent[i]; p != 0; p = l.Reachparent[p] { + buf.WriteString("\t") + buf.WriteString(l.SymName(p)) + } + buf.WriteString("\n") + } + } + } + l.Reachparent = nil // we are done with it + if *flagFieldTrack == "" { + return + } + s := l.Lookup(*flagFieldTrack, 0) + if s == 0 || 
!l.AttrReachable(s) { + return + } + bld := l.MakeSymbolUpdater(s) + bld.SetType(sym.SDATA) + addstrdata(arch, l, *flagFieldTrack, buf.String()) +} + +func (ctxt *Link) addexport() { + // Track undefined external symbols during external link. + if ctxt.LinkMode == LinkExternal { + for _, s := range ctxt.Textp { + if ctxt.loader.AttrSpecial(s) || ctxt.loader.AttrSubSymbol(s) { + continue + } + relocs := ctxt.loader.Relocs(s) + for i := 0; i < relocs.Count(); i++ { + if rs := relocs.At(i).Sym(); rs != 0 { + if ctxt.loader.SymType(rs) == sym.Sxxx && !ctxt.loader.AttrLocal(rs) { + // sanity check + if len(ctxt.loader.Data(rs)) != 0 { + panic("expected no data on undef symbol") + } + su := ctxt.loader.MakeSymbolUpdater(rs) + su.SetType(sym.SUNDEFEXT) + } + } + } + } + } + + // TODO(aix) + if ctxt.HeadType == objabi.Hdarwin || ctxt.HeadType == objabi.Haix { + return + } + + // Add dynamic symbols. + for _, s := range ctxt.dynexp { + // Consistency check. + if !ctxt.loader.AttrReachable(s) { + panic("dynexp entry not reachable") + } + + Adddynsym(ctxt.loader, &ctxt.Target, &ctxt.ArchSyms, s) + } + + for _, lib := range dedupLibraries(ctxt, dynlib) { + adddynlib(ctxt, lib) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/go_test.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/go_test.go new file mode 100644 index 0000000000000000000000000000000000000000..836731a891e193dcd78389502fd799ce46e7fe52 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/go_test.go @@ -0,0 +1,116 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
package ld

import (
	"internal/testenv"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"testing"

	"cmd/internal/objabi"
)

// TestDedupLibraries checks that on non-OpenBSD targets the library
// list passes through dedupLibraries unchanged.
func TestDedupLibraries(t *testing.T) {
	ctxt := &Link{}
	ctxt.Target.HeadType = objabi.Hlinux

	libs := []string{"libc.so", "libc.so.6"}

	got := dedupLibraries(ctxt, libs)
	if !reflect.DeepEqual(got, libs) {
		t.Errorf("dedupLibraries(%v) = %v, want %v", libs, got, libs)
	}
}

// TestDedupLibrariesOpenBSD checks the OpenBSD-specific dedup: a
// versioned library (libc.so.96.1) subsumes its unversioned alias
// (libc.so), and the result is sorted.
func TestDedupLibrariesOpenBSD(t *testing.T) {
	ctxt := &Link{}
	ctxt.Target.HeadType = objabi.Hopenbsd

	tests := []struct {
		libs []string
		want []string
	}{
		{
			libs: []string{"libc.so"},
			want: []string{"libc.so"},
		},
		{
			libs: []string{"libc.so", "libc.so.96.1"},
			want: []string{"libc.so.96.1"},
		},
		{
			libs: []string{"libc.so.96.1", "libc.so"},
			want: []string{"libc.so.96.1"},
		},
		{
			// Non-shared entries are never collapsed.
			libs: []string{"libc.a", "libc.so.96.1"},
			want: []string{"libc.a", "libc.so.96.1"},
		},
		{
			libs: []string{"libpthread.so", "libc.so"},
			want: []string{"libc.so", "libpthread.so"},
		},
		{
			libs: []string{"libpthread.so.26.1", "libpthread.so", "libc.so.96.1", "libc.so"},
			want: []string{"libc.so.96.1", "libpthread.so.26.1"},
		},
		{
			libs: []string{"libpthread.so.26.1", "libpthread.so", "libc.so.96.1", "libc.so", "libfoo.so"},
			want: []string{"libc.so.96.1", "libfoo.so", "libpthread.so.26.1"},
		},
	}

	for _, test := range tests {
		t.Run("dedup", func(t *testing.T) {
			got := dedupLibraries(ctxt, test.libs)
			if !reflect.DeepEqual(got, test.want) {
				t.Errorf("dedupLibraries(%v) = %v, want %v", test.libs, got, test.want)
			}
		})
	}
}

// TestDedupLibrariesOpenBSDLink builds and runs a cgo program that
// dynamically imports an unversioned libc.so alongside the versioned
// one pulled in via the net package, to confirm the deduped binary
// still loads and runs.
func TestDedupLibrariesOpenBSDLink(t *testing.T) {
	// The behavior we're checking for is of interest only on OpenBSD.
	if runtime.GOOS != "openbsd" {
		t.Skip("test only useful on openbsd")
	}

	testenv.MustHaveGoBuild(t)
	testenv.MustHaveCGO(t)
	t.Parallel()

	dir := t.TempDir()

	// cgo_import_dynamic both the unversioned libraries and pull in the
	// net package to get a cgo package with a versioned library.
	srcFile := filepath.Join(dir, "x.go")
	src := `package main

import (
	_ "net"
)

//go:cgo_import_dynamic _ _ "libc.so"

func main() {}`
	if err := os.WriteFile(srcFile, []byte(src), 0644); err != nil {
		t.Fatal(err)
	}

	exe := filepath.Join(dir, "deduped.exe")
	out, err := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", exe, srcFile).CombinedOutput()
	if err != nil {
		t.Fatalf("build failure: %s\n%s\n", err, string(out))
	}

	// Result should be runnable.
	if _, err = testenv.Command(t, exe).CombinedOutput(); err != nil {
		t.Fatal(err)
	}
}
+ +type heap []loader.Sym + +func (h *heap) push(s loader.Sym) { + *h = append(*h, s) + // sift up + n := len(*h) - 1 + for n > 0 { + p := (n - 1) / 2 // parent + if (*h)[p] <= (*h)[n] { + break + } + (*h)[n], (*h)[p] = (*h)[p], (*h)[n] + n = p + } +} + +func (h *heap) pop() loader.Sym { + r := (*h)[0] + n := len(*h) - 1 + (*h)[0] = (*h)[n] + *h = (*h)[:n] + + // sift down + i := 0 + for { + c := 2*i + 1 // left child + if c >= n { + break + } + if c1 := c + 1; c1 < n && (*h)[c1] < (*h)[c] { + c = c1 // right child + } + if (*h)[i] <= (*h)[c] { + break + } + (*h)[i], (*h)[c] = (*h)[c], (*h)[i] + i = c + } + + return r +} + +func (h *heap) empty() bool { return len(*h) == 0 } + +// Same as heap, but sorts alphabetically instead of by index. +// (Note that performance is not so critical here, as it is +// in the case above. Some simplification might be in order.) +type lexHeap []loader.Sym + +func (h *lexHeap) push(ldr *loader.Loader, s loader.Sym) { + *h = append(*h, s) + // sift up + n := len(*h) - 1 + for n > 0 { + p := (n - 1) / 2 // parent + if ldr.SymName((*h)[p]) <= ldr.SymName((*h)[n]) { + break + } + (*h)[n], (*h)[p] = (*h)[p], (*h)[n] + n = p + } +} + +func (h *lexHeap) pop(ldr *loader.Loader) loader.Sym { + r := (*h)[0] + n := len(*h) - 1 + (*h)[0] = (*h)[n] + *h = (*h)[:n] + + // sift down + i := 0 + for { + c := 2*i + 1 // left child + if c >= n { + break + } + if c1 := c + 1; c1 < n && ldr.SymName((*h)[c1]) < ldr.SymName((*h)[c]) { + c = c1 // right child + } + if ldr.SymName((*h)[i]) <= ldr.SymName((*h)[c]) { + break + } + (*h)[i], (*h)[c] = (*h)[c], (*h)[i] + i = c + } + + return r +} + +func (h *lexHeap) empty() bool { return len(*h) == 0 } diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/heap_test.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/heap_test.go new file mode 100644 index 0000000000000000000000000000000000000000..08c90301e2462413727b45fccf4f6537fc3b8fce --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/heap_test.go @@ -0,0 +1,90 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "cmd/link/internal/loader" + "testing" +) + +func TestHeap(t *testing.T) { + tests := [][]loader.Sym{ + {10, 20, 30, 40, 50, 60, 70, 80, 90, 100}, + {100, 90, 80, 70, 60, 50, 40, 30, 20, 10}, + {30, 50, 80, 20, 60, 70, 10, 100, 90, 40}, + } + for _, s := range tests { + h := heap{} + for _, i := range s { + h.push(i) + if !verify(&h, 0) { + t.Errorf("heap invariant violated: %v", h) + } + } + for j := 0; j < len(s); j++ { + x := h.pop() + if !verify(&h, 0) { + t.Errorf("heap invariant violated: %v", h) + } + // pop should return elements in ascending order. + if want := loader.Sym((j + 1) * 10); x != want { + t.Errorf("pop returns wrong element: want %d, got %d", want, x) + } + } + if !h.empty() { + t.Errorf("heap is not empty after all pops") + } + } + + // Also check that mixed pushes and pops work correctly. + for _, s := range tests { + h := heap{} + for i := 0; i < len(s)/2; i++ { + // two pushes, one pop + h.push(s[2*i]) + if !verify(&h, 0) { + t.Errorf("heap invariant violated: %v", h) + } + h.push(s[2*i+1]) + if !verify(&h, 0) { + t.Errorf("heap invariant violated: %v", h) + } + h.pop() + if !verify(&h, 0) { + t.Errorf("heap invariant violated: %v", h) + } + } + for !h.empty() { // pop remaining elements + h.pop() + if !verify(&h, 0) { + t.Errorf("heap invariant violated: %v", h) + } + } + } +} + +// recursively verify heap-ness, starting at element i. 
+func verify(h *heap, i int) bool { + n := len(*h) + c1 := 2*i + 1 // left child + c2 := 2*i + 2 // right child + if c1 < n { + if (*h)[c1] < (*h)[i] { + return false + } + if !verify(h, c1) { + return false + } + } + if c2 < n { + if (*h)[c2] < (*h)[i] { + return false + } + if !verify(h, c2) { + return false + } + } + return true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/inittask.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/inittask.go new file mode 100644 index 0000000000000000000000000000000000000000..c4c5beb55ed99e4191888217d11fcd658b673091 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/inittask.go @@ -0,0 +1,189 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "cmd/internal/objabi" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "fmt" + "sort" +) + +// Inittasks finds inittask records, figures out a good +// order to execute them in, and emits that order for the +// runtime to use. +// +// An inittask represents the initialization code that needs +// to be run for a package. For package p, the p..inittask +// symbol contains a list of init functions to run, both +// explicit user init functions and implicit compiler-generated +// init functions for initializing global variables like maps. +// +// In addition, inittask records have dependencies between each +// other, mirroring the import dependencies. So if package p +// imports package q, then there will be a dependency p -> q. +// We can't initialize package p until after package q has +// already been initialized. +// +// Package dependencies are encoded with relocations. If package +// p imports package q, then package p's inittask record will +// have a R_INITORDER relocation pointing to package q's inittask +// record. See cmd/compile/internal/pkginit/init.go. 
//
// This function computes an ordering of all of the inittask
// records so that the order respects all the dependencies,
// and given that restriction, orders the inittasks in
// lexicographic order.
func (ctxt *Link) inittasks() {
	switch ctxt.BuildMode {
	case BuildModeExe, BuildModePIE, BuildModeCArchive, BuildModeCShared:
		// Normally the inittask list will be run on program startup.
		ctxt.mainInittasks = ctxt.inittaskSym([]string{"main..inittask"}, "go:main.inittasks")
	case BuildModePlugin:
		// For plugins, the list will be run on plugin load.
		ctxt.mainInittasks = ctxt.inittaskSym([]string{fmt.Sprintf("%s..inittask", objabi.PathToPrefix(*flagPluginPath))}, "go:plugin.inittasks")
		// Make symbol local so multiple plugins don't clobber each other's inittask list.
		ctxt.loader.SetAttrLocal(ctxt.mainInittasks, true)
	case BuildModeShared:
		// For a shared library, all packages are roots.
		var roots []string
		for _, lib := range ctxt.Library {
			roots = append(roots, fmt.Sprintf("%s..inittask", objabi.PathToPrefix(lib.Pkg)))
		}
		ctxt.mainInittasks = ctxt.inittaskSym(roots, "go:shlib.inittasks")
		// Make symbol local so multiple shared objects don't clobber each other's inittask list.
		ctxt.loader.SetAttrLocal(ctxt.mainInittasks, true)
	default:
		Exitf("unhandled build mode %d", ctxt.BuildMode)
	}

	// If the runtime is one of the packages we are building,
	// initialize the runtime_inittasks variable.
	ldr := ctxt.loader
	if ldr.Lookup("runtime.runtime_inittasks", 0) != 0 {
		t := ctxt.inittaskSym([]string{"runtime..inittask"}, "go:runtime.inittasks")

		// This slice header is already defined in runtime/proc.go, so we update it here with new contents.
		sh := ldr.Lookup("runtime.runtime_inittasks", 0)
		sb := ldr.MakeSymbolUpdater(sh)
		sb.SetSize(0)
		sb.SetType(sym.SNOPTRDATA) // Could be SRODATA, but see issue 58857.
		// Slice header: data pointer, then len and cap (the number of
		// inittask pointers in t).
		sb.AddAddr(ctxt.Arch, t)
		sb.AddUint(ctxt.Arch, uint64(ldr.SymSize(t)/int64(ctxt.Arch.PtrSize)))
		sb.AddUint(ctxt.Arch, uint64(ldr.SymSize(t)/int64(ctxt.Arch.PtrSize)))
	}
}

// inittaskSym builds a symbol containing pointers to all the inittasks
// that need to be run, given a list of root inittask symbols. The
// pointers appear in a valid initialization order: dependencies first,
// and among initializable packages, lexicographic order. Returns 0 if
// none of the roots exist.
func (ctxt *Link) inittaskSym(rootNames []string, symName string) loader.Sym {
	ldr := ctxt.loader
	var roots []loader.Sym
	for _, n := range rootNames {
		p := ldr.Lookup(n, 0)
		if p != 0 {
			roots = append(roots, p)
		}
	}
	if len(roots) == 0 {
		// Nothing to do
		return 0
	}

	// Edges record dependencies between packages.
	// {from,to} is in edges if from's package imports to's package.
	// This list is used to implement reverse edge lookups.
	type edge struct {
		from, to loader.Sym
	}
	var edges []edge

	// List of packages that are ready to schedule. We use a lexicographic
	// ordered heap to pick the lexically earliest uninitialized but
	// initializable package at each step.
	var h lexHeap

	// m maps from an inittask symbol for package p to the number of
	// p's direct imports that have not yet been scheduled.
	m := map[loader.Sym]int{}

	// Find all reachable inittask records from the roots.
	// Keep track of the dependency edges between them in edges.
	// Keep track of how many imports each package has in m.
	// q is the list of found but not yet explored packages.
	var q []loader.Sym
	for _, p := range roots {
		m[p] = 0
		q = append(q, p)
	}
	for len(q) > 0 {
		// Depth-first exploration: take the most recently found package.
		x := q[len(q)-1]
		q = q[:len(q)-1]
		relocs := ldr.Relocs(x)
		n := relocs.Count()
		ndeps := 0
		for i := 0; i < n; i++ {
			r := relocs.At(i)
			if r.Type() != objabi.R_INITORDER {
				continue
			}
			ndeps++
			s := r.Sym()
			edges = append(edges, edge{from: x, to: s})
			if _, ok := m[s]; ok {
				continue // already found
			}
			q = append(q, s)
			m[s] = 0 // mark as found
		}
		m[x] = ndeps
		if ndeps == 0 {
			// No unscheduled dependencies; ready to run.
			h.push(ldr, x)
		}
	}

	// Sort edges so we can look them up by edge destination.
	sort.Slice(edges, func(i, j int) bool {
		return edges[i].to < edges[j].to
	})

	// Figure out the schedule.
	sched := ldr.MakeSymbolBuilder(symName)
	sched.SetType(sym.SNOPTRDATA) // Could be SRODATA, but see issue 58857.
	for !h.empty() {
		// Pick the lexicographically first initializable package.
		s := h.pop(ldr)

		// Add s to the schedule.
		if ldr.SymSize(s) > 8 {
			// Note: don't add s if it has no functions to run. We need
			// s during linking to compute an ordering, but the runtime
			// doesn't need to know about it. About 1/2 of stdlib packages
			// fit in this bucket.
			sched.AddAddr(ctxt.Arch, s)
		}

		// Find all incoming edges into s.
		a := sort.Search(len(edges), func(i int) bool { return edges[i].to >= s })
		b := sort.Search(len(edges), func(i int) bool { return edges[i].to > s })

		// Decrement the import count for all packages that import s.
		// If the count reaches 0, that package is now ready to schedule.
		for _, e := range edges[a:b] {
			m[e.from]--
			if m[e.from] == 0 {
				h.push(ldr, e.from)
			}
		}
	}

	// Any nonzero count left over means a dependency cycle (or a bug):
	// that package could never be scheduled.
	for s, n := range m {
		if n != 0 {
			Exitf("inittask for %s is not schedulable %d", ldr.SymName(s), n)
		}
	}
	return sched.Sym()
}
https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/span.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+
+package ld
+
+import (
+	"log"
+	"os"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"cmd/internal/goobj"
+	"cmd/link/internal/loader"
+	"cmd/link/internal/sym"
+)
+
+// readImportCfg parses the -importcfg file, which maps import paths to
+// package archive files ("packagefile"), shared libraries
+// ("packageshlib"), and carries the module info string ("modinfo").
+// Results are stored in ctxt.PackageFile / ctxt.PackageShlib. Any
+// malformed line is fatal.
+func (ctxt *Link) readImportCfg(file string) {
+	ctxt.PackageFile = make(map[string]string)
+	ctxt.PackageShlib = make(map[string]string)
+	data, err := os.ReadFile(file)
+	if err != nil {
+		log.Fatalf("-importcfg: %v", err)
+	}
+
+	for lineNum, line := range strings.Split(string(data), "\n") {
+		lineNum++ // 1-based
+		line = strings.TrimSpace(line)
+		// NOTE(review): this empty-line check is redundant — the
+		// combined check immediately below also skips empty lines.
+		if line == "" {
+			continue
+		}
+		// Skip blank lines and comments.
+		if line == "" || strings.HasPrefix(line, "#") {
+			continue
+		}
+
+		// Each directive is "verb args", with args usually "path=filename".
+		verb, args, found := strings.Cut(line, " ")
+		if found {
+			args = strings.TrimSpace(args)
+		}
+		before, after, exist := strings.Cut(args, "=")
+		if !exist {
+			before = ""
+		}
+		switch verb {
+		default:
+			log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
+		case "packagefile":
+			if before == "" || after == "" {
+				log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
+			}
+			ctxt.PackageFile[before] = after
+		case "packageshlib":
+			if before == "" || after == "" {
+				log.Fatalf(`%s:%d: invalid packageshlib: syntax is "packageshlib path=filename"`, file, lineNum)
+			}
+			ctxt.PackageShlib[before] = after
+		case "modinfo":
+			// modinfo's argument is a quoted Go string; unquote and
+			// inject it as the value of runtime.modinfo.
+			s, err := strconv.Unquote(args)
+			if err != nil {
+				log.Fatalf("%s:%d: invalid modinfo: %v", file, lineNum, err)
+			}
+			addstrdata1(ctxt, "runtime.modinfo="+s)
+		}
+	}
+}
+
+// pkgname returns the cleaned import path used as the package key.
+func pkgname(ctxt *Link, lib string) string {
+	return path.Clean(lib)
+}
+
+// findlib locates the file backing package lib, returning its path and
+// whether it is a shared library. Lookup order: -importcfg shlib map
+// (when linking shared), then the -importcfg packagefile map, then a
+// search of the -L library directories (continued below).
+func findlib(ctxt *Link, lib string) (string, bool) {
+	name := path.Clean(lib)
+
+	var pname string
+	isshlib := false
+
+	if ctxt.linkShared && ctxt.PackageShlib[name] != "" {
+		pname = ctxt.PackageShlib[name]
+		isshlib = true
+	} else if ctxt.PackageFile != nil {
+		pname = ctxt.PackageFile[name]
+		if pname == "" {
+			ctxt.Logf("cannot find package %s (using -importcfg)\n", name)
+			return "", false
+		}
+	} else {
+		pkg := pkgname(ctxt,
lib)
+
+	// search -L "libdir" directories
+	for _, dir := range ctxt.Libdir {
+		if ctxt.linkShared {
+			pname = filepath.Join(dir, pkg+".shlibname")
+			if _, err := os.Stat(pname); err == nil {
+				isshlib = true
+				break
+			}
+		}
+		// Prefer the archive; fall back to a bare object file.
+		// (name and pkg are both path.Clean(lib) here, so the keys agree.)
+		pname = filepath.Join(dir, name+".a")
+		if _, err := os.Stat(pname); err == nil {
+			break
+		}
+		pname = filepath.Join(dir, name+".o")
+		if _, err := os.Stat(pname); err == nil {
+			break
+		}
+	}
+	pname = filepath.Clean(pname)
+	}
+
+	return pname, isshlib
+}
+
+// addlib resolves the file for package lib (via findlib) and registers
+// it in the link context, returning the (possibly already present)
+// library entry. src/obj identify the referring source and object file,
+// used for diagnostics and fingerprint checking.
+func addlib(ctxt *Link, src, obj, lib string, fingerprint goobj.FingerprintType) *sym.Library {
+	pkg := pkgname(ctxt, lib)
+
+	// already loaded?
+	if l := ctxt.LibraryByPkg[pkg]; l != nil && !l.Fingerprint.IsZero() {
+		// Normally, packages are loaded in dependency order, and if l != nil
+		// l is already loaded with the actual fingerprint. In shared build mode,
+		// however, packages may be added not in dependency order, and it is
+		// possible that l's fingerprint is not yet loaded -- exclude it in
+		// checking.
+		checkFingerprint(l, l.Fingerprint, src, fingerprint)
+		return l
+	}
+
+	pname, isshlib := findlib(ctxt, lib)
+
+	if ctxt.Debugvlog > 1 {
+		ctxt.Logf("addlib: %s %s pulls in %s isshlib %v\n", obj, src, pname, isshlib)
+	}
+
+	if isshlib {
+		return addlibpath(ctxt, src, obj, "", pkg, pname, fingerprint)
+	}
+	return addlibpath(ctxt, src, obj, pname, pkg, "", fingerprint)
+}
+
+/*
+ * add library to library list, return added library.
+ * srcref: src file referring to package
+ * objref: object file referring to package
+ * file: object file, e.g., /home/rsc/go/pkg/container/vector.a
+ * pkg: package import path, e.g. container/vector
+ * shlib: path to shared library, or .shlibname file holding path
+ * fingerprint: if not 0, expected fingerprint for import from srcref
+ * fingerprint is 0 if the library is not imported (e.g.
main) */
+func addlibpath(ctxt *Link, srcref, objref, file, pkg, shlib string, fingerprint goobj.FingerprintType) *sym.Library {
+	// Idempotent: a package already registered wins, regardless of path.
+	if l := ctxt.LibraryByPkg[pkg]; l != nil {
+		return l
+	}
+
+	if ctxt.Debugvlog > 1 {
+		ctxt.Logf("addlibpath: srcref: %s objref: %s file: %s pkg: %s shlib: %s fingerprint: %x\n", srcref, objref, file, pkg, shlib, fingerprint)
+	}
+
+	l := &sym.Library{}
+	ctxt.LibraryByPkg[pkg] = l
+	ctxt.Library = append(ctxt.Library, l)
+	l.Objref = objref
+	l.Srcref = srcref
+	l.File = file
+	l.Pkg = pkg
+	l.Fingerprint = fingerprint
+	if shlib != "" {
+		// A .shlibname file is a one-line pointer to the real shared
+		// library; dereference it here.
+		if strings.HasSuffix(shlib, ".shlibname") {
+			data, err := os.ReadFile(shlib)
+			if err != nil {
+				Errorf(nil, "cannot read %s: %v", shlib, err)
+			}
+			shlib = strings.TrimSpace(string(data))
+		}
+		l.Shlib = shlib
+	}
+	return l
+}
+
+// atolwhex parses s as an integer using base-0 rules (decimal, 0x hex,
+// 0/0o octal, 0b binary). Parse errors are deliberately ignored and
+// yield 0.
+func atolwhex(s string) int64 {
+	n, _ := strconv.ParseInt(s, 0, 64)
+	return n
+}
+
+// PrepareAddmoduledata returns a symbol builder that target-specific
+// code can use to build up the linker-generated go.link.addmoduledata
+// function, along with the sym for runtime.addmoduledata itself. If
+// this function is not needed (for example in cases where we're
+// linking a module that contains the runtime) the returned builder
+// will be nil.
+func PrepareAddmoduledata(ctxt *Link) (*loader.SymbolBuilder, loader.Sym) {
+	if !ctxt.DynlinkingGo() {
+		return nil, 0
+	}
+	amd := ctxt.loader.LookupOrCreateSym("runtime.addmoduledata", 0)
+	if ctxt.loader.SymType(amd) == sym.STEXT && ctxt.BuildMode != BuildModePlugin {
+		// we're linking a module containing the runtime -> no need for
+		// an init function
+		return nil, 0
+	}
+	ctxt.loader.SetAttrReachable(amd, true)
+
+	// Create a new init func text symbol. Caller will populate this
+	// sym with arch-specific content.
+ ifs := ctxt.loader.LookupOrCreateSym("go:link.addmoduledata", 0) + initfunc := ctxt.loader.MakeSymbolUpdater(ifs) + ctxt.loader.SetAttrReachable(ifs, true) + ctxt.loader.SetAttrLocal(ifs, true) + initfunc.SetType(sym.STEXT) + + // Add the init func and/or addmoduledata to Textp. + if ctxt.BuildMode == BuildModePlugin { + ctxt.Textp = append(ctxt.Textp, amd) + } + ctxt.Textp = append(ctxt.Textp, initfunc.Sym()) + + // Create an init array entry + amdi := ctxt.loader.LookupOrCreateSym("go:link.addmoduledatainit", 0) + initarray_entry := ctxt.loader.MakeSymbolUpdater(amdi) + ctxt.loader.SetAttrReachable(amdi, true) + ctxt.loader.SetAttrLocal(amdi, true) + initarray_entry.SetType(sym.SINITARR) + initarray_entry.AddAddr(ctxt.Arch, ifs) + + return initfunc, amd +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/ld_test.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/ld_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1767667759ec948734543449608c8d7870716c66 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/ld_test.go @@ -0,0 +1,414 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "bytes" + "debug/pe" + "fmt" + "internal/testenv" + "os" + "path/filepath" + "runtime" + "strings" + "testing" +) + +func TestUndefinedRelocErrors(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // When external linking, symbols may be defined externally, so we allow + // undefined symbols and let external linker resolve. Skip the test. + testenv.MustInternalLink(t, false) + + t.Parallel() + + out, err := testenv.Command(t, testenv.GoToolPath(t), "build", "./testdata/issue10978").CombinedOutput() + if err == nil { + t.Fatal("expected build to fail") + } + + wantErrors := map[string]int{ + // Main function has dedicated error message. 
+ "function main is undeclared in the main package": 1, + + // Single error reporting per each symbol. + // This way, duplicated messages are not reported for + // multiple relocations with a same name. + "main.defined1: relocation target main.undefined not defined": 1, + "main.defined2: relocation target main.undefined not defined": 1, + } + unexpectedErrors := map[string]int{} + + for _, l := range strings.Split(string(out), "\n") { + if strings.HasPrefix(l, "#") || l == "" { + continue + } + matched := "" + for want := range wantErrors { + if strings.Contains(l, want) { + matched = want + break + } + } + if matched != "" { + wantErrors[matched]-- + } else { + unexpectedErrors[l]++ + } + } + + for want, n := range wantErrors { + switch { + case n > 0: + t.Errorf("unmatched error: %s (x%d)", want, n) + case n < 0: + if runtime.GOOS == "android" && runtime.GOARCH == "arm64" { + testenv.SkipFlaky(t, 58807) + } + t.Errorf("extra errors: %s (x%d)", want, -n) + } + } + for unexpected, n := range unexpectedErrors { + t.Errorf("unexpected error: %s (x%d)", unexpected, n) + } +} + +const carchiveSrcText = ` +package main + +//export GoFunc +func GoFunc() { + println(42) +} + +func main() { +} +` + +func TestArchiveBuildInvokeWithExec(t *testing.T) { + t.Parallel() + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + + // run this test on just a small set of platforms (no need to test it + // across the board given the nature of the test). 
+ pair := runtime.GOOS + "-" + runtime.GOARCH + switch pair { + case "darwin-amd64", "darwin-arm64", "linux-amd64", "freebsd-amd64": + default: + t.Skip("no need for test on " + pair) + } + switch runtime.GOOS { + case "openbsd", "windows": + t.Skip("c-archive unsupported") + } + dir := t.TempDir() + + srcfile := filepath.Join(dir, "test.go") + arfile := filepath.Join(dir, "test.a") + if err := os.WriteFile(srcfile, []byte(carchiveSrcText), 0666); err != nil { + t.Fatal(err) + } + + ldf := fmt.Sprintf("-ldflags=-v -tmpdir=%s", dir) + argv := []string{"build", "-buildmode=c-archive", "-o", arfile, ldf, srcfile} + out, err := testenv.Command(t, testenv.GoToolPath(t), argv...).CombinedOutput() + if err != nil { + t.Fatalf("build failure: %s\n%s\n", err, string(out)) + } + + found := false + const want = "invoking archiver with syscall.Exec" + for _, l := range strings.Split(string(out), "\n") { + if strings.HasPrefix(l, want) { + found = true + break + } + } + + if !found { + t.Errorf("expected '%s' in -v output, got:\n%s\n", want, string(out)) + } +} + +func TestLargeTextSectionSplitting(t *testing.T) { + switch runtime.GOARCH { + case "ppc64", "ppc64le", "arm": + case "arm64": + if runtime.GOOS == "darwin" { + break + } + fallthrough + default: + t.Skipf("text section splitting is not done in %s/%s", runtime.GOOS, runtime.GOARCH) + } + + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + t.Parallel() + dir := t.TempDir() + + // NB: the use of -ldflags=-debugtextsize=1048576 tells the linker to + // split text sections at a size threshold of 1M instead of the + // architected limit of 67M or larger. The choice of building cmd/go + // is arbitrary; we just need something sufficiently large that uses + // external linking. 
+ exe := filepath.Join(dir, "go.exe") + out, err := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", exe, "-ldflags=-linkmode=external -debugtextsize=1048576", "cmd/go").CombinedOutput() + if err != nil { + t.Fatalf("build failure: %s\n%s\n", err, string(out)) + } + + // Check that we did split text sections. + out, err = testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", exe).CombinedOutput() + if err != nil { + t.Fatalf("nm failure: %s\n%s\n", err, string(out)) + } + if !bytes.Contains(out, []byte("runtime.text.1")) { + t.Errorf("runtime.text.1 not found, text section not split?") + } + + // Result should be runnable. + _, err = testenv.Command(t, exe, "version").CombinedOutput() + if err != nil { + t.Fatal(err) + } +} + +func TestWindowsBuildmodeCSharedASLR(t *testing.T) { + platform := fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH) + switch platform { + case "windows/amd64", "windows/386": + default: + t.Skip("skipping windows amd64/386 only test") + } + + testenv.MustHaveCGO(t) + + t.Run("aslr", func(t *testing.T) { + testWindowsBuildmodeCSharedASLR(t, true) + }) + t.Run("no-aslr", func(t *testing.T) { + testWindowsBuildmodeCSharedASLR(t, false) + }) +} + +func testWindowsBuildmodeCSharedASLR(t *testing.T, useASLR bool) { + t.Parallel() + testenv.MustHaveGoBuild(t) + + dir := t.TempDir() + + srcfile := filepath.Join(dir, "test.go") + objfile := filepath.Join(dir, "test.dll") + if err := os.WriteFile(srcfile, []byte(`package main; func main() { print("hello") }`), 0666); err != nil { + t.Fatal(err) + } + argv := []string{"build", "-buildmode=c-shared"} + if !useASLR { + argv = append(argv, "-ldflags", "-aslr=false") + } + argv = append(argv, "-o", objfile, srcfile) + out, err := testenv.Command(t, testenv.GoToolPath(t), argv...).CombinedOutput() + if err != nil { + t.Fatalf("build failure: %s\n%s\n", err, string(out)) + } + + f, err := pe.Open(objfile) + if err != nil { + t.Fatal(err) + } + defer f.Close() + var dc uint16 + switch oh := 
f.OptionalHeader.(type) { + case *pe.OptionalHeader32: + dc = oh.DllCharacteristics + case *pe.OptionalHeader64: + dc = oh.DllCharacteristics + hasHEVA := (dc & pe.IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA) != 0 + if useASLR && !hasHEVA { + t.Error("IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA flag is not set") + } else if !useASLR && hasHEVA { + t.Error("IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA flag should not be set") + } + default: + t.Fatalf("unexpected optional header type of %T", f.OptionalHeader) + } + hasASLR := (dc & pe.IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) != 0 + if useASLR && !hasASLR { + t.Error("IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag is not set") + } else if !useASLR && hasASLR { + t.Error("IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag should not be set") + } +} + +// TestMemProfileCheck tests that cmd/link sets +// runtime.disableMemoryProfiling if the runtime.MemProfile +// symbol is unreachable after deadcode (and not dynlinking). +// The runtime then uses that to set the default value of +// runtime.MemProfileRate, which this test checks. 
+func TestMemProfileCheck(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + tests := []struct { + name string + prog string + wantOut string + }{ + { + "no_memprofile", + ` +package main +import "runtime" +func main() { + println(runtime.MemProfileRate) +} +`, + "0", + }, + { + "with_memprofile", + ` +package main +import "runtime" +func main() { + runtime.MemProfile(nil, false) + println(runtime.MemProfileRate) +} +`, + "524288", + }, + { + "with_memprofile_indirect", + ` +package main +import "runtime" +var f = runtime.MemProfile +func main() { + if f == nil { + panic("no f") + } + println(runtime.MemProfileRate) +} +`, + "524288", + }, + { + "with_memprofile_runtime_pprof", + ` +package main +import "runtime" +import "runtime/pprof" +func main() { + _ = pprof.Profiles() + println(runtime.MemProfileRate) +} +`, + "524288", + }, + { + "with_memprofile_http_pprof", + ` +package main +import "runtime" +import _ "net/http/pprof" +func main() { + println(runtime.MemProfileRate) +} +`, + "524288", + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + tempDir := t.TempDir() + src := filepath.Join(tempDir, "x.go") + if err := os.WriteFile(src, []byte(tt.prog), 0644); err != nil { + t.Fatal(err) + } + cmd := testenv.Command(t, testenv.GoToolPath(t), "run", src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + got := strings.TrimSpace(string(out)) + if got != tt.wantOut { + t.Errorf("got %q; want %q", got, tt.wantOut) + } + }) + } +} + +func TestRISCVTrampolines(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + tmpDir := t.TempDir() + tmpFile := filepath.Join(tmpDir, "x.s") + + // Calling b from a or c should not use trampolines, however + // calling from d to a will require one. 
+ buf := new(bytes.Buffer) + fmt.Fprintf(buf, "TEXT a(SB),$0-0\n") + for i := 0; i < 1<<17; i++ { + fmt.Fprintf(buf, "\tADD $0, X0, X0\n") + } + fmt.Fprintf(buf, "\tCALL b(SB)\n") + fmt.Fprintf(buf, "\tRET\n") + fmt.Fprintf(buf, "TEXT b(SB),$0-0\n") + fmt.Fprintf(buf, "\tRET\n") + fmt.Fprintf(buf, "TEXT c(SB),$0-0\n") + fmt.Fprintf(buf, "\tCALL b(SB)\n") + fmt.Fprintf(buf, "\tRET\n") + fmt.Fprintf(buf, "TEXT ·d(SB),0,$0-0\n") + for i := 0; i < 1<<17; i++ { + fmt.Fprintf(buf, "\tADD $0, X0, X0\n") + } + fmt.Fprintf(buf, "\tCALL a(SB)\n") + fmt.Fprintf(buf, "\tCALL c(SB)\n") + fmt.Fprintf(buf, "\tRET\n") + if err := os.WriteFile(tmpFile, buf.Bytes(), 0644); err != nil { + t.Fatalf("Failed to write assembly file: %v", err) + } + + if err := os.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module riscvtramp"), 0644); err != nil { + t.Fatalf("Failed to write file: %v\n", err) + } + main := `package main +func main() { + d() +} + +func d() +` + if err := os.WriteFile(filepath.Join(tmpDir, "x.go"), []byte(main), 0644); err != nil { + t.Fatalf("failed to write main: %v\n", err) + } + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-linkmode=internal") + cmd.Dir = tmpDir + cmd.Env = append(os.Environ(), "GOARCH=riscv64", "GOOS=linux") + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Build failed: %v, output: %s", err, out) + } + + // Check what trampolines exist. 
+ cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", filepath.Join(tmpDir, "riscvtramp")) + cmd.Env = append(os.Environ(), "GOARCH=riscv64", "GOOS=linux") + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("nm failure: %s\n%s\n", err, string(out)) + } + if !bytes.Contains(out, []byte(" T a-tramp0")) { + t.Errorf("Trampoline a-tramp0 is missing") + } + if bytes.Contains(out, []byte(" T b-tramp0")) { + t.Errorf("Trampoline b-tramp0 exists unnecessarily") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/lib.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/lib.go new file mode 100644 index 0000000000000000000000000000000000000000..eab74dc32864ed201b580584bb71a6d42a157c75 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/lib.go @@ -0,0 +1,2856 @@ +// Inferno utils/8l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/8l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package ld + +import ( + "bytes" + "debug/elf" + "debug/macho" + "encoding/base64" + "encoding/binary" + "fmt" + "internal/buildcfg" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "sort" + "strings" + "sync" + + "cmd/internal/bio" + "cmd/internal/goobj" + "cmd/internal/notsha256" + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loadelf" + "cmd/link/internal/loader" + "cmd/link/internal/loadmacho" + "cmd/link/internal/loadpe" + "cmd/link/internal/loadxcoff" + "cmd/link/internal/sym" +) + +// Data layout and relocation. + +// Derived from Inferno utils/6l/l.h +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/l.h +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// ArchSyms holds a number of architecture specific symbols used during +// relocation. Rather than allowing them universal access to all symbols, +// we keep a subset for relocation application. 
+type ArchSyms struct { + Rel loader.Sym + Rela loader.Sym + RelPLT loader.Sym + RelaPLT loader.Sym + + LinkEditGOT loader.Sym + LinkEditPLT loader.Sym + + TOC loader.Sym + DotTOC []loader.Sym // for each version + + GOT loader.Sym + PLT loader.Sym + GOTPLT loader.Sym + + Tlsg loader.Sym + Tlsoffset int + + Dynamic loader.Sym + DynSym loader.Sym + DynStr loader.Sym + + unreachableMethod loader.Sym + + // Symbol containing a list of all the inittasks that need + // to be run at startup. + mainInittasks loader.Sym +} + +// mkArchSym is a helper for setArchSyms, to set up a special symbol. +func (ctxt *Link) mkArchSym(name string, ver int, ls *loader.Sym) { + *ls = ctxt.loader.LookupOrCreateSym(name, ver) + ctxt.loader.SetAttrReachable(*ls, true) +} + +// mkArchSymVec is similar to setArchSyms, but operates on elements within +// a slice, where each element corresponds to some symbol version. +func (ctxt *Link) mkArchSymVec(name string, ver int, ls []loader.Sym) { + ls[ver] = ctxt.loader.LookupOrCreateSym(name, ver) + ctxt.loader.SetAttrReachable(ls[ver], true) +} + +// setArchSyms sets up the ArchSyms structure, and must be called before +// relocations are applied. 
+func (ctxt *Link) setArchSyms() { + ctxt.mkArchSym(".got", 0, &ctxt.GOT) + ctxt.mkArchSym(".plt", 0, &ctxt.PLT) + ctxt.mkArchSym(".got.plt", 0, &ctxt.GOTPLT) + ctxt.mkArchSym(".dynamic", 0, &ctxt.Dynamic) + ctxt.mkArchSym(".dynsym", 0, &ctxt.DynSym) + ctxt.mkArchSym(".dynstr", 0, &ctxt.DynStr) + ctxt.mkArchSym("runtime.unreachableMethod", abiInternalVer, &ctxt.unreachableMethod) + + if ctxt.IsPPC64() { + ctxt.mkArchSym("TOC", 0, &ctxt.TOC) + + ctxt.DotTOC = make([]loader.Sym, ctxt.MaxVersion()+1) + for i := 0; i <= ctxt.MaxVersion(); i++ { + if i >= sym.SymVerABICount && i < sym.SymVerStatic { // these versions are not used currently + continue + } + ctxt.mkArchSymVec(".TOC.", i, ctxt.DotTOC) + } + } + if ctxt.IsElf() { + ctxt.mkArchSym(".rel", 0, &ctxt.Rel) + ctxt.mkArchSym(".rela", 0, &ctxt.Rela) + ctxt.mkArchSym(".rel.plt", 0, &ctxt.RelPLT) + ctxt.mkArchSym(".rela.plt", 0, &ctxt.RelaPLT) + } + if ctxt.IsDarwin() { + ctxt.mkArchSym(".linkedit.got", 0, &ctxt.LinkEditGOT) + ctxt.mkArchSym(".linkedit.plt", 0, &ctxt.LinkEditPLT) + } +} + +type Arch struct { + Funcalign int + Maxalign int + Minalign int + Dwarfregsp int + Dwarfreglr int + + // Threshold of total text size, used for trampoline insertion. If the total + // text size is smaller than TrampLimit, we won't need to insert trampolines. + // It is pretty close to the offset range of a direct CALL machine instruction. + // We leave some room for extra stuff like PLT stubs. + TrampLimit uint64 + + // Empty spaces between codeblocks will be padded with this value. + // For example an architecture might want to pad with a trap instruction to + // catch wayward programs. Architectures that do not define a padding value + // are padded with zeros. + CodePad []byte + + // Plan 9 variables. 
+ Plan9Magic uint32 + Plan9_64Bit bool + + Adddynrel func(*Target, *loader.Loader, *ArchSyms, loader.Sym, loader.Reloc, int) bool + Archinit func(*Link) + // Archreloc is an arch-specific hook that assists in relocation processing + // (invoked by 'relocsym'); it handles target-specific relocation tasks. + // Here "rel" is the current relocation being examined, "sym" is the symbol + // containing the chunk of data to which the relocation applies, and "off" + // is the contents of the to-be-relocated data item (from sym.P). Return + // value is the appropriately relocated value (to be written back to the + // same spot in sym.P), number of external _host_ relocations needed (i.e. + // ELF/Mach-O/etc. relocations, not Go relocations, this must match ELF.Reloc1, + // etc.), and a boolean indicating success/failure (a failing value indicates + // a fatal error). + Archreloc func(*Target, *loader.Loader, *ArchSyms, loader.Reloc, loader.Sym, + int64) (relocatedOffset int64, nExtReloc int, ok bool) + // Archrelocvariant is a second arch-specific hook used for + // relocation processing; it handles relocations where r.Type is + // insufficient to describe the relocation (r.Variant != + // sym.RV_NONE). Here "rel" is the relocation being applied, "sym" + // is the symbol containing the chunk of data to which the + // relocation applies, and "off" is the contents of the + // to-be-relocated data item (from sym.P). Return is an updated + // offset value. + Archrelocvariant func(target *Target, ldr *loader.Loader, rel loader.Reloc, + rv sym.RelocVariant, sym loader.Sym, offset int64, data []byte) (relocatedOffset int64) + + // Generate a trampoline for a call from s to rs if necessary. ri is + // index of the relocation. 
+ Trampoline func(ctxt *Link, ldr *loader.Loader, ri int, rs, s loader.Sym) + + // Assembling the binary breaks into two phases, writing the code/data/ + // dwarf information (which is rather generic), and some more architecture + // specific work like setting up the elf headers/dynamic relocations, etc. + // The phases are called "Asmb" and "Asmb2". Asmb2 needs to be defined for + // every architecture, but only if architecture has an Asmb function will + // it be used for assembly. Otherwise a generic assembly Asmb function is + // used. + Asmb func(*Link, *loader.Loader) + Asmb2 func(*Link, *loader.Loader) + + // Extreloc is an arch-specific hook that converts a Go relocation to an + // external relocation. Return the external relocation and whether it is + // needed. + Extreloc func(*Target, *loader.Loader, loader.Reloc, loader.Sym) (loader.ExtReloc, bool) + + Gentext func(*Link, *loader.Loader) // Generate text before addressing has been performed. + Machoreloc1 func(*sys.Arch, *OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool + MachorelocSize uint32 // size of an Mach-O relocation record, must match Machoreloc1. + PEreloc1 func(*sys.Arch, *OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool + Xcoffreloc1 func(*sys.Arch, *OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool + + // Generate additional symbols for the native symbol table just prior to + // code generation. + GenSymsLate func(*Link, *loader.Loader) + + // TLSIEtoLE converts a TLS Initial Executable relocation to + // a TLS Local Executable relocation. + // + // This is possible when a TLS IE relocation refers to a local + // symbol in an executable, which is typical when internally + // linking PIE binaries. + TLSIEtoLE func(P []byte, off, size int) + + // optional override for assignAddress + AssignAddress func(ldr *loader.Loader, sect *sym.Section, n int, s loader.Sym, va uint64, isTramp bool) (*sym.Section, int, uint64) + + // ELF specific information. 
+ ELF ELFArch +} + +var ( + thearch Arch + lcSize int32 + rpath Rpath + spSize int32 + symSize int32 +) + +const ( + MINFUNC = 16 // minimum size for a function +) + +// Symbol version of ABIInternal symbols. It is sym.SymVerABIInternal if ABI wrappers +// are used, 0 otherwise. +var abiInternalVer = sym.SymVerABIInternal + +// DynlinkingGo reports whether we are producing Go code that can live +// in separate shared libraries linked together at runtime. +func (ctxt *Link) DynlinkingGo() bool { + if !ctxt.Loaded { + panic("DynlinkingGo called before all symbols loaded") + } + return ctxt.BuildMode == BuildModeShared || ctxt.linkShared || ctxt.BuildMode == BuildModePlugin || ctxt.canUsePlugins +} + +// CanUsePlugins reports whether a plugins can be used +func (ctxt *Link) CanUsePlugins() bool { + if !ctxt.Loaded { + panic("CanUsePlugins called before all symbols loaded") + } + return ctxt.canUsePlugins +} + +// NeedCodeSign reports whether we need to code-sign the output binary. +func (ctxt *Link) NeedCodeSign() bool { + return ctxt.IsDarwin() && ctxt.IsARM64() +} + +var ( + dynlib []string + ldflag []string + havedynamic int + Funcalign int + iscgo bool + elfglobalsymndx int + interpreter string + + debug_s bool // backup old value of debug['s'] + HEADR int32 + + nerrors int + liveness int64 // size of liveness data (funcdata), printed if -v + + // See -strictdups command line flag. 
+ checkStrictDups int // 0=off 1=warning 2=error + strictDupMsgCount int +) + +var ( + Segtext sym.Segment + Segrodata sym.Segment + Segrelrodata sym.Segment + Segdata sym.Segment + Segdwarf sym.Segment + Segpdata sym.Segment // windows-only + Segxdata sym.Segment // windows-only + + Segments = []*sym.Segment{&Segtext, &Segrodata, &Segrelrodata, &Segdata, &Segdwarf, &Segpdata, &Segxdata} +) + +const pkgdef = "__.PKGDEF" + +var ( + // externalobj is set to true if we see an object compiled by + // the host compiler that is not from a package that is known + // to support internal linking mode. + externalobj = false + + // dynimportfail is a list of packages for which generating + // the dynimport file, _cgo_import.go, failed. If there are + // any of these objects, we must link externally. Issue 52863. + dynimportfail []string + + // preferlinkext is a list of packages for which the Go command + // noticed use of peculiar C flags. If we see any of these, + // default to linking externally unless overridden by the + // user. See issues #58619, #58620, and #58848. + preferlinkext []string + + // unknownObjFormat is set to true if we see an object whose + // format we don't recognize. + unknownObjFormat = false + + theline string +) + +func Lflag(ctxt *Link, arg string) { + ctxt.Libdir = append(ctxt.Libdir, arg) +} + +/* + * Unix doesn't like it when we write to a running (or, sometimes, + * recently run) binary, so remove the output file before writing it. + * On Windows 7, remove() can force a subsequent create() to fail. + * S_ISREG() does not exist on Plan 9. + */ +func mayberemoveoutfile() { + if fi, err := os.Lstat(*flagOutfile); err == nil && !fi.Mode().IsRegular() { + return + } + os.Remove(*flagOutfile) +} + +func libinit(ctxt *Link) { + Funcalign = thearch.Funcalign + + // add goroot to the end of the libdir list. 
+ suffix := "" + + suffixsep := "" + if *flagInstallSuffix != "" { + suffixsep = "_" + suffix = *flagInstallSuffix + } else if *flagRace { + suffixsep = "_" + suffix = "race" + } else if *flagMsan { + suffixsep = "_" + suffix = "msan" + } else if *flagAsan { + suffixsep = "_" + suffix = "asan" + } + + if buildcfg.GOROOT != "" { + Lflag(ctxt, filepath.Join(buildcfg.GOROOT, "pkg", fmt.Sprintf("%s_%s%s%s", buildcfg.GOOS, buildcfg.GOARCH, suffixsep, suffix))) + } + + mayberemoveoutfile() + + if err := ctxt.Out.Open(*flagOutfile); err != nil { + Exitf("cannot create %s: %v", *flagOutfile, err) + } + + if *flagEntrySymbol == "" { + switch ctxt.BuildMode { + case BuildModeCShared, BuildModeCArchive: + *flagEntrySymbol = fmt.Sprintf("_rt0_%s_%s_lib", buildcfg.GOARCH, buildcfg.GOOS) + case BuildModeExe, BuildModePIE: + *flagEntrySymbol = fmt.Sprintf("_rt0_%s_%s", buildcfg.GOARCH, buildcfg.GOOS) + case BuildModeShared, BuildModePlugin: + // No *flagEntrySymbol for -buildmode=shared and plugin + default: + Errorf(nil, "unknown *flagEntrySymbol for buildmode %v", ctxt.BuildMode) + } + } +} + +func exitIfErrors() { + if nerrors != 0 || checkStrictDups > 1 && strictDupMsgCount > 0 { + mayberemoveoutfile() + Exit(2) + } + +} + +func errorexit() { + exitIfErrors() + Exit(0) +} + +func loadinternal(ctxt *Link, name string) *sym.Library { + zerofp := goobj.FingerprintType{} + if ctxt.linkShared && ctxt.PackageShlib != nil { + if shlib := ctxt.PackageShlib[name]; shlib != "" { + return addlibpath(ctxt, "internal", "internal", "", name, shlib, zerofp) + } + } + if ctxt.PackageFile != nil { + if pname := ctxt.PackageFile[name]; pname != "" { + return addlibpath(ctxt, "internal", "internal", pname, name, "", zerofp) + } + ctxt.Logf("loadinternal: cannot find %s\n", name) + return nil + } + + for _, libdir := range ctxt.Libdir { + if ctxt.linkShared { + shlibname := filepath.Join(libdir, name+".shlibname") + if ctxt.Debugvlog != 0 { + ctxt.Logf("searching for %s.a in %s\n", name, 
shlibname) + } + if _, err := os.Stat(shlibname); err == nil { + return addlibpath(ctxt, "internal", "internal", "", name, shlibname, zerofp) + } + } + pname := filepath.Join(libdir, name+".a") + if ctxt.Debugvlog != 0 { + ctxt.Logf("searching for %s.a in %s\n", name, pname) + } + if _, err := os.Stat(pname); err == nil { + return addlibpath(ctxt, "internal", "internal", pname, name, "", zerofp) + } + } + + if name == "runtime" { + Exitf("error: unable to find runtime.a") + } + ctxt.Logf("warning: unable to find %s.a\n", name) + return nil +} + +// extld returns the current external linker. +func (ctxt *Link) extld() []string { + if len(flagExtld) == 0 { + // Return the default external linker for the platform. + // This only matters when link tool is called directly without explicit -extld, + // go tool already passes the correct linker in other cases. + switch buildcfg.GOOS { + case "darwin", "freebsd", "openbsd": + flagExtld = []string{"clang"} + default: + flagExtld = []string{"gcc"} + } + } + return flagExtld +} + +// findLibPathCmd uses cmd command to find gcc library libname. +// It returns library full path if found, or "none" if not found. +func (ctxt *Link) findLibPathCmd(cmd, libname string) string { + extld := ctxt.extld() + name, args := extld[0], extld[1:] + args = append(args, hostlinkArchArgs(ctxt.Arch)...) + args = append(args, cmd) + if ctxt.Debugvlog != 0 { + ctxt.Logf("%s %v\n", extld, args) + } + out, err := exec.Command(name, args...).Output() + if err != nil { + if ctxt.Debugvlog != 0 { + ctxt.Logf("not using a %s file because compiler failed\n%v\n%s\n", libname, err, out) + } + return "none" + } + return strings.TrimSpace(string(out)) +} + +// findLibPath searches for library libname. +// It returns library full path if found, or "none" if not found. 
+func (ctxt *Link) findLibPath(libname string) string {
+	return ctxt.findLibPathCmd("--print-file-name="+libname, libname)
+}
+
+func (ctxt *Link) loadlib() {
+	var flags uint32
+	switch *FlagStrictDups {
+	case 0:
+		// nothing to do
+	case 1, 2:
+		flags |= loader.FlagStrictDups
+	default:
+		log.Fatalf("invalid -strictdups flag value %d", *FlagStrictDups)
+	}
+	ctxt.loader = loader.NewLoader(flags, &ctxt.ErrorReporter.ErrorReporter)
+	ctxt.ErrorReporter.SymName = func(s loader.Sym) string {
+		return ctxt.loader.SymName(s)
+	}
+
+	// ctxt.Library grows during the loop, so not a range loop.
+	i := 0
+	for ; i < len(ctxt.Library); i++ {
+		lib := ctxt.Library[i]
+		if lib.Shlib == "" {
+			if ctxt.Debugvlog > 1 {
+				ctxt.Logf("autolib: %s (from %s)\n", lib.File, lib.Objref)
+			}
+			loadobjfile(ctxt, lib)
+		}
+	}
+
+	// load internal packages, if not already
+	if *flagRace {
+		loadinternal(ctxt, "runtime/race")
+	}
+	if *flagMsan {
+		loadinternal(ctxt, "runtime/msan")
+	}
+	if *flagAsan {
+		loadinternal(ctxt, "runtime/asan")
+	}
+	loadinternal(ctxt, "runtime")
+	for ; i < len(ctxt.Library); i++ {
+		lib := ctxt.Library[i]
+		if lib.Shlib == "" {
+			loadobjfile(ctxt, lib)
+		}
+	}
+	// At this point, the Go objects are "preloaded". Not all the symbols are
+	// added to the symbol table (only defined package symbols are). Looking
+	// up symbol by name may not get expected result.
+
+	iscgo = ctxt.LibraryByPkg["runtime/cgo"] != nil
+
+	// Plugins require cgo support to function. Similarly, plugins may require additional
+	// internal linker support on some platforms which may not be implemented.
+	ctxt.canUsePlugins = ctxt.LibraryByPkg["plugin"] != nil && iscgo
+
+	// We now have enough information to determine the link mode.
+	determineLinkMode(ctxt)
+
+	if ctxt.LinkMode == LinkExternal && !iscgo && !(buildcfg.GOOS == "darwin" && ctxt.BuildMode != BuildModePlugin && ctxt.Arch.Family == sys.AMD64) {
+		// This indicates a user requested -linkmode=external.
+ // The startup code uses an import of runtime/cgo to decide + // whether to initialize the TLS. So give it one. This could + // be handled differently but it's an unusual case. + if lib := loadinternal(ctxt, "runtime/cgo"); lib != nil && lib.Shlib == "" { + if ctxt.BuildMode == BuildModeShared || ctxt.linkShared { + Exitf("cannot implicitly include runtime/cgo in a shared library") + } + for ; i < len(ctxt.Library); i++ { + lib := ctxt.Library[i] + if lib.Shlib == "" { + loadobjfile(ctxt, lib) + } + } + } + } + + // Add non-package symbols and references of externally defined symbols. + ctxt.loader.LoadSyms(ctxt.Arch) + + // Load symbols from shared libraries, after all Go object symbols are loaded. + for _, lib := range ctxt.Library { + if lib.Shlib != "" { + if ctxt.Debugvlog > 1 { + ctxt.Logf("autolib: %s (from %s)\n", lib.Shlib, lib.Objref) + } + ldshlibsyms(ctxt, lib.Shlib) + } + } + + // Process cgo directives (has to be done before host object loading). + ctxt.loadcgodirectives() + + // Conditionally load host objects, or setup for external linking. + hostobjs(ctxt) + hostlinksetup(ctxt) + + if ctxt.LinkMode == LinkInternal && len(hostobj) != 0 { + // If we have any undefined symbols in external + // objects, try to read them from the libgcc file. + any := false + undefs, froms := ctxt.loader.UndefinedRelocTargets(1) + if len(undefs) > 0 { + any = true + if ctxt.Debugvlog > 1 { + ctxt.Logf("loadlib: first unresolved is %s [%d] from %s [%d]\n", + ctxt.loader.SymName(undefs[0]), undefs[0], + ctxt.loader.SymName(froms[0]), froms[0]) + } + } + if any { + if *flagLibGCC == "" { + *flagLibGCC = ctxt.findLibPathCmd("--print-libgcc-file-name", "libgcc") + } + if runtime.GOOS == "openbsd" && *flagLibGCC == "libgcc.a" { + // On OpenBSD `clang --print-libgcc-file-name` returns "libgcc.a". + // In this case we fail to load libgcc.a and can encounter link + // errors - see if we can find libcompiler_rt.a instead. 
+ *flagLibGCC = ctxt.findLibPathCmd("--print-file-name=libcompiler_rt.a", "libcompiler_rt") + } + if ctxt.HeadType == objabi.Hwindows { + loadWindowsHostArchives(ctxt) + } + if *flagLibGCC != "none" { + hostArchive(ctxt, *flagLibGCC) + } + // For glibc systems, the linker setup used by GCC + // looks like + // + // GROUP ( /lib/x86_64-linux-gnu/libc.so.6 + // /usr/lib/x86_64-linux-gnu/libc_nonshared.a + // AS_NEEDED ( /lib64/ld-linux-x86-64.so.2 ) ) + // + // where libc_nonshared.a contains a small set of + // symbols including "__stack_chk_fail_local" and a + // few others. Thus if we are doing internal linking + // and "__stack_chk_fail_local" is unresolved (most + // likely due to the use of -fstack-protector), try + // loading libc_nonshared.a to resolve it. + // + // On Alpine Linux (musl-based), the library providing + // this symbol is called libssp_nonshared.a. + isunresolved := symbolsAreUnresolved(ctxt, []string{"__stack_chk_fail_local"}) + if isunresolved[0] { + if p := ctxt.findLibPath("libc_nonshared.a"); p != "none" { + hostArchive(ctxt, p) + } + if p := ctxt.findLibPath("libssp_nonshared.a"); p != "none" { + hostArchive(ctxt, p) + } + } + } + } + + // We've loaded all the code now. + ctxt.Loaded = true + + strictDupMsgCount = ctxt.loader.NStrictDupMsgs() +} + +// loadWindowsHostArchives loads in host archives and objects when +// doing internal linking on windows. Older toolchains seem to require +// just a single pass through the various archives, but some modern +// toolchains when linking a C program with mingw pass library paths +// multiple times to the linker, e.g. "... -lmingwex -lmingw32 ... +// -lmingwex -lmingw32 ...". To accommodate this behavior, we make two +// passes over the host archives below. +func loadWindowsHostArchives(ctxt *Link) { + any := true + for i := 0; any && i < 2; i++ { + // Link crt2.o (if present) to resolve "atexit" when + // using LLVM-based compilers. 
+ isunresolved := symbolsAreUnresolved(ctxt, []string{"atexit"}) + if isunresolved[0] { + if p := ctxt.findLibPath("crt2.o"); p != "none" { + hostObject(ctxt, "crt2", p) + } + } + if *flagRace { + if p := ctxt.findLibPath("libsynchronization.a"); p != "none" { + hostArchive(ctxt, p) + } + } + if p := ctxt.findLibPath("libmingwex.a"); p != "none" { + hostArchive(ctxt, p) + } + if p := ctxt.findLibPath("libmingw32.a"); p != "none" { + hostArchive(ctxt, p) + } + // Link libmsvcrt.a to resolve '__acrt_iob_func' symbol + // (see https://golang.org/issue/23649 for details). + if p := ctxt.findLibPath("libmsvcrt.a"); p != "none" { + hostArchive(ctxt, p) + } + any = false + undefs, froms := ctxt.loader.UndefinedRelocTargets(1) + if len(undefs) > 0 { + any = true + if ctxt.Debugvlog > 1 { + ctxt.Logf("loadWindowsHostArchives: remaining unresolved is %s [%d] from %s [%d]\n", + ctxt.loader.SymName(undefs[0]), undefs[0], + ctxt.loader.SymName(froms[0]), froms[0]) + } + } + } + // If needed, create the __CTOR_LIST__ and __DTOR_LIST__ + // symbols (referenced by some of the mingw support library + // routines). Creation of these symbols is normally done by the + // linker if not already present. + want := []string{"__CTOR_LIST__", "__DTOR_LIST__"} + isunresolved := symbolsAreUnresolved(ctxt, want) + for k, w := range want { + if isunresolved[k] { + sb := ctxt.loader.CreateSymForUpdate(w, 0) + sb.SetType(sym.SDATA) + sb.AddUint64(ctxt.Arch, 0) + sb.SetReachable(true) + ctxt.loader.SetAttrSpecial(sb.Sym(), true) + } + } + + // Fix up references to DLL import symbols now that we're done + // pulling in new objects. 
+ if err := loadpe.PostProcessImports(); err != nil { + Errorf(nil, "%v", err) + } + + // TODO: maybe do something similar to peimporteddlls to collect + // all lib names and try link them all to final exe just like + // libmingwex.a and libmingw32.a: + /* + for: + #cgo windows LDFLAGS: -lmsvcrt -lm + import: + libmsvcrt.a libm.a + */ +} + +// loadcgodirectives reads the previously discovered cgo directives, creating +// symbols in preparation for host object loading or use later in the link. +func (ctxt *Link) loadcgodirectives() { + l := ctxt.loader + hostObjSyms := make(map[loader.Sym]struct{}) + for _, d := range ctxt.cgodata { + setCgoAttr(ctxt, d.file, d.pkg, d.directives, hostObjSyms) + } + ctxt.cgodata = nil + + if ctxt.LinkMode == LinkInternal { + // Drop all the cgo_import_static declarations. + // Turns out we won't be needing them. + for symIdx := range hostObjSyms { + if l.SymType(symIdx) == sym.SHOSTOBJ { + // If a symbol was marked both + // cgo_import_static and cgo_import_dynamic, + // then we want to make it cgo_import_dynamic + // now. + su := l.MakeSymbolUpdater(symIdx) + if l.SymExtname(symIdx) != "" && l.SymDynimplib(symIdx) != "" && !(l.AttrCgoExportStatic(symIdx) || l.AttrCgoExportDynamic(symIdx)) { + su.SetType(sym.SDYNIMPORT) + } else { + su.SetType(0) + } + } + } + } +} + +// Set up flags and special symbols depending on the platform build mode. +// This version works with loader.Loader. +func (ctxt *Link) linksetup() { + switch ctxt.BuildMode { + case BuildModeCShared, BuildModePlugin: + symIdx := ctxt.loader.LookupOrCreateSym("runtime.islibrary", 0) + sb := ctxt.loader.MakeSymbolUpdater(symIdx) + sb.SetType(sym.SNOPTRDATA) + sb.AddUint8(1) + case BuildModeCArchive: + symIdx := ctxt.loader.LookupOrCreateSym("runtime.isarchive", 0) + sb := ctxt.loader.MakeSymbolUpdater(symIdx) + sb.SetType(sym.SNOPTRDATA) + sb.AddUint8(1) + } + + // Recalculate pe parameters now that we have ctxt.LinkMode set. 
+	if ctxt.HeadType == objabi.Hwindows {
+		Peinit(ctxt)
+	}
+
+	if ctxt.LinkMode == LinkExternal {
+		// When external linking, we are creating an object file. The
+		// absolute address is irrelevant.
+		*FlagTextAddr = 0
+	}
+
+	// If there are no dynamic libraries needed, gcc disables dynamic linking.
+	// Because of this, glibc's dynamic ELF loader occasionally (like in version 2.13)
+	// assumes that a dynamic binary always refers to at least one dynamic library.
+	// Rather than be a source of test cases for glibc, disable dynamic linking
+	// the same way that gcc would.
+	//
+	// Exception: on OS X, programs such as Shark only work with dynamic
+	// binaries, so leave it enabled on OS X (Mach-O) binaries.
+	// Also leave it enabled on Solaris which doesn't support
+	// statically linked binaries.
+	if ctxt.BuildMode == BuildModeExe {
+		if havedynamic == 0 && ctxt.HeadType != objabi.Hdarwin && ctxt.HeadType != objabi.Hsolaris {
+			*FlagD = true
+		}
+	}
+
+	if ctxt.LinkMode == LinkExternal && ctxt.Arch.Family == sys.PPC64 && buildcfg.GOOS != "aix" {
+		toc := ctxt.loader.LookupOrCreateSym(".TOC.", 0)
+		sb := ctxt.loader.MakeSymbolUpdater(toc)
+		sb.SetType(sym.SDYNIMPORT)
+	}
+
+	// The Android Q linker started to complain about underalignment of our TLS
+	// section. We don't actually use the section on android, so don't
+	// generate it.
+	if buildcfg.GOOS != "android" {
+		tlsg := ctxt.loader.LookupOrCreateSym("runtime.tlsg", 0)
+		sb := ctxt.loader.MakeSymbolUpdater(tlsg)
+
+		// runtime.tlsg is used for external linking on platforms that do not define
+		// a variable to hold g in assembly (currently only intel).
+ if sb.Type() == 0 { + sb.SetType(sym.STLSBSS) + sb.SetSize(int64(ctxt.Arch.PtrSize)) + } else if sb.Type() != sym.SDYNIMPORT { + Errorf(nil, "runtime declared tlsg variable %v", sb.Type()) + } + ctxt.loader.SetAttrReachable(tlsg, true) + ctxt.Tlsg = tlsg + } + + var moduledata loader.Sym + var mdsb *loader.SymbolBuilder + if ctxt.BuildMode == BuildModePlugin { + moduledata = ctxt.loader.LookupOrCreateSym("local.pluginmoduledata", 0) + mdsb = ctxt.loader.MakeSymbolUpdater(moduledata) + ctxt.loader.SetAttrLocal(moduledata, true) + } else { + moduledata = ctxt.loader.LookupOrCreateSym("runtime.firstmoduledata", 0) + mdsb = ctxt.loader.MakeSymbolUpdater(moduledata) + } + if mdsb.Type() != 0 && mdsb.Type() != sym.SDYNIMPORT { + // If the module (toolchain-speak for "executable or shared + // library") we are linking contains the runtime package, it + // will define the runtime.firstmoduledata symbol and we + // truncate it back to 0 bytes so we can define its entire + // contents in symtab.go:symtab(). + mdsb.SetSize(0) + + // In addition, on ARM, the runtime depends on the linker + // recording the value of GOARM. + if ctxt.Arch.Family == sys.ARM { + goarm := ctxt.loader.LookupOrCreateSym("runtime.goarm", 0) + sb := ctxt.loader.MakeSymbolUpdater(goarm) + sb.SetType(sym.SDATA) + sb.SetSize(0) + sb.AddUint8(uint8(buildcfg.GOARM.Version)) + + goarmsoftfp := ctxt.loader.LookupOrCreateSym("runtime.goarmsoftfp", 0) + sb2 := ctxt.loader.MakeSymbolUpdater(goarmsoftfp) + sb2.SetType(sym.SDATA) + sb2.SetSize(0) + if buildcfg.GOARM.SoftFloat { + sb2.AddUint8(1) + } else { + sb2.AddUint8(0) + } + } + + // Set runtime.disableMemoryProfiling bool if + // runtime.MemProfile is not retained in the binary after + // deadcode (and we're not dynamically linking). 
+		memProfile := ctxt.loader.Lookup("runtime.MemProfile", abiInternalVer)
+		if memProfile != 0 && !ctxt.loader.AttrReachable(memProfile) && !ctxt.DynlinkingGo() {
+			memProfSym := ctxt.loader.LookupOrCreateSym("runtime.disableMemoryProfiling", 0)
+			sb := ctxt.loader.MakeSymbolUpdater(memProfSym)
+			sb.SetType(sym.SDATA)
+			sb.SetSize(0)
+			sb.AddUint8(1) // true bool
+		}
+	} else {
+		// If OTOH the module does not contain the runtime package,
+		// create a local symbol for the moduledata.
+		moduledata = ctxt.loader.LookupOrCreateSym("local.moduledata", 0)
+		mdsb = ctxt.loader.MakeSymbolUpdater(moduledata)
+		ctxt.loader.SetAttrLocal(moduledata, true)
+	}
+	// In all cases we mark the moduledata as noptrdata to hide it from
+	// the GC.
+	mdsb.SetType(sym.SNOPTRDATA)
+	ctxt.loader.SetAttrReachable(moduledata, true)
+	ctxt.Moduledata = moduledata
+
+	if ctxt.Arch == sys.Arch386 && ctxt.HeadType != objabi.Hwindows {
+		if (ctxt.BuildMode == BuildModeCArchive && ctxt.IsELF) || ctxt.BuildMode == BuildModeCShared || ctxt.BuildMode == BuildModePIE || ctxt.DynlinkingGo() {
+			got := ctxt.loader.LookupOrCreateSym("_GLOBAL_OFFSET_TABLE_", 0)
+			sb := ctxt.loader.MakeSymbolUpdater(got)
+			sb.SetType(sym.SDYNIMPORT)
+			ctxt.loader.SetAttrReachable(got, true)
+		}
+	}
+
+	// DWARF-gen and other phases require that the unit Textp slices
+	// be populated, so that it can walk the functions in each unit.
+	// Call into the loader to do this (requires that we collect the
+	// set of internal libraries first). NB: might be simpler if we
+	// moved isRuntimeDepPkg to cmd/internal and then did the test in
+	// loader.AssignTextSymbolOrder.
+	ctxt.Library = postorder(ctxt.Library)
+	intlibs := []bool{}
+	for _, lib := range ctxt.Library {
+		intlibs = append(intlibs, isRuntimeDepPkg(lib.Pkg))
+	}
+	ctxt.Textp = ctxt.loader.AssignTextSymbolOrder(ctxt.Library, intlibs, ctxt.Textp)
+}
+
+// mangleTypeSym shortens the names of symbols that represent Go types
+// if they are visible in the symbol table.
+//
+// As the names of these symbols are derived from the string of
+// the type, they can run to many kilobytes long. So we shorten
+// them using a hash when the name appears in the final binary.
+// This also removes characters that upset external linkers.
+//
+// These are the symbols that begin with the prefix 'type.' and
+// contain run-time type information used by the runtime and reflect
+// packages. All Go binaries contain these symbols, but only
+// those programs loaded dynamically in multiple parts need these
+// symbols to have entries in the symbol table.
+func (ctxt *Link) mangleTypeSym() {
+	if ctxt.BuildMode != BuildModeShared && !ctxt.linkShared && ctxt.BuildMode != BuildModePlugin && !ctxt.CanUsePlugins() {
+		return
+	}
+
+	ldr := ctxt.loader
+	for s := loader.Sym(1); s < loader.Sym(ldr.NSym()); s++ {
+		if !ldr.AttrReachable(s) && !ctxt.linkShared {
+			// If -linkshared, the GCProg generation code may need to reach
+			// out to the shared library for the type descriptor's data, even
+			// the type descriptor itself is not actually needed at run time
+			// (therefore not reachable). We still need to mangle its name,
+			// so it is consistent with the one stored in the shared library.
+			continue
+		}
+		name := ldr.SymName(s)
+		newName := typeSymbolMangle(name)
+		if newName != name {
+			ldr.SetSymExtname(s, newName)
+
+			// When linking against a shared library, the Go object file may
+			// have reference to the original symbol name whereas the shared
+			// library provides a symbol with the mangled name. We need to
+			// copy the payload of mangled to original.
+ // XXX maybe there is a better way to do this. + dup := ldr.Lookup(newName, ldr.SymVersion(s)) + if dup != 0 { + st := ldr.SymType(s) + dt := ldr.SymType(dup) + if st == sym.Sxxx && dt != sym.Sxxx { + ldr.CopySym(dup, s) + } + } + } + } +} + +// typeSymbolMangle mangles the given symbol name into something shorter. +// +// Keep the type:. prefix, which parts of the linker (like the +// DWARF generator) know means the symbol is not decodable. +// Leave type:runtime. symbols alone, because other parts of +// the linker manipulates them. +func typeSymbolMangle(name string) string { + isType := strings.HasPrefix(name, "type:") + if !isType && !strings.Contains(name, "@") { + // Issue 58800: instantiated symbols may include a type name, which may contain "@" + return name + } + if strings.HasPrefix(name, "type:runtime.") { + return name + } + if strings.HasPrefix(name, "go:string.") { + // String symbols will be grouped to a single go:string.* symbol. + // No need to mangle individual symbol names. + return name + } + if len(name) <= 14 && !strings.Contains(name, "@") { // Issue 19529 + return name + } + if isType { + hash := notsha256.Sum256([]byte(name[5:])) + prefix := "type:" + if name[5] == '.' { + prefix = "type:." + } + return prefix + base64.StdEncoding.EncodeToString(hash[:6]) + } + // instantiated symbol, replace type name in [] + i := strings.IndexByte(name, '[') + j := strings.LastIndexByte(name, ']') + if j == -1 || j <= i { + j = len(name) + } + hash := notsha256.Sum256([]byte(name[i+1 : j])) + return name[:i+1] + base64.StdEncoding.EncodeToString(hash[:6]) + name[j:] +} + +/* + * look for the next file in an archive. + * adapted from libmach. 
+ */ +func nextar(bp *bio.Reader, off int64, a *ArHdr) int64 { + if off&1 != 0 { + off++ + } + bp.MustSeek(off, 0) + var buf [SAR_HDR]byte + if n, err := io.ReadFull(bp, buf[:]); err != nil { + if n == 0 && err != io.EOF { + return -1 + } + return 0 + } + + a.name = artrim(buf[0:16]) + a.date = artrim(buf[16:28]) + a.uid = artrim(buf[28:34]) + a.gid = artrim(buf[34:40]) + a.mode = artrim(buf[40:48]) + a.size = artrim(buf[48:58]) + a.fmag = artrim(buf[58:60]) + + arsize := atolwhex(a.size) + if arsize&1 != 0 { + arsize++ + } + return arsize + SAR_HDR +} + +func loadobjfile(ctxt *Link, lib *sym.Library) { + pkg := objabi.PathToPrefix(lib.Pkg) + + if ctxt.Debugvlog > 1 { + ctxt.Logf("ldobj: %s (%s)\n", lib.File, pkg) + } + f, err := bio.Open(lib.File) + if err != nil { + Exitf("cannot open file %s: %v", lib.File, err) + } + defer f.Close() + defer func() { + if pkg == "main" && !lib.Main { + Exitf("%s: not package main", lib.File) + } + }() + + for i := 0; i < len(ARMAG); i++ { + if c, err := f.ReadByte(); err == nil && c == ARMAG[i] { + continue + } + + /* load it as a regular file */ + l := f.MustSeek(0, 2) + f.MustSeek(0, 0) + ldobj(ctxt, f, lib, l, lib.File, lib.File) + return + } + + /* + * load all the object files from the archive now. + * this gives us sequential file access and keeps us + * from needing to come back later to pick up more + * objects. it breaks the usual C archive model, but + * this is Go, not C. the common case in Go is that + * we need to load all the objects, and then we throw away + * the individual symbols that are unused. + * + * loading every object will also make it possible to + * load foreign objects not referenced by __.PKGDEF. + */ + var arhdr ArHdr + off := f.Offset() + for { + l := nextar(f, off, &arhdr) + if l == 0 { + break + } + if l < 0 { + Exitf("%s: malformed archive", lib.File) + } + off += l + + // __.PKGDEF isn't a real Go object file, and it's + // absent in -linkobj builds anyway. 
Skipping it + // ensures consistency between -linkobj and normal + // build modes. + if arhdr.name == pkgdef { + continue + } + + if arhdr.name == "dynimportfail" { + dynimportfail = append(dynimportfail, lib.Pkg) + } + if arhdr.name == "preferlinkext" { + // Ignore this directive if -linkmode has been + // set explicitly. + if ctxt.LinkMode == LinkAuto { + preferlinkext = append(preferlinkext, lib.Pkg) + } + } + + // Skip other special (non-object-file) sections that + // build tools may have added. Such sections must have + // short names so that the suffix is not truncated. + if len(arhdr.name) < 16 { + if ext := filepath.Ext(arhdr.name); ext != ".o" && ext != ".syso" { + continue + } + } + + pname := fmt.Sprintf("%s(%s)", lib.File, arhdr.name) + l = atolwhex(arhdr.size) + ldobj(ctxt, f, lib, l, pname, lib.File) + } +} + +type Hostobj struct { + ld func(*Link, *bio.Reader, string, int64, string) + pkg string + pn string + file string + off int64 + length int64 +} + +var hostobj []Hostobj + +// These packages can use internal linking mode. +// Others trigger external mode. +var internalpkg = []string{ + "crypto/internal/boring", + "crypto/internal/boring/syso", + "crypto/x509", + "net", + "os/user", + "runtime/cgo", + "runtime/race", + "runtime/race/internal/amd64v1", + "runtime/race/internal/amd64v3", + "runtime/msan", + "runtime/asan", +} + +func ldhostobj(ld func(*Link, *bio.Reader, string, int64, string), headType objabi.HeadType, f *bio.Reader, pkg string, length int64, pn string, file string) *Hostobj { + isinternal := false + for _, intpkg := range internalpkg { + if pkg == intpkg { + isinternal = true + break + } + } + + // DragonFly declares errno with __thread, which results in a symbol + // type of R_386_TLS_GD or R_X86_64_TLSGD. The Go linker does not + // currently know how to handle TLS relocations, hence we have to + // force external linking for any libraries that link in code that + // uses errno. 
This can be removed if the Go linker ever supports + // these relocation types. + if headType == objabi.Hdragonfly { + if pkg == "net" || pkg == "os/user" { + isinternal = false + } + } + + if !isinternal { + externalobj = true + } + + hostobj = append(hostobj, Hostobj{}) + h := &hostobj[len(hostobj)-1] + h.ld = ld + h.pkg = pkg + h.pn = pn + h.file = file + h.off = f.Offset() + h.length = length + return h +} + +func hostobjs(ctxt *Link) { + if ctxt.LinkMode != LinkInternal { + return + } + var h *Hostobj + + for i := 0; i < len(hostobj); i++ { + h = &hostobj[i] + f, err := bio.Open(h.file) + if err != nil { + Exitf("cannot reopen %s: %v", h.pn, err) + } + f.MustSeek(h.off, 0) + if h.ld == nil { + Errorf(nil, "%s: unrecognized object file format", h.pn) + continue + } + h.ld(ctxt, f, h.pkg, h.length, h.pn) + if *flagCaptureHostObjs != "" { + captureHostObj(h) + } + f.Close() + } +} + +func hostlinksetup(ctxt *Link) { + if ctxt.LinkMode != LinkExternal { + return + } + + // For external link, record that we need to tell the external linker -s, + // and turn off -s internally: the external linker needs the symbol + // information for its final link. + debug_s = *FlagS + *FlagS = false + + // create temporary directory and arrange cleanup + if *flagTmpdir == "" { + dir, err := os.MkdirTemp("", "go-link-") + if err != nil { + log.Fatal(err) + } + *flagTmpdir = dir + ownTmpDir = true + AtExit(func() { + os.RemoveAll(*flagTmpdir) + }) + } + + // change our output to temporary object file + if err := ctxt.Out.Close(); err != nil { + Exitf("error closing output file") + } + mayberemoveoutfile() + + p := filepath.Join(*flagTmpdir, "go.o") + if err := ctxt.Out.Open(p); err != nil { + Exitf("cannot create %s: %v", p, err) + } +} + +// hostobjCopy creates a copy of the object files in hostobj in a +// temporary directory. 
// hostobjCopy creates a copy of each host object file in the temporary
// directory (named %06d.o in input order) and returns the paths to the
// copies. The copies are made concurrently, bounded by NumCPU, to limit
// the number of simultaneously open file descriptors.
func (ctxt *Link) hostobjCopy() (paths []string) {
	var wg sync.WaitGroup
	sema := make(chan struct{}, runtime.NumCPU()) // limit open file descriptors
	for i, h := range hostobj {
		h := h
		dst := filepath.Join(*flagTmpdir, fmt.Sprintf("%06d.o", i))
		paths = append(paths, dst)
		if ctxt.Debugvlog != 0 {
			ctxt.Logf("host obj copy: %s from pkg %s -> %s\n", h.pn, h.pkg, dst)
		}

		wg.Add(1)
		go func() {
			sema <- struct{}{}
			defer func() {
				<-sema
				wg.Done()
			}()
			f, err := os.Open(h.file)
			if err != nil {
				Exitf("cannot reopen %s: %v", h.pn, err)
			}
			defer f.Close()
			if _, err := f.Seek(h.off, 0); err != nil {
				Exitf("cannot seek %s: %v", h.pn, err)
			}

			w, err := os.Create(dst)
			if err != nil {
				Exitf("cannot create %s: %v", dst, err)
			}
			if _, err := io.CopyN(w, f, h.length); err != nil {
				Exitf("cannot write %s: %v", dst, err)
			}
			if err := w.Close(); err != nil {
				Exitf("cannot close %s: %v", dst, err)
			}
		}()
	}
	wg.Wait()
	return paths
}

// writeGDBLinkerScript creates gcc linker script file in temp
// directory. writeGDBLinkerScript returns created file path.
// The script is used to work around gcc bug
// (see https://golang.org/issue/20183 for details).
func writeGDBLinkerScript() string {
	name := "fix_debug_gdb_scripts.ld"
	path := filepath.Join(*flagTmpdir, name)
	src := `SECTIONS
{
  .debug_gdb_scripts BLOCK(__section_alignment__) (NOLOAD) :
  {
    *(.debug_gdb_scripts)
  }
}
INSERT AFTER .debug_types;
`
	err := os.WriteFile(path, []byte(src), 0666)
	if err != nil {
		Errorf(nil, "WriteFile %s failed: %v", name, err)
	}
	return path
}

// archive builds a .a archive from the hostobj object files.
func (ctxt *Link) archive() {
	if ctxt.BuildMode != BuildModeCArchive {
		return
	}

	exitIfErrors()

	if *flagExtar == "" {
		*flagExtar = "ar"
	}

	mayberemoveoutfile()

	// Force the buffer to flush here so that external
	// tools will see a complete file.
	if err := ctxt.Out.Close(); err != nil {
		Exitf("error closing %v", *flagOutfile)
	}

	argv := []string{*flagExtar, "-q", "-c", "-s"}
	if ctxt.HeadType == objabi.Haix {
		argv = append(argv, "-X64")
	}
	argv = append(argv, *flagOutfile)
	argv = append(argv, filepath.Join(*flagTmpdir, "go.o"))
	argv = append(argv, ctxt.hostobjCopy()...)

	if ctxt.Debugvlog != 0 {
		ctxt.Logf("archive: %s\n", strings.Join(argv, " "))
	}

	// If supported, use syscall.Exec() to invoke the archive command,
	// which should be the final remaining step needed for the link.
	// This will reduce peak RSS for the link (and speed up linking of
	// large applications), since when the archive command runs we
	// won't be holding onto all of the linker's live memory.
	if syscallExecSupported && !ownTmpDir {
		runAtExitFuncs()
		ctxt.execArchive(argv)
		panic("should not get here")
	}

	// Otherwise invoke 'ar' in the usual way (fork + exec).
	if out, err := exec.Command(argv[0], argv[1:]...).CombinedOutput(); err != nil {
		Exitf("running %s failed: %v\n%s", argv[0], err, out)
	}
}

// hostlink runs the external (host) linker: it assembles the full
// command line from the build mode, head type, and user flags, copies
// the host objects into the temp directory, invokes the linker, filters
// known-spurious warnings from its output, and on Darwin optionally
// combines DWARF back into the executable and code-signs it.
func (ctxt *Link) hostlink() {
	if ctxt.LinkMode != LinkExternal || nerrors > 0 {
		return
	}
	if ctxt.BuildMode == BuildModeCArchive {
		return
	}

	var argv []string
	argv = append(argv, ctxt.extld()...)
	argv = append(argv, hostlinkArchArgs(ctxt.Arch)...)

	if *FlagS || debug_s {
		if ctxt.HeadType == objabi.Hdarwin {
			// Recent versions of macOS print
			// ld: warning: option -s is obsolete and being ignored
			// so do not pass any arguments (but we strip symbols below).
		} else {
			argv = append(argv, "-s")
		}
	}

	// On darwin, whether to combine DWARF into executable.
	// Only macOS supports unmapped segments such as our __DWARF segment.
	combineDwarf := ctxt.IsDarwin() && !*FlagW && machoPlatform == PLATFORM_MACOS

	switch ctxt.HeadType {
	case objabi.Hdarwin:
		if combineDwarf {
			// Leave room for DWARF combining.
			// -headerpad is incompatible with -fembed-bitcode.
			argv = append(argv, "-Wl,-headerpad,1144")
		}
		if ctxt.DynlinkingGo() && buildcfg.GOOS != "ios" {
			// -flat_namespace is deprecated on iOS.
			// It is useful for supporting plugins. We don't support plugins on iOS.
			// -flat_namespace may cause the dynamic linker to hang at forkExec when
			// resolving a lazy binding. See issue 38824.
			// Force eager resolution to work around.
			argv = append(argv, "-Wl,-flat_namespace", "-Wl,-bind_at_load")
		}
		if !combineDwarf {
			argv = append(argv, "-Wl,-S") // suppress STAB (symbolic debugging) symbols
			if debug_s {
				// We are generating a binary with symbol table suppressed.
				// Suppress local symbols. We need to keep dynamically exported
				// and referenced symbols so the dynamic linker can resolve them.
				argv = append(argv, "-Wl,-x")
			}
		}
	case objabi.Hopenbsd:
		argv = append(argv, "-Wl,-nopie")
		argv = append(argv, "-pthread")
		if ctxt.Arch.InFamily(sys.ARM64) {
			// Disable execute-only on openbsd/arm64 - the Go arm64 assembler
			// currently stores constants in the text section rather than in rodata.
			// See issue #59615.
			argv = append(argv, "-Wl,--no-execute-only")
		}
	case objabi.Hwindows:
		if windowsgui {
			argv = append(argv, "-mwindows")
		} else {
			argv = append(argv, "-mconsole")
		}
		// Mark as having awareness of terminal services, to avoid
		// ancient compatibility hacks.
		argv = append(argv, "-Wl,--tsaware")

		// Enable DEP
		argv = append(argv, "-Wl,--nxcompat")

		argv = append(argv, fmt.Sprintf("-Wl,--major-os-version=%d", PeMinimumTargetMajorVersion))
		argv = append(argv, fmt.Sprintf("-Wl,--minor-os-version=%d", PeMinimumTargetMinorVersion))
		argv = append(argv, fmt.Sprintf("-Wl,--major-subsystem-version=%d", PeMinimumTargetMajorVersion))
		argv = append(argv, fmt.Sprintf("-Wl,--minor-subsystem-version=%d", PeMinimumTargetMinorVersion))
	case objabi.Haix:
		argv = append(argv, "-pthread")
		// prevent ld to reorder .text functions to keep the same
		// first/last functions for moduledata.
		argv = append(argv, "-Wl,-bnoobjreorder")
		// mcmodel=large is needed for every gcc generated file, but
		// ld still needs -bbigtoc in order to allow a larger TOC.
		argv = append(argv, "-mcmodel=large")
		argv = append(argv, "-Wl,-bbigtoc")
	}

	// On PPC64, verify the external toolchain supports Power10. This is needed when
	// PC relative relocations might be generated by Go. Only targets compiling ELF
	// binaries might generate these relocations.
	if ctxt.IsPPC64() && ctxt.IsElf() && buildcfg.GOPPC64 >= 10 {
		if !linkerFlagSupported(ctxt.Arch, argv[0], "", "-mcpu=power10") {
			Exitf("The external toolchain does not support -mcpu=power10. " +
				" This is required to externally link GOPPC64 >= power10")
		}
	}

	// Enable/disable ASLR on Windows.
	addASLRargs := func(argv []string, val bool) []string {
		// Old/ancient versions of GCC support "--dynamicbase" and
		// "--high-entropy-va" but don't enable it by default. In
		// addition, they don't accept "--disable-dynamicbase" or
		// "--no-dynamicbase", so the only way to disable ASLR is to
		// not pass any flags at all.
		//
		// More modern versions of GCC (and also clang) enable ASLR
		// by default. With these compilers, however you can turn it
		// off if you want using "--disable-dynamicbase" or
		// "--no-dynamicbase".
		//
		// The strategy below is to try using "--disable-dynamicbase";
		// if this succeeds, then assume we're working with more
		// modern compilers and act accordingly. If it fails, assume
		// an ancient compiler with ancient defaults.
		var dbopt string
		var heopt string
		dbon := "--dynamicbase"
		heon := "--high-entropy-va"
		dboff := "--disable-dynamicbase"
		heoff := "--disable-high-entropy-va"
		if val {
			dbopt = dbon
			heopt = heon
		} else {
			// Test to see whether "--disable-dynamicbase" works.
			newer := linkerFlagSupported(ctxt.Arch, argv[0], "", "-Wl,"+dboff)
			if newer {
				// Newer compiler, which supports both on/off options.
				dbopt = dboff
				heopt = heoff
			} else {
				// older toolchain: we have to say nothing in order to
				// get a no-ASLR binary.
				dbopt = ""
				heopt = ""
			}
		}
		if dbopt != "" {
			argv = append(argv, "-Wl,"+dbopt)
		}
		// enable high-entropy ASLR on 64-bit.
		if ctxt.Arch.PtrSize >= 8 && heopt != "" {
			argv = append(argv, "-Wl,"+heopt)
		}
		return argv
	}

	switch ctxt.BuildMode {
	case BuildModeExe:
		if ctxt.HeadType == objabi.Hdarwin {
			if machoPlatform == PLATFORM_MACOS && ctxt.IsAMD64() {
				argv = append(argv, "-Wl,-no_pie")
			}
		}
		if *flagRace && ctxt.HeadType == objabi.Hwindows {
			// Current windows/amd64 race detector tsan support
			// library can't handle PIE mode (see #53539 for more details).
			// For now, explicitly disable PIE (since some compilers
			// default to it) if -race is in effect.
			argv = addASLRargs(argv, false)
		}
	case BuildModePIE:
		switch ctxt.HeadType {
		case objabi.Hdarwin, objabi.Haix:
		case objabi.Hwindows:
			if *flagAslr && *flagRace {
				// Current windows/amd64 race detector tsan support
				// library can't handle PIE mode (see #53539 for more details).
				// Disable alsr if -race in effect.
				*flagAslr = false
			}
			argv = addASLRargs(argv, *flagAslr)
		default:
			// ELF.
			if ctxt.UseRelro() {
				argv = append(argv, "-Wl,-z,relro")
			}
			argv = append(argv, "-pie")
		}
	case BuildModeCShared:
		if ctxt.HeadType == objabi.Hdarwin {
			argv = append(argv, "-dynamiclib")
		} else {
			if ctxt.UseRelro() {
				argv = append(argv, "-Wl,-z,relro")
			}
			argv = append(argv, "-shared")
			if ctxt.HeadType == objabi.Hwindows {
				argv = addASLRargs(argv, *flagAslr)
			} else {
				// Pass -z nodelete to mark the shared library as
				// non-closeable: a dlclose will do nothing.
				argv = append(argv, "-Wl,-z,nodelete")
				// Only pass Bsymbolic on non-Windows.
				argv = append(argv, "-Wl,-Bsymbolic")
			}
		}
	case BuildModeShared:
		if ctxt.UseRelro() {
			argv = append(argv, "-Wl,-z,relro")
		}
		argv = append(argv, "-shared")
	case BuildModePlugin:
		if ctxt.HeadType == objabi.Hdarwin {
			argv = append(argv, "-dynamiclib")
		} else {
			if ctxt.UseRelro() {
				argv = append(argv, "-Wl,-z,relro")
			}
			argv = append(argv, "-shared")
		}
	}

	var altLinker string
	if ctxt.IsELF && ctxt.DynlinkingGo() {
		// We force all symbol resolution to be done at program startup
		// because lazy PLT resolution can use large amounts of stack at
		// times we cannot allow it to do so.
		argv = append(argv, "-Wl,-z,now")

		// Do not let the host linker generate COPY relocations. These
		// can move symbols out of sections that rely on stable offsets
		// from the beginning of the section (like sym.STYPE).
		argv = append(argv, "-Wl,-z,nocopyreloc")

		if buildcfg.GOOS == "android" {
			// Use lld to avoid errors from default linker (issue #38838)
			altLinker = "lld"
		}

		if ctxt.Arch.InFamily(sys.ARM64) && buildcfg.GOOS == "linux" {
			// On ARM64, the GNU linker will fail with
			// -znocopyreloc if it thinks a COPY relocation is
			// required. Switch to gold.
			// https://sourceware.org/bugzilla/show_bug.cgi?id=19962
			// https://go.dev/issue/22040
			altLinker = "gold"

			// If gold is not installed, gcc will silently switch
			// back to ld.bfd. So we parse the version information
			// and provide a useful error if gold is missing.
			name, args := flagExtld[0], flagExtld[1:]
			args = append(args, "-fuse-ld=gold", "-Wl,--version")
			cmd := exec.Command(name, args...)
			if out, err := cmd.CombinedOutput(); err == nil {
				if !bytes.Contains(out, []byte("GNU gold")) {
					log.Fatalf("ARM64 external linker must be gold (issue #15696, 22040), but is not: %s", out)
				}
			}
		}
	}
	if ctxt.Arch.Family == sys.ARM64 && buildcfg.GOOS == "freebsd" {
		// Switch to ld.bfd on freebsd/arm64.
		altLinker = "bfd"

		// Provide a useful error if ld.bfd is missing.
		name, args := flagExtld[0], flagExtld[1:]
		args = append(args, "-fuse-ld=bfd", "-Wl,--version")
		cmd := exec.Command(name, args...)
		if out, err := cmd.CombinedOutput(); err == nil {
			if !bytes.Contains(out, []byte("GNU ld")) {
				log.Fatalf("ARM64 external linker must be ld.bfd (issue #35197), please install devel/binutils")
			}
		}
	}
	if altLinker != "" {
		argv = append(argv, "-fuse-ld="+altLinker)
	}

	if ctxt.IsELF && len(buildinfo) > 0 {
		argv = append(argv, fmt.Sprintf("-Wl,--build-id=0x%x", buildinfo))
	}

	// On Windows, given -o foo, GCC will append ".exe" to produce
	// "foo.exe". We have decided that we want to honor the -o
	// option. To make this work, we append a '.' so that GCC
	// will decide that the file already has an extension. We
	// only want to do this when producing a Windows output file
	// on a Windows host.
	outopt := *flagOutfile
	if buildcfg.GOOS == "windows" && runtime.GOOS == "windows" && filepath.Ext(outopt) == "" {
		outopt += "."
	}
	argv = append(argv, "-o")
	argv = append(argv, outopt)

	if rpath.val != "" {
		argv = append(argv, fmt.Sprintf("-Wl,-rpath,%s", rpath.val))
	}

	if *flagInterpreter != "" {
		// Many linkers support both -I and the --dynamic-linker flags
		// to set the ELF interpreter, but lld only supports
		// --dynamic-linker so prefer that (ld on very old Solaris only
		// supports -I but that seems less important).
		argv = append(argv, fmt.Sprintf("-Wl,--dynamic-linker,%s", *flagInterpreter))
	}

	// Force global symbols to be exported for dlopen, etc.
	if ctxt.IsELF {
		if ctxt.DynlinkingGo() || ctxt.BuildMode == BuildModeCShared || !linkerFlagSupported(ctxt.Arch, argv[0], altLinker, "-Wl,--export-dynamic-symbol=main") {
			argv = append(argv, "-rdynamic")
		} else {
			var exports []string
			ctxt.loader.ForAllCgoExportDynamic(func(s loader.Sym) {
				exports = append(exports, "-Wl,--export-dynamic-symbol="+ctxt.loader.SymExtname(s))
			})
			sort.Strings(exports)
			argv = append(argv, exports...)
		}
	}
	if ctxt.HeadType == objabi.Haix {
		fileName := xcoffCreateExportFile(ctxt)
		argv = append(argv, "-Wl,-bE:"+fileName)
	}

	const unusedArguments = "-Qunused-arguments"
	if linkerFlagSupported(ctxt.Arch, argv[0], altLinker, unusedArguments) {
		argv = append(argv, unusedArguments)
	}

	if ctxt.IsWindows() {
		// Suppress generation of the PE file header timestamp,
		// so as to avoid spurious build ID differences between
		// linked binaries that are otherwise identical other than
		// the date/time they were linked.
		const noTimeStamp = "-Wl,--no-insert-timestamp"
		if linkerFlagSupported(ctxt.Arch, argv[0], altLinker, noTimeStamp) {
			argv = append(argv, noTimeStamp)
		}
	}

	const compressDWARF = "-Wl,--compress-debug-sections=zlib"
	if ctxt.compressDWARF && linkerFlagSupported(ctxt.Arch, argv[0], altLinker, compressDWARF) {
		argv = append(argv, compressDWARF)
	}

	argv = append(argv, filepath.Join(*flagTmpdir, "go.o"))
	argv = append(argv, ctxt.hostobjCopy()...)
	if ctxt.HeadType == objabi.Haix {
		// We want to have C files after Go files to remove
		// trampolines csects made by ld.
		argv = append(argv, "-nostartfiles")
		argv = append(argv, "/lib/crt0_64.o")

		extld := ctxt.extld()
		name, args := extld[0], extld[1:]
		// Get starting files.
		getPathFile := func(file string) string {
			args := append(args, "-maix64", "--print-file-name="+file)
			out, err := exec.Command(name, args...).CombinedOutput()
			if err != nil {
				log.Fatalf("running %s failed: %v\n%s", extld, err, out)
			}
			return strings.Trim(string(out), "\n")
		}
		// Since GCC version 11, the 64-bit version of GCC starting files
		// are now suffixed by "_64". Even under "-maix64" multilib directory
		// "crtcxa.o" is 32-bit.
		crtcxa := getPathFile("crtcxa_64.o")
		if !filepath.IsAbs(crtcxa) {
			crtcxa = getPathFile("crtcxa.o")
		}
		crtdbase := getPathFile("crtdbase_64.o")
		if !filepath.IsAbs(crtdbase) {
			crtdbase = getPathFile("crtdbase.o")
		}
		argv = append(argv, crtcxa)
		argv = append(argv, crtdbase)
	}

	if ctxt.linkShared {
		seenDirs := make(map[string]bool)
		seenLibs := make(map[string]bool)
		addshlib := func(path string) {
			dir, base := filepath.Split(path)
			if !seenDirs[dir] {
				argv = append(argv, "-L"+dir)
				if !rpath.set {
					argv = append(argv, "-Wl,-rpath="+dir)
				}
				seenDirs[dir] = true
			}
			base = strings.TrimSuffix(base, ".so")
			base = strings.TrimPrefix(base, "lib")
			if !seenLibs[base] {
				argv = append(argv, "-l"+base)
				seenLibs[base] = true
			}
		}
		for _, shlib := range ctxt.Shlibs {
			addshlib(shlib.Path)
			for _, dep := range shlib.Deps {
				if dep == "" {
					continue
				}
				libpath := findshlib(ctxt, dep)
				if libpath != "" {
					addshlib(libpath)
				}
			}
		}
	}

	// clang, unlike GCC, passes -rdynamic to the linker
	// even when linking with -static, causing a linker
	// error when using GNU ld. So take out -rdynamic if
	// we added it. We do it in this order, rather than
	// only adding -rdynamic later, so that -extldflags
	// can override -rdynamic without using -static.
	// Similarly for -Wl,--dynamic-linker.
	checkStatic := func(arg string) {
		if ctxt.IsELF && arg == "-static" {
			for i := range argv {
				if argv[i] == "-rdynamic" || strings.HasPrefix(argv[i], "-Wl,--dynamic-linker,") {
					argv[i] = "-static"
				}
			}
		}
	}

	for _, p := range ldflag {
		argv = append(argv, p)
		checkStatic(p)
	}

	// When building a program with the default -buildmode=exe the
	// gc compiler generates code that requires DT_TEXTREL in a
	// position independent executable (PIE). On systems where the
	// toolchain creates PIEs by default, and where DT_TEXTREL
	// does not work, the resulting programs will not run. See
	// issue #17847. To avoid this problem pass -no-pie to the
	// toolchain if it is supported.
	if ctxt.BuildMode == BuildModeExe && !ctxt.linkShared && !(ctxt.IsDarwin() && ctxt.IsARM64()) {
		// GCC uses -no-pie, clang uses -nopie.
		for _, nopie := range []string{"-no-pie", "-nopie"} {
			if linkerFlagSupported(ctxt.Arch, argv[0], altLinker, nopie) {
				argv = append(argv, nopie)
				break
			}
		}
	}

	for _, p := range flagExtldflags {
		argv = append(argv, p)
		checkStatic(p)
	}
	if ctxt.HeadType == objabi.Hwindows {
		// Determine which linker we're using. Add in the extldflags in
		// case the user has specified "-fuse-ld=...".
		extld := ctxt.extld()
		name, args := extld[0], extld[1:]
		args = append(args, trimLinkerArgv(flagExtldflags)...)
		args = append(args, "-Wl,--version")
		cmd := exec.Command(name, args...)
		usingLLD := false
		if out, err := cmd.CombinedOutput(); err == nil {
			if bytes.Contains(out, []byte("LLD ")) {
				usingLLD = true
			}
		}

		// use gcc linker script to work around gcc bug
		// (see https://golang.org/issue/20183 for details).
		if !usingLLD {
			p := writeGDBLinkerScript()
			argv = append(argv, "-Wl,-T,"+p)
		}
		if *flagRace {
			if p := ctxt.findLibPath("libsynchronization.a"); p != "libsynchronization.a" {
				argv = append(argv, "-lsynchronization")
			}
		}
		// libmingw32 and libmingwex have some inter-dependencies,
		// so must use linker groups.
		argv = append(argv, "-Wl,--start-group", "-lmingwex", "-lmingw32", "-Wl,--end-group")
		argv = append(argv, peimporteddlls()...)
	}

	argv = ctxt.passLongArgsInResponseFile(argv, altLinker)

	if ctxt.Debugvlog != 0 {
		ctxt.Logf("host link:")
		for _, v := range argv {
			ctxt.Logf(" %q", v)
		}
		ctxt.Logf("\n")
	}

	out, err := exec.Command(argv[0], argv[1:]...).CombinedOutput()
	if err != nil {
		Exitf("running %s failed: %v\n%s", argv[0], err, out)
	}

	// Filter out useless linker warnings caused by bugs outside Go.
	// See also cmd/go/internal/work/exec.go's gccld method.
	var save [][]byte
	var skipLines int
	for _, line := range bytes.SplitAfter(out, []byte("\n")) {
		// golang.org/issue/26073 - Apple Xcode bug
		if bytes.Contains(line, []byte("ld: warning: text-based stub file")) {
			continue
		}

		if skipLines > 0 {
			skipLines--
			continue
		}

		// Remove TOC overflow warning on AIX.
		if bytes.Contains(line, []byte("ld: 0711-783")) {
			skipLines = 2
			continue
		}

		save = append(save, line)
	}
	out = bytes.Join(save, nil)

	if len(out) > 0 {
		// always print external output even if the command is successful, so that we don't
		// swallow linker warnings (see https://golang.org/issue/17935).
		if ctxt.IsDarwin() && ctxt.IsAMD64() {
			const noPieWarning = "ld: warning: -no_pie is deprecated when targeting new OS versions\n"
			if i := bytes.Index(out, []byte(noPieWarning)); i >= 0 {
				// swallow -no_pie deprecation warning, issue 54482
				out = append(out[:i], out[i+len(noPieWarning):]...)
			}
		}
		if ctxt.IsDarwin() {
			const bindAtLoadWarning = "ld: warning: -bind_at_load is deprecated on macOS\n"
			if i := bytes.Index(out, []byte(bindAtLoadWarning)); i >= 0 {
				// -bind_at_load is deprecated with ld-prime, but needed for
				// correctness with older versions of ld64. Swallow the warning.
				// TODO: maybe pass -bind_at_load conditionally based on C
				// linker version.
				out = append(out[:i], out[i+len(bindAtLoadWarning):]...)
			}
		}
		ctxt.Logf("%s", out)
	}

	if combineDwarf {
		// Find "dsymutils" and "strip" tools using CC --print-prog-name.
		var cc []string
		cc = append(cc, ctxt.extld()...)
		cc = append(cc, hostlinkArchArgs(ctxt.Arch)...)
		cc = append(cc, "--print-prog-name", "dsymutil")
		out, err := exec.Command(cc[0], cc[1:]...).CombinedOutput()
		if err != nil {
			Exitf("%s: finding dsymutil failed: %v\n%s", os.Args[0], err, out)
		}
		dsymutilCmd := strings.TrimSuffix(string(out), "\n")

		cc[len(cc)-1] = "strip"
		out, err = exec.Command(cc[0], cc[1:]...).CombinedOutput()
		if err != nil {
			Exitf("%s: finding strip failed: %v\n%s", os.Args[0], err, out)
		}
		stripCmd := strings.TrimSuffix(string(out), "\n")

		dsym := filepath.Join(*flagTmpdir, "go.dwarf")
		cmd := exec.Command(dsymutilCmd, "-f", *flagOutfile, "-o", dsym)
		// dsymutil may not clean up its temp directory at exit.
		// Set DSYMUTIL_REPRODUCER_PATH to work around. see issue 59026.
		cmd.Env = append(os.Environ(), "DSYMUTIL_REPRODUCER_PATH="+*flagTmpdir)
		if ctxt.Debugvlog != 0 {
			ctxt.Logf("host link dsymutil:")
			for _, v := range cmd.Args {
				ctxt.Logf(" %q", v)
			}
			ctxt.Logf("\n")
		}
		if out, err := cmd.CombinedOutput(); err != nil {
			Exitf("%s: running dsymutil failed: %v\n%s", os.Args[0], err, out)
		}
		// Remove STAB (symbolic debugging) symbols after we are done with them (by dsymutil).
		// They contain temporary file paths and make the build not reproducible.
		var stripArgs = []string{"-S"}
		if debug_s {
			// We are generating a binary with symbol table suppressed.
			// Suppress local symbols. We need to keep dynamically exported
			// and referenced symbols so the dynamic linker can resolve them.
			stripArgs = append(stripArgs, "-x")
		}
		stripArgs = append(stripArgs, *flagOutfile)
		if ctxt.Debugvlog != 0 {
			ctxt.Logf("host link strip: %q", stripCmd)
			for _, v := range stripArgs {
				ctxt.Logf(" %q", v)
			}
			ctxt.Logf("\n")
		}
		if out, err := exec.Command(stripCmd, stripArgs...).CombinedOutput(); err != nil {
			Exitf("%s: running strip failed: %v\n%s", os.Args[0], err, out)
		}
		// Skip combining if `dsymutil` didn't generate a file. See #11994.
		if _, err := os.Stat(dsym); os.IsNotExist(err) {
			return
		}
		// For os.Rename to work reliably, must be in same directory as outfile.
		combinedOutput := *flagOutfile + "~"
		exef, err := os.Open(*flagOutfile)
		if err != nil {
			Exitf("%s: combining dwarf failed: %v", os.Args[0], err)
		}
		defer exef.Close()
		exem, err := macho.NewFile(exef)
		if err != nil {
			Exitf("%s: parsing Mach-O header failed: %v", os.Args[0], err)
		}
		if err := machoCombineDwarf(ctxt, exef, exem, dsym, combinedOutput); err != nil {
			Exitf("%s: combining dwarf failed: %v", os.Args[0], err)
		}
		os.Remove(*flagOutfile)
		if err := os.Rename(combinedOutput, *flagOutfile); err != nil {
			Exitf("%s: %v", os.Args[0], err)
		}
	}
	if ctxt.NeedCodeSign() {
		err := machoCodeSign(ctxt, *flagOutfile)
		if err != nil {
			Exitf("%s: code signing failed: %v", os.Args[0], err)
		}
	}
}

// passLongArgsInResponseFile writes the arguments into a file if they
// are very long.
func (ctxt *Link) passLongArgsInResponseFile(argv []string, altLinker string) []string {
	c := 0
	for _, arg := range argv {
		c += len(arg)
	}

	if c < sys.ExecArgLengthLimit {
		return argv
	}

	// Only use response files if they are supported.
	response := filepath.Join(*flagTmpdir, "response")
	if err := os.WriteFile(response, nil, 0644); err != nil {
		log.Fatalf("failed while testing response file: %v", err)
	}
	if !linkerFlagSupported(ctxt.Arch, argv[0], altLinker, "@"+response) {
		if ctxt.Debugvlog != 0 {
			ctxt.Logf("not using response file because linker does not support one")
		}
		return argv
	}

	var buf bytes.Buffer
	for _, arg := range argv[1:] {
		// The external linker response file supports quoted strings.
		fmt.Fprintf(&buf, "%q\n", arg)
	}
	if err := os.WriteFile(response, buf.Bytes(), 0644); err != nil {
		log.Fatalf("failed while writing response file: %v", err)
	}
	if ctxt.Debugvlog != 0 {
		ctxt.Logf("response file %s contents:\n%s", response, buf.Bytes())
	}
	return []string{
		argv[0],
		"@" + response,
	}
}

var createTrivialCOnce sync.Once

// linkerFlagSupported reports whether the external linker accepts flag,
// by attempting to compile a trivial C program (created once per link)
// with that flag plus the user's arch/extld flags. Both GCC and clang
// exit successfully for some unknown flags, so their diagnostic output
// is also scanned for "unrecognized"/"unknown".
func linkerFlagSupported(arch *sys.Arch, linker, altLinker, flag string) bool {
	createTrivialCOnce.Do(func() {
		src := filepath.Join(*flagTmpdir, "trivial.c")
		if err := os.WriteFile(src, []byte("int main() { return 0; }"), 0666); err != nil {
			Errorf(nil, "WriteFile trivial.c failed: %v", err)
		}
	})

	flags := hostlinkArchArgs(arch)

	moreFlags := trimLinkerArgv(append(flagExtldflags, ldflag...))
	flags = append(flags, moreFlags...)

	if altLinker != "" {
		flags = append(flags, "-fuse-ld="+altLinker)
	}
	trivialPath := filepath.Join(*flagTmpdir, "trivial.c")
	outPath := filepath.Join(*flagTmpdir, "a.out")
	flags = append(flags, "-o", outPath, flag, trivialPath)

	cmd := exec.Command(linker, flags...)
	cmd.Env = append([]string{"LC_ALL=C"}, os.Environ()...)
	out, err := cmd.CombinedOutput()
	// GCC says "unrecognized command line option ‘-no-pie’"
	// clang says "unknown argument: '-no-pie'"
	return err == nil && !bytes.Contains(out, []byte("unrecognized")) && !bytes.Contains(out, []byte("unknown"))
}

// trimLinkerArgv returns a new copy of argv that does not include flags
// that are not relevant for testing whether some linker option works.
func trimLinkerArgv(argv []string) []string {
	// Flags whose argument (the next argv entry) should be dropped
	// along with the flag itself.
	flagsWithNextArgSkip := []string{
		"-F",
		"-l",
		"-L",
		"-framework",
		"-Wl,-framework",
		"-Wl,-rpath",
		"-Wl,-undefined",
	}
	// Flags whose argument should be kept along with the flag.
	flagsWithNextArgKeep := []string{
		"-arch",
		"-isysroot",
		"--sysroot",
		"-target",
	}
	// Flag prefixes that are kept (flag only, no separate argument).
	prefixesToKeep := []string{
		"-f",
		"-m",
		"-p",
		"-Wl,",
		"-arch",
		"-isysroot",
		"--sysroot",
		"-target",
	}

	var flags []string
	// keep/skip carry the decision for a flag's argument into the next
	// loop iteration; anything not matching a rule below is dropped.
	keep := false
	skip := false
	for _, f := range argv {
		if keep {
			flags = append(flags, f)
			keep = false
		} else if skip {
			skip = false
		} else if f == "" || f[0] != '-' {
			// Non-flag arguments (e.g. file names) are dropped.
		} else if contains(flagsWithNextArgSkip, f) {
			skip = true
		} else if contains(flagsWithNextArgKeep, f) {
			flags = append(flags, f)
			keep = true
		} else {
			for _, p := range prefixesToKeep {
				if strings.HasPrefix(f, p) {
					flags = append(flags, f)
					break
				}
			}
		}
	}
	return flags
}

// hostlinkArchArgs returns arguments to pass to the external linker
// based on the architecture.
func hostlinkArchArgs(arch *sys.Arch) []string {
	switch arch.Family {
	case sys.I386:
		return []string{"-m32"}
	case sys.AMD64:
		if buildcfg.GOOS == "darwin" {
			return []string{"-arch", "x86_64", "-m64"}
		}
		return []string{"-m64"}
	case sys.S390X:
		return []string{"-m64"}
	case sys.ARM:
		return []string{"-marm"}
	case sys.ARM64:
		if buildcfg.GOOS == "darwin" {
			return []string{"-arch", "arm64"}
		}
		// NOTE(review): non-darwin ARM64 falls through to the final
		// return nil below, i.e. no extra arch arguments.
	case sys.Loong64:
		return []string{"-mabi=lp64d"}
	case sys.MIPS64:
		return []string{"-mabi=64"}
	case sys.MIPS:
		return []string{"-mabi=32"}
	case sys.PPC64:
		if buildcfg.GOOS == "aix" {
			return []string{"-maix64"}
		} else {
			return []string{"-m64"}
		}

	}
	return nil
}

// wantHdr is the expected Go object file header line for this
// GOOS/GOARCH/version; used by ldobj to reject stale objects.
var wantHdr = objabi.HeaderString()

// ldobj loads an input object. If it is a host object (an object
// compiled by a non-Go compiler) it returns the Hostobj pointer. If
// it is a Go object, it returns nil.
func ldobj(ctxt *Link, f *bio.Reader, lib *sym.Library, length int64, pn string, file string) *Hostobj {
	pkg := objabi.PathToPrefix(lib.Pkg)

	// Sniff the first four bytes to classify the object format, then
	// seek back so the per-format loader sees the file from the start.
	eof := f.Offset() + length
	start := f.Offset()
	c1 := bgetc(f)
	c2 := bgetc(f)
	c3 := bgetc(f)
	c4 := bgetc(f)
	f.MustSeek(start, 0)

	unit := &sym.CompilationUnit{Lib: lib}
	lib.Units = append(lib.Units, unit)

	magic := uint32(c1)<<24 | uint32(c2)<<16 | uint32(c3)<<8 | uint32(c4)
	if magic == 0x7f454c46 { // \x7F E L F
		ldelf := func(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) {
			textp, flags, err := loadelf.Load(ctxt.loader, ctxt.Arch, ctxt.IncVersion(), f, pkg, length, pn, ehdr.Flags)
			if err != nil {
				Errorf(nil, "%v", err)
				return
			}
			ehdr.Flags = flags
			ctxt.Textp = append(ctxt.Textp, textp...)
		}
		return ldhostobj(ldelf, ctxt.HeadType, f, pkg, length, pn, file)
	}

	// Mach-O: 0xfeedface/0xfeedfacf (and byte-swapped variants).
	if magic&^1 == 0xfeedface || magic&^0x01000000 == 0xcefaedfe {
		ldmacho := func(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) {
			textp, err := loadmacho.Load(ctxt.loader, ctxt.Arch, ctxt.IncVersion(), f, pkg, length, pn)
			if err != nil {
				Errorf(nil, "%v", err)
				return
			}
			ctxt.Textp = append(ctxt.Textp, textp...)
		}
		return ldhostobj(ldmacho, ctxt.HeadType, f, pkg, length, pn, file)
	}

	// PE (COFF) machine types.
	switch c1<<8 | c2 {
	case 0x4c01, // 386
		0x6486, // amd64
		0xc401, // arm
		0x64aa: // arm64
		ldpe := func(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) {
			ls, err := loadpe.Load(ctxt.loader, ctxt.Arch, ctxt.IncVersion(), f, pkg, length, pn)
			if err != nil {
				Errorf(nil, "%v", err)
				return
			}
			if len(ls.Resources) != 0 {
				setpersrc(ctxt, ls.Resources)
			}
			if ls.PData != 0 {
				sehp.pdata = append(sehp.pdata, ls.PData)
			}
			if ls.XData != 0 {
				sehp.xdata = append(sehp.xdata, ls.XData)
			}
			ctxt.Textp = append(ctxt.Textp, ls.Textp...)
		}
		return ldhostobj(ldpe, ctxt.HeadType, f, pkg, length, pn, file)
	}

	// XCOFF (AIX).
	if c1 == 0x01 && (c2 == 0xD7 || c2 == 0xF7) {
		ldxcoff := func(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) {
			textp, err := loadxcoff.Load(ctxt.loader, ctxt.Arch, ctxt.IncVersion(), f, pkg, length, pn)
			if err != nil {
				Errorf(nil, "%v", err)
				return
			}
			ctxt.Textp = append(ctxt.Textp, textp...)
		}
		return ldhostobj(ldxcoff, ctxt.HeadType, f, pkg, length, pn, file)
	}

	if c1 != 'g' || c2 != 'o' || c3 != ' ' || c4 != 'o' {
		// An unrecognized object is just passed to the external linker.
		// If we try to read symbols from this object, we will
		// report an error at that time.
		unknownObjFormat = true
		return ldhostobj(nil, ctxt.HeadType, f, pkg, length, pn, file)
	}

	/* check the header */
	line, err := f.ReadString('\n')
	if err != nil {
		Errorf(nil, "truncated object file: %s: %v", pn, err)
		return nil
	}

	if !strings.HasPrefix(line, "go object ") {
		if strings.HasSuffix(pn, ".go") {
			Exitf("%s: uncompiled .go source file", pn)
			return nil
		}

		if line == ctxt.Arch.Name {
			// old header format: just $GOOS
			Errorf(nil, "%s: stale object file", pn)
			return nil
		}

		Errorf(nil, "%s: not an object file: @%d %q", pn, start, line)
		return nil
	}

	// First, check that the basic GOOS, GOARCH, and Version match.
	if line != wantHdr {
		Errorf(nil, "%s: linked object header mismatch:\nhave %q\nwant %q\n", pn, line, wantHdr)
	}

	// Skip over exports and other info -- ends with \n!\n.
	//
	// Note: It's possible for "\n!\n" to appear within the binary
	// package export data format. To avoid truncating the package
	// definition prematurely (issue 21703), we keep track of
	// how many "$$" delimiters we've seen.

	import0 := f.Offset()

	// c1/c2/c3 form a three-byte sliding window over the stream.
	c1 = '\n' // the last line ended in \n
	c2 = bgetc(f)
	c3 = bgetc(f)
	markers := 0
	for {
		if c1 == '\n' {
			// Only honor "\n!\n" outside a "$$"..."$$" section.
			if markers%2 == 0 && c2 == '!' && c3 == '\n' {
				break
			}
			if c2 == '$' && c3 == '$' {
				markers++
			}
		}

		c1 = c2
		c2 = c3
		c3 = bgetc(f)
		if c3 == -1 {
			Errorf(nil, "truncated object file: %s", pn)
			return nil
		}
	}

	import1 := f.Offset()

	f.MustSeek(import0, 0)
	ldpkg(ctxt, f, lib, import1-import0-2, pn) // -2 for !\n
	f.MustSeek(import1, 0)

	fingerprint := ctxt.loader.Preload(ctxt.IncVersion(), f, lib, unit, eof-f.Offset())
	if !fingerprint.IsZero() { // Assembly objects don't have fingerprints. Ignore them.
		// Check fingerprint, to ensure the importing and imported packages
		// have consistent view of symbol indices.
		// Normally the go command should ensure this. But in case something
		// goes wrong, it could lead to obscure bugs like run-time crash.
		// Check it here to be sure.
		if lib.Fingerprint.IsZero() { // Not yet imported. Update its fingerprint.
			lib.Fingerprint = fingerprint
		}
		checkFingerprint(lib, fingerprint, lib.Srcref, lib.Fingerprint)
	}

	addImports(ctxt, lib, pn)
	return nil
}

// symbolsAreUnresolved scans through the loader's list of unresolved
// symbols and checks to see whether any of them match the names of the
// symbols in 'want'. Return value is a list of bools, with list[K] set
// to true if there is an unresolved reference to the symbol in want[K].
func symbolsAreUnresolved(ctxt *Link, want []string) []bool {
	returnAllUndefs := -1
	undefs, _ := ctxt.loader.UndefinedRelocTargets(returnAllUndefs)
	seen := make(map[loader.Sym]struct{})
	rval := make([]bool, len(want))
	wantm := make(map[string]int)
	for k, w := range want {
		wantm[w] = k
	}
	count := 0
	for _, s := range undefs {
		if _, ok := seen[s]; ok {
			continue
		}
		seen[s] = struct{}{}
		if k, ok := wantm[ctxt.loader.SymName(s)]; ok {
			rval[k] = true
			count++
			// Stop early once every wanted symbol has been seen.
			if count == len(want) {
				return rval
			}
		}
	}
	return rval
}

// hostObject reads a single host object file (compare to "hostArchive").
+// This is used as part of internal linking when we need to pull in +// files such as "crt?.o". +func hostObject(ctxt *Link, objname string, path string) { + if ctxt.Debugvlog > 1 { + ctxt.Logf("hostObject(%s)\n", path) + } + objlib := sym.Library{ + Pkg: objname, + } + f, err := bio.Open(path) + if err != nil { + Exitf("cannot open host object %q file %s: %v", objname, path, err) + } + defer f.Close() + h := ldobj(ctxt, f, &objlib, 0, path, path) + if h.ld == nil { + Exitf("unrecognized object file format in %s", path) + } + h.file = path + h.length = f.MustSeek(0, 2) + f.MustSeek(h.off, 0) + h.ld(ctxt, f, h.pkg, h.length, h.pn) + if *flagCaptureHostObjs != "" { + captureHostObj(h) + } +} + +func checkFingerprint(lib *sym.Library, libfp goobj.FingerprintType, src string, srcfp goobj.FingerprintType) { + if libfp != srcfp { + Exitf("fingerprint mismatch: %s has %x, import from %s expecting %x", lib, libfp, src, srcfp) + } +} + +func readelfsymboldata(ctxt *Link, f *elf.File, sym *elf.Symbol) []byte { + data := make([]byte, sym.Size) + sect := f.Sections[sym.Section] + if sect.Type != elf.SHT_PROGBITS && sect.Type != elf.SHT_NOTE { + Errorf(nil, "reading %s from non-data section", sym.Name) + } + n, err := sect.ReadAt(data, int64(sym.Value-sect.Addr)) + if uint64(n) != sym.Size { + Errorf(nil, "reading contents of %s: %v", sym.Name, err) + } + return data +} + +func readwithpad(r io.Reader, sz int32) ([]byte, error) { + data := make([]byte, Rnd(int64(sz), 4)) + _, err := io.ReadFull(r, data) + if err != nil { + return nil, err + } + data = data[:sz] + return data, nil +} + +func readnote(f *elf.File, name []byte, typ int32) ([]byte, error) { + for _, sect := range f.Sections { + if sect.Type != elf.SHT_NOTE { + continue + } + r := sect.Open() + for { + var namesize, descsize, noteType int32 + err := binary.Read(r, f.ByteOrder, &namesize) + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("read namesize failed: %v", err) + } + err = 
binary.Read(r, f.ByteOrder, &descsize)
+			if err != nil {
+				return nil, fmt.Errorf("read descsize failed: %v", err)
+			}
+			err = binary.Read(r, f.ByteOrder, &noteType)
+			if err != nil {
+				return nil, fmt.Errorf("read type failed: %v", err)
+			}
+			noteName, err := readwithpad(r, namesize)
+			if err != nil {
+				return nil, fmt.Errorf("read name failed: %v", err)
+			}
+			desc, err := readwithpad(r, descsize)
+			if err != nil {
+				return nil, fmt.Errorf("read desc failed: %v", err)
+			}
+			if string(name) == string(noteName) && typ == noteType {
+				return desc, nil
+			}
+		}
+	}
+	return nil, nil
+}
+
+func findshlib(ctxt *Link, shlib string) string {
+	if filepath.IsAbs(shlib) {
+		return shlib
+	}
+	for _, libdir := range ctxt.Libdir {
+		libpath := filepath.Join(libdir, shlib)
+		if _, err := os.Stat(libpath); err == nil {
+			return libpath
+		}
+	}
+	Errorf(nil, "cannot find shared library: %s", shlib)
+	return ""
+}
+
+func ldshlibsyms(ctxt *Link, shlib string) {
+	var libpath string
+	if filepath.IsAbs(shlib) {
+		libpath = shlib
+		shlib = filepath.Base(shlib)
+	} else {
+		libpath = findshlib(ctxt, shlib)
+		if libpath == "" {
+			return
+		}
+	}
+	for _, processedlib := range ctxt.Shlibs {
+		if processedlib.Path == libpath {
+			return
+		}
+	}
+	if ctxt.Debugvlog > 1 {
+		ctxt.Logf("ldshlibsyms: found library with name %s at %s\n", shlib, libpath)
+	}
+
+	f, err := elf.Open(libpath)
+	if err != nil {
+		Errorf(nil, "cannot open shared library: %s", libpath)
+		return
+	}
+	// Keep the file open as decodetypeGcprog needs to read from it.
+	// TODO: fix. Maybe mmap the file.
+ //defer f.Close() + + hash, err := readnote(f, ELF_NOTE_GO_NAME, ELF_NOTE_GOABIHASH_TAG) + if err != nil { + Errorf(nil, "cannot read ABI hash from shared library %s: %v", libpath, err) + return + } + + depsbytes, err := readnote(f, ELF_NOTE_GO_NAME, ELF_NOTE_GODEPS_TAG) + if err != nil { + Errorf(nil, "cannot read dep list from shared library %s: %v", libpath, err) + return + } + var deps []string + for _, dep := range strings.Split(string(depsbytes), "\n") { + if dep == "" { + continue + } + if !filepath.IsAbs(dep) { + // If the dep can be interpreted as a path relative to the shlib + // in which it was found, do that. Otherwise, we will leave it + // to be resolved by libdir lookup. + abs := filepath.Join(filepath.Dir(libpath), dep) + if _, err := os.Stat(abs); err == nil { + dep = abs + } + } + deps = append(deps, dep) + } + + syms, err := f.DynamicSymbols() + if err != nil { + Errorf(nil, "cannot read symbols from shared library: %s", libpath) + return + } + + for _, elfsym := range syms { + if elf.ST_TYPE(elfsym.Info) == elf.STT_NOTYPE || elf.ST_TYPE(elfsym.Info) == elf.STT_SECTION { + continue + } + + // Symbols whose names start with "type:" are compiler generated, + // so make functions with that prefix internal. + ver := 0 + symname := elfsym.Name // (unmangled) symbol name + if elf.ST_TYPE(elfsym.Info) == elf.STT_FUNC && strings.HasPrefix(elfsym.Name, "type:") { + ver = abiInternalVer + } else if buildcfg.Experiment.RegabiWrappers && elf.ST_TYPE(elfsym.Info) == elf.STT_FUNC { + // Demangle the ABI name. Keep in sync with symtab.go:mangleABIName. 
+ if strings.HasSuffix(elfsym.Name, ".abiinternal") { + ver = sym.SymVerABIInternal + symname = strings.TrimSuffix(elfsym.Name, ".abiinternal") + } else if strings.HasSuffix(elfsym.Name, ".abi0") { + ver = 0 + symname = strings.TrimSuffix(elfsym.Name, ".abi0") + } + } + + l := ctxt.loader + s := l.LookupOrCreateSym(symname, ver) + + // Because loadlib above loads all .a files before loading + // any shared libraries, any non-dynimport symbols we find + // that duplicate symbols already loaded should be ignored + // (the symbols from the .a files "win"). + if l.SymType(s) != 0 && l.SymType(s) != sym.SDYNIMPORT { + continue + } + su := l.MakeSymbolUpdater(s) + su.SetType(sym.SDYNIMPORT) + l.SetSymElfType(s, elf.ST_TYPE(elfsym.Info)) + su.SetSize(int64(elfsym.Size)) + if elfsym.Section != elf.SHN_UNDEF { + // Set .File for the library that actually defines the symbol. + l.SetSymPkg(s, libpath) + + // The decodetype_* functions in decodetype.go need access to + // the type data. + sname := l.SymName(s) + if strings.HasPrefix(sname, "type:") && !strings.HasPrefix(sname, "type:.") { + su.SetData(readelfsymboldata(ctxt, f, &elfsym)) + } + } + + if symname != elfsym.Name { + l.SetSymExtname(s, elfsym.Name) + } + } + ctxt.Shlibs = append(ctxt.Shlibs, Shlib{Path: libpath, Hash: hash, Deps: deps, File: f}) +} + +func addsection(ldr *loader.Loader, arch *sys.Arch, seg *sym.Segment, name string, rwx int) *sym.Section { + sect := ldr.NewSection() + sect.Rwx = uint8(rwx) + sect.Name = name + sect.Seg = seg + sect.Align = int32(arch.PtrSize) // everything is at least pointer-aligned + seg.Sections = append(seg.Sections, sect) + return sect +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: link [options] main.o\n") + objabi.Flagprint(os.Stderr) + Exit(2) +} + +type SymbolType int8 // TODO: after genasmsym is gone, maybe rename to plan9typeChar or something + +const ( + // see also https://9p.io/magic/man2html/1/nm + TextSym SymbolType = 'T' + DataSym SymbolType = 'D' + BSSSym 
SymbolType = 'B' + UndefinedSym SymbolType = 'U' + TLSSym SymbolType = 't' + FrameSym SymbolType = 'm' + ParamSym SymbolType = 'p' + AutoSym SymbolType = 'a' + + // Deleted auto (not a real sym, just placeholder for type) + DeletedAutoSym = 'x' +) + +// defineInternal defines a symbol used internally by the go runtime. +func (ctxt *Link) defineInternal(p string, t sym.SymKind) loader.Sym { + s := ctxt.loader.CreateSymForUpdate(p, 0) + s.SetType(t) + s.SetSpecial(true) + s.SetLocal(true) + return s.Sym() +} + +func (ctxt *Link) xdefine(p string, t sym.SymKind, v int64) loader.Sym { + s := ctxt.defineInternal(p, t) + ctxt.loader.SetSymValue(s, v) + return s +} + +func datoff(ldr *loader.Loader, s loader.Sym, addr int64) int64 { + if uint64(addr) >= Segdata.Vaddr { + return int64(uint64(addr) - Segdata.Vaddr + Segdata.Fileoff) + } + if uint64(addr) >= Segtext.Vaddr { + return int64(uint64(addr) - Segtext.Vaddr + Segtext.Fileoff) + } + ldr.Errorf(s, "invalid datoff %#x", addr) + return 0 +} + +func Entryvalue(ctxt *Link) int64 { + a := *flagEntrySymbol + if a[0] >= '0' && a[0] <= '9' { + return atolwhex(a) + } + ldr := ctxt.loader + s := ldr.Lookup(a, 0) + if s == 0 { + Errorf(nil, "missing entry symbol %q", a) + return 0 + } + st := ldr.SymType(s) + if st == 0 { + return *FlagTextAddr + } + if !ctxt.IsAIX() && st != sym.STEXT { + ldr.Errorf(s, "entry not text") + } + return ldr.SymValue(s) +} + +func (ctxt *Link) callgraph() { + if !*FlagC { + return + } + + ldr := ctxt.loader + for _, s := range ctxt.Textp { + relocs := ldr.Relocs(s) + for i := 0; i < relocs.Count(); i++ { + r := relocs.At(i) + rs := r.Sym() + if rs == 0 { + continue + } + if r.Type().IsDirectCall() && ldr.SymType(rs) == sym.STEXT { + ctxt.Logf("%s calls %s\n", ldr.SymName(s), ldr.SymName(rs)) + } + } + } +} + +func Rnd(v int64, r int64) int64 { + if r <= 0 { + return v + } + v += r - 1 + c := v % r + if c < 0 { + c += r + } + v -= c + return v +} + +func bgetc(r *bio.Reader) int { + c, err := 
r.ReadByte() + if err != nil { + if err != io.EOF { + log.Fatalf("reading input: %v", err) + } + return -1 + } + return int(c) +} + +type markKind uint8 // for postorder traversal +const ( + _ markKind = iota + visiting + visited +) + +func postorder(libs []*sym.Library) []*sym.Library { + order := make([]*sym.Library, 0, len(libs)) // hold the result + mark := make(map[*sym.Library]markKind, len(libs)) + for _, lib := range libs { + dfs(lib, mark, &order) + } + return order +} + +func dfs(lib *sym.Library, mark map[*sym.Library]markKind, order *[]*sym.Library) { + if mark[lib] == visited { + return + } + if mark[lib] == visiting { + panic("found import cycle while visiting " + lib.Pkg) + } + mark[lib] = visiting + for _, i := range lib.Imports { + dfs(i, mark, order) + } + mark[lib] = visited + *order = append(*order, lib) +} + +func ElfSymForReloc(ctxt *Link, s loader.Sym) int32 { + // If putelfsym created a local version of this symbol, use that in all + // relocations. + les := ctxt.loader.SymLocalElfSym(s) + if les != 0 { + return les + } else { + return ctxt.loader.SymElfSym(s) + } +} + +func AddGotSym(target *Target, ldr *loader.Loader, syms *ArchSyms, s loader.Sym, elfRelocTyp uint32) { + if ldr.SymGot(s) >= 0 { + return + } + + Adddynsym(ldr, target, syms, s) + got := ldr.MakeSymbolUpdater(syms.GOT) + ldr.SetGot(s, int32(got.Size())) + got.AddUint(target.Arch, 0) + + if target.IsElf() { + if target.Arch.PtrSize == 8 { + rela := ldr.MakeSymbolUpdater(syms.Rela) + rela.AddAddrPlus(target.Arch, got.Sym(), int64(ldr.SymGot(s))) + rela.AddUint64(target.Arch, elf.R_INFO(uint32(ldr.SymDynid(s)), elfRelocTyp)) + rela.AddUint64(target.Arch, 0) + } else { + rel := ldr.MakeSymbolUpdater(syms.Rel) + rel.AddAddrPlus(target.Arch, got.Sym(), int64(ldr.SymGot(s))) + rel.AddUint32(target.Arch, elf.R_INFO32(uint32(ldr.SymDynid(s)), elfRelocTyp)) + } + } else if target.IsDarwin() { + leg := ldr.MakeSymbolUpdater(syms.LinkEditGOT) + leg.AddUint32(target.Arch, 
uint32(ldr.SymDynid(s))) + if target.IsPIE() && target.IsInternal() { + // Mach-O relocations are a royal pain to lay out. + // They use a compact stateful bytecode representation. + // Here we record what are needed and encode them later. + MachoAddBind(int64(ldr.SymGot(s)), s) + } + } else { + ldr.Errorf(s, "addgotsym: unsupported binary format") + } +} + +var hostobjcounter int + +// captureHostObj writes out the content of a host object (pulled from +// an archive or loaded from a *.o file directly) to a directory +// specified via the linker's "-capturehostobjs" debugging flag. This +// is intended to make it easier for a developer to inspect the actual +// object feeding into "CGO internal" link step. +func captureHostObj(h *Hostobj) { + // Form paths for info file and obj file. + ofile := fmt.Sprintf("captured-obj-%d.o", hostobjcounter) + ifile := fmt.Sprintf("captured-obj-%d.txt", hostobjcounter) + hostobjcounter++ + opath := filepath.Join(*flagCaptureHostObjs, ofile) + ipath := filepath.Join(*flagCaptureHostObjs, ifile) + + // Write the info file. + info := fmt.Sprintf("pkg: %s\npn: %s\nfile: %s\noff: %d\nlen: %d\n", + h.pkg, h.pn, h.file, h.off, h.length) + if err := os.WriteFile(ipath, []byte(info), 0666); err != nil { + log.Fatalf("error writing captured host obj info %s: %v", ipath, err) + } + + readObjData := func() []byte { + inf, err := os.Open(h.file) + if err != nil { + log.Fatalf("capturing host obj: open failed on %s: %v", h.pn, err) + } + res := make([]byte, h.length) + if n, err := inf.ReadAt(res, h.off); err != nil || n != int(h.length) { + log.Fatalf("capturing host obj: readat failed on %s: %v", h.pn, err) + } + return res + } + + // Write the object file. 
+ if err := os.WriteFile(opath, readObjData(), 0666); err != nil { + log.Fatalf("error writing captured host object %s: %v", opath, err) + } + + fmt.Fprintf(os.Stderr, "link: info: captured host object %s to %s\n", + h.file, opath) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/link.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/link.go new file mode 100644 index 0000000000000000000000000000000000000000..34221dfa8a764d30642307335d4f1df591562c07 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/link.go @@ -0,0 +1,161 @@ +// Derived from Inferno utils/6l/l.h and related files. +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/l.h +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package ld + +import ( + "bufio" + "cmd/internal/objabi" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" + "fmt" +) + +type Shlib struct { + Path string + Hash []byte + Deps []string + File *elf.File +} + +// Link holds the context for writing object code from a compiler +// or for reading that input into the linker. +type Link struct { + Target + ErrorReporter + ArchSyms + + outSem chan int // limits the number of output writers + Out *OutBuf + + version int // current version number for static/file-local symbols + + Debugvlog int + Bso *bufio.Writer + + Loaded bool // set after all inputs have been loaded as symbols + + compressDWARF bool + + Libdir []string + Library []*sym.Library + LibraryByPkg map[string]*sym.Library + Shlibs []Shlib + Textp []loader.Sym + Moduledata loader.Sym + + PackageFile map[string]string + PackageShlib map[string]string + + tramps []loader.Sym // trampolines + + compUnits []*sym.CompilationUnit // DWARF compilation units + runtimeCU *sym.CompilationUnit // One of the runtime CUs, the last one seen. + + loader *loader.Loader + cgodata []cgodata // cgo directives to load, three strings are args for loadcgo + + datap []loader.Sym + dynexp []loader.Sym + + // Elf symtab variables. + numelfsym int // starts at 0, 1 is reserved + + // These are symbols that created and written by the linker. 
+ // Rather than creating a symbol, and writing all its data into the heap, + // you can create a symbol, and just a generation function will be called + // after the symbol's been created in the output mmap. + generatorSyms map[loader.Sym]generatorFunc +} + +type cgodata struct { + file string + pkg string + directives [][]string +} + +func (ctxt *Link) Logf(format string, args ...interface{}) { + fmt.Fprintf(ctxt.Bso, format, args...) + ctxt.Bso.Flush() +} + +func addImports(ctxt *Link, l *sym.Library, pn string) { + pkg := objabi.PathToPrefix(l.Pkg) + for _, imp := range l.Autolib { + lib := addlib(ctxt, pkg, pn, imp.Pkg, imp.Fingerprint) + if lib != nil { + l.Imports = append(l.Imports, lib) + } + } + l.Autolib = nil +} + +// Allocate a new version (i.e. symbol namespace). +func (ctxt *Link) IncVersion() int { + ctxt.version++ + return ctxt.version - 1 +} + +// returns the maximum version number +func (ctxt *Link) MaxVersion() int { + return ctxt.version +} + +// generatorFunc is a convenience type. +// Some linker-created Symbols are large and shouldn't really live in the heap. +// Such Symbols can define a generator function. Their bytes can be generated +// directly in the output mmap. +// +// Relocations are applied prior to emitting generator Symbol contents. +// Generator Symbols that require relocations can be written in two passes. +// The first pass, at Symbol creation time, adds only relocations. +// The second pass, at content generation time, adds the rest. +// See generateFunctab for an example. +// +// Generator functions shouldn't grow the Symbol size. +// Generator functions must be safe for concurrent use. +// +// Generator Symbols have their Data set to the mmapped area when the +// generator is called. +type generatorFunc func(*Link, loader.Sym) + +// createGeneratorSymbol is a convenience method for creating a generator +// symbol. 
+func (ctxt *Link) createGeneratorSymbol(name string, version int, t sym.SymKind, size int64, gen generatorFunc) loader.Sym { + ldr := ctxt.loader + s := ldr.LookupOrCreateSym(name, version) + ldr.SetIsGeneratedSym(s, true) + sb := ldr.MakeSymbolUpdater(s) + sb.SetType(t) + sb.SetSize(size) + ctxt.generatorSyms[s] = gen + return s +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/macho.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/macho.go new file mode 100644 index 0000000000000000000000000000000000000000..a36043c777a2c320aa6873cc78917775bc1a6945 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/macho.go @@ -0,0 +1,1588 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "bytes" + "cmd/internal/codesign" + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/macho" + "encoding/binary" + "fmt" + "internal/buildcfg" + "io" + "os" + "sort" + "strings" + "unsafe" +) + +type MachoHdr struct { + cpu uint32 + subcpu uint32 +} + +type MachoSect struct { + name string + segname string + addr uint64 + size uint64 + off uint32 + align uint32 + reloc uint32 + nreloc uint32 + flag uint32 + res1 uint32 + res2 uint32 +} + +type MachoSeg struct { + name string + vsize uint64 + vaddr uint64 + fileoffset uint64 + filesize uint64 + prot1 uint32 + prot2 uint32 + nsect uint32 + msect uint32 + sect []MachoSect + flag uint32 +} + +// MachoPlatformLoad represents a LC_VERSION_MIN_* or +// LC_BUILD_VERSION load command. +type MachoPlatformLoad struct { + platform MachoPlatform // One of PLATFORM_* constants. + cmd MachoLoad +} + +type MachoLoad struct { + type_ uint32 + data []uint32 +} + +type MachoPlatform int + +/* + * Total amount of space to reserve at the start of the file + * for Header, PHeaders, and SHeaders. + * May waste some. 
+ */ +const ( + INITIAL_MACHO_HEADR = 4 * 1024 +) + +const ( + MACHO_CPU_AMD64 = 1<<24 | 7 + MACHO_CPU_386 = 7 + MACHO_SUBCPU_X86 = 3 + MACHO_CPU_ARM = 12 + MACHO_SUBCPU_ARM = 0 + MACHO_SUBCPU_ARMV7 = 9 + MACHO_CPU_ARM64 = 1<<24 | 12 + MACHO_SUBCPU_ARM64_ALL = 0 + MACHO_SUBCPU_ARM64_V8 = 1 + MACHO_SUBCPU_ARM64E = 2 + MACHO32SYMSIZE = 12 + MACHO64SYMSIZE = 16 + MACHO_X86_64_RELOC_UNSIGNED = 0 + MACHO_X86_64_RELOC_SIGNED = 1 + MACHO_X86_64_RELOC_BRANCH = 2 + MACHO_X86_64_RELOC_GOT_LOAD = 3 + MACHO_X86_64_RELOC_GOT = 4 + MACHO_X86_64_RELOC_SUBTRACTOR = 5 + MACHO_X86_64_RELOC_SIGNED_1 = 6 + MACHO_X86_64_RELOC_SIGNED_2 = 7 + MACHO_X86_64_RELOC_SIGNED_4 = 8 + MACHO_ARM_RELOC_VANILLA = 0 + MACHO_ARM_RELOC_PAIR = 1 + MACHO_ARM_RELOC_SECTDIFF = 2 + MACHO_ARM_RELOC_BR24 = 5 + MACHO_ARM64_RELOC_UNSIGNED = 0 + MACHO_ARM64_RELOC_BRANCH26 = 2 + MACHO_ARM64_RELOC_PAGE21 = 3 + MACHO_ARM64_RELOC_PAGEOFF12 = 4 + MACHO_ARM64_RELOC_GOT_LOAD_PAGE21 = 5 + MACHO_ARM64_RELOC_GOT_LOAD_PAGEOFF12 = 6 + MACHO_ARM64_RELOC_ADDEND = 10 + MACHO_GENERIC_RELOC_VANILLA = 0 + MACHO_FAKE_GOTPCREL = 100 +) + +const ( + MH_MAGIC = 0xfeedface + MH_MAGIC_64 = 0xfeedfacf + + MH_OBJECT = 0x1 + MH_EXECUTE = 0x2 + + MH_NOUNDEFS = 0x1 + MH_DYLDLINK = 0x4 + MH_PIE = 0x200000 +) + +const ( + LC_SEGMENT = 0x1 + LC_SYMTAB = 0x2 + LC_SYMSEG = 0x3 + LC_THREAD = 0x4 + LC_UNIXTHREAD = 0x5 + LC_LOADFVMLIB = 0x6 + LC_IDFVMLIB = 0x7 + LC_IDENT = 0x8 + LC_FVMFILE = 0x9 + LC_PREPAGE = 0xa + LC_DYSYMTAB = 0xb + LC_LOAD_DYLIB = 0xc + LC_ID_DYLIB = 0xd + LC_LOAD_DYLINKER = 0xe + LC_ID_DYLINKER = 0xf + LC_PREBOUND_DYLIB = 0x10 + LC_ROUTINES = 0x11 + LC_SUB_FRAMEWORK = 0x12 + LC_SUB_UMBRELLA = 0x13 + LC_SUB_CLIENT = 0x14 + LC_SUB_LIBRARY = 0x15 + LC_TWOLEVEL_HINTS = 0x16 + LC_PREBIND_CKSUM = 0x17 + LC_LOAD_WEAK_DYLIB = 0x80000018 + LC_SEGMENT_64 = 0x19 + LC_ROUTINES_64 = 0x1a + LC_UUID = 0x1b + LC_RPATH = 0x8000001c + LC_CODE_SIGNATURE = 0x1d + LC_SEGMENT_SPLIT_INFO = 0x1e + LC_REEXPORT_DYLIB = 0x8000001f + LC_LAZY_LOAD_DYLIB = 
0x20 + LC_ENCRYPTION_INFO = 0x21 + LC_DYLD_INFO = 0x22 + LC_DYLD_INFO_ONLY = 0x80000022 + LC_LOAD_UPWARD_DYLIB = 0x80000023 + LC_VERSION_MIN_MACOSX = 0x24 + LC_VERSION_MIN_IPHONEOS = 0x25 + LC_FUNCTION_STARTS = 0x26 + LC_DYLD_ENVIRONMENT = 0x27 + LC_MAIN = 0x80000028 + LC_DATA_IN_CODE = 0x29 + LC_SOURCE_VERSION = 0x2A + LC_DYLIB_CODE_SIGN_DRS = 0x2B + LC_ENCRYPTION_INFO_64 = 0x2C + LC_LINKER_OPTION = 0x2D + LC_LINKER_OPTIMIZATION_HINT = 0x2E + LC_VERSION_MIN_TVOS = 0x2F + LC_VERSION_MIN_WATCHOS = 0x30 + LC_VERSION_NOTE = 0x31 + LC_BUILD_VERSION = 0x32 + LC_DYLD_EXPORTS_TRIE = 0x80000033 + LC_DYLD_CHAINED_FIXUPS = 0x80000034 +) + +const ( + S_REGULAR = 0x0 + S_ZEROFILL = 0x1 + S_NON_LAZY_SYMBOL_POINTERS = 0x6 + S_SYMBOL_STUBS = 0x8 + S_MOD_INIT_FUNC_POINTERS = 0x9 + S_ATTR_PURE_INSTRUCTIONS = 0x80000000 + S_ATTR_DEBUG = 0x02000000 + S_ATTR_SOME_INSTRUCTIONS = 0x00000400 +) + +const ( + PLATFORM_MACOS MachoPlatform = 1 + PLATFORM_IOS MachoPlatform = 2 + PLATFORM_TVOS MachoPlatform = 3 + PLATFORM_WATCHOS MachoPlatform = 4 + PLATFORM_BRIDGEOS MachoPlatform = 5 +) + +// rebase table opcode +const ( + REBASE_TYPE_POINTER = 1 + REBASE_TYPE_TEXT_ABSOLUTE32 = 2 + REBASE_TYPE_TEXT_PCREL32 = 3 + + REBASE_OPCODE_MASK = 0xF0 + REBASE_IMMEDIATE_MASK = 0x0F + REBASE_OPCODE_DONE = 0x00 + REBASE_OPCODE_SET_TYPE_IMM = 0x10 + REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB = 0x20 + REBASE_OPCODE_ADD_ADDR_ULEB = 0x30 + REBASE_OPCODE_ADD_ADDR_IMM_SCALED = 0x40 + REBASE_OPCODE_DO_REBASE_IMM_TIMES = 0x50 + REBASE_OPCODE_DO_REBASE_ULEB_TIMES = 0x60 + REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB = 0x70 + REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB = 0x80 +) + +// bind table opcode +const ( + BIND_TYPE_POINTER = 1 + BIND_TYPE_TEXT_ABSOLUTE32 = 2 + BIND_TYPE_TEXT_PCREL32 = 3 + + BIND_SPECIAL_DYLIB_SELF = 0 + BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE = -1 + BIND_SPECIAL_DYLIB_FLAT_LOOKUP = -2 + BIND_SPECIAL_DYLIB_WEAK_LOOKUP = -3 + + BIND_OPCODE_MASK = 0xF0 + BIND_IMMEDIATE_MASK = 0x0F + BIND_OPCODE_DONE = 
0x00 + BIND_OPCODE_SET_DYLIB_ORDINAL_IMM = 0x10 + BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB = 0x20 + BIND_OPCODE_SET_DYLIB_SPECIAL_IMM = 0x30 + BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM = 0x40 + BIND_OPCODE_SET_TYPE_IMM = 0x50 + BIND_OPCODE_SET_ADDEND_SLEB = 0x60 + BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB = 0x70 + BIND_OPCODE_ADD_ADDR_ULEB = 0x80 + BIND_OPCODE_DO_BIND = 0x90 + BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB = 0xA0 + BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED = 0xB0 + BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB = 0xC0 + BIND_OPCODE_THREADED = 0xD0 + BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB = 0x00 + BIND_SUBOPCODE_THREADED_APPLY = 0x01 +) + +const machoHeaderSize64 = 8 * 4 // size of 64-bit Mach-O header + +// Mach-O file writing +// https://developer.apple.com/mac/library/DOCUMENTATION/DeveloperTools/Conceptual/MachORuntime/Reference/reference.html + +var machohdr MachoHdr + +var load []MachoLoad + +var machoPlatform MachoPlatform + +var seg [16]MachoSeg + +var nseg int + +var ndebug int + +var nsect int + +const ( + SymKindLocal = 0 + iota + SymKindExtdef + SymKindUndef + NumSymKind +) + +var nkind [NumSymKind]int + +var sortsym []loader.Sym + +var nsortsym int + +// Amount of space left for adding load commands +// that refer to dynamic libraries. Because these have +// to go in the Mach-O header, we can't just pick a +// "big enough" header size. The initial header is +// one page, the non-dynamic library stuff takes +// up about 1300 bytes; we overestimate that as 2k. 
+var loadBudget = INITIAL_MACHO_HEADR - 2*1024 + +func getMachoHdr() *MachoHdr { + return &machohdr +} + +func newMachoLoad(arch *sys.Arch, type_ uint32, ndata uint32) *MachoLoad { + if arch.PtrSize == 8 && (ndata&1 != 0) { + ndata++ + } + + load = append(load, MachoLoad{}) + l := &load[len(load)-1] + l.type_ = type_ + l.data = make([]uint32, ndata) + return l +} + +func newMachoSeg(name string, msect int) *MachoSeg { + if nseg >= len(seg) { + Exitf("too many segs") + } + + s := &seg[nseg] + nseg++ + s.name = name + s.msect = uint32(msect) + s.sect = make([]MachoSect, msect) + return s +} + +func newMachoSect(seg *MachoSeg, name string, segname string) *MachoSect { + if seg.nsect >= seg.msect { + Exitf("too many sects in segment %s", seg.name) + } + + s := &seg.sect[seg.nsect] + seg.nsect++ + s.name = name + s.segname = segname + nsect++ + return s +} + +// Generic linking code. + +var dylib []string + +var linkoff int64 + +func machowrite(ctxt *Link, arch *sys.Arch, out *OutBuf, linkmode LinkMode) int { + o1 := out.Offset() + + loadsize := 4 * 4 * ndebug + for i := range load { + loadsize += 4 * (len(load[i].data) + 2) + } + if arch.PtrSize == 8 { + loadsize += 18 * 4 * nseg + loadsize += 20 * 4 * nsect + } else { + loadsize += 14 * 4 * nseg + loadsize += 17 * 4 * nsect + } + + if arch.PtrSize == 8 { + out.Write32(MH_MAGIC_64) + } else { + out.Write32(MH_MAGIC) + } + out.Write32(machohdr.cpu) + out.Write32(machohdr.subcpu) + if linkmode == LinkExternal { + out.Write32(MH_OBJECT) /* file type - mach object */ + } else { + out.Write32(MH_EXECUTE) /* file type - mach executable */ + } + out.Write32(uint32(len(load)) + uint32(nseg) + uint32(ndebug)) + out.Write32(uint32(loadsize)) + flags := uint32(0) + if nkind[SymKindUndef] == 0 { + flags |= MH_NOUNDEFS + } + if ctxt.IsPIE() && linkmode == LinkInternal { + flags |= MH_PIE | MH_DYLDLINK + } + out.Write32(flags) /* flags */ + if arch.PtrSize == 8 { + out.Write32(0) /* reserved */ + } + + for i := 0; i < nseg; i++ { + 
s := &seg[i] + if arch.PtrSize == 8 { + out.Write32(LC_SEGMENT_64) + out.Write32(72 + 80*s.nsect) + out.WriteStringN(s.name, 16) + out.Write64(s.vaddr) + out.Write64(s.vsize) + out.Write64(s.fileoffset) + out.Write64(s.filesize) + out.Write32(s.prot1) + out.Write32(s.prot2) + out.Write32(s.nsect) + out.Write32(s.flag) + } else { + out.Write32(LC_SEGMENT) + out.Write32(56 + 68*s.nsect) + out.WriteStringN(s.name, 16) + out.Write32(uint32(s.vaddr)) + out.Write32(uint32(s.vsize)) + out.Write32(uint32(s.fileoffset)) + out.Write32(uint32(s.filesize)) + out.Write32(s.prot1) + out.Write32(s.prot2) + out.Write32(s.nsect) + out.Write32(s.flag) + } + + for j := uint32(0); j < s.nsect; j++ { + t := &s.sect[j] + if arch.PtrSize == 8 { + out.WriteStringN(t.name, 16) + out.WriteStringN(t.segname, 16) + out.Write64(t.addr) + out.Write64(t.size) + out.Write32(t.off) + out.Write32(t.align) + out.Write32(t.reloc) + out.Write32(t.nreloc) + out.Write32(t.flag) + out.Write32(t.res1) /* reserved */ + out.Write32(t.res2) /* reserved */ + out.Write32(0) /* reserved */ + } else { + out.WriteStringN(t.name, 16) + out.WriteStringN(t.segname, 16) + out.Write32(uint32(t.addr)) + out.Write32(uint32(t.size)) + out.Write32(t.off) + out.Write32(t.align) + out.Write32(t.reloc) + out.Write32(t.nreloc) + out.Write32(t.flag) + out.Write32(t.res1) /* reserved */ + out.Write32(t.res2) /* reserved */ + } + } + } + + for i := range load { + l := &load[i] + out.Write32(l.type_) + out.Write32(4 * (uint32(len(l.data)) + 2)) + for j := 0; j < len(l.data); j++ { + out.Write32(l.data[j]) + } + } + + return int(out.Offset() - o1) +} + +func (ctxt *Link) domacho() { + if *FlagD { + return + } + + // Copy platform load command. 
+ for _, h := range hostobj { + load, err := hostobjMachoPlatform(&h) + if err != nil { + Exitf("%v", err) + } + if load != nil { + machoPlatform = load.platform + ml := newMachoLoad(ctxt.Arch, load.cmd.type_, uint32(len(load.cmd.data))) + copy(ml.data, load.cmd.data) + break + } + } + if machoPlatform == 0 { + machoPlatform = PLATFORM_MACOS + if buildcfg.GOOS == "ios" { + machoPlatform = PLATFORM_IOS + } + if ctxt.LinkMode == LinkInternal && machoPlatform == PLATFORM_MACOS { + var version uint32 + switch ctxt.Arch.Family { + case sys.AMD64: + // This must be fairly recent for Apple signing (go.dev/issue/30488). + // Having too old a version here was also implicated in some problems + // calling into macOS libraries (go.dev/issue/56784). + // In general this can be the most recent supported macOS version. + version = 10<<16 | 13<<8 | 0<<0 // 10.13.0 + case sys.ARM64: + version = 11<<16 | 0<<8 | 0<<0 // 11.0.0 + } + ml := newMachoLoad(ctxt.Arch, LC_BUILD_VERSION, 4) + ml.data[0] = uint32(machoPlatform) + ml.data[1] = version // OS version + ml.data[2] = version // SDK version + ml.data[3] = 0 // ntools + } + } + + // empirically, string table must begin with " \x00". 
+ s := ctxt.loader.LookupOrCreateSym(".machosymstr", 0) + sb := ctxt.loader.MakeSymbolUpdater(s) + + sb.SetType(sym.SMACHOSYMSTR) + sb.SetReachable(true) + sb.AddUint8(' ') + sb.AddUint8('\x00') + + s = ctxt.loader.LookupOrCreateSym(".machosymtab", 0) + sb = ctxt.loader.MakeSymbolUpdater(s) + sb.SetType(sym.SMACHOSYMTAB) + sb.SetReachable(true) + + if ctxt.IsInternal() { + s = ctxt.loader.LookupOrCreateSym(".plt", 0) // will be __symbol_stub + sb = ctxt.loader.MakeSymbolUpdater(s) + sb.SetType(sym.SMACHOPLT) + sb.SetReachable(true) + + s = ctxt.loader.LookupOrCreateSym(".got", 0) // will be __nl_symbol_ptr + sb = ctxt.loader.MakeSymbolUpdater(s) + sb.SetType(sym.SMACHOGOT) + sb.SetReachable(true) + sb.SetAlign(4) + + s = ctxt.loader.LookupOrCreateSym(".linkedit.plt", 0) // indirect table for .plt + sb = ctxt.loader.MakeSymbolUpdater(s) + sb.SetType(sym.SMACHOINDIRECTPLT) + sb.SetReachable(true) + + s = ctxt.loader.LookupOrCreateSym(".linkedit.got", 0) // indirect table for .got + sb = ctxt.loader.MakeSymbolUpdater(s) + sb.SetType(sym.SMACHOINDIRECTGOT) + sb.SetReachable(true) + } + + // Add a dummy symbol that will become the __asm marker section. + if ctxt.IsExternal() { + s = ctxt.loader.LookupOrCreateSym(".llvmasm", 0) + sb = ctxt.loader.MakeSymbolUpdater(s) + sb.SetType(sym.SMACHO) + sb.SetReachable(true) + sb.AddUint8(0) + } + + // Un-export runtime symbols from plugins. Since the runtime + // is included in both the main binary and each plugin, these + // symbols appear in both images. If we leave them exported in + // the plugin, then the dynamic linker will resolve + // relocations to these functions in the plugin's functab to + // point to the main image, causing the runtime to think the + // plugin's functab is corrupted. By unexporting them, these + // become static references, which are resolved to the + // plugin's text. + // + // It would be better to omit the runtime from plugins. 
(Using
+	// relative PCs in the functab instead of relocations would
+	// also address this.)
+	//
+	// See issue #18190.
+	if ctxt.BuildMode == BuildModePlugin {
+		for _, name := range []string{"_cgo_topofstack", "__cgo_topofstack", "_cgo_panic", "crosscall2"} {
+			// Most of these are data symbols or C
+			// symbols, so they have symbol version 0.
+			ver := 0
+			// _cgo_panic is a Go function, so it uses ABIInternal.
+			if name == "_cgo_panic" {
+				ver = abiInternalVer
+			}
+			s := ctxt.loader.Lookup(name, ver)
+			if s != 0 {
+				ctxt.loader.SetAttrCgoExportDynamic(s, false)
+			}
+		}
+	}
+}
+
+func machoadddynlib(lib string, linkmode LinkMode) {
+	if seenlib[lib] || linkmode == LinkExternal {
+		return
+	}
+	seenlib[lib] = true
+
+	// Will need to store the library name rounded up
+	// and 24 bytes of header metadata. If not enough
+	// space, grab another page of initial space at the
+	// beginning of the output file.
+	loadBudget -= (len(lib)+7)/8*8 + 24
+
+	if loadBudget < 0 {
+		HEADR += 4096
+		*FlagTextAddr += 4096
+		loadBudget += 4096
+	}
+
+	dylib = append(dylib, lib)
+}
+
+func machoshbits(ctxt *Link, mseg *MachoSeg, sect *sym.Section, segname string) {
+	buf := "__" + strings.Replace(sect.Name[1:], ".", "_", -1)
+
+	msect := newMachoSect(mseg, buf, segname)
+
+	if sect.Rellen > 0 {
+		msect.reloc = uint32(sect.Reloff)
+		msect.nreloc = uint32(sect.Rellen / 8)
+	}
+
+	for 1<<msect.align < sect.Align {
+		msect.align++
+	}
+	msect.addr = sect.Vaddr
+	msect.size = sect.Length
+
+	if sect.Vaddr < sect.Seg.Vaddr+sect.Seg.Filelen {
+		// data in file
+		if sect.Length > sect.Seg.Vaddr+sect.Seg.Filelen-sect.Vaddr {
+			Errorf(nil, "macho cannot represent section %s crossing data and bss", sect.Name)
+		}
+		msect.off = uint32(sect.Seg.Fileoff + sect.Vaddr - sect.Seg.Vaddr)
+	} else {
+		msect.off = 0
+		msect.flag |= S_ZEROFILL
+	}
+
+	if sect.Rwx&1 != 0 {
+		msect.flag |= S_ATTR_SOME_INSTRUCTIONS
+	}
+
+	if sect.Name == ".text" {
+		msect.flag |= S_ATTR_PURE_INSTRUCTIONS
+	}
+
+	if sect.Name == ".plt" {
+		msect.name = "__symbol_stub1"
+		msect.flag = S_ATTR_PURE_INSTRUCTIONS | S_ATTR_SOME_INSTRUCTIONS | S_SYMBOL_STUBS
+		msect.res1 = 0 //nkind[SymKindLocal];
+		msect.res2 = 6
+ } + + if sect.Name == ".got" { + msect.name = "__nl_symbol_ptr" + msect.flag = S_NON_LAZY_SYMBOL_POINTERS + msect.res1 = uint32(ctxt.loader.SymSize(ctxt.ArchSyms.LinkEditPLT) / 4) /* offset into indirect symbol table */ + } + + if sect.Name == ".init_array" { + msect.name = "__mod_init_func" + msect.flag = S_MOD_INIT_FUNC_POINTERS + } + + // Some platforms such as watchOS and tvOS require binaries with + // bitcode enabled. The Go toolchain can't output bitcode, so use + // a marker section in the __LLVM segment, "__asm", to tell the Apple + // toolchain that the Go text came from assembler and thus has no + // bitcode. This is not true, but Kotlin/Native, Rust and Flutter + // are also using this trick. + if sect.Name == ".llvmasm" { + msect.name = "__asm" + msect.segname = "__LLVM" + } + + if segname == "__DWARF" { + msect.flag |= S_ATTR_DEBUG + } +} + +func asmbMacho(ctxt *Link) { + machlink := doMachoLink(ctxt) + if ctxt.IsExternal() { + symo := int64(Segdwarf.Fileoff + uint64(Rnd(int64(Segdwarf.Filelen), *FlagRound)) + uint64(machlink)) + ctxt.Out.SeekSet(symo) + machoEmitReloc(ctxt) + } + ctxt.Out.SeekSet(0) + + ldr := ctxt.loader + + /* apple MACH */ + va := *FlagTextAddr - int64(HEADR) + + mh := getMachoHdr() + switch ctxt.Arch.Family { + default: + Exitf("unknown macho architecture: %v", ctxt.Arch.Family) + + case sys.AMD64: + mh.cpu = MACHO_CPU_AMD64 + mh.subcpu = MACHO_SUBCPU_X86 + + case sys.ARM64: + mh.cpu = MACHO_CPU_ARM64 + mh.subcpu = MACHO_SUBCPU_ARM64_ALL + } + + var ms *MachoSeg + if ctxt.LinkMode == LinkExternal { + /* segment for entire file */ + ms = newMachoSeg("", 40) + + ms.fileoffset = Segtext.Fileoff + ms.filesize = Segdwarf.Fileoff + Segdwarf.Filelen - Segtext.Fileoff + ms.vsize = Segdwarf.Vaddr + Segdwarf.Length - Segtext.Vaddr + } + + /* segment for zero page */ + if ctxt.LinkMode != LinkExternal { + ms = newMachoSeg("__PAGEZERO", 0) + ms.vsize = uint64(va) + } + + /* text */ + v := Rnd(int64(uint64(HEADR)+Segtext.Length), 
*FlagRound) + + var mstext *MachoSeg + if ctxt.LinkMode != LinkExternal { + ms = newMachoSeg("__TEXT", 20) + ms.vaddr = uint64(va) + ms.vsize = uint64(v) + ms.fileoffset = 0 + ms.filesize = uint64(v) + ms.prot1 = 7 + ms.prot2 = 5 + mstext = ms + } + + for _, sect := range Segtext.Sections { + machoshbits(ctxt, ms, sect, "__TEXT") + } + + /* rodata */ + if ctxt.LinkMode != LinkExternal && Segrelrodata.Length > 0 { + ms = newMachoSeg("__DATA_CONST", 20) + ms.vaddr = Segrelrodata.Vaddr + ms.vsize = Segrelrodata.Length + ms.fileoffset = Segrelrodata.Fileoff + ms.filesize = Segrelrodata.Filelen + ms.prot1 = 3 + ms.prot2 = 3 + ms.flag = 0x10 // SG_READ_ONLY + } + + for _, sect := range Segrelrodata.Sections { + machoshbits(ctxt, ms, sect, "__DATA_CONST") + } + + /* data */ + if ctxt.LinkMode != LinkExternal { + ms = newMachoSeg("__DATA", 20) + ms.vaddr = Segdata.Vaddr + ms.vsize = Segdata.Length + ms.fileoffset = Segdata.Fileoff + ms.filesize = Segdata.Filelen + ms.prot1 = 3 + ms.prot2 = 3 + } + + for _, sect := range Segdata.Sections { + machoshbits(ctxt, ms, sect, "__DATA") + } + + /* dwarf */ + if !*FlagW { + if ctxt.LinkMode != LinkExternal { + ms = newMachoSeg("__DWARF", 20) + ms.vaddr = Segdwarf.Vaddr + ms.vsize = 0 + ms.fileoffset = Segdwarf.Fileoff + ms.filesize = Segdwarf.Filelen + } + for _, sect := range Segdwarf.Sections { + machoshbits(ctxt, ms, sect, "__DWARF") + } + } + + if ctxt.LinkMode != LinkExternal { + switch ctxt.Arch.Family { + default: + Exitf("unknown macho architecture: %v", ctxt.Arch.Family) + + case sys.AMD64: + ml := newMachoLoad(ctxt.Arch, LC_UNIXTHREAD, 42+2) + ml.data[0] = 4 /* thread type */ + ml.data[1] = 42 /* word count */ + ml.data[2+32] = uint32(Entryvalue(ctxt)) /* start pc */ + ml.data[2+32+1] = uint32(Entryvalue(ctxt) >> 32) + + case sys.ARM64: + ml := newMachoLoad(ctxt.Arch, LC_MAIN, 4) + ml.data[0] = uint32(uint64(Entryvalue(ctxt)) - (Segtext.Vaddr - uint64(HEADR))) + ml.data[1] = uint32((uint64(Entryvalue(ctxt)) - 
(Segtext.Vaddr - uint64(HEADR))) >> 32) + } + } + + var codesigOff int64 + if !*FlagD { + // must match doMachoLink below + s1 := ldr.SymSize(ldr.Lookup(".machorebase", 0)) + s2 := ldr.SymSize(ldr.Lookup(".machobind", 0)) + s3 := ldr.SymSize(ldr.Lookup(".machosymtab", 0)) + s4 := ldr.SymSize(ctxt.ArchSyms.LinkEditPLT) + s5 := ldr.SymSize(ctxt.ArchSyms.LinkEditGOT) + s6 := ldr.SymSize(ldr.Lookup(".machosymstr", 0)) + s7 := ldr.SymSize(ldr.Lookup(".machocodesig", 0)) + + if ctxt.LinkMode != LinkExternal { + ms := newMachoSeg("__LINKEDIT", 0) + ms.vaddr = uint64(Rnd(int64(Segdata.Vaddr+Segdata.Length), *FlagRound)) + ms.vsize = uint64(s1 + s2 + s3 + s4 + s5 + s6 + s7) + ms.fileoffset = uint64(linkoff) + ms.filesize = ms.vsize + ms.prot1 = 1 + ms.prot2 = 1 + + codesigOff = linkoff + s1 + s2 + s3 + s4 + s5 + s6 + } + + if ctxt.LinkMode != LinkExternal && ctxt.IsPIE() { + ml := newMachoLoad(ctxt.Arch, LC_DYLD_INFO_ONLY, 10) + ml.data[0] = uint32(linkoff) // rebase off + ml.data[1] = uint32(s1) // rebase size + ml.data[2] = uint32(linkoff + s1) // bind off + ml.data[3] = uint32(s2) // bind size + ml.data[4] = 0 // weak bind off + ml.data[5] = 0 // weak bind size + ml.data[6] = 0 // lazy bind off + ml.data[7] = 0 // lazy bind size + ml.data[8] = 0 // export + ml.data[9] = 0 // export size + } + + ml := newMachoLoad(ctxt.Arch, LC_SYMTAB, 4) + ml.data[0] = uint32(linkoff + s1 + s2) /* symoff */ + ml.data[1] = uint32(nsortsym) /* nsyms */ + ml.data[2] = uint32(linkoff + s1 + s2 + s3 + s4 + s5) /* stroff */ + ml.data[3] = uint32(s6) /* strsize */ + + if ctxt.LinkMode != LinkExternal { + machodysymtab(ctxt, linkoff+s1+s2) + + ml := newMachoLoad(ctxt.Arch, LC_LOAD_DYLINKER, 6) + ml.data[0] = 12 /* offset to string */ + stringtouint32(ml.data[1:], "/usr/lib/dyld") + + for _, lib := range dylib { + ml = newMachoLoad(ctxt.Arch, LC_LOAD_DYLIB, 4+(uint32(len(lib))+1+7)/8*2) + ml.data[0] = 24 /* offset of string from beginning of load */ + ml.data[1] = 0 /* time stamp */ + ml.data[2] 
= 0 /* version */ + ml.data[3] = 0 /* compatibility version */ + stringtouint32(ml.data[4:], lib) + } + } + + if ctxt.IsInternal() && ctxt.NeedCodeSign() { + ml := newMachoLoad(ctxt.Arch, LC_CODE_SIGNATURE, 2) + ml.data[0] = uint32(codesigOff) + ml.data[1] = uint32(s7) + } + } + + a := machowrite(ctxt, ctxt.Arch, ctxt.Out, ctxt.LinkMode) + if int32(a) > HEADR { + Exitf("HEADR too small: %d > %d", a, HEADR) + } + + // Now we have written everything. Compute the code signature (which + // is a hash of the file content, so it must be done at last.) + if ctxt.IsInternal() && ctxt.NeedCodeSign() { + cs := ldr.Lookup(".machocodesig", 0) + data := ctxt.Out.Data() + if int64(len(data)) != codesigOff { + panic("wrong size") + } + codesign.Sign(ldr.Data(cs), bytes.NewReader(data), "a.out", codesigOff, int64(mstext.fileoffset), int64(mstext.filesize), ctxt.IsExe() || ctxt.IsPIE()) + ctxt.Out.SeekSet(codesigOff) + ctxt.Out.Write(ldr.Data(cs)) + } +} + +func symkind(ldr *loader.Loader, s loader.Sym) int { + if t := ldr.SymType(s); t == sym.SDYNIMPORT || t == sym.SHOSTOBJ || t == sym.SUNDEFEXT { + return SymKindUndef + } + if ldr.AttrCgoExport(s) { + return SymKindExtdef + } + return SymKindLocal +} + +func collectmachosyms(ctxt *Link) { + ldr := ctxt.loader + + addsym := func(s loader.Sym) { + sortsym = append(sortsym, s) + nkind[symkind(ldr, s)]++ + } + + // On Mach-O, even with -s, we still need to keep dynamically exported and + // referenced symbols. We can strip defined local text and data symbols. + // So *FlagS is applied based on symbol type. + + // Add special runtime.text and runtime.etext symbols (which are local). + // We've already included this symbol in Textp on darwin if ctxt.DynlinkingGo(). + // See data.go:/textaddress + // NOTE: runtime.text.N symbols (if we split text sections) are not added, though, + // so we handle them here. 
+ if !*FlagS { + if !ctxt.DynlinkingGo() { + s := ldr.Lookup("runtime.text", 0) + if ldr.SymType(s) == sym.STEXT { + addsym(s) + } + } + for n := range Segtext.Sections[1:] { + s := ldr.Lookup(fmt.Sprintf("runtime.text.%d", n+1), 0) + if s != 0 { + addsym(s) + } else { + break + } + } + if !ctxt.DynlinkingGo() { + s := ldr.Lookup("runtime.etext", 0) + if ldr.SymType(s) == sym.STEXT { + addsym(s) + } + } + } + + // Add text symbols. + for _, s := range ctxt.Textp { + if *FlagS && !ldr.AttrCgoExportDynamic(s) { + continue + } + addsym(s) + } + + shouldBeInSymbolTable := func(s loader.Sym) bool { + if ldr.AttrNotInSymbolTable(s) { + return false + } + name := ldr.SymName(s) // TODO: try not to read the name + if name == "" || name[0] == '.' { + return false + } + return true + } + + // Add data symbols and external references. + for s := loader.Sym(1); s < loader.Sym(ldr.NSym()); s++ { + if !ldr.AttrReachable(s) { + continue + } + t := ldr.SymType(s) + if t >= sym.SELFRXSECT && t < sym.SXREF { // data sections handled in dodata + if t == sym.STLSBSS { + // TLSBSS is not used on darwin. See data.go:allocateDataSections + continue + } + if !shouldBeInSymbolTable(s) { + continue + } + if *FlagS && !ldr.AttrCgoExportDynamic(s) { + continue + } + addsym(s) + continue + } + + switch t { + case sym.SDYNIMPORT, sym.SHOSTOBJ, sym.SUNDEFEXT: + // Keep dynamic symbol references even if *FlagS. + addsym(s) + } + + // Some 64-bit functions have a "$INODE64" or "$INODE64$UNIX2003" suffix. + if t == sym.SDYNIMPORT && ldr.SymDynimplib(s) == "/usr/lib/libSystem.B.dylib" { + // But only on macOS. 
+ if machoPlatform == PLATFORM_MACOS { + switch n := ldr.SymExtname(s); n { + case "fdopendir": + switch buildcfg.GOARCH { + case "amd64": + ldr.SetSymExtname(s, n+"$INODE64") + } + case "readdir_r", "getfsstat": + switch buildcfg.GOARCH { + case "amd64": + ldr.SetSymExtname(s, n+"$INODE64") + } + } + } + } + } + + nsortsym = len(sortsym) +} + +func machosymorder(ctxt *Link) { + ldr := ctxt.loader + + // On Mac OS X Mountain Lion, we must sort exported symbols + // So we sort them here and pre-allocate dynid for them + // See https://golang.org/issue/4029 + for _, s := range ctxt.dynexp { + if !ldr.AttrReachable(s) { + panic("dynexp symbol is not reachable") + } + } + collectmachosyms(ctxt) + sort.Slice(sortsym[:nsortsym], func(i, j int) bool { + s1 := sortsym[i] + s2 := sortsym[j] + k1 := symkind(ldr, s1) + k2 := symkind(ldr, s2) + if k1 != k2 { + return k1 < k2 + } + return ldr.SymExtname(s1) < ldr.SymExtname(s2) // Note: unnamed symbols are not added in collectmachosyms + }) + for i, s := range sortsym { + ldr.SetSymDynid(s, int32(i)) + } +} + +// AddMachoSym adds s to Mach-O symbol table, used in GenSymLate. +// Currently only used on ARM64 when external linking. +func AddMachoSym(ldr *loader.Loader, s loader.Sym) { + ldr.SetSymDynid(s, int32(nsortsym)) + sortsym = append(sortsym, s) + nsortsym++ + nkind[symkind(ldr, s)]++ +} + +// machoShouldExport reports whether a symbol needs to be exported. +// +// When dynamically linking, all non-local variables and plugin-exported +// symbols need to be exported. 
+func machoShouldExport(ctxt *Link, ldr *loader.Loader, s loader.Sym) bool { + if !ctxt.DynlinkingGo() || ldr.AttrLocal(s) { + return false + } + if ctxt.BuildMode == BuildModePlugin && strings.HasPrefix(ldr.SymExtname(s), objabi.PathToPrefix(*flagPluginPath)) { + return true + } + name := ldr.SymName(s) + if strings.HasPrefix(name, "go:itab.") { + return true + } + if strings.HasPrefix(name, "type:") && !strings.HasPrefix(name, "type:.") { + // reduce runtime typemap pressure, but do not + // export alg functions (type:.*), as these + // appear in pclntable. + return true + } + if strings.HasPrefix(name, "go:link.pkghash") { + return true + } + return ldr.SymType(s) >= sym.SFirstWritable // only writable sections +} + +func machosymtab(ctxt *Link) { + ldr := ctxt.loader + symtab := ldr.CreateSymForUpdate(".machosymtab", 0) + symstr := ldr.CreateSymForUpdate(".machosymstr", 0) + + for _, s := range sortsym[:nsortsym] { + symtab.AddUint32(ctxt.Arch, uint32(symstr.Size())) + + export := machoShouldExport(ctxt, ldr, s) + + // Prefix symbol names with "_" to match the system toolchain. + // (We used to only prefix C symbols, which is all required for the build. + // But some tools don't recognize Go symbols as symbols, so we prefix them + // as well.) + symstr.AddUint8('_') + + // replace "·" as ".", because DTrace cannot handle it. + name := strings.Replace(ldr.SymExtname(s), "·", ".", -1) + + name = mangleABIName(ctxt, ldr, s, name) + symstr.Addstring(name) + + if t := ldr.SymType(s); t == sym.SDYNIMPORT || t == sym.SHOSTOBJ || t == sym.SUNDEFEXT { + symtab.AddUint8(0x01) // type N_EXT, external symbol + symtab.AddUint8(0) // no section + symtab.AddUint16(ctxt.Arch, 0) // desc + symtab.AddUintXX(ctxt.Arch, 0, ctxt.Arch.PtrSize) // no value + } else { + if export || ldr.AttrCgoExportDynamic(s) { + symtab.AddUint8(0x0f) // N_SECT | N_EXT + } else if ldr.AttrCgoExportStatic(s) { + // Only export statically, not dynamically. 
(N_PEXT is like hidden visibility) + symtab.AddUint8(0x1f) // N_SECT | N_EXT | N_PEXT + } else { + symtab.AddUint8(0x0e) // N_SECT + } + o := s + if outer := ldr.OuterSym(o); outer != 0 { + o = outer + } + if ldr.SymSect(o) == nil { + ldr.Errorf(s, "missing section for symbol") + symtab.AddUint8(0) + } else { + symtab.AddUint8(uint8(ldr.SymSect(o).Extnum)) + } + symtab.AddUint16(ctxt.Arch, 0) // desc + symtab.AddUintXX(ctxt.Arch, uint64(ldr.SymAddr(s)), ctxt.Arch.PtrSize) + } + } +} + +func machodysymtab(ctxt *Link, base int64) { + ml := newMachoLoad(ctxt.Arch, LC_DYSYMTAB, 18) + + n := 0 + ml.data[0] = uint32(n) /* ilocalsym */ + ml.data[1] = uint32(nkind[SymKindLocal]) /* nlocalsym */ + n += nkind[SymKindLocal] + + ml.data[2] = uint32(n) /* iextdefsym */ + ml.data[3] = uint32(nkind[SymKindExtdef]) /* nextdefsym */ + n += nkind[SymKindExtdef] + + ml.data[4] = uint32(n) /* iundefsym */ + ml.data[5] = uint32(nkind[SymKindUndef]) /* nundefsym */ + + ml.data[6] = 0 /* tocoffset */ + ml.data[7] = 0 /* ntoc */ + ml.data[8] = 0 /* modtaboff */ + ml.data[9] = 0 /* nmodtab */ + ml.data[10] = 0 /* extrefsymoff */ + ml.data[11] = 0 /* nextrefsyms */ + + ldr := ctxt.loader + + // must match domacholink below + s1 := ldr.SymSize(ldr.Lookup(".machosymtab", 0)) + s2 := ldr.SymSize(ctxt.ArchSyms.LinkEditPLT) + s3 := ldr.SymSize(ctxt.ArchSyms.LinkEditGOT) + ml.data[12] = uint32(base + s1) /* indirectsymoff */ + ml.data[13] = uint32((s2 + s3) / 4) /* nindirectsyms */ + + ml.data[14] = 0 /* extreloff */ + ml.data[15] = 0 /* nextrel */ + ml.data[16] = 0 /* locreloff */ + ml.data[17] = 0 /* nlocrel */ +} + +func doMachoLink(ctxt *Link) int64 { + machosymtab(ctxt) + machoDyldInfo(ctxt) + + ldr := ctxt.loader + + // write data that will be linkedit section + s1 := ldr.Lookup(".machorebase", 0) + s2 := ldr.Lookup(".machobind", 0) + s3 := ldr.Lookup(".machosymtab", 0) + s4 := ctxt.ArchSyms.LinkEditPLT + s5 := ctxt.ArchSyms.LinkEditGOT + s6 := ldr.Lookup(".machosymstr", 0) + + size := 
ldr.SymSize(s1) + ldr.SymSize(s2) + ldr.SymSize(s3) + ldr.SymSize(s4) + ldr.SymSize(s5) + ldr.SymSize(s6) + + // Force the linkedit section to end on a 16-byte + // boundary. This allows pure (non-cgo) Go binaries + // to be code signed correctly. + // + // Apple's codesign_allocate (a helper utility for + // the codesign utility) can do this fine itself if + // it is run on a dynamic Mach-O binary. However, + // when it is run on a pure (non-cgo) Go binary, where + // the linkedit section is mostly empty, it fails to + // account for the extra padding that it itself adds + // when adding the LC_CODE_SIGNATURE load command + // (which must be aligned on a 16-byte boundary). + // + // By forcing the linkedit section to end on a 16-byte + // boundary, codesign_allocate will not need to apply + // any alignment padding itself, working around the + // issue. + if size%16 != 0 { + n := 16 - size%16 + s6b := ldr.MakeSymbolUpdater(s6) + s6b.Grow(s6b.Size() + n) + s6b.SetSize(s6b.Size() + n) + size += n + } + + if size > 0 { + linkoff = Rnd(int64(uint64(HEADR)+Segtext.Length), *FlagRound) + Rnd(int64(Segrelrodata.Filelen), *FlagRound) + Rnd(int64(Segdata.Filelen), *FlagRound) + Rnd(int64(Segdwarf.Filelen), *FlagRound) + ctxt.Out.SeekSet(linkoff) + + ctxt.Out.Write(ldr.Data(s1)) + ctxt.Out.Write(ldr.Data(s2)) + ctxt.Out.Write(ldr.Data(s3)) + ctxt.Out.Write(ldr.Data(s4)) + ctxt.Out.Write(ldr.Data(s5)) + ctxt.Out.Write(ldr.Data(s6)) + + // Add code signature if necessary. This must be the last. + s7 := machoCodeSigSym(ctxt, linkoff+size) + size += ldr.SymSize(s7) + } + + return Rnd(size, *FlagRound) +} + +func machorelocsect(ctxt *Link, out *OutBuf, sect *sym.Section, syms []loader.Sym) { + // If main section has no bits, nothing to relocate. 
+ if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen { + return + } + ldr := ctxt.loader + + for i, s := range syms { + if !ldr.AttrReachable(s) { + continue + } + if uint64(ldr.SymValue(s)) >= sect.Vaddr { + syms = syms[i:] + break + } + } + + eaddr := sect.Vaddr + sect.Length + for _, s := range syms { + if !ldr.AttrReachable(s) { + continue + } + if ldr.SymValue(s) >= int64(eaddr) { + break + } + + // Compute external relocations on the go, and pass to Machoreloc1 + // to stream out. + relocs := ldr.Relocs(s) + for ri := 0; ri < relocs.Count(); ri++ { + r := relocs.At(ri) + rr, ok := extreloc(ctxt, ldr, s, r) + if !ok { + continue + } + if rr.Xsym == 0 { + ldr.Errorf(s, "missing xsym in relocation") + continue + } + if !ldr.AttrReachable(rr.Xsym) { + ldr.Errorf(s, "unreachable reloc %d (%s) target %v", r.Type(), sym.RelocName(ctxt.Arch, r.Type()), ldr.SymName(rr.Xsym)) + } + if !thearch.Machoreloc1(ctxt.Arch, out, ldr, s, rr, int64(uint64(ldr.SymValue(s)+int64(r.Off()))-sect.Vaddr)) { + ldr.Errorf(s, "unsupported obj reloc %d (%s)/%d to %s", r.Type(), sym.RelocName(ctxt.Arch, r.Type()), r.Siz(), ldr.SymName(r.Sym())) + } + } + } + + // sanity check + if uint64(out.Offset()) != sect.Reloff+sect.Rellen { + panic("machorelocsect: size mismatch") + } +} + +func machoEmitReloc(ctxt *Link) { + for ctxt.Out.Offset()&7 != 0 { + ctxt.Out.Write8(0) + } + + sizeExtRelocs(ctxt, thearch.MachorelocSize) + relocSect, wg := relocSectFn(ctxt, machorelocsect) + + relocSect(ctxt, Segtext.Sections[0], ctxt.Textp) + for _, sect := range Segtext.Sections[1:] { + if sect.Name == ".text" { + relocSect(ctxt, sect, ctxt.Textp) + } else { + relocSect(ctxt, sect, ctxt.datap) + } + } + for _, sect := range Segrelrodata.Sections { + relocSect(ctxt, sect, ctxt.datap) + } + for _, sect := range Segdata.Sections { + relocSect(ctxt, sect, ctxt.datap) + } + for i := 0; i < len(Segdwarf.Sections); i++ { + sect := Segdwarf.Sections[i] + si := dwarfp[i] + if si.secSym() != loader.Sym(sect.Sym) || + 
ctxt.loader.SymSect(si.secSym()) != sect { + panic("inconsistency between dwarfp and Segdwarf") + } + relocSect(ctxt, sect, si.syms) + } + wg.Wait() +} + +// hostobjMachoPlatform returns the first platform load command found +// in the host object, if any. +func hostobjMachoPlatform(h *Hostobj) (*MachoPlatformLoad, error) { + f, err := os.Open(h.file) + if err != nil { + return nil, fmt.Errorf("%s: failed to open host object: %v\n", h.file, err) + } + defer f.Close() + sr := io.NewSectionReader(f, h.off, h.length) + m, err := macho.NewFile(sr) + if err != nil { + // Not a valid Mach-O file. + return nil, nil + } + return peekMachoPlatform(m) +} + +// peekMachoPlatform returns the first LC_VERSION_MIN_* or LC_BUILD_VERSION +// load command found in the Mach-O file, if any. +func peekMachoPlatform(m *macho.File) (*MachoPlatformLoad, error) { + for _, cmd := range m.Loads { + raw := cmd.Raw() + ml := MachoLoad{ + type_: m.ByteOrder.Uint32(raw), + } + // Skip the type and command length. + data := raw[8:] + var p MachoPlatform + switch ml.type_ { + case LC_VERSION_MIN_IPHONEOS: + p = PLATFORM_IOS + case LC_VERSION_MIN_MACOSX: + p = PLATFORM_MACOS + case LC_VERSION_MIN_WATCHOS: + p = PLATFORM_WATCHOS + case LC_VERSION_MIN_TVOS: + p = PLATFORM_TVOS + case LC_BUILD_VERSION: + p = MachoPlatform(m.ByteOrder.Uint32(data)) + default: + continue + } + ml.data = make([]uint32, len(data)/4) + r := bytes.NewReader(data) + if err := binary.Read(r, m.ByteOrder, &ml.data); err != nil { + return nil, err + } + return &MachoPlatformLoad{ + platform: p, + cmd: ml, + }, nil + } + return nil, nil +} + +// A rebase entry tells the dynamic linker the data at sym+off needs to be +// relocated when the in-memory image moves. (This is somewhat like, say, +// ELF R_X86_64_RELATIVE). +// For now, the only kind of entry we support is that the data is an absolute +// address. That seems all we need. +// In the binary it uses a compact stateful bytecode encoding. 
So we record +// entries as we go and build the table at the end. +type machoRebaseRecord struct { + sym loader.Sym + off int64 +} + +var machorebase []machoRebaseRecord + +func MachoAddRebase(s loader.Sym, off int64) { + machorebase = append(machorebase, machoRebaseRecord{s, off}) +} + +// A bind entry tells the dynamic linker the data at GOT+off should be bound +// to the address of the target symbol, which is a dynamic import. +// For now, the only kind of entry we support is that the data is an absolute +// address, and the source symbol is always the GOT. That seems all we need. +// In the binary it uses a compact stateful bytecode encoding. So we record +// entries as we go and build the table at the end. +type machoBindRecord struct { + off int64 + targ loader.Sym +} + +var machobind []machoBindRecord + +func MachoAddBind(off int64, targ loader.Sym) { + machobind = append(machobind, machoBindRecord{off, targ}) +} + +// Generate data for the dynamic linker, used in LC_DYLD_INFO_ONLY load command. +// See mach-o/loader.h, struct dyld_info_command, for the encoding. +// e.g. https://opensource.apple.com/source/xnu/xnu-6153.81.5/EXTERNAL_HEADERS/mach-o/loader.h +func machoDyldInfo(ctxt *Link) { + ldr := ctxt.loader + rebase := ldr.CreateSymForUpdate(".machorebase", 0) + bind := ldr.CreateSymForUpdate(".machobind", 0) + + if !(ctxt.IsPIE() && ctxt.IsInternal()) { + return + } + + segId := func(seg *sym.Segment) uint8 { + switch seg { + case &Segtext: + return 1 + case &Segrelrodata: + return 2 + case &Segdata: + if Segrelrodata.Length > 0 { + return 3 + } + return 2 + } + panic("unknown segment") + } + + dylibId := func(s loader.Sym) int { + slib := ldr.SymDynimplib(s) + for i, lib := range dylib { + if lib == slib { + return i + 1 + } + } + return BIND_SPECIAL_DYLIB_FLAT_LOOKUP // don't know where it is from + } + + // Rebase table. + // TODO: use more compact encoding. The encoding is stateful, and + // we can use delta encoding. 
+ rebase.AddUint8(REBASE_OPCODE_SET_TYPE_IMM | REBASE_TYPE_POINTER) + for _, r := range machorebase { + seg := ldr.SymSect(r.sym).Seg + off := uint64(ldr.SymValue(r.sym)+r.off) - seg.Vaddr + rebase.AddUint8(REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | segId(seg)) + rebase.AddUleb(off) + + rebase.AddUint8(REBASE_OPCODE_DO_REBASE_IMM_TIMES | 1) + } + rebase.AddUint8(REBASE_OPCODE_DONE) + sz := Rnd(rebase.Size(), 8) + rebase.Grow(sz) + rebase.SetSize(sz) + + // Bind table. + // TODO: compact encoding, as above. + // TODO: lazy binding? + got := ctxt.GOT + seg := ldr.SymSect(got).Seg + gotAddr := ldr.SymValue(got) + bind.AddUint8(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER) + for _, r := range machobind { + off := uint64(gotAddr+r.off) - seg.Vaddr + bind.AddUint8(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | segId(seg)) + bind.AddUleb(off) + + d := dylibId(r.targ) + if d > 0 && d < 128 { + bind.AddUint8(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | uint8(d)&0xf) + } else if d >= 128 { + bind.AddUint8(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB) + bind.AddUleb(uint64(d)) + } else { // d <= 0 + bind.AddUint8(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | uint8(d)&0xf) + } + + bind.AddUint8(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM) + // target symbol name as a C string, with _ prefix + bind.AddUint8('_') + bind.Addstring(ldr.SymExtname(r.targ)) + + bind.AddUint8(BIND_OPCODE_DO_BIND) + } + bind.AddUint8(BIND_OPCODE_DONE) + sz = Rnd(bind.Size(), 16) // make it 16-byte aligned, see the comment in doMachoLink + bind.Grow(sz) + bind.SetSize(sz) + + // TODO: export table. + // The symbols names are encoded as a trie. I'm really too lazy to do that + // for now. + // Without it, the symbols are not dynamically exported, so they cannot be + // e.g. dlsym'd. But internal linking is not the default in that case, so + // it is fine. +} + +// machoCodeSigSym creates and returns a symbol for code signature. 
+// The symbol context is left as zeros, which will be generated at the end +// (as it depends on the rest of the file). +func machoCodeSigSym(ctxt *Link, codeSize int64) loader.Sym { + ldr := ctxt.loader + cs := ldr.CreateSymForUpdate(".machocodesig", 0) + if !ctxt.NeedCodeSign() || ctxt.IsExternal() { + return cs.Sym() + } + sz := codesign.Size(codeSize, "a.out") + cs.Grow(sz) + cs.SetSize(sz) + return cs.Sym() +} + +// machoCodeSign code-signs Mach-O file fname with an ad-hoc signature. +// This is used for updating an external linker generated binary. +func machoCodeSign(ctxt *Link, fname string) error { + f, err := os.OpenFile(fname, os.O_RDWR, 0) + if err != nil { + return err + } + defer f.Close() + + mf, err := macho.NewFile(f) + if err != nil { + return err + } + if mf.Magic != macho.Magic64 { + Exitf("not 64-bit Mach-O file: %s", fname) + } + + // Find existing LC_CODE_SIGNATURE and __LINKEDIT segment + var sigOff, sigSz, csCmdOff, linkeditOff int64 + var linkeditSeg, textSeg *macho.Segment + loadOff := int64(machoHeaderSize64) + get32 := mf.ByteOrder.Uint32 + for _, l := range mf.Loads { + data := l.Raw() + cmd, sz := get32(data), get32(data[4:]) + if cmd == LC_CODE_SIGNATURE { + sigOff = int64(get32(data[8:])) + sigSz = int64(get32(data[12:])) + csCmdOff = loadOff + } + if seg, ok := l.(*macho.Segment); ok { + switch seg.Name { + case "__LINKEDIT": + linkeditSeg = seg + linkeditOff = loadOff + case "__TEXT": + textSeg = seg + } + } + loadOff += int64(sz) + } + + if sigOff == 0 { + // The C linker doesn't generate a signed binary, for some reason. + // Skip. + return nil + } + + fi, err := f.Stat() + if err != nil { + return err + } + if sigOff+sigSz != fi.Size() { + // We don't expect anything after the signature (this will invalidate + // the signature anyway.) 
+ return fmt.Errorf("unexpected content after code signature") + } + + sz := codesign.Size(sigOff, "a.out") + if sz != sigSz { + // Update the load command, + var tmp [8]byte + mf.ByteOrder.PutUint32(tmp[:4], uint32(sz)) + _, err = f.WriteAt(tmp[:4], csCmdOff+12) + if err != nil { + return err + } + + // Uodate the __LINKEDIT segment. + segSz := sigOff + sz - int64(linkeditSeg.Offset) + mf.ByteOrder.PutUint64(tmp[:8], uint64(segSz)) + _, err = f.WriteAt(tmp[:8], int64(linkeditOff)+int64(unsafe.Offsetof(macho.Segment64{}.Memsz))) + if err != nil { + return err + } + _, err = f.WriteAt(tmp[:8], int64(linkeditOff)+int64(unsafe.Offsetof(macho.Segment64{}.Filesz))) + if err != nil { + return err + } + } + + cs := make([]byte, sz) + codesign.Sign(cs, f, "a.out", sigOff, int64(textSeg.Offset), int64(textSeg.Filesz), ctxt.IsExe() || ctxt.IsPIE()) + _, err = f.WriteAt(cs, sigOff) + if err != nil { + return err + } + err = f.Truncate(sigOff + sz) + return err +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/macho_combine_dwarf.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/macho_combine_dwarf.go new file mode 100644 index 0000000000000000000000000000000000000000..2e8bfcdbed5e6b11a1c1217332b11de8bdc38190 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/macho_combine_dwarf.go @@ -0,0 +1,438 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ld + +import ( + "bytes" + "compress/zlib" + "debug/macho" + "encoding/binary" + "fmt" + "io" + "os" + "reflect" + "unsafe" +) + +type loadCmd struct { + Cmd macho.LoadCmd + Len uint32 +} + +type dyldInfoCmd struct { + Cmd macho.LoadCmd + Len uint32 + RebaseOff, RebaseLen uint32 + BindOff, BindLen uint32 + WeakBindOff, WeakBindLen uint32 + LazyBindOff, LazyBindLen uint32 + ExportOff, ExportLen uint32 +} + +type linkEditDataCmd struct { + Cmd macho.LoadCmd + Len uint32 + DataOff, DataLen uint32 +} + +type encryptionInfoCmd struct { + Cmd macho.LoadCmd + Len uint32 + CryptOff, CryptLen uint32 + CryptId uint32 +} + +type loadCmdReader struct { + offset, next int64 + f *os.File + order binary.ByteOrder +} + +func (r *loadCmdReader) Next() (loadCmd, error) { + var cmd loadCmd + + r.offset = r.next + if _, err := r.f.Seek(r.offset, 0); err != nil { + return cmd, err + } + if err := binary.Read(r.f, r.order, &cmd); err != nil { + return cmd, err + } + r.next = r.offset + int64(cmd.Len) + return cmd, nil +} + +func (r loadCmdReader) ReadAt(offset int64, data interface{}) error { + if _, err := r.f.Seek(r.offset+offset, 0); err != nil { + return err + } + return binary.Read(r.f, r.order, data) +} + +func (r loadCmdReader) WriteAt(offset int64, data interface{}) error { + if _, err := r.f.Seek(r.offset+offset, 0); err != nil { + return err + } + return binary.Write(r.f, r.order, data) +} + +// machoCombineDwarf merges dwarf info generated by dsymutil into a macho executable. +// +// With internal linking, DWARF is embedded into the executable, this lets us do the +// same for external linking. +// exef is the file of the executable with no DWARF. It must have enough room in the macho +// header to add the DWARF sections. (Use ld's -headerpad option) +// exem is the macho representation of exef. +// dsym is the path to the macho file containing DWARF from dsymutil. +// outexe is the path where the combined executable should be saved. 
+func machoCombineDwarf(ctxt *Link, exef *os.File, exem *macho.File, dsym, outexe string) error { + dwarff, err := os.Open(dsym) + if err != nil { + return err + } + defer dwarff.Close() + outf, err := os.OpenFile(outexe, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) + if err != nil { + return err + } + defer outf.Close() + dwarfm, err := macho.NewFile(dwarff) + if err != nil { + return err + } + defer dwarfm.Close() + + // The string table needs to be the last thing in the file + // for code signing to work. So we'll need to move the + // linkedit section, but all the others can be copied directly. + linkseg := exem.Segment("__LINKEDIT") + if linkseg == nil { + return fmt.Errorf("missing __LINKEDIT segment") + } + + if _, err := exef.Seek(0, 0); err != nil { + return err + } + if _, err := io.CopyN(outf, exef, int64(linkseg.Offset)); err != nil { + return err + } + + realdwarf := dwarfm.Segment("__DWARF") + if realdwarf == nil { + return fmt.Errorf("missing __DWARF segment") + } + + // Try to compress the DWARF sections. This includes some Apple + // proprietary sections like __apple_types. + compressedSects, compressedBytes, err := machoCompressSections(ctxt, dwarfm) + if err != nil { + return err + } + + // Now copy the dwarf data into the output. + // Kernel requires all loaded segments to be page-aligned in the file, + // even though we mark this one as being 0 bytes of virtual address space. + dwarfstart := Rnd(int64(linkseg.Offset), *FlagRound) + if _, err := outf.Seek(dwarfstart, 0); err != nil { + return err + } + + if _, err := dwarff.Seek(int64(realdwarf.Offset), 0); err != nil { + return err + } + + // Write out the compressed sections, or the originals if we gave up + // on compressing them. 
+ var dwarfsize uint64 + if compressedBytes != nil { + dwarfsize = uint64(len(compressedBytes)) + if _, err := outf.Write(compressedBytes); err != nil { + return err + } + } else { + if _, err := io.CopyN(outf, dwarff, int64(realdwarf.Filesz)); err != nil { + return err + } + dwarfsize = realdwarf.Filesz + } + + // And finally the linkedit section. + if _, err := exef.Seek(int64(linkseg.Offset), 0); err != nil { + return err + } + linkstart := Rnd(dwarfstart+int64(dwarfsize), *FlagRound) + if _, err := outf.Seek(linkstart, 0); err != nil { + return err + } + if _, err := io.Copy(outf, exef); err != nil { + return err + } + + // Now we need to update the headers. + textsect := exem.Section("__text") + if textsect == nil { + return fmt.Errorf("missing __text section") + } + + cmdOffset := unsafe.Sizeof(exem.FileHeader) + if is64bit := exem.Magic == macho.Magic64; is64bit { + // mach_header_64 has one extra uint32. + cmdOffset += unsafe.Sizeof(exem.Magic) + } + dwarfCmdOffset := uint32(cmdOffset) + exem.FileHeader.Cmdsz + availablePadding := textsect.Offset - dwarfCmdOffset + if availablePadding < realdwarf.Len { + return fmt.Errorf("no room to add dwarf info. Need at least %d padding bytes, found %d", realdwarf.Len, availablePadding) + } + // First, copy the dwarf load command into the header. It will be + // updated later with new offsets and lengths as necessary. 
+ if _, err := outf.Seek(int64(dwarfCmdOffset), 0); err != nil { + return err + } + if _, err := io.CopyN(outf, bytes.NewReader(realdwarf.Raw()), int64(realdwarf.Len)); err != nil { + return err + } + if _, err := outf.Seek(int64(unsafe.Offsetof(exem.FileHeader.Ncmd)), 0); err != nil { + return err + } + if err := binary.Write(outf, exem.ByteOrder, exem.Ncmd+1); err != nil { + return err + } + if err := binary.Write(outf, exem.ByteOrder, exem.Cmdsz+realdwarf.Len); err != nil { + return err + } + + reader := loadCmdReader{next: int64(cmdOffset), f: outf, order: exem.ByteOrder} + for i := uint32(0); i < exem.Ncmd; i++ { + cmd, err := reader.Next() + if err != nil { + return err + } + linkoffset := uint64(linkstart) - linkseg.Offset + switch cmd.Cmd { + case macho.LoadCmdSegment64: + err = machoUpdateSegment(reader, linkseg, linkoffset) + case macho.LoadCmdSegment: + panic("unexpected 32-bit segment") + case LC_DYLD_INFO, LC_DYLD_INFO_ONLY: + err = machoUpdateLoadCommand(reader, linkseg, linkoffset, &dyldInfoCmd{}, "RebaseOff", "BindOff", "WeakBindOff", "LazyBindOff", "ExportOff") + case macho.LoadCmdSymtab: + err = machoUpdateLoadCommand(reader, linkseg, linkoffset, &macho.SymtabCmd{}, "Symoff", "Stroff") + case macho.LoadCmdDysymtab: + err = machoUpdateLoadCommand(reader, linkseg, linkoffset, &macho.DysymtabCmd{}, "Tocoffset", "Modtaboff", "Extrefsymoff", "Indirectsymoff", "Extreloff", "Locreloff") + case LC_CODE_SIGNATURE, LC_SEGMENT_SPLIT_INFO, LC_FUNCTION_STARTS, LC_DATA_IN_CODE, LC_DYLIB_CODE_SIGN_DRS, + LC_DYLD_EXPORTS_TRIE, LC_DYLD_CHAINED_FIXUPS: + err = machoUpdateLoadCommand(reader, linkseg, linkoffset, &linkEditDataCmd{}, "DataOff") + case LC_ENCRYPTION_INFO, LC_ENCRYPTION_INFO_64: + err = machoUpdateLoadCommand(reader, linkseg, linkoffset, &encryptionInfoCmd{}, "CryptOff") + case macho.LoadCmdDylib, macho.LoadCmdThread, macho.LoadCmdUnixThread, + LC_PREBOUND_DYLIB, LC_UUID, LC_VERSION_MIN_MACOSX, LC_VERSION_MIN_IPHONEOS, LC_SOURCE_VERSION, + LC_MAIN, 
LC_LOAD_DYLINKER, LC_LOAD_WEAK_DYLIB, LC_REEXPORT_DYLIB, LC_RPATH, LC_ID_DYLIB, + LC_SYMSEG, LC_LOADFVMLIB, LC_IDFVMLIB, LC_IDENT, LC_FVMFILE, LC_PREPAGE, LC_ID_DYLINKER, + LC_ROUTINES, LC_SUB_FRAMEWORK, LC_SUB_UMBRELLA, LC_SUB_CLIENT, LC_SUB_LIBRARY, LC_TWOLEVEL_HINTS, + LC_PREBIND_CKSUM, LC_ROUTINES_64, LC_LAZY_LOAD_DYLIB, LC_LOAD_UPWARD_DYLIB, LC_DYLD_ENVIRONMENT, + LC_LINKER_OPTION, LC_LINKER_OPTIMIZATION_HINT, LC_VERSION_MIN_TVOS, LC_VERSION_MIN_WATCHOS, + LC_VERSION_NOTE, LC_BUILD_VERSION: + // Nothing to update + default: + err = fmt.Errorf("unknown load command 0x%x (%s)", int(cmd.Cmd), cmd.Cmd) + } + if err != nil { + return err + } + } + // Do the final update of the DWARF segment's load command. + return machoUpdateDwarfHeader(&reader, compressedSects, dwarfsize, dwarfstart, realdwarf) +} + +// machoCompressSections tries to compress the DWARF segments in dwarfm, +// returning the updated sections and segment contents, nils if the sections +// weren't compressed, or an error if there was a problem reading dwarfm. +func machoCompressSections(ctxt *Link, dwarfm *macho.File) ([]*macho.Section, []byte, error) { + if !ctxt.compressDWARF { + return nil, nil, nil + } + + dwarfseg := dwarfm.Segment("__DWARF") + var sects []*macho.Section + var buf bytes.Buffer + + for _, sect := range dwarfm.Sections { + if sect.Seg != "__DWARF" { + continue + } + + // As of writing, there are no relocations in dsymutil's output + // so there's no point in worrying about them. Bail out if that + // changes. 
+ if sect.Nreloc != 0 { + return nil, nil, nil + } + + data, err := sect.Data() + if err != nil { + return nil, nil, err + } + + compressed, contents, err := machoCompressSection(data) + if err != nil { + return nil, nil, err + } + + newSec := *sect + newSec.Offset = uint32(dwarfseg.Offset) + uint32(buf.Len()) + newSec.Addr = dwarfseg.Addr + uint64(buf.Len()) + if compressed { + newSec.Name = "__z" + sect.Name[2:] + newSec.Size = uint64(len(contents)) + } + sects = append(sects, &newSec) + buf.Write(contents) + } + return sects, buf.Bytes(), nil +} + +// machoCompressSection compresses secBytes if it results in less data. +func machoCompressSection(sectBytes []byte) (compressed bool, contents []byte, err error) { + var buf bytes.Buffer + buf.WriteString("ZLIB") + var sizeBytes [8]byte + binary.BigEndian.PutUint64(sizeBytes[:], uint64(len(sectBytes))) + buf.Write(sizeBytes[:]) + + z := zlib.NewWriter(&buf) + if _, err := z.Write(sectBytes); err != nil { + return false, nil, err + } + if err := z.Close(); err != nil { + return false, nil, err + } + if buf.Len() >= len(sectBytes) { + return false, sectBytes, nil + } + return true, buf.Bytes(), nil +} + +// machoUpdateSegment updates the load command for a moved segment. +// Only the linkedit segment should move, and it should have 0 sections. +func machoUpdateSegment(r loadCmdReader, linkseg *macho.Segment, linkoffset uint64) error { + var seg macho.Segment64 + if err := r.ReadAt(0, &seg); err != nil { + return err + } + + // Only the linkedit segment moved, anything before that is fine. + if seg.Offset < linkseg.Offset { + return nil + } + seg.Offset += linkoffset + if err := r.WriteAt(0, &seg); err != nil { + return err + } + // There shouldn't be any sections, but just to make sure... 
+ return machoUpdateSections(r, &seg, linkoffset, nil) +} + +func machoUpdateSections(r loadCmdReader, seg *macho.Segment64, deltaOffset uint64, compressedSects []*macho.Section) error { + nsect := seg.Nsect + if nsect == 0 { + return nil + } + sectOffset := int64(unsafe.Sizeof(*seg)) + + var sect macho.Section64 + sectSize := int64(unsafe.Sizeof(sect)) + for i := uint32(0); i < nsect; i++ { + if err := r.ReadAt(sectOffset, &sect); err != nil { + return err + } + if compressedSects != nil { + cSect := compressedSects[i] + copy(sect.Name[:], cSect.Name) + sect.Size = cSect.Size + if cSect.Offset != 0 { + sect.Offset = cSect.Offset + uint32(deltaOffset) + } + if cSect.Addr != 0 { + sect.Addr = cSect.Addr + } + } else { + if sect.Offset != 0 { + sect.Offset += uint32(deltaOffset) + } + if sect.Reloff != 0 { + sect.Reloff += uint32(deltaOffset) + } + } + if err := r.WriteAt(sectOffset, &sect); err != nil { + return err + } + sectOffset += sectSize + } + return nil +} + +// machoUpdateDwarfHeader updates the DWARF segment load command. +func machoUpdateDwarfHeader(r *loadCmdReader, compressedSects []*macho.Section, dwarfsize uint64, dwarfstart int64, realdwarf *macho.Segment) error { + cmd, err := r.Next() + if err != nil { + return err + } + if cmd.Cmd != macho.LoadCmdSegment64 { + panic("not a Segment64") + } + var seg macho.Segment64 + if err := r.ReadAt(0, &seg); err != nil { + return err + } + seg.Offset = uint64(dwarfstart) + + if compressedSects != nil { + var segSize uint64 + for _, newSect := range compressedSects { + segSize += newSect.Size + } + seg.Filesz = segSize + } else { + seg.Filesz = dwarfsize + } + + // We want the DWARF segment to be considered non-loadable, so + // force vmaddr and vmsize to zero. In addition, set the initial + // protection to zero so as to make the dynamic loader happy, + // since otherwise it may complain that the vm size and file + // size don't match for the segment. See issues 21647 and 32673 + // for more context. 
Also useful to refer to the Apple dynamic + // loader source, specifically ImageLoaderMachO::sniffLoadCommands + // in ImageLoaderMachO.cpp (various versions can be found online, see + // https://opensource.apple.com/source/dyld/dyld-519.2.2/src/ImageLoaderMachO.cpp.auto.html + // as one example). + seg.Addr = 0 + seg.Memsz = 0 + seg.Prot = 0 + + if err := r.WriteAt(0, &seg); err != nil { + return err + } + return machoUpdateSections(*r, &seg, uint64(dwarfstart)-realdwarf.Offset, compressedSects) +} + +func machoUpdateLoadCommand(r loadCmdReader, linkseg *macho.Segment, linkoffset uint64, cmd interface{}, fields ...string) error { + if err := r.ReadAt(0, cmd); err != nil { + return err + } + value := reflect.Indirect(reflect.ValueOf(cmd)) + + for _, name := range fields { + field := value.FieldByName(name) + if fieldval := field.Uint(); fieldval >= linkseg.Offset { + field.SetUint(fieldval + linkoffset) + } + } + if err := r.WriteAt(0, cmd); err != nil { + return err + } + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/main.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/main.go new file mode 100644 index 0000000000000000000000000000000000000000..feb4ba5c1725f1654a188642791230a4fce700fe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/main.go @@ -0,0 +1,522 @@ +// Inferno utils/6l/obj.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/obj.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package ld + +import ( + "bufio" + "cmd/internal/goobj" + "cmd/internal/objabi" + "cmd/internal/quoted" + "cmd/internal/sys" + "cmd/link/internal/benchmark" + "flag" + "internal/buildcfg" + "log" + "os" + "runtime" + "runtime/pprof" + "strconv" + "strings" +) + +var ( + pkglistfornote []byte + windowsgui bool // writes a "GUI binary" instead of a "console binary" + ownTmpDir bool // set to true if tmp dir created by linker (e.g. no -tmpdir) +) + +func init() { + flag.Var(&rpath, "r", "set the ELF dynamic linker search `path` to dir1:dir2:...") + flag.Var(&flagExtld, "extld", "use `linker` when linking in external mode") + flag.Var(&flagExtldflags, "extldflags", "pass `flags` to external linker") + flag.Var(&flagW, "w", "disable DWARF generation") +} + +// Flags used by the linker. The exported flags are used by the architecture-specific packages. 
+var ( + flagBuildid = flag.String("buildid", "", "record `id` as Go toolchain build id") + + flagOutfile = flag.String("o", "", "write output to `file`") + flagPluginPath = flag.String("pluginpath", "", "full path name for plugin") + + flagInstallSuffix = flag.String("installsuffix", "", "set package directory `suffix`") + flagDumpDep = flag.Bool("dumpdep", false, "dump symbol dependency graph") + flagRace = flag.Bool("race", false, "enable race detector") + flagMsan = flag.Bool("msan", false, "enable MSan interface") + flagAsan = flag.Bool("asan", false, "enable ASan interface") + flagAslr = flag.Bool("aslr", true, "enable ASLR for buildmode=c-shared on windows") + + flagFieldTrack = flag.String("k", "", "set field tracking `symbol`") + flagLibGCC = flag.String("libgcc", "", "compiler support lib for internal linking; use \"none\" to disable") + flagTmpdir = flag.String("tmpdir", "", "use `directory` for temporary files") + + flagExtld quoted.Flag + flagExtldflags quoted.Flag + flagExtar = flag.String("extar", "", "archive program for buildmode=c-archive") + + flagCaptureHostObjs = flag.String("capturehostobjs", "", "capture host object files loaded during internal linking to specified dir") + + flagA = flag.Bool("a", false, "no-op (deprecated)") + FlagC = flag.Bool("c", false, "dump call graph") + FlagD = flag.Bool("d", false, "disable dynamic executable") + flagF = flag.Bool("f", false, "ignore version mismatch") + flagG = flag.Bool("g", false, "disable go package data checks") + flagH = flag.Bool("h", false, "halt on error") + flagN = flag.Bool("n", false, "no-op (deprecated)") + FlagS = flag.Bool("s", false, "disable symbol table") + flag8 bool // use 64-bit addresses in symbol table + flagInterpreter = flag.String("I", "", "use `linker` as ELF dynamic linker") + FlagDebugTramp = flag.Int("debugtramp", 0, "debug trampolines") + FlagDebugTextSize = flag.Int("debugtextsize", 0, "debug text section max size") + flagDebugNosplit = flag.Bool("debugnosplit", false, 
"dump nosplit call graph") + FlagStrictDups = flag.Int("strictdups", 0, "sanity check duplicate symbol contents during object file reading (1=warn 2=err).") + FlagRound = flag.Int64("R", -1, "set address rounding `quantum`") + FlagTextAddr = flag.Int64("T", -1, "set the start address of text symbols") + flagEntrySymbol = flag.String("E", "", "set `entry` symbol name") + flagPruneWeakMap = flag.Bool("pruneweakmap", true, "prune weak mapinit refs") + cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`") + memprofile = flag.String("memprofile", "", "write memory profile to `file`") + memprofilerate = flag.Int64("memprofilerate", 0, "set runtime.MemProfileRate to `rate`") + benchmarkFlag = flag.String("benchmark", "", "set to 'mem' or 'cpu' to enable phase benchmarking") + benchmarkFileFlag = flag.String("benchmarkprofile", "", "emit phase profiles to `base`_phase.{cpu,mem}prof") + + flagW ternaryFlag + FlagW = new(bool) // the -w flag, computed in main from flagW +) + +// ternaryFlag is like a boolean flag, but has a default value that is +// neither true nor false, allowing it to be set from context (e.g. from another +// flag). +// *ternaryFlag implements flag.Value. +type ternaryFlag int + +const ( + ternaryFlagUnset ternaryFlag = iota + ternaryFlagFalse + ternaryFlagTrue +) + +func (t *ternaryFlag) Set(s string) error { + v, err := strconv.ParseBool(s) + if err != nil { + return err + } + if v { + *t = ternaryFlagTrue + } else { + *t = ternaryFlagFalse + } + return nil +} + +func (t *ternaryFlag) String() string { + switch *t { + case ternaryFlagFalse: + return "false" + case ternaryFlagTrue: + return "true" + } + return "unset" +} + +func (t *ternaryFlag) IsBoolFlag() bool { return true } // parse like a boolean flag + +// Main is the main entry point for the linker code. 
+func Main(arch *sys.Arch, theArch Arch) { + log.SetPrefix("link: ") + log.SetFlags(0) + + thearch = theArch + ctxt := linknew(arch) + ctxt.Bso = bufio.NewWriter(os.Stdout) + + // For testing behavior of go command when tools crash silently. + // Undocumented, not in standard flag parser to avoid + // exposing in usage message. + for _, arg := range os.Args { + if arg == "-crash_for_testing" { + os.Exit(2) + } + } + + if final := gorootFinal(); final == "$GOROOT" { + // cmd/go sets GOROOT_FINAL to the dummy value "$GOROOT" when -trimpath is set, + // but runtime.GOROOT() should return the empty string, not a bogus value. + // (See https://go.dev/issue/51461.) + } else { + addstrdata1(ctxt, "runtime.defaultGOROOT="+final) + } + + buildVersion := buildcfg.Version + if goexperiment := buildcfg.Experiment.String(); goexperiment != "" { + buildVersion += " X:" + goexperiment + } + addstrdata1(ctxt, "runtime.buildVersion="+buildVersion) + + // TODO(matloob): define these above and then check flag values here + if ctxt.Arch.Family == sys.AMD64 && buildcfg.GOOS == "plan9" { + flag.BoolVar(&flag8, "8", false, "use 64-bit addresses in symbol table") + } + flagHeadType := flag.String("H", "", "set header `type`") + flag.BoolVar(&ctxt.linkShared, "linkshared", false, "link against installed Go shared libraries") + flag.Var(&ctxt.LinkMode, "linkmode", "set link `mode`") + flag.Var(&ctxt.BuildMode, "buildmode", "set build `mode`") + flag.BoolVar(&ctxt.compressDWARF, "compressdwarf", true, "compress DWARF if possible") + objabi.Flagfn1("B", "add an ELF NT_GNU_BUILD_ID `note` when using ELF; use \"gobuildid\" to generate it from the Go build ID", addbuildinfo) + objabi.Flagfn1("L", "add specified `directory` to library path", func(a string) { Lflag(ctxt, a) }) + objabi.AddVersionFlag() // -V + objabi.Flagfn1("X", "add string value `definition` of the form importpath.name=value", func(s string) { addstrdata1(ctxt, s) }) + objabi.Flagcount("v", "print link trace", &ctxt.Debugvlog) + 
objabi.Flagfn1("importcfg", "read import configuration from `file`", ctxt.readImportCfg) + + objabi.Flagparse(usage) + + if ctxt.Debugvlog > 0 { + // dump symbol info on crash + defer func() { ctxt.loader.Dump() }() + } + if ctxt.Debugvlog > 1 { + // dump symbol info on error + AtExit(func() { + if nerrors > 0 { + ctxt.loader.Dump() + } + }) + } + + switch *flagHeadType { + case "": + case "windowsgui": + ctxt.HeadType = objabi.Hwindows + windowsgui = true + default: + if err := ctxt.HeadType.Set(*flagHeadType); err != nil { + Errorf(nil, "%v", err) + usage() + } + } + if ctxt.HeadType == objabi.Hunknown { + ctxt.HeadType.Set(buildcfg.GOOS) + } + + if !*flagAslr && ctxt.BuildMode != BuildModeCShared { + Errorf(nil, "-aslr=false is only allowed for -buildmode=c-shared") + usage() + } + + if *FlagD && ctxt.UsesLibc() { + Exitf("dynamic linking required on %s; -d flag cannot be used", buildcfg.GOOS) + } + + isPowerOfTwo := func(n int64) bool { + return n > 0 && n&(n-1) == 0 + } + if *FlagRound != -1 && (*FlagRound < 4096 || !isPowerOfTwo(*FlagRound)) { + Exitf("invalid -R value 0x%x", *FlagRound) + } + + checkStrictDups = *FlagStrictDups + + switch flagW { + case ternaryFlagFalse: + *FlagW = false + case ternaryFlagTrue: + *FlagW = true + case ternaryFlagUnset: + *FlagW = *FlagS // -s implies -w if not explicitly set + if ctxt.IsDarwin() && ctxt.BuildMode == BuildModeCShared { + *FlagW = true // default to -w in c-shared mode on darwin, see #61229 + } + } + + if !buildcfg.Experiment.RegabiWrappers { + abiInternalVer = 0 + } + + startProfile() + if ctxt.BuildMode == BuildModeUnset { + ctxt.BuildMode.Set("exe") + } + + if ctxt.BuildMode != BuildModeShared && flag.NArg() != 1 { + usage() + } + + if *flagOutfile == "" { + *flagOutfile = "a.out" + if ctxt.HeadType == objabi.Hwindows { + *flagOutfile += ".exe" + } + } + + interpreter = *flagInterpreter + + if *flagBuildid == "" && ctxt.Target.IsOpenbsd() { + // TODO(jsing): Remove once direct syscalls are no longer in use. 
+ // OpenBSD 6.7 onwards will not permit direct syscalls from a + // dynamically linked binary unless it identifies the binary + // contains a .note.go.buildid ELF note. See issue #36435. + *flagBuildid = "go-openbsd" + } + + // enable benchmarking + var bench *benchmark.Metrics + if len(*benchmarkFlag) != 0 { + if *benchmarkFlag == "mem" { + bench = benchmark.New(benchmark.GC, *benchmarkFileFlag) + } else if *benchmarkFlag == "cpu" { + bench = benchmark.New(benchmark.NoGC, *benchmarkFileFlag) + } else { + Errorf(nil, "unknown benchmark flag: %q", *benchmarkFlag) + usage() + } + } + + bench.Start("libinit") + libinit(ctxt) // creates outfile + bench.Start("computeTLSOffset") + ctxt.computeTLSOffset() + bench.Start("Archinit") + thearch.Archinit(ctxt) + + if ctxt.linkShared && !ctxt.IsELF { + Exitf("-linkshared can only be used on elf systems") + } + + if ctxt.Debugvlog != 0 { + onOff := func(b bool) string { + if b { + return "on" + } + return "off" + } + ctxt.Logf("build mode: %s, symbol table: %s, DWARF: %s\n", ctxt.BuildMode, onOff(!*FlagS), onOff(dwarfEnabled(ctxt))) + ctxt.Logf("HEADER = -H%d -T0x%x -R0x%x\n", ctxt.HeadType, uint64(*FlagTextAddr), uint32(*FlagRound)) + } + + zerofp := goobj.FingerprintType{} + switch ctxt.BuildMode { + case BuildModeShared: + for i := 0; i < flag.NArg(); i++ { + arg := flag.Arg(i) + parts := strings.SplitN(arg, "=", 2) + var pkgpath, file string + if len(parts) == 1 { + pkgpath, file = "main", arg + } else { + pkgpath, file = parts[0], parts[1] + } + pkglistfornote = append(pkglistfornote, pkgpath...) 
+ pkglistfornote = append(pkglistfornote, '\n') + addlibpath(ctxt, "command line", "command line", file, pkgpath, "", zerofp) + } + case BuildModePlugin: + addlibpath(ctxt, "command line", "command line", flag.Arg(0), *flagPluginPath, "", zerofp) + default: + addlibpath(ctxt, "command line", "command line", flag.Arg(0), "main", "", zerofp) + } + bench.Start("loadlib") + ctxt.loadlib() + + bench.Start("inittasks") + ctxt.inittasks() + + bench.Start("deadcode") + deadcode(ctxt) + + bench.Start("linksetup") + ctxt.linksetup() + + bench.Start("dostrdata") + ctxt.dostrdata() + if buildcfg.Experiment.FieldTrack { + bench.Start("fieldtrack") + fieldtrack(ctxt.Arch, ctxt.loader) + } + + bench.Start("dwarfGenerateDebugInfo") + dwarfGenerateDebugInfo(ctxt) + + bench.Start("callgraph") + ctxt.callgraph() + + bench.Start("doStackCheck") + ctxt.doStackCheck() + + bench.Start("mangleTypeSym") + ctxt.mangleTypeSym() + + if ctxt.IsELF { + bench.Start("doelf") + ctxt.doelf() + } + if ctxt.IsDarwin() { + bench.Start("domacho") + ctxt.domacho() + } + if ctxt.IsWindows() { + bench.Start("dope") + ctxt.dope() + bench.Start("windynrelocsyms") + ctxt.windynrelocsyms() + } + if ctxt.IsAIX() { + bench.Start("doxcoff") + ctxt.doxcoff() + } + + bench.Start("textbuildid") + ctxt.textbuildid() + bench.Start("addexport") + ctxt.setArchSyms() + ctxt.addexport() + bench.Start("Gentext") + thearch.Gentext(ctxt, ctxt.loader) // trampolines, call stubs, etc. 
+ + bench.Start("textaddress") + ctxt.textaddress() + bench.Start("typelink") + ctxt.typelink() + bench.Start("buildinfo") + ctxt.buildinfo() + bench.Start("pclntab") + containers := ctxt.findContainerSyms() + pclnState := ctxt.pclntab(containers) + bench.Start("findfunctab") + ctxt.findfunctab(pclnState, containers) + bench.Start("dwarfGenerateDebugSyms") + dwarfGenerateDebugSyms(ctxt) + bench.Start("symtab") + symGroupType := ctxt.symtab(pclnState) + bench.Start("dodata") + ctxt.dodata(symGroupType) + bench.Start("address") + order := ctxt.address() + bench.Start("dwarfcompress") + dwarfcompress(ctxt) + bench.Start("layout") + filesize := ctxt.layout(order) + + // Write out the output file. + // It is split into two parts (Asmb and Asmb2). The first + // part writes most of the content (sections and segments), + // for which we have computed the size and offset, in a + // mmap'd region. The second part writes more content, for + // which we don't know the size. + if ctxt.Arch.Family != sys.Wasm { + // Don't mmap if we're building for Wasm. Wasm file + // layout is very different so filesize is meaningless. + if err := ctxt.Out.Mmap(filesize); err != nil { + Exitf("mapping output file failed: %v", err) + } + } + // asmb will redirect symbols to the output file mmap, and relocations + // will be applied directly there. + bench.Start("Asmb") + asmb(ctxt) + + exitIfErrors() + + // Generate additional symbols for the native symbol table just prior + // to code generation. + bench.Start("GenSymsLate") + if thearch.GenSymsLate != nil { + thearch.GenSymsLate(ctxt, ctxt.loader) + } + + bench.Start("Asmb2") + asmb2(ctxt) + + bench.Start("Munmap") + ctxt.Out.Close() // Close handles Munmapping if necessary. 
+ + bench.Start("hostlink") + ctxt.hostlink() + if ctxt.Debugvlog != 0 { + ctxt.Logf("%s", ctxt.loader.Stat()) + ctxt.Logf("%d liveness data\n", liveness) + } + bench.Start("Flush") + ctxt.Bso.Flush() + bench.Start("archive") + ctxt.archive() + bench.Report(os.Stdout) + + errorexit() +} + +type Rpath struct { + set bool + val string +} + +func (r *Rpath) Set(val string) error { + r.set = true + r.val = val + return nil +} + +func (r *Rpath) String() string { + return r.val +} + +func startProfile() { + if *cpuprofile != "" { + f, err := os.Create(*cpuprofile) + if err != nil { + log.Fatalf("%v", err) + } + if err := pprof.StartCPUProfile(f); err != nil { + log.Fatalf("%v", err) + } + AtExit(pprof.StopCPUProfile) + } + if *memprofile != "" { + if *memprofilerate != 0 { + runtime.MemProfileRate = int(*memprofilerate) + } + f, err := os.Create(*memprofile) + if err != nil { + log.Fatalf("%v", err) + } + AtExit(func() { + // Profile all outstanding allocations. + runtime.GC() + // compilebench parses the memory profile to extract memstats, + // which are only written in the legacy pprof format. + // See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap. + const writeLegacyFormat = 1 + if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil { + log.Fatalf("%v", err) + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/msync_darwin_libc.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/msync_darwin_libc.go new file mode 100644 index 0000000000000000000000000000000000000000..6627ecaabb663e4454266a00a8071c312932493f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/msync_darwin_libc.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build darwin + +package ld + +import _ "unsafe" // for go:linkname + +//go:linkname msync syscall.msync +func msync(b []byte, flags int) (err error) diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/nooptcgolink_test.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/nooptcgolink_test.go new file mode 100644 index 0000000000000000000000000000000000000000..646583f13ac62b87dd8849183e06db6231f5c57e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/nooptcgolink_test.go @@ -0,0 +1,29 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "internal/testenv" + "path/filepath" + "testing" +) + +func TestNooptCgoBuild(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + t.Parallel() + + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + dir := t.TempDir() + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-gcflags=-N -l", "-o", filepath.Join(dir, "a.out")) + cmd.Dir = filepath.Join(testenv.GOROOT(t), "src", "runtime", "testdata", "testprogcgo") + out, err := cmd.CombinedOutput() + if err != nil { + t.Logf("go build output: %s", out) + t.Fatal(err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf.go new file mode 100644 index 0000000000000000000000000000000000000000..54fafcaf9963b5b381b2d802f79f504fa25012fd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf.go @@ -0,0 +1,325 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ld + +import ( + "cmd/internal/sys" + "cmd/link/internal/loader" + "encoding/binary" + "errors" + "log" + "os" +) + +// If fallocate is not supported on this platform, return this error. The error +// is ignored where needed, and OutBuf writes to heap memory. +var errNoFallocate = errors.New("operation not supported") + +const outbufMode = 0775 + +// OutBuf is a buffered file writer. +// +// It is similar to the Writer in cmd/internal/bio with a few small differences. +// +// First, it tracks the output architecture and uses it to provide +// endian helpers. +// +// Second, it provides a very cheap offset counter that doesn't require +// any system calls to read the value. +// +// Third, it also mmaps the output file (if available). The intended usage is: +// - Mmap the output file +// - Write the content +// - possibly apply any edits in the output buffer +// - possibly write more content to the file. These writes take place in a heap +// backed buffer that will get synced to disk. +// - Munmap the output file +// +// And finally, it provides a mechanism by which you can multithread the +// writing of output files. This mechanism is accomplished by copying a OutBuf, +// and using it in the thread/goroutine. 
+// +// Parallel OutBuf is intended to be used like: +// +// func write(out *OutBuf) { +// var wg sync.WaitGroup +// for i := 0; i < 10; i++ { +// wg.Add(1) +// view, err := out.View(start[i]) +// if err != nil { +// // handle output +// continue +// } +// go func(out *OutBuf, i int) { +// // do output +// wg.Done() +// }(view, i) +// } +// wg.Wait() +// } +type OutBuf struct { + arch *sys.Arch + off int64 + + buf []byte // backing store of mmap'd output file + heap []byte // backing store for non-mmapped data + + name string + f *os.File + encbuf [8]byte // temp buffer used by WriteN methods + isView bool // true if created from View() +} + +func (out *OutBuf) Open(name string) error { + if out.f != nil { + return errors.New("cannot open more than one file") + } + f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, outbufMode) + if err != nil { + return err + } + out.off = 0 + out.name = name + out.f = f + return nil +} + +func NewOutBuf(arch *sys.Arch) *OutBuf { + return &OutBuf{ + arch: arch, + } +} + +var viewError = errors.New("output not mmapped") + +func (out *OutBuf) View(start uint64) (*OutBuf, error) { + return &OutBuf{ + arch: out.arch, + name: out.name, + buf: out.buf, + heap: out.heap, + off: int64(start), + isView: true, + }, nil +} + +var viewCloseError = errors.New("cannot Close OutBuf from View") + +func (out *OutBuf) Close() error { + if out.isView { + return viewCloseError + } + if out.isMmapped() { + out.copyHeap() + out.purgeSignatureCache() + out.munmap() + } + if out.f == nil { + return nil + } + if len(out.heap) != 0 { + if _, err := out.f.Write(out.heap); err != nil { + return err + } + } + if err := out.f.Close(); err != nil { + return err + } + out.f = nil + return nil +} + +// ErrorClose closes the output file (if any). +// It is supposed to be called only at exit on error, so it doesn't do +// any clean up or buffer flushing, just closes the file. 
+func (out *OutBuf) ErrorClose() { + if out.isView { + panic(viewCloseError) + } + if out.f == nil { + return + } + out.f.Close() // best effort, ignore error + out.f = nil +} + +// isMmapped returns true if the OutBuf is mmaped. +func (out *OutBuf) isMmapped() bool { + return len(out.buf) != 0 +} + +// Data returns the whole written OutBuf as a byte slice. +func (out *OutBuf) Data() []byte { + if out.isMmapped() { + out.copyHeap() + return out.buf + } + return out.heap +} + +// copyHeap copies the heap to the mmapped section of memory, returning true if +// a copy takes place. +func (out *OutBuf) copyHeap() bool { + if !out.isMmapped() { // only valuable for mmapped OutBufs. + return false + } + if out.isView { + panic("can't copyHeap a view") + } + + bufLen := len(out.buf) + heapLen := len(out.heap) + total := uint64(bufLen + heapLen) + if heapLen != 0 { + if err := out.Mmap(total); err != nil { // Mmap will copy out.heap over to out.buf + Exitf("mapping output file failed: %v", err) + } + } + return true +} + +// maxOutBufHeapLen limits the growth of the heap area. +const maxOutBufHeapLen = 10 << 20 + +// writeLoc determines the write location if a buffer is mmaped. +// We maintain two write buffers, an mmapped section, and a heap section for +// writing. When the mmapped section is full, we switch over the heap memory +// for writing. +func (out *OutBuf) writeLoc(lenToWrite int64) (int64, []byte) { + // See if we have enough space in the mmaped area. + bufLen := int64(len(out.buf)) + if out.off+lenToWrite <= bufLen { + return out.off, out.buf + } + + // Not enough space in the mmaped area, write to heap area instead. + heapPos := out.off - bufLen + heapLen := int64(len(out.heap)) + lenNeeded := heapPos + lenToWrite + if lenNeeded > heapLen { // do we need to grow the heap storage? + // The heap variables aren't protected by a mutex. For now, just bomb if you + // try to use OutBuf in parallel. (Note this probably could be fixed.) 
+ if out.isView { + panic("cannot write to heap in parallel") + } + // See if our heap would grow to be too large, and if so, copy it to the end + // of the mmapped area. + if heapLen > maxOutBufHeapLen && out.copyHeap() { + heapPos -= heapLen + lenNeeded = heapPos + lenToWrite + heapLen = 0 + } + out.heap = append(out.heap, make([]byte, lenNeeded-heapLen)...) + } + return heapPos, out.heap +} + +func (out *OutBuf) SeekSet(p int64) { + out.off = p +} + +func (out *OutBuf) Offset() int64 { + return out.off +} + +// Write writes the contents of v to the buffer. +func (out *OutBuf) Write(v []byte) (int, error) { + n := len(v) + pos, buf := out.writeLoc(int64(n)) + copy(buf[pos:], v) + out.off += int64(n) + return n, nil +} + +func (out *OutBuf) Write8(v uint8) { + pos, buf := out.writeLoc(1) + buf[pos] = v + out.off++ +} + +// WriteByte is an alias for Write8 to fulfill the io.ByteWriter interface. +func (out *OutBuf) WriteByte(v byte) error { + out.Write8(v) + return nil +} + +func (out *OutBuf) Write16(v uint16) { + out.arch.ByteOrder.PutUint16(out.encbuf[:], v) + out.Write(out.encbuf[:2]) +} + +func (out *OutBuf) Write32(v uint32) { + out.arch.ByteOrder.PutUint32(out.encbuf[:], v) + out.Write(out.encbuf[:4]) +} + +func (out *OutBuf) Write32b(v uint32) { + binary.BigEndian.PutUint32(out.encbuf[:], v) + out.Write(out.encbuf[:4]) +} + +func (out *OutBuf) Write64(v uint64) { + out.arch.ByteOrder.PutUint64(out.encbuf[:], v) + out.Write(out.encbuf[:8]) +} + +func (out *OutBuf) Write64b(v uint64) { + binary.BigEndian.PutUint64(out.encbuf[:], v) + out.Write(out.encbuf[:8]) +} + +func (out *OutBuf) WriteString(s string) { + pos, buf := out.writeLoc(int64(len(s))) + n := copy(buf[pos:], s) + if n != len(s) { + log.Fatalf("WriteString truncated. buffer size: %d, offset: %d, len(s)=%d", len(out.buf), out.off, len(s)) + } + out.off += int64(n) +} + +// WriteStringN writes the first n bytes of s. +// If n is larger than len(s) then it is padded with zero bytes. 
+func (out *OutBuf) WriteStringN(s string, n int) { + out.WriteStringPad(s, n, zeros[:]) +} + +// WriteStringPad writes the first n bytes of s. +// If n is larger than len(s) then it is padded with the bytes in pad (repeated as needed). +func (out *OutBuf) WriteStringPad(s string, n int, pad []byte) { + if len(s) >= n { + out.WriteString(s[:n]) + } else { + out.WriteString(s) + n -= len(s) + for n > len(pad) { + out.Write(pad) + n -= len(pad) + + } + out.Write(pad[:n]) + } +} + +// WriteSym writes the content of a Symbol, and returns the output buffer +// that we just wrote, so we can apply further edit to the symbol content. +// For generator symbols, it also sets the symbol's Data to the output +// buffer. +func (out *OutBuf) WriteSym(ldr *loader.Loader, s loader.Sym) []byte { + if !ldr.IsGeneratedSym(s) { + P := ldr.Data(s) + n := int64(len(P)) + pos, buf := out.writeLoc(n) + copy(buf[pos:], P) + out.off += n + ldr.FreeData(s) + return buf[pos : pos+n] + } else { + n := ldr.SymSize(s) + pos, buf := out.writeLoc(n) + out.off += n + ldr.MakeSymbolUpdater(s).SetData(buf[pos : pos+n]) + return buf[pos : pos+n] + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_darwin.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_darwin.go new file mode 100644 index 0000000000000000000000000000000000000000..17f7e2a65fc7d94c0ca0cc13916f241df16d80cd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_darwin.go @@ -0,0 +1,48 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "syscall" + "unsafe" +) + +// Implemented in the syscall package. 
+// +//go:linkname fcntl syscall.fcntl +func fcntl(fd int, cmd int, arg int) (int, error) + +func (out *OutBuf) fallocate(size uint64) error { + stat, err := out.f.Stat() + if err != nil { + return err + } + // F_PEOFPOSMODE allocates from the end of the file, so we want the size difference. + // Apparently, it uses the end of the allocation, instead of the logical end of the + // file. + cursize := uint64(stat.Sys().(*syscall.Stat_t).Blocks * 512) // allocated size + if size <= cursize { + return nil + } + + store := &syscall.Fstore_t{ + Flags: syscall.F_ALLOCATEALL, + Posmode: syscall.F_PEOFPOSMODE, + Offset: 0, + Length: int64(size - cursize), + } + + _, err = fcntl(int(out.f.Fd()), syscall.F_PREALLOCATE, int(uintptr(unsafe.Pointer(store)))) + return err +} + +func (out *OutBuf) purgeSignatureCache() { + // Apparently, the Darwin kernel may cache the code signature at mmap. + // When we mmap the output buffer, it doesn't have a code signature + // (as we haven't generated one). Invalidate the kernel cache now that + // we have generated the signature. See issue #42684. + msync(out.buf, syscall.MS_INVALIDATE) + // Best effort. Ignore error. +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_freebsd.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_freebsd.go new file mode 100644 index 0000000000000000000000000000000000000000..7e718c1408e7304965af5e8836a7179a032e63e1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_freebsd.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build freebsd && go1.21 + +package ld + +import ( + "internal/syscall/unix" + "syscall" +) + +func (out *OutBuf) fallocate(size uint64) error { + err := unix.PosixFallocate(int(out.f.Fd()), 0, int64(size)) + // ZFS on FreeBSD does not support posix_fallocate and returns EINVAL in that case. + if err == syscall.EINVAL { + return errNoFallocate + } + return err +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_linux.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..bd9a0c6761c563ed520dd7f9835d5b06ca629ae6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_linux.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import "syscall" + +func (out *OutBuf) fallocate(size uint64) error { + return syscall.Fallocate(int(out.f.Fd()), 0, 0, int64(size)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_mmap.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_mmap.go new file mode 100644 index 0000000000000000000000000000000000000000..2972d8a3fa1712ff5769c248f9ceef1178f0cda0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_mmap.go @@ -0,0 +1,59 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris + +package ld + +import ( + "syscall" +) + +// Mmap maps the output file with the given size. It unmaps the old mapping +// if it is already mapped. It also flushes any in-heap data to the new +// mapping. 
+func (out *OutBuf) Mmap(filesize uint64) (err error) { + oldlen := len(out.buf) + if oldlen != 0 { + out.munmap() + } + + for { + if err = out.fallocate(filesize); err != syscall.EINTR { + break + } + } + if err != nil { + // Some file systems do not support fallocate. We ignore that error as linking + // can still take place, but you might SIGBUS when you write to the mmapped + // area. + if err != syscall.ENOTSUP && err != syscall.EPERM && err != errNoFallocate { + return err + } + } + err = out.f.Truncate(int64(filesize)) + if err != nil { + Exitf("resize output file failed: %v", err) + } + out.buf, err = syscall.Mmap(int(out.f.Fd()), 0, int(filesize), syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED|syscall.MAP_FILE) + if err != nil { + return err + } + + // copy heap to new mapping + if uint64(oldlen+len(out.heap)) > filesize { + panic("mmap size too small") + } + copy(out.buf[oldlen:], out.heap) + out.heap = out.heap[:0] + return nil +} + +func (out *OutBuf) munmap() { + if out.buf == nil { + return + } + syscall.Munmap(out.buf) + out.buf = nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_nofallocate.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_nofallocate.go new file mode 100644 index 0000000000000000000000000000000000000000..435be5e09fe5b950c0a33ffce3d70d0fd9a31363 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_nofallocate.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !darwin && !(freebsd && go1.21) && !linux + +package ld + +func (out *OutBuf) fallocate(size uint64) error { + return errNoFallocate +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_nommap.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_nommap.go new file mode 100644 index 0000000000000000000000000000000000000000..6a40b97c6516d112835ac2f59ca2e812614b34d7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_nommap.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows + +package ld + +// Mmap allocates an in-heap output buffer with the given size. It copies +// any old data (if any) to the new buffer. +func (out *OutBuf) Mmap(filesize uint64) error { + // We need space to put all the symbols before we apply relocations. + oldheap := out.heap + if filesize < uint64(len(oldheap)) { + panic("mmap size too small") + } + out.heap = make([]byte, filesize) + copy(out.heap, oldheap) + return nil +} + +func (out *OutBuf) munmap() { panic("unreachable") } diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_notdarwin.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_notdarwin.go new file mode 100644 index 0000000000000000000000000000000000000000..3e5c67a5c25ce74cef232bdfb3e6dbe0a4c43dbf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_notdarwin.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !darwin + +package ld + +func (out *OutBuf) purgeSignatureCache() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_test.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a7b105f887a3d08db332e5ae62ea4b3d0e5968c1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_test.go @@ -0,0 +1,93 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "path/filepath" + "runtime" + "testing" +) + +// TestMMap ensures that we can actually mmap on every supported platform. +func TestMMap(t *testing.T) { + switch runtime.GOOS { + default: + t.Skip("unsupported OS") + case "aix", "darwin", "ios", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "windows": + } + dir := t.TempDir() + filename := filepath.Join(dir, "foo.out") + ob := NewOutBuf(nil) + if err := ob.Open(filename); err != nil { + t.Fatalf("error opening file: %v", err) + } + defer ob.Close() + if err := ob.Mmap(1 << 20); err != nil { + t.Errorf("error mmapping file %v", err) + } + if !ob.isMmapped() { + t.Errorf("should be mmapped") + } +} + +// TestWriteLoc ensures that the math surrounding writeLoc is correct. 
+func TestWriteLoc(t *testing.T) { + tests := []struct { + bufLen int + off int64 + heapLen int + lenToWrite int64 + expectedHeapLen int + writePos int64 + addressInHeap bool + }{ + {100, 0, 0, 100, 0, 0, false}, + {100, 100, 0, 100, 100, 0, true}, + {10, 10, 0, 100, 100, 0, true}, + {10, 20, 10, 100, 110, 10, true}, + {0, 0, 0, 100, 100, 0, true}, + } + + for i, test := range tests { + ob := &OutBuf{ + buf: make([]byte, test.bufLen), + off: test.off, + heap: make([]byte, test.heapLen), + } + pos, buf := ob.writeLoc(test.lenToWrite) + if pos != test.writePos { + t.Errorf("[%d] position = %d, expected %d", i, pos, test.writePos) + } + message := "mmapped area" + expected := ob.buf + if test.addressInHeap { + message = "heap" + expected = ob.heap + } + if &buf[0] != &expected[0] { + t.Errorf("[%d] expected position to be %q", i, message) + } + if len(ob.heap) != test.expectedHeapLen { + t.Errorf("[%d] expected len(ob.heap) == %d, got %d", i, test.expectedHeapLen, len(ob.heap)) + } + } +} + +func TestIsMmapped(t *testing.T) { + tests := []struct { + length int + expected bool + }{ + {0, false}, + {1, true}, + } + for i, test := range tests { + ob := &OutBuf{buf: make([]byte, test.length)} + if v := ob.isMmapped(); v != test.expected { + + t.Errorf("[%d] isMmapped == %t, expected %t", i, v, test.expected) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_windows.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..95937e781c6a0a54cae90356c15e2aa3fbf5a3e0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/outbuf_windows.go @@ -0,0 +1,79 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ld + +import ( + "internal/unsafeheader" + "syscall" + "unsafe" +) + +// Mmap maps the output file with the given size. It unmaps the old mapping +// if it is already mapped. It also flushes any in-heap data to the new +// mapping. +func (out *OutBuf) Mmap(filesize uint64) error { + oldlen := len(out.buf) + if oldlen != 0 { + out.munmap() + } + + err := out.f.Truncate(int64(filesize)) + if err != nil { + Exitf("resize output file failed: %v", err) + } + + low, high := uint32(filesize), uint32(filesize>>32) + fmap, err := syscall.CreateFileMapping(syscall.Handle(out.f.Fd()), nil, syscall.PAGE_READWRITE, high, low, nil) + if err != nil { + return err + } + defer syscall.CloseHandle(fmap) + + ptr, err := syscall.MapViewOfFile(fmap, syscall.FILE_MAP_READ|syscall.FILE_MAP_WRITE, 0, 0, uintptr(filesize)) + if err != nil { + return err + } + bufHdr := (*unsafeheader.Slice)(unsafe.Pointer(&out.buf)) + bufHdr.Data = unsafe.Pointer(ptr) + bufHdr.Len = int(filesize) + bufHdr.Cap = int(filesize) + + // copy heap to new mapping + if uint64(oldlen+len(out.heap)) > filesize { + panic("mmap size too small") + } + copy(out.buf[oldlen:], out.heap) + out.heap = out.heap[:0] + return nil +} + +func (out *OutBuf) munmap() { + if out.buf == nil { + return + } + // Apparently unmapping without flush may cause ACCESS_DENIED error + // (see issue 38440). + err := syscall.FlushViewOfFile(uintptr(unsafe.Pointer(&out.buf[0])), 0) + if err != nil { + Exitf("FlushViewOfFile failed: %v", err) + } + // Issue 44817: apparently the call below may be needed (according + // to the Windows docs) in addition to the FlushViewOfFile call + // above, " ... to flush all the dirty pages plus the metadata for + // the file and ensure that they are physically written to disk". 
+ // Windows DOC links: + // + // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-flushviewoffile + // https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-flushfilebuffers + err = syscall.FlushFileBuffers(syscall.Handle(out.f.Fd())) + if err != nil { + Exitf("FlushFileBuffers failed: %v", err) + } + err = syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&out.buf[0]))) + out.buf = nil + if err != nil { + Exitf("UnmapViewOfFile failed: %v", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/pcln.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/pcln.go new file mode 100644 index 0000000000000000000000000000000000000000..5734b92507e15b554efe10d8a848912f63bcea3b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/pcln.go @@ -0,0 +1,935 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "cmd/internal/goobj" + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "fmt" + "internal/abi" + "internal/buildcfg" + "os" + "path/filepath" + "strings" +) + +const funcSize = 11 * 4 // funcSize is the size of the _func object in runtime/runtime2.go + +// pclntab holds the state needed for pclntab generation. +type pclntab struct { + // The first and last functions found. + firstFunc, lastFunc loader.Sym + + // Running total size of pclntab. + size int64 + + // runtime.pclntab's symbols + carrier loader.Sym + pclntab loader.Sym + pcheader loader.Sym + funcnametab loader.Sym + findfunctab loader.Sym + cutab loader.Sym + filetab loader.Sym + pctab loader.Sym + + // The number of functions + number of TEXT sections - 1. 
This is such an + // unexpected value because platforms that have more than one TEXT section + // get a dummy function inserted between because the external linker can place + // functions in those areas. We mark those areas as not covered by the Go + // runtime. + // + // On most platforms this is the number of reachable functions. + nfunc int32 + + // The number of filenames in runtime.filetab. + nfiles uint32 +} + +// addGeneratedSym adds a generator symbol to pclntab, returning the new Sym. +// It is the caller's responsibility to save the symbol in state. +func (state *pclntab) addGeneratedSym(ctxt *Link, name string, size int64, f generatorFunc) loader.Sym { + size = Rnd(size, int64(ctxt.Arch.PtrSize)) + state.size += size + s := ctxt.createGeneratorSymbol(name, 0, sym.SPCLNTAB, size, f) + ctxt.loader.SetAttrReachable(s, true) + ctxt.loader.SetCarrierSym(s, state.carrier) + ctxt.loader.SetAttrNotInSymbolTable(s, true) + return s +} + +// makePclntab makes a pclntab object, and assembles all the compilation units +// we'll need to write pclntab. Returns the pclntab structure, a slice of the +// CompilationUnits we need, and a slice of the function symbols we need to +// generate pclntab. +func makePclntab(ctxt *Link, container loader.Bitmap) (*pclntab, []*sym.CompilationUnit, []loader.Sym) { + ldr := ctxt.loader + state := new(pclntab) + + // Gather some basic stats and info. + seenCUs := make(map[*sym.CompilationUnit]struct{}) + compUnits := []*sym.CompilationUnit{} + funcs := []loader.Sym{} + + for _, s := range ctxt.Textp { + if !emitPcln(ctxt, s, container) { + continue + } + funcs = append(funcs, s) + state.nfunc++ + if state.firstFunc == 0 { + state.firstFunc = s + } + state.lastFunc = s + + // We need to keep track of all compilation units we see. Some symbols + // (eg, go.buildid, _cgoexp_, etc) won't have a compilation unit. 
+ cu := ldr.SymUnit(s) + if _, ok := seenCUs[cu]; cu != nil && !ok { + seenCUs[cu] = struct{}{} + cu.PclnIndex = len(compUnits) + compUnits = append(compUnits, cu) + } + } + return state, compUnits, funcs +} + +func emitPcln(ctxt *Link, s loader.Sym, container loader.Bitmap) bool { + if ctxt.Target.IsRISCV64() { + // Avoid adding local symbols to the pcln table - RISC-V + // linking generates a very large number of these, particularly + // for HI20 symbols (which we need to load in order to be able + // to resolve relocations). Unnecessarily including all of + // these symbols quickly blows out the size of the pcln table + // and overflows hash buckets. + symName := ctxt.loader.SymName(s) + if symName == "" || strings.HasPrefix(symName, ".L") { + return false + } + } + + // We want to generate func table entries only for the "lowest + // level" symbols, not containers of subsymbols. + return !container.Has(s) +} + +func computeDeferReturn(ctxt *Link, deferReturnSym, s loader.Sym) uint32 { + ldr := ctxt.loader + target := ctxt.Target + deferreturn := uint32(0) + lastWasmAddr := uint32(0) + + relocs := ldr.Relocs(s) + for ri := 0; ri < relocs.Count(); ri++ { + r := relocs.At(ri) + if target.IsWasm() && r.Type() == objabi.R_ADDR { + // wasm/ssa.go generates an ARESUMEPOINT just + // before the deferreturn call. The "PC" of + // the deferreturn call is stored in the + // R_ADDR relocation on the ARESUMEPOINT. + lastWasmAddr = uint32(r.Add()) + } + if r.Type().IsDirectCall() && (r.Sym() == deferReturnSym || ldr.IsDeferReturnTramp(r.Sym())) { + if target.IsWasm() { + deferreturn = lastWasmAddr - 1 + } else { + // Note: the relocation target is in the call instruction, but + // is not necessarily the whole instruction (for instance, on + // x86 the relocation applies to bytes [1:5] of the 5 byte call + // instruction). 
+ deferreturn = uint32(r.Off()) + switch target.Arch.Family { + case sys.AMD64, sys.I386: + deferreturn-- + case sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64: + // no change + case sys.S390X: + deferreturn -= 2 + default: + panic(fmt.Sprint("Unhandled architecture:", target.Arch.Family)) + } + } + break // only need one + } + } + return deferreturn +} + +// genInlTreeSym generates the InlTree sym for a function with the +// specified FuncInfo. +func genInlTreeSym(ctxt *Link, cu *sym.CompilationUnit, fi loader.FuncInfo, arch *sys.Arch, nameOffsets map[loader.Sym]uint32) loader.Sym { + ldr := ctxt.loader + its := ldr.CreateExtSym("", 0) + inlTreeSym := ldr.MakeSymbolUpdater(its) + // Note: the generated symbol is given a type of sym.SGOFUNC, as a + // signal to the symtab() phase that it needs to be grouped in with + // other similar symbols (gcdata, etc); the dodata() phase will + // eventually switch the type back to SRODATA. + inlTreeSym.SetType(sym.SGOFUNC) + ldr.SetAttrReachable(its, true) + ldr.SetSymAlign(its, 4) // it has 32-bit fields + ninl := fi.NumInlTree() + for i := 0; i < int(ninl); i++ { + call := fi.InlTree(i) + nameOff, ok := nameOffsets[call.Func] + if !ok { + panic("couldn't find function name offset") + } + + inlFunc := ldr.FuncInfo(call.Func) + var funcID abi.FuncID + startLine := int32(0) + if inlFunc.Valid() { + funcID = inlFunc.FuncID() + startLine = inlFunc.StartLine() + } else if !ctxt.linkShared { + // Inlined functions are always Go functions, and thus + // must have FuncInfo. + // + // Unfortunately, with -linkshared, the inlined + // function may be external symbols (from another + // shared library), and we don't load FuncInfo from the + // shared library. We will report potentially incorrect + // FuncID in this case. See https://go.dev/issue/55954. + panic(fmt.Sprintf("inlined function %s missing func info", ldr.SymName(call.Func))) + } + + // Construct runtime.inlinedCall value. 
+ const size = 16 + inlTreeSym.SetUint8(arch, int64(i*size+0), uint8(funcID)) + // Bytes 1-3 are unused. + inlTreeSym.SetUint32(arch, int64(i*size+4), uint32(nameOff)) + inlTreeSym.SetUint32(arch, int64(i*size+8), uint32(call.ParentPC)) + inlTreeSym.SetUint32(arch, int64(i*size+12), uint32(startLine)) + } + return its +} + +// makeInlSyms returns a map of loader.Sym that are created inlSyms. +func makeInlSyms(ctxt *Link, funcs []loader.Sym, nameOffsets map[loader.Sym]uint32) map[loader.Sym]loader.Sym { + ldr := ctxt.loader + // Create the inline symbols we need. + inlSyms := make(map[loader.Sym]loader.Sym) + for _, s := range funcs { + if fi := ldr.FuncInfo(s); fi.Valid() { + fi.Preload() + if fi.NumInlTree() > 0 { + inlSyms[s] = genInlTreeSym(ctxt, ldr.SymUnit(s), fi, ctxt.Arch, nameOffsets) + } + } + } + return inlSyms +} + +// generatePCHeader creates the runtime.pcheader symbol, setting it up as a +// generator to fill in its data later. +func (state *pclntab) generatePCHeader(ctxt *Link) { + ldr := ctxt.loader + textStartOff := int64(8 + 2*ctxt.Arch.PtrSize) + size := int64(8 + 8*ctxt.Arch.PtrSize) + writeHeader := func(ctxt *Link, s loader.Sym) { + header := ctxt.loader.MakeSymbolUpdater(s) + + writeSymOffset := func(off int64, ws loader.Sym) int64 { + diff := ldr.SymValue(ws) - ldr.SymValue(s) + if diff <= 0 { + name := ldr.SymName(ws) + panic(fmt.Sprintf("expected runtime.pcheader(%x) to be placed before %s(%x)", ldr.SymValue(s), name, ldr.SymValue(ws))) + } + return header.SetUintptr(ctxt.Arch, off, uintptr(diff)) + } + + // Write header. + // Keep in sync with runtime/symtab.go:pcHeader and package debug/gosym. 
+ header.SetUint32(ctxt.Arch, 0, 0xfffffff1) + header.SetUint8(ctxt.Arch, 6, uint8(ctxt.Arch.MinLC)) + header.SetUint8(ctxt.Arch, 7, uint8(ctxt.Arch.PtrSize)) + off := header.SetUint(ctxt.Arch, 8, uint64(state.nfunc)) + off = header.SetUint(ctxt.Arch, off, uint64(state.nfiles)) + if off != textStartOff { + panic(fmt.Sprintf("pcHeader textStartOff: %d != %d", off, textStartOff)) + } + off += int64(ctxt.Arch.PtrSize) // skip runtimeText relocation + off = writeSymOffset(off, state.funcnametab) + off = writeSymOffset(off, state.cutab) + off = writeSymOffset(off, state.filetab) + off = writeSymOffset(off, state.pctab) + off = writeSymOffset(off, state.pclntab) + if off != size { + panic(fmt.Sprintf("pcHeader size: %d != %d", off, size)) + } + } + + state.pcheader = state.addGeneratedSym(ctxt, "runtime.pcheader", size, writeHeader) + // Create the runtimeText relocation. + sb := ldr.MakeSymbolUpdater(state.pcheader) + sb.SetAddr(ctxt.Arch, textStartOff, ldr.Lookup("runtime.text", 0)) +} + +// walkFuncs iterates over the funcs, calling a function for each unique +// function and inlined function. +func walkFuncs(ctxt *Link, funcs []loader.Sym, f func(loader.Sym)) { + ldr := ctxt.loader + seen := make(map[loader.Sym]struct{}) + for _, s := range funcs { + if _, ok := seen[s]; !ok { + f(s) + seen[s] = struct{}{} + } + + fi := ldr.FuncInfo(s) + if !fi.Valid() { + continue + } + fi.Preload() + for i, ni := 0, fi.NumInlTree(); i < int(ni); i++ { + call := fi.InlTree(i).Func + if _, ok := seen[call]; !ok { + f(call) + seen[call] = struct{}{} + } + } + } +} + +// generateFuncnametab creates the function name table. Returns a map of +// func symbol to the name offset in runtime.funcnamtab. +func (state *pclntab) generateFuncnametab(ctxt *Link, funcs []loader.Sym) map[loader.Sym]uint32 { + nameOffsets := make(map[loader.Sym]uint32, state.nfunc) + + // Write the null terminated strings. 
+ writeFuncNameTab := func(ctxt *Link, s loader.Sym) { + symtab := ctxt.loader.MakeSymbolUpdater(s) + for s, off := range nameOffsets { + symtab.AddCStringAt(int64(off), ctxt.loader.SymName(s)) + } + } + + // Loop through the CUs, and calculate the size needed. + var size int64 + walkFuncs(ctxt, funcs, func(s loader.Sym) { + nameOffsets[s] = uint32(size) + size += int64(len(ctxt.loader.SymName(s)) + 1) // NULL terminate + }) + + state.funcnametab = state.addGeneratedSym(ctxt, "runtime.funcnametab", size, writeFuncNameTab) + return nameOffsets +} + +// walkFilenames walks funcs, calling a function for each filename used in each +// function's line table. +func walkFilenames(ctxt *Link, funcs []loader.Sym, f func(*sym.CompilationUnit, goobj.CUFileIndex)) { + ldr := ctxt.loader + + // Loop through all functions, finding the filenames we need. + for _, s := range funcs { + fi := ldr.FuncInfo(s) + if !fi.Valid() { + continue + } + fi.Preload() + + cu := ldr.SymUnit(s) + for i, nf := 0, int(fi.NumFile()); i < nf; i++ { + f(cu, fi.File(i)) + } + for i, ninl := 0, int(fi.NumInlTree()); i < ninl; i++ { + call := fi.InlTree(i) + f(cu, call.File) + } + } +} + +// generateFilenameTabs creates LUTs needed for filename lookup. Returns a slice +// of the index at which each CU begins in runtime.cutab. +// +// Function objects keep track of the files they reference to print the stack. +// This function creates a per-CU list of filenames if CU[M] references +// files[1-N], the following is generated: +// +// runtime.cutab: +// CU[M] +// offsetToFilename[0] +// offsetToFilename[1] +// .. +// +// runtime.filetab +// filename[0] +// filename[1] +// +// Looking up a filename then becomes: +// 0. Given a func, and filename index [K] +// 1. Get Func.CUIndex: M := func.cuOffset +// 2. Find filename offset: fileOffset := runtime.cutab[M+K] +// 3. 
Get the filename: getcstring(runtime.filetab[fileOffset]) +func (state *pclntab) generateFilenameTabs(ctxt *Link, compUnits []*sym.CompilationUnit, funcs []loader.Sym) []uint32 { + // On a per-CU basis, keep track of all the filenames we need. + // + // Note, that we store the filenames in a separate section in the object + // files, and deduplicate based on the actual value. It would be better to + // store the filenames as symbols, using content addressable symbols (and + // then not loading extra filenames), and just use the hash value of the + // symbol name to do this cataloging. + // + // TODO: Store filenames as symbols. (Note this would be easiest if you + // also move strings to ALWAYS using the larger content addressable hash + // function, and use that hash value for uniqueness testing.) + cuEntries := make([]goobj.CUFileIndex, len(compUnits)) + fileOffsets := make(map[string]uint32) + + // Walk the filenames. + // We store the total filename string length we need to load, and the max + // file index we've seen per CU so we can calculate how large the + // CU->global table needs to be. + var fileSize int64 + walkFilenames(ctxt, funcs, func(cu *sym.CompilationUnit, i goobj.CUFileIndex) { + // Note we use the raw filename for lookup, but use the expanded filename + // when we save the size. + filename := cu.FileTable[i] + if _, ok := fileOffsets[filename]; !ok { + fileOffsets[filename] = uint32(fileSize) + fileSize += int64(len(expandFile(filename)) + 1) // NULL terminate + } + + // Find the maximum file index we've seen. + if cuEntries[cu.PclnIndex] < i+1 { + cuEntries[cu.PclnIndex] = i + 1 // Store max + 1 + } + }) + + // Calculate the size of the runtime.cutab variable. 
+ var totalEntries uint32 + cuOffsets := make([]uint32, len(cuEntries)) + for i, entries := range cuEntries { + // Note, cutab is a slice of uint32, so an offset to a cu's entry is just the + // running total of all cu indices we've needed to store so far, not the + // number of bytes we've stored so far. + cuOffsets[i] = totalEntries + totalEntries += uint32(entries) + } + + // Write cutab. + writeCutab := func(ctxt *Link, s loader.Sym) { + sb := ctxt.loader.MakeSymbolUpdater(s) + + var off int64 + for i, max := range cuEntries { + // Write the per CU LUT. + cu := compUnits[i] + for j := goobj.CUFileIndex(0); j < max; j++ { + fileOffset, ok := fileOffsets[cu.FileTable[j]] + if !ok { + // We're looping through all possible file indices. It's possible a file's + // been deadcode eliminated, and although it's a valid file in the CU, it's + // not needed in this binary. When that happens, use an invalid offset. + fileOffset = ^uint32(0) + } + off = sb.SetUint32(ctxt.Arch, off, fileOffset) + } + } + } + state.cutab = state.addGeneratedSym(ctxt, "runtime.cutab", int64(totalEntries*4), writeCutab) + + // Write filetab. + writeFiletab := func(ctxt *Link, s loader.Sym) { + sb := ctxt.loader.MakeSymbolUpdater(s) + + // Write the strings. + for filename, loc := range fileOffsets { + sb.AddStringAt(int64(loc), expandFile(filename)) + } + } + state.nfiles = uint32(len(fileOffsets)) + state.filetab = state.addGeneratedSym(ctxt, "runtime.filetab", fileSize, writeFiletab) + + return cuOffsets +} + +// generatePctab creates the runtime.pctab variable, holding all the +// deduplicated pcdata. +func (state *pclntab) generatePctab(ctxt *Link, funcs []loader.Sym) { + ldr := ctxt.loader + + // Pctab offsets of 0 are considered invalid in the runtime. We respect + // that by just padding a single byte at the beginning of runtime.pctab, + // that way no real offsets can be zero. + size := int64(1) + + // Walk the functions, finding offset to store each pcdata. 
+ seen := make(map[loader.Sym]struct{}) + saveOffset := func(pcSym loader.Sym) { + if _, ok := seen[pcSym]; !ok { + datSize := ldr.SymSize(pcSym) + if datSize != 0 { + ldr.SetSymValue(pcSym, size) + } else { + // Invalid PC data, record as zero. + ldr.SetSymValue(pcSym, 0) + } + size += datSize + seen[pcSym] = struct{}{} + } + } + var pcsp, pcline, pcfile, pcinline loader.Sym + var pcdata []loader.Sym + for _, s := range funcs { + fi := ldr.FuncInfo(s) + if !fi.Valid() { + continue + } + fi.Preload() + pcsp, pcfile, pcline, pcinline, pcdata = ldr.PcdataAuxs(s, pcdata) + + pcSyms := []loader.Sym{pcsp, pcfile, pcline} + for _, pcSym := range pcSyms { + saveOffset(pcSym) + } + for _, pcSym := range pcdata { + saveOffset(pcSym) + } + if fi.NumInlTree() > 0 { + saveOffset(pcinline) + } + } + + // TODO: There is no reason we need a generator for this variable, and it + // could be moved to a carrier symbol. However, carrier symbols containing + // carrier symbols don't work yet (as of Aug 2020). Once this is fixed, + // runtime.pctab could just be a carrier sym. + writePctab := func(ctxt *Link, s loader.Sym) { + ldr := ctxt.loader + sb := ldr.MakeSymbolUpdater(s) + for sym := range seen { + sb.SetBytesAt(ldr.SymValue(sym), ldr.Data(sym)) + } + } + + state.pctab = state.addGeneratedSym(ctxt, "runtime.pctab", size, writePctab) +} + +// numPCData returns the number of PCData syms for the FuncInfo. +// NB: Preload must be called on valid FuncInfos before calling this function. +func numPCData(ldr *loader.Loader, s loader.Sym, fi loader.FuncInfo) uint32 { + if !fi.Valid() { + return 0 + } + numPCData := uint32(ldr.NumPcdata(s)) + if fi.NumInlTree() > 0 { + if numPCData < abi.PCDATA_InlTreeIndex+1 { + numPCData = abi.PCDATA_InlTreeIndex + 1 + } + } + return numPCData +} + +// generateFunctab creates the runtime.functab +// +// runtime.functab contains two things: +// +// - pc->func look up table. 
+// - array of func objects, interleaved with pcdata and funcdata +func (state *pclntab) generateFunctab(ctxt *Link, funcs []loader.Sym, inlSyms map[loader.Sym]loader.Sym, cuOffsets []uint32, nameOffsets map[loader.Sym]uint32) { + // Calculate the size of the table. + size, startLocations := state.calculateFunctabSize(ctxt, funcs) + writePcln := func(ctxt *Link, s loader.Sym) { + ldr := ctxt.loader + sb := ldr.MakeSymbolUpdater(s) + // Write the data. + writePCToFunc(ctxt, sb, funcs, startLocations) + writeFuncs(ctxt, sb, funcs, inlSyms, startLocations, cuOffsets, nameOffsets) + } + state.pclntab = state.addGeneratedSym(ctxt, "runtime.functab", size, writePcln) +} + +// funcData returns the funcdata and offsets for the FuncInfo. +// The funcdata are written into runtime.functab after each func +// object. This is a helper function to make querying the FuncInfo object +// cleaner. +// +// NB: Preload must be called on the FuncInfo before calling. +// NB: fdSyms is used as scratch space. +func funcData(ldr *loader.Loader, s loader.Sym, fi loader.FuncInfo, inlSym loader.Sym, fdSyms []loader.Sym) []loader.Sym { + fdSyms = fdSyms[:0] + if fi.Valid() { + fdSyms = ldr.Funcdata(s, fdSyms) + if fi.NumInlTree() > 0 { + if len(fdSyms) < abi.FUNCDATA_InlTree+1 { + fdSyms = append(fdSyms, make([]loader.Sym, abi.FUNCDATA_InlTree+1-len(fdSyms))...) + } + fdSyms[abi.FUNCDATA_InlTree] = inlSym + } + } + return fdSyms +} + +// calculateFunctabSize calculates the size of the pclntab, and the offsets in +// the output buffer for individual func entries. +func (state pclntab) calculateFunctabSize(ctxt *Link, funcs []loader.Sym) (int64, []uint32) { + ldr := ctxt.loader + startLocations := make([]uint32, len(funcs)) + + // Allocate space for the pc->func table. This structure consists of a pc offset + // and an offset to the func structure. After that, we have a single pc + // value that marks the end of the last function in the binary. 
+ size := int64(int(state.nfunc)*2*4 + 4) + + // Now find the space for the func objects. We do this in a running manner, + // so that we can find individual starting locations. + for i, s := range funcs { + size = Rnd(size, int64(ctxt.Arch.PtrSize)) + startLocations[i] = uint32(size) + fi := ldr.FuncInfo(s) + size += funcSize + if fi.Valid() { + fi.Preload() + numFuncData := ldr.NumFuncdata(s) + if fi.NumInlTree() > 0 { + if numFuncData < abi.FUNCDATA_InlTree+1 { + numFuncData = abi.FUNCDATA_InlTree + 1 + } + } + size += int64(numPCData(ldr, s, fi) * 4) + size += int64(numFuncData * 4) + } + } + + return size, startLocations +} + +// writePCToFunc writes the PC->func lookup table. +func writePCToFunc(ctxt *Link, sb *loader.SymbolBuilder, funcs []loader.Sym, startLocations []uint32) { + ldr := ctxt.loader + textStart := ldr.SymValue(ldr.Lookup("runtime.text", 0)) + pcOff := func(s loader.Sym) uint32 { + off := ldr.SymValue(s) - textStart + if off < 0 { + panic(fmt.Sprintf("expected func %s(%x) to be placed at or after textStart (%x)", ldr.SymName(s), ldr.SymValue(s), textStart)) + } + return uint32(off) + } + for i, s := range funcs { + sb.SetUint32(ctxt.Arch, int64(i*2*4), pcOff(s)) + sb.SetUint32(ctxt.Arch, int64((i*2+1)*4), startLocations[i]) + } + + // Final entry of table is just end pc offset. + lastFunc := funcs[len(funcs)-1] + sb.SetUint32(ctxt.Arch, int64(len(funcs))*2*4, pcOff(lastFunc)+uint32(ldr.SymSize(lastFunc))) +} + +// writeFuncs writes the func structures and pcdata to runtime.functab. 
+func writeFuncs(ctxt *Link, sb *loader.SymbolBuilder, funcs []loader.Sym, inlSyms map[loader.Sym]loader.Sym, startLocations, cuOffsets []uint32, nameOffsets map[loader.Sym]uint32) { + ldr := ctxt.loader + deferReturnSym := ldr.Lookup("runtime.deferreturn", abiInternalVer) + gofunc := ldr.Lookup("go:func.*", 0) + gofuncBase := ldr.SymValue(gofunc) + textStart := ldr.SymValue(ldr.Lookup("runtime.text", 0)) + funcdata := []loader.Sym{} + var pcsp, pcfile, pcline, pcinline loader.Sym + var pcdata []loader.Sym + + // Write the individual func objects. + for i, s := range funcs { + startLine := int32(0) + fi := ldr.FuncInfo(s) + if fi.Valid() { + fi.Preload() + pcsp, pcfile, pcline, pcinline, pcdata = ldr.PcdataAuxs(s, pcdata) + startLine = fi.StartLine() + } + + off := int64(startLocations[i]) + // entryOff uint32 (offset of func entry PC from textStart) + entryOff := ldr.SymValue(s) - textStart + if entryOff < 0 { + panic(fmt.Sprintf("expected func %s(%x) to be placed before or at textStart (%x)", ldr.SymName(s), ldr.SymValue(s), textStart)) + } + off = sb.SetUint32(ctxt.Arch, off, uint32(entryOff)) + + // nameOff int32 + nameOff, ok := nameOffsets[s] + if !ok { + panic("couldn't find function name offset") + } + off = sb.SetUint32(ctxt.Arch, off, uint32(nameOff)) + + // args int32 + // TODO: Move into funcinfo. + args := uint32(0) + if fi.Valid() { + args = uint32(fi.Args()) + } + off = sb.SetUint32(ctxt.Arch, off, args) + + // deferreturn + deferreturn := computeDeferReturn(ctxt, deferReturnSym, s) + off = sb.SetUint32(ctxt.Arch, off, deferreturn) + + // pcdata + if fi.Valid() { + off = sb.SetUint32(ctxt.Arch, off, uint32(ldr.SymValue(pcsp))) + off = sb.SetUint32(ctxt.Arch, off, uint32(ldr.SymValue(pcfile))) + off = sb.SetUint32(ctxt.Arch, off, uint32(ldr.SymValue(pcline))) + } else { + off += 12 + } + off = sb.SetUint32(ctxt.Arch, off, uint32(numPCData(ldr, s, fi))) + + // Store the offset to compilation unit's file table. 
+ cuIdx := ^uint32(0) + if cu := ldr.SymUnit(s); cu != nil { + cuIdx = cuOffsets[cu.PclnIndex] + } + off = sb.SetUint32(ctxt.Arch, off, cuIdx) + + // startLine int32 + off = sb.SetUint32(ctxt.Arch, off, uint32(startLine)) + + // funcID uint8 + var funcID abi.FuncID + if fi.Valid() { + funcID = fi.FuncID() + } + off = sb.SetUint8(ctxt.Arch, off, uint8(funcID)) + + // flag uint8 + var flag abi.FuncFlag + if fi.Valid() { + flag = fi.FuncFlag() + } + off = sb.SetUint8(ctxt.Arch, off, uint8(flag)) + + off += 1 // pad + + // nfuncdata must be the final entry. + funcdata = funcData(ldr, s, fi, 0, funcdata) + off = sb.SetUint8(ctxt.Arch, off, uint8(len(funcdata))) + + // Output the pcdata. + if fi.Valid() { + for j, pcSym := range pcdata { + sb.SetUint32(ctxt.Arch, off+int64(j*4), uint32(ldr.SymValue(pcSym))) + } + if fi.NumInlTree() > 0 { + sb.SetUint32(ctxt.Arch, off+abi.PCDATA_InlTreeIndex*4, uint32(ldr.SymValue(pcinline))) + } + } + + // Write funcdata refs as offsets from go:func.* and go:funcrel.*. + funcdata = funcData(ldr, s, fi, inlSyms[s], funcdata) + // Missing funcdata will be ^0. See runtime/symtab.go:funcdata. + off = int64(startLocations[i] + funcSize + numPCData(ldr, s, fi)*4) + for j := range funcdata { + dataoff := off + int64(4*j) + fdsym := funcdata[j] + + // cmd/internal/obj optimistically populates ArgsPointerMaps and + // ArgInfo for assembly functions, hoping that the compiler will + // emit appropriate symbols from their Go stub declarations. If + // it didn't though, just ignore it. + // + // TODO(cherryyz): Fix arg map generation (see discussion on CL 523335). 
+ if fdsym != 0 && (j == abi.FUNCDATA_ArgsPointerMaps || j == abi.FUNCDATA_ArgInfo) && ldr.IsFromAssembly(s) && ldr.Data(fdsym) == nil { + fdsym = 0 + } + + if fdsym == 0 { + sb.SetUint32(ctxt.Arch, dataoff, ^uint32(0)) // ^0 is a sentinel for "no value" + continue + } + + if outer := ldr.OuterSym(fdsym); outer != gofunc { + panic(fmt.Sprintf("bad carrier sym for symbol %s (funcdata %s#%d), want go:func.* got %s", ldr.SymName(fdsym), ldr.SymName(s), j, ldr.SymName(outer))) + } + sb.SetUint32(ctxt.Arch, dataoff, uint32(ldr.SymValue(fdsym)-gofuncBase)) + } + } +} + +// pclntab initializes the pclntab symbol with +// runtime function and file name information. + +// pclntab generates the pcln table for the link output. +func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { + // Go 1.2's symtab layout is documented in golang.org/s/go12symtab, but the + // layout and data has changed since that time. + // + // As of August 2020, here's the layout of pclntab: + // + // .gopclntab/__gopclntab [elf/macho section] + // runtime.pclntab + // Carrier symbol for the entire pclntab section. + // + // runtime.pcheader (see: runtime/symtab.go:pcHeader) + // 8-byte magic + // nfunc [thearch.ptrsize bytes] + // offset to runtime.funcnametab from the beginning of runtime.pcheader + // offset to runtime.pclntab_old from beginning of runtime.pcheader + // + // runtime.funcnametab + // []list of null terminated function names + // + // runtime.cutab + // for i=0..#CUs + // for j=0..#max used file index in CU[i] + // uint32 offset into runtime.filetab for the filename[j] + // + // runtime.filetab + // []null terminated filename strings + // + // runtime.pctab + // []byte of deduplicated pc data. + // + // runtime.functab + // function table, alternating PC and offset to func struct [each entry thearch.ptrsize bytes] + // end PC [thearch.ptrsize bytes] + // func structures, pcdata offsets, func data. 
+ + state, compUnits, funcs := makePclntab(ctxt, container) + + ldr := ctxt.loader + state.carrier = ldr.LookupOrCreateSym("runtime.pclntab", 0) + ldr.MakeSymbolUpdater(state.carrier).SetType(sym.SPCLNTAB) + ldr.SetAttrReachable(state.carrier, true) + setCarrierSym(sym.SPCLNTAB, state.carrier) + + state.generatePCHeader(ctxt) + nameOffsets := state.generateFuncnametab(ctxt, funcs) + cuOffsets := state.generateFilenameTabs(ctxt, compUnits, funcs) + state.generatePctab(ctxt, funcs) + inlSyms := makeInlSyms(ctxt, funcs, nameOffsets) + state.generateFunctab(ctxt, funcs, inlSyms, cuOffsets, nameOffsets) + + return state +} + +func gorootFinal() string { + root := buildcfg.GOROOT + if final := os.Getenv("GOROOT_FINAL"); final != "" { + root = final + } + return root +} + +func expandGoroot(s string) string { + const n = len("$GOROOT") + if len(s) >= n+1 && s[:n] == "$GOROOT" && (s[n] == '/' || s[n] == '\\') { + if final := gorootFinal(); final != "" { + return filepath.ToSlash(filepath.Join(final, s[n:])) + } + } + return s +} + +const ( + BUCKETSIZE = 256 * MINFUNC + SUBBUCKETS = 16 + SUBBUCKETSIZE = BUCKETSIZE / SUBBUCKETS + NOIDX = 0x7fffffff +) + +// findfunctab generates a lookup table to quickly find the containing +// function for a pc. See src/runtime/symtab.go:findfunc for details. +func (ctxt *Link) findfunctab(state *pclntab, container loader.Bitmap) { + ldr := ctxt.loader + + // find min and max address + min := ldr.SymValue(ctxt.Textp[0]) + lastp := ctxt.Textp[len(ctxt.Textp)-1] + max := ldr.SymValue(lastp) + ldr.SymSize(lastp) + + // for each subbucket, compute the minimum of all symbol indexes + // that map to that subbucket. 
+ n := int32((max - min + SUBBUCKETSIZE - 1) / SUBBUCKETSIZE) + + nbuckets := int32((max - min + BUCKETSIZE - 1) / BUCKETSIZE) + + size := 4*int64(nbuckets) + int64(n) + + writeFindFuncTab := func(_ *Link, s loader.Sym) { + t := ldr.MakeSymbolUpdater(s) + + indexes := make([]int32, n) + for i := int32(0); i < n; i++ { + indexes[i] = NOIDX + } + idx := int32(0) + for i, s := range ctxt.Textp { + if !emitPcln(ctxt, s, container) { + continue + } + p := ldr.SymValue(s) + var e loader.Sym + i++ + if i < len(ctxt.Textp) { + e = ctxt.Textp[i] + } + for e != 0 && !emitPcln(ctxt, e, container) && i < len(ctxt.Textp) { + e = ctxt.Textp[i] + i++ + } + q := max + if e != 0 { + q = ldr.SymValue(e) + } + + //print("%d: [%lld %lld] %s\n", idx, p, q, s->name); + for ; p < q; p += SUBBUCKETSIZE { + i = int((p - min) / SUBBUCKETSIZE) + if indexes[i] > idx { + indexes[i] = idx + } + } + + i = int((q - 1 - min) / SUBBUCKETSIZE) + if indexes[i] > idx { + indexes[i] = idx + } + idx++ + } + + // fill in table + for i := int32(0); i < nbuckets; i++ { + base := indexes[i*SUBBUCKETS] + if base == NOIDX { + Errorf(nil, "hole in findfunctab") + } + t.SetUint32(ctxt.Arch, int64(i)*(4+SUBBUCKETS), uint32(base)) + for j := int32(0); j < SUBBUCKETS && i*SUBBUCKETS+j < n; j++ { + idx = indexes[i*SUBBUCKETS+j] + if idx == NOIDX { + Errorf(nil, "hole in findfunctab") + } + if idx-base >= 256 { + Errorf(nil, "too many functions in a findfunc bucket! %d/%d %d %d", i, nbuckets, j, idx-base) + } + + t.SetUint8(ctxt.Arch, int64(i)*(4+SUBBUCKETS)+4+int64(j), uint8(idx-base)) + } + } + } + + state.findfunctab = ctxt.createGeneratorSymbol("runtime.findfunctab", 0, sym.SRODATA, size, writeFindFuncTab) + ldr.SetAttrReachable(state.findfunctab, true) + ldr.SetAttrLocal(state.findfunctab, true) +} + +// findContainerSyms returns a bitmap, indexed by symbol number, where there's +// a 1 for every container symbol. 
+func (ctxt *Link) findContainerSyms() loader.Bitmap { + ldr := ctxt.loader + container := loader.MakeBitmap(ldr.NSym()) + // Find container symbols and mark them as such. + for _, s := range ctxt.Textp { + outer := ldr.OuterSym(s) + if outer != 0 { + container.Set(outer) + } + } + return container +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/pe.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/pe.go new file mode 100644 index 0000000000000000000000000000000000000000..8cfecafe84ab6c576a3cdc9470f0fabfcdbdf5b7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/pe.go @@ -0,0 +1,1752 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// PE (Portable Executable) file writing +// https://docs.microsoft.com/en-us/windows/win32/debug/pe-format + +package ld + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/pe" + "encoding/binary" + "fmt" + "internal/buildcfg" + "sort" + "strconv" + "strings" +) + +type IMAGE_IMPORT_DESCRIPTOR struct { + OriginalFirstThunk uint32 + TimeDateStamp uint32 + ForwarderChain uint32 + Name uint32 + FirstThunk uint32 +} + +type IMAGE_EXPORT_DIRECTORY struct { + Characteristics uint32 + TimeDateStamp uint32 + MajorVersion uint16 + MinorVersion uint16 + Name uint32 + Base uint32 + NumberOfFunctions uint32 + NumberOfNames uint32 + AddressOfFunctions uint32 + AddressOfNames uint32 + AddressOfNameOrdinals uint32 +} + +var ( + // PEBASE is the base address for the executable. + // It is small for 32-bit and large for 64-bit. + PEBASE int64 + + // SectionAlignment must be greater than or equal to FileAlignment. + // The default is the page size for the architecture. + PESECTALIGN int64 = 0x1000 + + // FileAlignment should be a power of 2 between 512 and 64 K, inclusive. + // The default is 512. 
If the SectionAlignment is less than + // the architecture's page size, then FileAlignment must match SectionAlignment. + PEFILEALIGN int64 = 2 << 8 +) + +const ( + IMAGE_SCN_CNT_CODE = 0x00000020 + IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040 + IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080 + IMAGE_SCN_LNK_OTHER = 0x00000100 + IMAGE_SCN_LNK_INFO = 0x00000200 + IMAGE_SCN_LNK_REMOVE = 0x00000800 + IMAGE_SCN_LNK_COMDAT = 0x00001000 + IMAGE_SCN_GPREL = 0x00008000 + IMAGE_SCN_MEM_PURGEABLE = 0x00020000 + IMAGE_SCN_MEM_16BIT = 0x00020000 + IMAGE_SCN_MEM_LOCKED = 0x00040000 + IMAGE_SCN_MEM_PRELOAD = 0x00080000 + IMAGE_SCN_ALIGN_1BYTES = 0x00100000 + IMAGE_SCN_ALIGN_2BYTES = 0x00200000 + IMAGE_SCN_ALIGN_4BYTES = 0x00300000 + IMAGE_SCN_ALIGN_8BYTES = 0x00400000 + IMAGE_SCN_ALIGN_16BYTES = 0x00500000 + IMAGE_SCN_ALIGN_32BYTES = 0x00600000 + IMAGE_SCN_ALIGN_64BYTES = 0x00700000 + IMAGE_SCN_ALIGN_128BYTES = 0x00800000 + IMAGE_SCN_ALIGN_256BYTES = 0x00900000 + IMAGE_SCN_ALIGN_512BYTES = 0x00A00000 + IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000 + IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000 + IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000 + IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000 + IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000 + IMAGE_SCN_MEM_DISCARDABLE = 0x02000000 + IMAGE_SCN_MEM_NOT_CACHED = 0x04000000 + IMAGE_SCN_MEM_NOT_PAGED = 0x08000000 + IMAGE_SCN_MEM_SHARED = 0x10000000 + IMAGE_SCN_MEM_EXECUTE = 0x20000000 + IMAGE_SCN_MEM_READ = 0x40000000 + IMAGE_SCN_MEM_WRITE = 0x80000000 +) + +// See https://docs.microsoft.com/en-us/windows/win32/debug/pe-format. +// TODO(crawshaw): add these constants to debug/pe. 
+const ( + IMAGE_SYM_TYPE_NULL = 0 + IMAGE_SYM_TYPE_STRUCT = 8 + IMAGE_SYM_DTYPE_FUNCTION = 2 + IMAGE_SYM_DTYPE_ARRAY = 3 + IMAGE_SYM_CLASS_EXTERNAL = 2 + IMAGE_SYM_CLASS_STATIC = 3 + + IMAGE_REL_I386_DIR32 = 0x0006 + IMAGE_REL_I386_DIR32NB = 0x0007 + IMAGE_REL_I386_SECREL = 0x000B + IMAGE_REL_I386_REL32 = 0x0014 + + IMAGE_REL_AMD64_ADDR64 = 0x0001 + IMAGE_REL_AMD64_ADDR32 = 0x0002 + IMAGE_REL_AMD64_ADDR32NB = 0x0003 + IMAGE_REL_AMD64_REL32 = 0x0004 + IMAGE_REL_AMD64_SECREL = 0x000B + + IMAGE_REL_ARM_ABSOLUTE = 0x0000 + IMAGE_REL_ARM_ADDR32 = 0x0001 + IMAGE_REL_ARM_ADDR32NB = 0x0002 + IMAGE_REL_ARM_BRANCH24 = 0x0003 + IMAGE_REL_ARM_BRANCH11 = 0x0004 + IMAGE_REL_ARM_SECREL = 0x000F + + IMAGE_REL_ARM64_ABSOLUTE = 0x0000 + IMAGE_REL_ARM64_ADDR32 = 0x0001 + IMAGE_REL_ARM64_ADDR32NB = 0x0002 + IMAGE_REL_ARM64_BRANCH26 = 0x0003 + IMAGE_REL_ARM64_PAGEBASE_REL21 = 0x0004 + IMAGE_REL_ARM64_REL21 = 0x0005 + IMAGE_REL_ARM64_PAGEOFFSET_12A = 0x0006 + IMAGE_REL_ARM64_PAGEOFFSET_12L = 0x0007 + IMAGE_REL_ARM64_SECREL = 0x0008 + IMAGE_REL_ARM64_SECREL_LOW12A = 0x0009 + IMAGE_REL_ARM64_SECREL_HIGH12A = 0x000A + IMAGE_REL_ARM64_SECREL_LOW12L = 0x000B + IMAGE_REL_ARM64_TOKEN = 0x000C + IMAGE_REL_ARM64_SECTION = 0x000D + IMAGE_REL_ARM64_ADDR64 = 0x000E + IMAGE_REL_ARM64_BRANCH19 = 0x000F + IMAGE_REL_ARM64_BRANCH14 = 0x0010 + IMAGE_REL_ARM64_REL32 = 0x0011 + + IMAGE_REL_BASED_HIGHLOW = 3 + IMAGE_REL_BASED_DIR64 = 10 +) + +const ( + PeMinimumTargetMajorVersion = 6 + PeMinimumTargetMinorVersion = 1 +) + +// DOS stub that prints out +// "This program cannot be run in DOS mode." +// See IMAGE_DOS_HEADER in the Windows SDK for the format of the header used here. 
+var dosstub = []uint8{ + 0x4d, + 0x5a, + 0x90, + 0x00, + 0x03, + 0x00, + 0x00, + 0x00, + 0x04, + 0x00, + 0x00, + 0x00, + 0xff, + 0xff, + 0x00, + 0x00, + 0x8b, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x40, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x80, + 0x00, + 0x00, + 0x00, + 0x0e, + 0x1f, + 0xba, + 0x0e, + 0x00, + 0xb4, + 0x09, + 0xcd, + 0x21, + 0xb8, + 0x01, + 0x4c, + 0xcd, + 0x21, + 0x54, + 0x68, + 0x69, + 0x73, + 0x20, + 0x70, + 0x72, + 0x6f, + 0x67, + 0x72, + 0x61, + 0x6d, + 0x20, + 0x63, + 0x61, + 0x6e, + 0x6e, + 0x6f, + 0x74, + 0x20, + 0x62, + 0x65, + 0x20, + 0x72, + 0x75, + 0x6e, + 0x20, + 0x69, + 0x6e, + 0x20, + 0x44, + 0x4f, + 0x53, + 0x20, + 0x6d, + 0x6f, + 0x64, + 0x65, + 0x2e, + 0x0d, + 0x0d, + 0x0a, + 0x24, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, +} + +type Imp struct { + s loader.Sym + off uint64 + next *Imp + argsize int +} + +type Dll struct { + name string + nameoff uint64 + thunkoff uint64 + ms *Imp + next *Dll +} + +var ( + rsrcsyms []loader.Sym + PESECTHEADR int32 + PEFILEHEADR int32 + pe64 int + dr *Dll + + dexport = make([]loader.Sym, 0, 1024) +) + +// peStringTable is a COFF string table. +type peStringTable struct { + strings []string + stringsLen int +} + +// size returns size of string table t. +func (t *peStringTable) size() int { + // string table starts with 4-byte length at the beginning + return t.stringsLen + 4 +} + +// add adds string str to string table t. +func (t *peStringTable) add(str string) int { + off := t.size() + t.strings = append(t.strings, str) + t.stringsLen += len(str) + 1 // each string will have 0 appended to it + return off +} + +// write writes string table t into the output file. 
+func (t *peStringTable) write(out *OutBuf) { + out.Write32(uint32(t.size())) + for _, s := range t.strings { + out.WriteString(s) + out.Write8(0) + } +} + +// peSection represents section from COFF section table. +type peSection struct { + name string + shortName string + index int // one-based index into the Section Table + virtualSize uint32 + virtualAddress uint32 + sizeOfRawData uint32 + pointerToRawData uint32 + pointerToRelocations uint32 + numberOfRelocations uint16 + characteristics uint32 +} + +// checkOffset verifies COFF section sect offset in the file. +func (sect *peSection) checkOffset(off int64) { + if off != int64(sect.pointerToRawData) { + Errorf(nil, "%s.PointerToRawData = %#x, want %#x", sect.name, uint64(int64(sect.pointerToRawData)), uint64(off)) + errorexit() + } +} + +// checkSegment verifies COFF section sect matches address +// and file offset provided in segment seg. +func (sect *peSection) checkSegment(seg *sym.Segment) { + if seg.Vaddr-uint64(PEBASE) != uint64(sect.virtualAddress) { + Errorf(nil, "%s.VirtualAddress = %#x, want %#x", sect.name, uint64(int64(sect.virtualAddress)), uint64(int64(seg.Vaddr-uint64(PEBASE)))) + errorexit() + } + if seg.Fileoff != uint64(sect.pointerToRawData) { + Errorf(nil, "%s.PointerToRawData = %#x, want %#x", sect.name, uint64(int64(sect.pointerToRawData)), uint64(int64(seg.Fileoff))) + errorexit() + } +} + +// pad adds zeros to the section sect. It writes as many bytes +// as necessary to make section sect.SizeOfRawData bytes long. +// It assumes that n bytes are already written to the file. +func (sect *peSection) pad(out *OutBuf, n uint32) { + out.WriteStringN("", int(sect.sizeOfRawData-n)) +} + +// write writes COFF section sect into the output file. 
+func (sect *peSection) write(out *OutBuf, linkmode LinkMode) error { + h := pe.SectionHeader32{ + VirtualSize: sect.virtualSize, + SizeOfRawData: sect.sizeOfRawData, + PointerToRawData: sect.pointerToRawData, + PointerToRelocations: sect.pointerToRelocations, + NumberOfRelocations: sect.numberOfRelocations, + Characteristics: sect.characteristics, + } + if linkmode != LinkExternal { + h.VirtualAddress = sect.virtualAddress + } + copy(h.Name[:], sect.shortName) + return binary.Write(out, binary.LittleEndian, h) +} + +// emitRelocations emits the relocation entries for the sect. +// The actual relocations are emitted by relocfn. +// This updates the corresponding PE section table entry +// with the relocation offset and count. +func (sect *peSection) emitRelocations(out *OutBuf, relocfn func() int) { + sect.pointerToRelocations = uint32(out.Offset()) + // first entry: extended relocs + out.Write32(0) // placeholder for number of relocation + 1 + out.Write32(0) + out.Write16(0) + + n := relocfn() + 1 + + cpos := out.Offset() + out.SeekSet(int64(sect.pointerToRelocations)) + out.Write32(uint32(n)) + out.SeekSet(cpos) + if n > 0x10000 { + n = 0x10000 + sect.characteristics |= IMAGE_SCN_LNK_NRELOC_OVFL + } else { + sect.pointerToRelocations += 10 // skip the extend reloc entry + } + sect.numberOfRelocations = uint16(n - 1) +} + +// peFile is used to build COFF file. +type peFile struct { + sections []*peSection + stringTable peStringTable + textSect *peSection + rdataSect *peSection + dataSect *peSection + bssSect *peSection + ctorsSect *peSection + pdataSect *peSection + xdataSect *peSection + nextSectOffset uint32 + nextFileOffset uint32 + symtabOffset int64 // offset to the start of symbol table + symbolCount int // number of symbol table records written + dataDirectory [16]pe.DataDirectory +} + +// addSection adds section to the COFF file f. 
+func (f *peFile) addSection(name string, sectsize int, filesize int) *peSection { + sect := &peSection{ + name: name, + shortName: name, + index: len(f.sections) + 1, + virtualAddress: f.nextSectOffset, + pointerToRawData: f.nextFileOffset, + } + f.nextSectOffset = uint32(Rnd(int64(f.nextSectOffset)+int64(sectsize), PESECTALIGN)) + if filesize > 0 { + sect.virtualSize = uint32(sectsize) + sect.sizeOfRawData = uint32(Rnd(int64(filesize), PEFILEALIGN)) + f.nextFileOffset += sect.sizeOfRawData + } else { + sect.sizeOfRawData = uint32(sectsize) + } + f.sections = append(f.sections, sect) + return sect +} + +// addDWARFSection adds DWARF section to the COFF file f. +// This function is similar to addSection, but DWARF section names are +// longer than 8 characters, so they need to be stored in the string table. +func (f *peFile) addDWARFSection(name string, size int) *peSection { + if size == 0 { + Exitf("DWARF section %q is empty", name) + } + // DWARF section names are longer than 8 characters. + // PE format requires such names to be stored in string table, + // and section names replaced with slash (/) followed by + // correspondent string table index. + // see http://www.microsoft.com/whdc/system/platform/firmware/PECOFFdwn.mspx + // for details + off := f.stringTable.add(name) + h := f.addSection(name, size, size) + h.shortName = fmt.Sprintf("/%d", off) + h.characteristics = IMAGE_SCN_ALIGN_1BYTES | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE | IMAGE_SCN_CNT_INITIALIZED_DATA + return h +} + +// addDWARF adds DWARF information to the COFF file f. 
+func (f *peFile) addDWARF() { + if *FlagS { // disable symbol table + return + } + if *FlagW { // disable dwarf + return + } + for _, sect := range Segdwarf.Sections { + h := f.addDWARFSection(sect.Name, int(sect.Length)) + fileoff := sect.Vaddr - Segdwarf.Vaddr + Segdwarf.Fileoff + if uint64(h.pointerToRawData) != fileoff { + Exitf("%s.PointerToRawData = %#x, want %#x", sect.Name, h.pointerToRawData, fileoff) + } + } +} + +// addSEH adds SEH information to the COFF file f. +func (f *peFile) addSEH(ctxt *Link) { + // .pdata section can exist without the .xdata section. + // .xdata section depends on the .pdata section. + if Segpdata.Length == 0 { + return + } + d := pefile.addSection(".pdata", int(Segpdata.Length), int(Segpdata.Length)) + d.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ + if ctxt.LinkMode == LinkExternal { + // Some gcc versions don't honor the default alignment for the .pdata section. + d.characteristics |= IMAGE_SCN_ALIGN_4BYTES + } + pefile.pdataSect = d + d.checkSegment(&Segpdata) + pefile.dataDirectory[pe.IMAGE_DIRECTORY_ENTRY_EXCEPTION].VirtualAddress = d.virtualAddress + pefile.dataDirectory[pe.IMAGE_DIRECTORY_ENTRY_EXCEPTION].Size = d.virtualSize + + if Segxdata.Length > 0 { + d = pefile.addSection(".xdata", int(Segxdata.Length), int(Segxdata.Length)) + d.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ + if ctxt.LinkMode == LinkExternal { + // Some gcc versions don't honor the default alignment for the .xdata section. + d.characteristics |= IMAGE_SCN_ALIGN_4BYTES + } + pefile.xdataSect = d + d.checkSegment(&Segxdata) + } +} + +// addInitArray adds .ctors COFF section to the file f. +func (f *peFile) addInitArray(ctxt *Link) *peSection { + // The size below was determined by the specification for array relocations, + // and by observing what GCC writes here. If the initarray section grows to + // contain more than one constructor entry, the size will need to be 8 * constructor_count. 
+ // However, the entire Go runtime is initialized from just one function, so it is unlikely + // that this will need to grow in the future. + var size int + var alignment uint32 + switch buildcfg.GOARCH { + default: + Exitf("peFile.addInitArray: unsupported GOARCH=%q\n", buildcfg.GOARCH) + case "386", "arm": + size = 4 + alignment = IMAGE_SCN_ALIGN_4BYTES + case "amd64", "arm64": + size = 8 + alignment = IMAGE_SCN_ALIGN_8BYTES + } + sect := f.addSection(".ctors", size, size) + sect.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | alignment + sect.sizeOfRawData = uint32(size) + ctxt.Out.SeekSet(int64(sect.pointerToRawData)) + sect.checkOffset(ctxt.Out.Offset()) + + init_entry := ctxt.loader.Lookup(*flagEntrySymbol, 0) + addr := uint64(ctxt.loader.SymValue(init_entry)) - ctxt.loader.SymSect(init_entry).Vaddr + switch buildcfg.GOARCH { + case "386", "arm": + ctxt.Out.Write32(uint32(addr)) + case "amd64", "arm64": + ctxt.Out.Write64(addr) + } + return sect +} + +// emitRelocations emits relocation entries for go.o in external linking. +func (f *peFile) emitRelocations(ctxt *Link) { + for ctxt.Out.Offset()&7 != 0 { + ctxt.Out.Write8(0) + } + + ldr := ctxt.loader + + // relocsect relocates symbols from first in section sect, and returns + // the total number of relocations emitted. + relocsect := func(sect *sym.Section, syms []loader.Sym, base uint64) int { + // If main section has no bits, nothing to relocate. + if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen { + return 0 + } + sect.Reloff = uint64(ctxt.Out.Offset()) + for i, s := range syms { + if !ldr.AttrReachable(s) { + continue + } + if uint64(ldr.SymValue(s)) >= sect.Vaddr { + syms = syms[i:] + break + } + } + eaddr := int64(sect.Vaddr + sect.Length) + for _, s := range syms { + if !ldr.AttrReachable(s) { + continue + } + if ldr.SymValue(s) >= eaddr { + break + } + // Compute external relocations on the go, and pass to PEreloc1 + // to stream out. 
+ relocs := ldr.Relocs(s) + for ri := 0; ri < relocs.Count(); ri++ { + r := relocs.At(ri) + rr, ok := extreloc(ctxt, ldr, s, r) + if !ok { + continue + } + if rr.Xsym == 0 { + ctxt.Errorf(s, "missing xsym in relocation") + continue + } + if ldr.SymDynid(rr.Xsym) < 0 { + ctxt.Errorf(s, "reloc %d to non-coff symbol %s (outer=%s) %d", r.Type(), ldr.SymName(r.Sym()), ldr.SymName(rr.Xsym), ldr.SymType(r.Sym())) + } + if !thearch.PEreloc1(ctxt.Arch, ctxt.Out, ldr, s, rr, int64(uint64(ldr.SymValue(s)+int64(r.Off()))-base)) { + ctxt.Errorf(s, "unsupported obj reloc %v/%d to %s", r.Type(), r.Siz(), ldr.SymName(r.Sym())) + } + } + } + sect.Rellen = uint64(ctxt.Out.Offset()) - sect.Reloff + const relocLen = 4 + 4 + 2 + return int(sect.Rellen / relocLen) + } + + type relsect struct { + peSect *peSection + seg *sym.Segment + syms []loader.Sym + } + sects := []relsect{ + {f.textSect, &Segtext, ctxt.Textp}, + {f.rdataSect, &Segrodata, ctxt.datap}, + {f.dataSect, &Segdata, ctxt.datap}, + } + if len(sehp.pdata) != 0 { + sects = append(sects, relsect{f.pdataSect, &Segpdata, sehp.pdata}) + } + if len(sehp.xdata) != 0 { + sects = append(sects, relsect{f.xdataSect, &Segxdata, sehp.xdata}) + } + for _, s := range sects { + s.peSect.emitRelocations(ctxt.Out, func() int { + var n int + for _, sect := range s.seg.Sections { + n += relocsect(sect, s.syms, s.seg.Vaddr) + } + return n + }) + } + +dwarfLoop: + for i := 0; i < len(Segdwarf.Sections); i++ { + sect := Segdwarf.Sections[i] + si := dwarfp[i] + if si.secSym() != loader.Sym(sect.Sym) || + ldr.SymSect(si.secSym()) != sect { + panic("inconsistency between dwarfp and Segdwarf") + } + for _, pesect := range f.sections { + if sect.Name == pesect.name { + pesect.emitRelocations(ctxt.Out, func() int { + return relocsect(sect, si.syms, sect.Vaddr) + }) + continue dwarfLoop + } + } + Errorf(nil, "emitRelocations: could not find %q section", sect.Name) + } + + if f.ctorsSect == nil { + return + } + + f.ctorsSect.emitRelocations(ctxt.Out, 
func() int { + dottext := ldr.Lookup(".text", 0) + ctxt.Out.Write32(0) + ctxt.Out.Write32(uint32(ldr.SymDynid(dottext))) + switch buildcfg.GOARCH { + default: + ctxt.Errorf(dottext, "unknown architecture for PE: %q\n", buildcfg.GOARCH) + case "386": + ctxt.Out.Write16(IMAGE_REL_I386_DIR32) + case "amd64": + ctxt.Out.Write16(IMAGE_REL_AMD64_ADDR64) + case "arm": + ctxt.Out.Write16(IMAGE_REL_ARM_ADDR32) + case "arm64": + ctxt.Out.Write16(IMAGE_REL_ARM64_ADDR64) + } + return 1 + }) +} + +// writeSymbol appends symbol s to file f symbol table. +// It also sets s.Dynid to written symbol number. +func (f *peFile) writeSymbol(out *OutBuf, ldr *loader.Loader, s loader.Sym, name string, value int64, sectidx int, typ uint16, class uint8) { + if len(name) > 8 { + out.Write32(0) + out.Write32(uint32(f.stringTable.add(name))) + } else { + out.WriteStringN(name, 8) + } + out.Write32(uint32(value)) + out.Write16(uint16(sectidx)) + out.Write16(typ) + out.Write8(class) + out.Write8(0) // no aux entries + + ldr.SetSymDynid(s, int32(f.symbolCount)) + + f.symbolCount++ +} + +// mapToPESection searches peFile f for s symbol's location. +// It returns PE section index, and offset within that section. 
func (f *peFile) mapToPESection(ldr *loader.Loader, s loader.Sym, linkmode LinkMode) (pesectidx int, offset int64, err error) {
	sect := ldr.SymSect(s)
	if sect == nil {
		return 0, 0, fmt.Errorf("could not map %s symbol with no section", ldr.SymName(s))
	}
	if sect.Seg == &Segtext {
		return f.textSect.index, int64(uint64(ldr.SymValue(s)) - Segtext.Vaddr), nil
	}
	if sect.Seg == &Segrodata {
		return f.rdataSect.index, int64(uint64(ldr.SymValue(s)) - Segrodata.Vaddr), nil
	}
	// Anything else must live in the data segment; otherwise we cannot map it.
	if sect.Seg != &Segdata {
		return 0, 0, fmt.Errorf("could not map %s symbol with non .text or .rdata or .data section", ldr.SymName(s))
	}
	v := uint64(ldr.SymValue(s)) - Segdata.Vaddr
	if linkmode != LinkExternal {
		// Internal linking emits a single .data section covering the whole segment.
		return f.dataSect.index, int64(v), nil
	}
	if ldr.SymType(s) == sym.SDATA {
		return f.dataSect.index, int64(v), nil
	}
	// Note: although address of runtime.edata (type sym.SDATA) is at the start of .bss section
	// it still belongs to the .data section, not the .bss section.
	if v < Segdata.Filelen {
		return f.dataSect.index, int64(v), nil
	}
	// Past the file-backed part of the data segment: this symbol is in .bss.
	return f.bssSect.index, int64(v - Segdata.Filelen), nil
}

// isLabel records symbols registered via AddPELabelSym; writeSymbols
// includes them in the COFF symbol table even though they are not
// ordinary data or text symbols.
var isLabel = make(map[loader.Sym]bool)

// AddPELabelSym marks s as a label symbol so that writeSymbols emits a
// COFF symbol table record for it.
func AddPELabelSym(ldr *loader.Loader, s loader.Sym) {
	isLabel[s] = true
}

// writeSymbols writes all COFF symbol table records.
func (f *peFile) writeSymbols(ctxt *Link) {
	ldr := ctxt.loader
	// addsym emits one COFF record for s (skipping symbols with no section
	// unless they are imports/host-object/undefined-external references).
	addsym := func(s loader.Sym) {
		t := ldr.SymType(s)
		if ldr.SymSect(s) == nil && t != sym.SDYNIMPORT && t != sym.SHOSTOBJ && t != sym.SUNDEFEXT {
			return
		}

		name := ldr.SymName(s)

		// Only windows/386 requires underscore prefix on external symbols.
		if ctxt.Is386() && ctxt.IsExternal() &&
			(t == sym.SHOSTOBJ || t == sym.SUNDEFEXT || ldr.AttrCgoExport(s) ||
				// TODO(cuonglm): remove this hack
				//
				// Previously, windows/386 requires underscore prefix on external symbols,
				// but that's only applied for SHOSTOBJ/SUNDEFEXT or cgo export symbols.
				// "go.buildid" is STEXT, "type.*" is STYPE, thus they are not prefixed
				// with underscore.
				//
				// In external linking mode, the external linker can't resolve them as
				// external symbols. But we are lucky that they have "." in their name,
				// so the external linker see them as Forwarder RVA exports. See:
				//
				//  - https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#export-address-table
				//  - https://sourceware.org/git/?p=binutils-gdb.git;a=blob;f=ld/pe-dll.c;h=e7b82ba6ffadf74dc1b9ee71dc13d48336941e51;hb=HEAD#l972
				//
				// CL 317917 changes "." to ":" in symbols name, so these symbols can not be
				// found by external linker anymore. So a hacky way is adding the
				// underscore prefix for these 2 symbols. I don't have enough knowledge to
				// verify whether adding the underscore for all STEXT/STYPE symbols are
				// fine, even if it could be, that would be done in future CL.
				name == "go:buildid" || name == "type:*") {
			name = "_" + name
		}

		name = mangleABIName(ctxt, ldr, s, name)

		var peSymType uint16 = IMAGE_SYM_TYPE_NULL
		switch t {
		case sym.STEXT, sym.SDYNIMPORT, sym.SHOSTOBJ, sym.SUNDEFEXT:
			// Microsoft's PE documentation is contradictory. It says that the symbol's complex type
			// is stored in the pesym.Type most significant byte, but MSVC, LLVM, and mingw store it
			// in the 4 high bits of the less significant byte. Also, the PE documentation says that
			// the basic type for a function should be IMAGE_SYM_TYPE_VOID,
			// but the reality is that it uses IMAGE_SYM_TYPE_NULL instead.
			peSymType = IMAGE_SYM_DTYPE_FUNCTION<<4 + IMAGE_SYM_TYPE_NULL
		}
		sect, value, err := f.mapToPESection(ldr, s, ctxt.LinkMode)
		if err != nil {
			switch t {
			case sym.SDYNIMPORT, sym.SHOSTOBJ, sym.SUNDEFEXT:
				// Imports and external references legitimately have no section.
			default:
				ctxt.Errorf(s, "addpesym: %v", err)
			}
		}
		class := IMAGE_SYM_CLASS_EXTERNAL
		if ldr.IsFileLocal(s) || ldr.AttrVisibilityHidden(s) || ldr.AttrLocal(s) {
			class = IMAGE_SYM_CLASS_STATIC
		}
		f.writeSymbol(ctxt.Out, ldr, s, name, value, sect, peSymType, uint8(class))
	}

	if ctxt.LinkMode == LinkExternal {
		// Include section symbols as external, because
		// .ctors and .debug_* section relocations refer to it.
		for _, pesect := range f.sections {
			s := ldr.LookupOrCreateSym(pesect.name, 0)
			f.writeSymbol(ctxt.Out, ldr, s, pesect.name, 0, pesect.index, IMAGE_SYM_TYPE_NULL, IMAGE_SYM_CLASS_STATIC)
		}
	}

	// Add special runtime.text and runtime.etext symbols.
	s := ldr.Lookup("runtime.text", 0)
	if ldr.SymType(s) == sym.STEXT {
		addsym(s)
	}
	s = ldr.Lookup("runtime.etext", 0)
	if ldr.SymType(s) == sym.STEXT {
		addsym(s)
	}

	// Add text symbols.
	for _, s := range ctxt.Textp {
		addsym(s)
	}

	// shouldBeInSymbolTable filters out symbols that were explicitly excluded
	// or have no usable name (empty, or compiler-internal "."-prefixed).
	shouldBeInSymbolTable := func(s loader.Sym) bool {
		if ldr.AttrNotInSymbolTable(s) {
			return false
		}
		name := ldr.SymName(s) // TODO: try not to read the name
		if name == "" || name[0] == '.' {
			return false
		}
		return true
	}

	// Add data symbols and external references.
	for s := loader.Sym(1); s < loader.Sym(ldr.NSym()); s++ {
		if !ldr.AttrReachable(s) {
			continue
		}
		t := ldr.SymType(s)
		if t >= sym.SELFRXSECT && t < sym.SXREF { // data sections handled in dodata
			if t == sym.STLSBSS {
				continue
			}
			if !shouldBeInSymbolTable(s) {
				continue
			}
			addsym(s)
		}

		switch t {
		case sym.SDYNIMPORT, sym.SHOSTOBJ, sym.SUNDEFEXT:
			addsym(s)
		default:
			if len(isLabel) > 0 && isLabel[s] {
				addsym(s)
			}
		}
	}
}

// writeSymbolTableAndStringTable writes out symbol and string tables for peFile f.
func (f *peFile) writeSymbolTableAndStringTable(ctxt *Link) {
	f.symtabOffset = ctxt.Out.Offset()

	// write COFF symbol table
	if !*FlagS || ctxt.LinkMode == LinkExternal {
		f.writeSymbols(ctxt)
	}

	// update COFF file header and section table
	size := f.stringTable.size() + 18*f.symbolCount
	var h *peSection
	if ctxt.LinkMode != LinkExternal {
		// We do not really need .symtab for go.o, and if we have one, ld
		// will also include it in the exe, and that will confuse windows.
		h = f.addSection(".symtab", size, size)
		h.characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE
		h.checkOffset(f.symtabOffset)
	}

	// write COFF string table
	f.stringTable.write(ctxt.Out)
	if ctxt.LinkMode != LinkExternal {
		h.pad(ctxt.Out, uint32(size))
	}
}

// writeFileHeader writes COFF file header for peFile f.
func (f *peFile) writeFileHeader(ctxt *Link) {
	var fh pe.FileHeader

	switch ctxt.Arch.Family {
	default:
		Exitf("unknown PE architecture: %v", ctxt.Arch.Family)
	case sys.AMD64:
		fh.Machine = pe.IMAGE_FILE_MACHINE_AMD64
	case sys.I386:
		fh.Machine = pe.IMAGE_FILE_MACHINE_I386
	case sys.ARM:
		fh.Machine = pe.IMAGE_FILE_MACHINE_ARMNT
	case sys.ARM64:
		fh.Machine = pe.IMAGE_FILE_MACHINE_ARM64
	}

	fh.NumberOfSections = uint16(len(f.sections))

	// Being able to produce identical output for identical input is
	// much more beneficial than having build timestamp in the header.
	fh.TimeDateStamp = 0

	if ctxt.LinkMode != LinkExternal {
		fh.Characteristics = pe.IMAGE_FILE_EXECUTABLE_IMAGE
		switch ctxt.Arch.Family {
		case sys.AMD64, sys.I386:
			// Position-dependent executables can declare their relocations stripped.
			if ctxt.BuildMode != BuildModePIE {
				fh.Characteristics |= pe.IMAGE_FILE_RELOCS_STRIPPED
			}
		}
	}
	if pe64 != 0 {
		var oh64 pe.OptionalHeader64
		fh.SizeOfOptionalHeader = uint16(binary.Size(&oh64))
		fh.Characteristics |= pe.IMAGE_FILE_LARGE_ADDRESS_AWARE
	} else {
		var oh pe.OptionalHeader32
		fh.SizeOfOptionalHeader = uint16(binary.Size(&oh))
		fh.Characteristics |= pe.IMAGE_FILE_32BIT_MACHINE
	}

	fh.PointerToSymbolTable = uint32(f.symtabOffset)
	fh.NumberOfSymbols = uint32(f.symbolCount)

	binary.Write(ctxt.Out, binary.LittleEndian, &fh)
}

// writeOptionalHeader writes COFF optional header for peFile f.
func (f *peFile) writeOptionalHeader(ctxt *Link) {
	var oh pe.OptionalHeader32
	var oh64 pe.OptionalHeader64

	if pe64 != 0 {
		oh64.Magic = 0x20b // PE32+
	} else {
		oh.Magic = 0x10b // PE32
		oh.BaseOfData = f.dataSect.virtualAddress
	}

	// Fill out both oh64 and oh. We only use one. Oh well.
	oh64.MajorLinkerVersion = 3
	oh.MajorLinkerVersion = 3
	oh64.MinorLinkerVersion = 0
	oh.MinorLinkerVersion = 0
	oh64.SizeOfCode = f.textSect.sizeOfRawData
	oh.SizeOfCode = f.textSect.sizeOfRawData
	oh64.SizeOfInitializedData = f.dataSect.sizeOfRawData
	oh.SizeOfInitializedData = f.dataSect.sizeOfRawData
	oh64.SizeOfUninitializedData = 0
	oh.SizeOfUninitializedData = 0
	if ctxt.LinkMode != LinkExternal {
		oh64.AddressOfEntryPoint = uint32(Entryvalue(ctxt) - PEBASE)
		oh.AddressOfEntryPoint = uint32(Entryvalue(ctxt) - PEBASE)
	}
	oh64.BaseOfCode = f.textSect.virtualAddress
	oh.BaseOfCode = f.textSect.virtualAddress
	oh64.ImageBase = uint64(PEBASE)
	oh.ImageBase = uint32(PEBASE)
	oh64.SectionAlignment = uint32(PESECTALIGN)
	oh.SectionAlignment = uint32(PESECTALIGN)
	oh64.FileAlignment = uint32(PEFILEALIGN)
	oh.FileAlignment = uint32(PEFILEALIGN)
	oh64.MajorOperatingSystemVersion = PeMinimumTargetMajorVersion
	oh.MajorOperatingSystemVersion = PeMinimumTargetMajorVersion
	oh64.MinorOperatingSystemVersion = PeMinimumTargetMinorVersion
	oh.MinorOperatingSystemVersion = PeMinimumTargetMinorVersion
	oh64.MajorImageVersion = 1
	oh.MajorImageVersion = 1
	oh64.MinorImageVersion = 0
	oh.MinorImageVersion = 0
	oh64.MajorSubsystemVersion = PeMinimumTargetMajorVersion
	oh.MajorSubsystemVersion = PeMinimumTargetMajorVersion
	oh64.MinorSubsystemVersion = PeMinimumTargetMinorVersion
	oh.MinorSubsystemVersion = PeMinimumTargetMinorVersion
	oh64.SizeOfImage = f.nextSectOffset
	oh.SizeOfImage = f.nextSectOffset
	oh64.SizeOfHeaders = uint32(PEFILEHEADR)
	oh.SizeOfHeaders = uint32(PEFILEHEADR)
	if windowsgui {
		oh64.Subsystem = pe.IMAGE_SUBSYSTEM_WINDOWS_GUI
		oh.Subsystem = pe.IMAGE_SUBSYSTEM_WINDOWS_GUI
	} else {
		oh64.Subsystem = pe.IMAGE_SUBSYSTEM_WINDOWS_CUI
		oh.Subsystem = pe.IMAGE_SUBSYSTEM_WINDOWS_CUI
	}

	// Mark as having awareness of terminal services, to avoid ancient compatibility hacks.
	oh64.DllCharacteristics |= pe.IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE
	oh.DllCharacteristics |= pe.IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE

	// Enable DEP
	oh64.DllCharacteristics |= pe.IMAGE_DLLCHARACTERISTICS_NX_COMPAT
	oh.DllCharacteristics |= pe.IMAGE_DLLCHARACTERISTICS_NX_COMPAT

	// The DLL can be relocated at load time.
	if needPEBaseReloc(ctxt) {
		oh64.DllCharacteristics |= pe.IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE
		oh.DllCharacteristics |= pe.IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE
	}

	// Image can handle a high entropy 64-bit virtual address space.
	if ctxt.BuildMode == BuildModePIE {
		oh64.DllCharacteristics |= pe.IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA
	}

	// Disable stack growth as we don't want Windows to
	// fiddle with the thread stack limits, which we set
	// ourselves to circumvent the stack checks in the
	// Windows exception dispatcher.
	// Commit size must be strictly less than reserve
	// size otherwise reserve will be rounded up to a
	// larger size, as verified with VMMap.

	// On 64-bit, we always reserve 2MB stacks. "Pure" Go code is
	// okay with much smaller stacks, but the syscall package
	// makes it easy to call into arbitrary C code without cgo,
	// and system calls even in "pure" Go code are actually C
	// calls that may need more stack than we think.
	//
	// The default stack reserve size directly affects only the main
	// thread.
	//
	// For other threads, the runtime explicitly asks the kernel
	// to use the default stack size so that all stacks are
	// consistent.
	//
	// At thread start, in minit, the runtime queries the OS for
	// the actual stack bounds so that the stack size doesn't need
	// to be hard-coded into the runtime.
	oh64.SizeOfStackReserve = 0x00200000
	if !iscgo {
		oh64.SizeOfStackCommit = 0x00001000
	} else {
		// TODO(brainman): Maybe remove optional header writing altogether for cgo.
		// For cgo it is the external linker that is building final executable.
		// And it probably does not use any information stored in optional header.
		oh64.SizeOfStackCommit = 0x00200000 - 0x2000 // account for 2 guard pages
	}

	oh.SizeOfStackReserve = 0x00100000
	if !iscgo {
		oh.SizeOfStackCommit = 0x00001000
	} else {
		oh.SizeOfStackCommit = 0x00100000 - 0x2000 // account for 2 guard pages
	}

	oh64.SizeOfHeapReserve = 0x00100000
	oh.SizeOfHeapReserve = 0x00100000
	oh64.SizeOfHeapCommit = 0x00001000
	oh.SizeOfHeapCommit = 0x00001000
	oh64.NumberOfRvaAndSizes = 16
	oh.NumberOfRvaAndSizes = 16

	if pe64 != 0 {
		oh64.DataDirectory = f.dataDirectory
	} else {
		oh.DataDirectory = f.dataDirectory
	}

	if pe64 != 0 {
		binary.Write(ctxt.Out, binary.LittleEndian, &oh64)
	} else {
		binary.Write(ctxt.Out, binary.LittleEndian, &oh)
	}
}

// pefile is the global PE output file state built up during linking.
var pefile peFile

// Peinit initializes PE output parameters (image base, alignments,
// header sizes) for the target architecture and link mode.
func Peinit(ctxt *Link) {
	var l int

	if ctxt.Arch.PtrSize == 8 {
		// 64-bit architectures
		pe64 = 1
		PEBASE = 1 << 32
		if ctxt.Arch.Family == sys.AMD64 {
			// TODO(rsc): For cgo we currently use 32-bit relocations
			// that fail when PEBASE is too large.
			// We need to fix this, but for now, use a smaller PEBASE.
			PEBASE = 1 << 22
		}
		var oh64 pe.OptionalHeader64
		l = binary.Size(&oh64)
	} else {
		// 32-bit architectures
		PEBASE = 1 << 22
		var oh pe.OptionalHeader32
		l = binary.Size(&oh)
	}

	if ctxt.LinkMode == LinkExternal {
		// .rdata section will contain "masks" and "shifts" symbols, and they
		// need to be aligned to 16-bytes. So make all sections aligned
		// to 32-byte and mark them all IMAGE_SCN_ALIGN_32BYTES so external
		// linker will honour that requirement.
		PESECTALIGN = 32
		PEFILEALIGN = 0
		// We are creating an object file. The absolute address is irrelevant.
		PEBASE = 0
	}

	var sh [16]pe.SectionHeader32
	var fh pe.FileHeader
	PEFILEHEADR = int32(Rnd(int64(len(dosstub)+binary.Size(&fh)+l+binary.Size(&sh)), PEFILEALIGN))
	if ctxt.LinkMode != LinkExternal {
		PESECTHEADR = int32(Rnd(int64(PEFILEHEADR), PESECTALIGN))
	} else {
		PESECTHEADR = 0
	}
	pefile.nextSectOffset = uint32(PESECTHEADR)
	pefile.nextFileOffset = uint32(PEFILEHEADR)

	if ctxt.LinkMode == LinkInternal {
		// some mingw libs depend on this symbol, for example, FindPESectionByName
		for _, name := range [2]string{"__image_base__", "_image_base__"} {
			sb := ctxt.loader.CreateSymForUpdate(name, 0)
			sb.SetType(sym.SDATA)
			sb.SetValue(PEBASE)
			ctxt.loader.SetAttrSpecial(sb.Sym(), true)
			ctxt.loader.SetAttrLocal(sb.Sym(), true)
		}
	}

	HEADR = PEFILEHEADR
	if *FlagRound == -1 {
		*FlagRound = PESECTALIGN
	}
	if *FlagTextAddr == -1 {
		*FlagTextAddr = Rnd(PEBASE, *FlagRound) + int64(PESECTHEADR)
	}
}

// pewrite writes the DOS stub (for executables), PE signature, file and
// optional headers, and the section table.
func pewrite(ctxt *Link) {
	ctxt.Out.SeekSet(0)
	if ctxt.LinkMode != LinkExternal {
		ctxt.Out.Write(dosstub)
		ctxt.Out.WriteStringN("PE", 4)
	}

	pefile.writeFileHeader(ctxt)

	pefile.writeOptionalHeader(ctxt)

	for _, sect := range pefile.sections {
		sect.write(ctxt.Out, ctxt.LinkMode)
	}
}

// strput writes s as a NUL-terminated string, padded with an extra NUL
// to an even total length as required by the import hint/name table.
func strput(out *OutBuf, s string) {
	out.WriteString(s)
	out.Write8(0)
	// string must be padded to even size
	if (len(s)+1)%2 != 0 {
		out.Write8(0)
	}
}

// initdynimport scans reachable SDYNIMPORT symbols and builds the global
// per-DLL import list dr, returning its head.
func initdynimport(ctxt *Link) *Dll {
	ldr := ctxt.loader
	var d *Dll

	dr = nil
	var m *Imp
	for s := loader.Sym(1); s < loader.Sym(ldr.NSym()); s++ {
		if !ldr.AttrReachable(s) || ldr.SymType(s) != sym.SDYNIMPORT {
			continue
		}
		dynlib := ldr.SymDynimplib(s)
		for d = dr; d != nil; d = d.next {
			if d.name == dynlib {
				m = new(Imp)
				break
			}
		}

		if d == nil {
			d = new(Dll)
			d.name = dynlib
			d.next = dr
			dr = d
			m = new(Imp)
		}

		// Because external link requires properly stdcall decorated name,
		// all external symbols in runtime use %n to denote the number
		// of uintptrs this function consumes. Store the argsize and discard
		// the %n suffix if any.
		m.argsize = -1
		extName := ldr.SymExtname(s)
		if i := strings.IndexByte(extName, '%'); i >= 0 {
			var err error
			m.argsize, err = strconv.Atoi(extName[i+1:])
			if err != nil {
				ctxt.Errorf(s, "failed to parse stdcall decoration: %v", err)
			}
			m.argsize *= ctxt.Arch.PtrSize
			ldr.SetSymExtname(s, extName[:i])
		}

		m.s = s
		m.next = d.ms
		d.ms = m
	}

	if ctxt.IsExternal() {
		// Add real symbol name
		for d := dr; d != nil; d = d.next {
			for m = d.ms; m != nil; m = m.next {
				sb := ldr.MakeSymbolUpdater(m.s)
				sb.SetType(sym.SDATA)
				sb.Grow(int64(ctxt.Arch.PtrSize))
				dynName := sb.Extname()
				// only windows/386 requires stdcall decoration
				if ctxt.Is386() && m.argsize >= 0 {
					dynName += fmt.Sprintf("@%d", m.argsize)
				}
				dynSym := ldr.CreateSymForUpdate(dynName, 0)
				dynSym.SetType(sym.SHOSTOBJ)
				r, _ := sb.AddRel(objabi.R_ADDR)
				r.SetSym(dynSym.Sym())
				r.SetSiz(uint8(ctxt.Arch.PtrSize))
			}
		}
	} else {
		dynamic := ldr.CreateSymForUpdate(".windynamic", 0)
		dynamic.SetType(sym.SWINDOWS)
		for d := dr; d != nil; d = d.next {
			for m = d.ms; m != nil; m = m.next {
				sb := ldr.MakeSymbolUpdater(m.s)
				sb.SetType(sym.SWINDOWS)
				sb.SetValue(dynamic.Size())
				dynamic.SetSize(dynamic.Size() + int64(ctxt.Arch.PtrSize))
				dynamic.AddInteriorSym(m.s)
			}

			// Trailing zero slot terminates this DLL's thunk list.
			dynamic.SetSize(dynamic.Size() + int64(ctxt.Arch.PtrSize))
		}
	}

	return dr
}

// peimporteddlls returns the gcc command line argument to link all imported
// DLLs.
+func peimporteddlls() []string { + var dlls []string + + for d := dr; d != nil; d = d.next { + dlls = append(dlls, "-l"+strings.TrimSuffix(d.name, ".dll")) + } + + return dlls +} + +func addimports(ctxt *Link, datsect *peSection) { + ldr := ctxt.loader + startoff := ctxt.Out.Offset() + dynamic := ldr.LookupOrCreateSym(".windynamic", 0) + + // skip import descriptor table (will write it later) + n := uint64(0) + + for d := dr; d != nil; d = d.next { + n++ + } + ctxt.Out.SeekSet(startoff + int64(binary.Size(&IMAGE_IMPORT_DESCRIPTOR{}))*int64(n+1)) + + // write dll names + for d := dr; d != nil; d = d.next { + d.nameoff = uint64(ctxt.Out.Offset()) - uint64(startoff) + strput(ctxt.Out, d.name) + } + + // write function names + for d := dr; d != nil; d = d.next { + for m := d.ms; m != nil; m = m.next { + m.off = uint64(pefile.nextSectOffset) + uint64(ctxt.Out.Offset()) - uint64(startoff) + ctxt.Out.Write16(0) // hint + strput(ctxt.Out, ldr.SymExtname(m.s)) + } + } + + // write OriginalFirstThunks + oftbase := uint64(ctxt.Out.Offset()) - uint64(startoff) + + n = uint64(ctxt.Out.Offset()) + for d := dr; d != nil; d = d.next { + d.thunkoff = uint64(ctxt.Out.Offset()) - n + for m := d.ms; m != nil; m = m.next { + if pe64 != 0 { + ctxt.Out.Write64(m.off) + } else { + ctxt.Out.Write32(uint32(m.off)) + } + } + + if pe64 != 0 { + ctxt.Out.Write64(0) + } else { + ctxt.Out.Write32(0) + } + } + + // add pe section and pad it at the end + n = uint64(ctxt.Out.Offset()) - uint64(startoff) + + isect := pefile.addSection(".idata", int(n), int(n)) + isect.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE + isect.checkOffset(startoff) + isect.pad(ctxt.Out, uint32(n)) + endoff := ctxt.Out.Offset() + + // write FirstThunks (allocated in .data section) + ftbase := uint64(ldr.SymValue(dynamic)) - uint64(datsect.virtualAddress) - uint64(PEBASE) + + ctxt.Out.SeekSet(int64(uint64(datsect.pointerToRawData) + ftbase)) + for d := dr; d != nil; d = d.next 
{ + for m := d.ms; m != nil; m = m.next { + if pe64 != 0 { + ctxt.Out.Write64(m.off) + } else { + ctxt.Out.Write32(uint32(m.off)) + } + } + + if pe64 != 0 { + ctxt.Out.Write64(0) + } else { + ctxt.Out.Write32(0) + } + } + + // finally write import descriptor table + out := ctxt.Out + out.SeekSet(startoff) + + for d := dr; d != nil; d = d.next { + out.Write32(uint32(uint64(isect.virtualAddress) + oftbase + d.thunkoff)) + out.Write32(0) + out.Write32(0) + out.Write32(uint32(uint64(isect.virtualAddress) + d.nameoff)) + out.Write32(uint32(uint64(datsect.virtualAddress) + ftbase + d.thunkoff)) + } + + out.Write32(0) //end + out.Write32(0) + out.Write32(0) + out.Write32(0) + out.Write32(0) + + // update data directory + pefile.dataDirectory[pe.IMAGE_DIRECTORY_ENTRY_IMPORT].VirtualAddress = isect.virtualAddress + pefile.dataDirectory[pe.IMAGE_DIRECTORY_ENTRY_IMPORT].Size = isect.virtualSize + pefile.dataDirectory[pe.IMAGE_DIRECTORY_ENTRY_IAT].VirtualAddress = uint32(ldr.SymValue(dynamic) - PEBASE) + pefile.dataDirectory[pe.IMAGE_DIRECTORY_ENTRY_IAT].Size = uint32(ldr.SymSize(dynamic)) + + out.SeekSet(endoff) +} + +func initdynexport(ctxt *Link) { + ldr := ctxt.loader + for s := loader.Sym(1); s < loader.Sym(ldr.NSym()); s++ { + if !ldr.AttrReachable(s) || !ldr.AttrCgoExportDynamic(s) { + continue + } + if len(dexport)+1 > cap(dexport) { + ctxt.Errorf(s, "pe dynexport table is full") + errorexit() + } + + dexport = append(dexport, s) + } + + sort.Slice(dexport, func(i, j int) bool { return ldr.SymExtname(dexport[i]) < ldr.SymExtname(dexport[j]) }) +} + +func addexports(ctxt *Link) { + ldr := ctxt.loader + var e IMAGE_EXPORT_DIRECTORY + + nexport := len(dexport) + size := binary.Size(&e) + 10*nexport + len(*flagOutfile) + 1 + for _, s := range dexport { + size += len(ldr.SymExtname(s)) + 1 + } + + if nexport == 0 { + return + } + + sect := pefile.addSection(".edata", size, size) + sect.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ + 
sect.checkOffset(ctxt.Out.Offset()) + va := int(sect.virtualAddress) + pefile.dataDirectory[pe.IMAGE_DIRECTORY_ENTRY_EXPORT].VirtualAddress = uint32(va) + pefile.dataDirectory[pe.IMAGE_DIRECTORY_ENTRY_EXPORT].Size = sect.virtualSize + + vaName := va + binary.Size(&e) + nexport*4 + vaAddr := va + binary.Size(&e) + vaNa := va + binary.Size(&e) + nexport*8 + + e.Characteristics = 0 + e.MajorVersion = 0 + e.MinorVersion = 0 + e.NumberOfFunctions = uint32(nexport) + e.NumberOfNames = uint32(nexport) + e.Name = uint32(va+binary.Size(&e)) + uint32(nexport)*10 // Program names. + e.Base = 1 + e.AddressOfFunctions = uint32(vaAddr) + e.AddressOfNames = uint32(vaName) + e.AddressOfNameOrdinals = uint32(vaNa) + + out := ctxt.Out + + // put IMAGE_EXPORT_DIRECTORY + binary.Write(out, binary.LittleEndian, &e) + + // put EXPORT Address Table + for _, s := range dexport { + out.Write32(uint32(ldr.SymValue(s) - PEBASE)) + } + + // put EXPORT Name Pointer Table + v := int(e.Name + uint32(len(*flagOutfile)) + 1) + + for _, s := range dexport { + out.Write32(uint32(v)) + v += len(ldr.SymExtname(s)) + 1 + } + + // put EXPORT Ordinal Table + for i := 0; i < nexport; i++ { + out.Write16(uint16(i)) + } + + // put Names + out.WriteStringN(*flagOutfile, len(*flagOutfile)+1) + + for _, s := range dexport { + name := ldr.SymExtname(s) + out.WriteStringN(name, len(name)+1) + } + sect.pad(out, uint32(size)) +} + +// peBaseRelocEntry represents a single relocation entry. +type peBaseRelocEntry struct { + typeOff uint16 +} + +// peBaseRelocBlock represents a Base Relocation Block. A block +// is a collection of relocation entries in a page, where each +// entry describes a single relocation. +// The block page RVA (Relative Virtual Address) is the index +// into peBaseRelocTable.blocks. +type peBaseRelocBlock struct { + entries []peBaseRelocEntry +} + +// pePages is a type used to store the list of pages for which there +// are base relocation blocks. 
This is defined as a type so that +// it can be sorted. +type pePages []uint32 + +func (p pePages) Len() int { return len(p) } +func (p pePages) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p pePages) Less(i, j int) bool { return p[i] < p[j] } + +// A PE base relocation table is a list of blocks, where each block +// contains relocation information for a single page. The blocks +// must be emitted in order of page virtual address. +// See https://docs.microsoft.com/en-us/windows/desktop/debug/pe-format#the-reloc-section-image-only +type peBaseRelocTable struct { + blocks map[uint32]peBaseRelocBlock + + // pePages is a list of keys into blocks map. + // It is stored separately for ease of sorting. + pages pePages +} + +func (rt *peBaseRelocTable) init(ctxt *Link) { + rt.blocks = make(map[uint32]peBaseRelocBlock) +} + +func (rt *peBaseRelocTable) addentry(ldr *loader.Loader, s loader.Sym, r *loader.Reloc) { + // pageSize is the size in bytes of a page + // described by a base relocation block. 
+ const pageSize = 0x1000 + const pageMask = pageSize - 1 + + addr := ldr.SymValue(s) + int64(r.Off()) - int64(PEBASE) + page := uint32(addr &^ pageMask) + off := uint32(addr & pageMask) + + b, ok := rt.blocks[page] + if !ok { + rt.pages = append(rt.pages, page) + } + + e := peBaseRelocEntry{ + typeOff: uint16(off & 0xFFF), + } + + // Set entry type + switch r.Siz() { + default: + Exitf("unsupported relocation size %d\n", r.Siz) + case 4: + e.typeOff |= uint16(IMAGE_REL_BASED_HIGHLOW << 12) + case 8: + e.typeOff |= uint16(IMAGE_REL_BASED_DIR64 << 12) + } + + b.entries = append(b.entries, e) + rt.blocks[page] = b +} + +func (rt *peBaseRelocTable) write(ctxt *Link) { + out := ctxt.Out + + // sort the pages array + sort.Sort(rt.pages) + + for _, p := range rt.pages { + b := rt.blocks[p] + const sizeOfPEbaseRelocBlock = 8 // 2 * sizeof(uint32) + blockSize := uint32(sizeOfPEbaseRelocBlock + len(b.entries)*2) + out.Write32(p) + out.Write32(blockSize) + + for _, e := range b.entries { + out.Write16(e.typeOff) + } + } +} + +func addPEBaseRelocSym(ldr *loader.Loader, s loader.Sym, rt *peBaseRelocTable) { + relocs := ldr.Relocs(s) + for ri := 0; ri < relocs.Count(); ri++ { + r := relocs.At(ri) + if r.Type() >= objabi.ElfRelocOffset { + continue + } + if r.Siz() == 0 { // informational relocation + continue + } + if r.Type() == objabi.R_DWARFFILEREF { + continue + } + rs := r.Sym() + if rs == 0 { + continue + } + if !ldr.AttrReachable(s) { + continue + } + + switch r.Type() { + default: + case objabi.R_ADDR: + rt.addentry(ldr, s, &r) + } + } +} + +func needPEBaseReloc(ctxt *Link) bool { + // Non-PIE x86 binaries don't need the base relocation table. + // Everyone else does. 
+ if (ctxt.Arch.Family == sys.I386 || ctxt.Arch.Family == sys.AMD64) && ctxt.BuildMode != BuildModePIE { + return false + } + return true +} + +func addPEBaseReloc(ctxt *Link) { + if !needPEBaseReloc(ctxt) { + return + } + + var rt peBaseRelocTable + rt.init(ctxt) + + // Get relocation information + ldr := ctxt.loader + for _, s := range ctxt.Textp { + addPEBaseRelocSym(ldr, s, &rt) + } + for _, s := range ctxt.datap { + addPEBaseRelocSym(ldr, s, &rt) + } + + // Write relocation information + startoff := ctxt.Out.Offset() + rt.write(ctxt) + size := ctxt.Out.Offset() - startoff + + // Add a PE section and pad it at the end + rsect := pefile.addSection(".reloc", int(size), int(size)) + rsect.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE + rsect.checkOffset(startoff) + rsect.pad(ctxt.Out, uint32(size)) + + pefile.dataDirectory[pe.IMAGE_DIRECTORY_ENTRY_BASERELOC].VirtualAddress = rsect.virtualAddress + pefile.dataDirectory[pe.IMAGE_DIRECTORY_ENTRY_BASERELOC].Size = rsect.virtualSize +} + +func (ctxt *Link) dope() { + initdynimport(ctxt) + initdynexport(ctxt) + writeSEH(ctxt) +} + +func setpersrc(ctxt *Link, syms []loader.Sym) { + if len(rsrcsyms) != 0 { + Errorf(nil, "too many .rsrc sections") + } + rsrcsyms = syms +} + +func addpersrc(ctxt *Link) { + if len(rsrcsyms) == 0 { + return + } + + var size int64 + for _, rsrcsym := range rsrcsyms { + size += ctxt.loader.SymSize(rsrcsym) + } + h := pefile.addSection(".rsrc", int(size), int(size)) + h.characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_CNT_INITIALIZED_DATA + h.checkOffset(ctxt.Out.Offset()) + + for _, rsrcsym := range rsrcsyms { + // A split resource happens when the actual resource data and its relocations are + // split across multiple sections, denoted by a $01 or $02 at the end of the .rsrc + // section name. 
+ splitResources := strings.Contains(ctxt.loader.SymName(rsrcsym), ".rsrc$") + relocs := ctxt.loader.Relocs(rsrcsym) + data := ctxt.loader.Data(rsrcsym) + for ri := 0; ri < relocs.Count(); ri++ { + r := relocs.At(ri) + p := data[r.Off():] + val := uint32(int64(h.virtualAddress) + r.Add()) + if splitResources { + // If we're a split resource section, and that section has relocation + // symbols, then the data that it points to doesn't actually begin at + // the virtual address listed in this current section, but rather + // begins at the section immediately after this one. So, in order to + // calculate the proper virtual address of the data it's pointing to, + // we have to add the length of this section to the virtual address. + // This works because .rsrc sections are divided into two (but not more) + // of these sections. + val += uint32(len(data)) + } + binary.LittleEndian.PutUint32(p, val) + } + ctxt.Out.Write(data) + } + h.pad(ctxt.Out, uint32(size)) + + // update data directory + pefile.dataDirectory[pe.IMAGE_DIRECTORY_ENTRY_RESOURCE].VirtualAddress = h.virtualAddress + pefile.dataDirectory[pe.IMAGE_DIRECTORY_ENTRY_RESOURCE].Size = h.virtualSize +} + +func asmbPe(ctxt *Link) { + t := pefile.addSection(".text", int(Segtext.Length), int(Segtext.Length)) + t.characteristics = IMAGE_SCN_CNT_CODE | IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ + if ctxt.LinkMode == LinkExternal { + // some data symbols (e.g. masks) end up in the .text section, and they normally + // expect larger alignment requirement than the default text section alignment. + t.characteristics |= IMAGE_SCN_ALIGN_32BYTES + } + t.checkSegment(&Segtext) + pefile.textSect = t + + ro := pefile.addSection(".rdata", int(Segrodata.Length), int(Segrodata.Length)) + ro.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ + if ctxt.LinkMode == LinkExternal { + // some data symbols (e.g. 
masks) end up in the .rdata section, and they normally + // expect larger alignment requirement than the default text section alignment. + ro.characteristics |= IMAGE_SCN_ALIGN_32BYTES + } + ro.checkSegment(&Segrodata) + pefile.rdataSect = ro + + var d *peSection + if ctxt.LinkMode != LinkExternal { + d = pefile.addSection(".data", int(Segdata.Length), int(Segdata.Filelen)) + d.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE + d.checkSegment(&Segdata) + pefile.dataSect = d + } else { + d = pefile.addSection(".data", int(Segdata.Filelen), int(Segdata.Filelen)) + d.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_ALIGN_32BYTES + d.checkSegment(&Segdata) + pefile.dataSect = d + + b := pefile.addSection(".bss", int(Segdata.Length-Segdata.Filelen), 0) + b.characteristics = IMAGE_SCN_CNT_UNINITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_ALIGN_32BYTES + b.pointerToRawData = 0 + pefile.bssSect = b + } + + pefile.addSEH(ctxt) + pefile.addDWARF() + + if ctxt.LinkMode == LinkExternal { + pefile.ctorsSect = pefile.addInitArray(ctxt) + } + + ctxt.Out.SeekSet(int64(pefile.nextFileOffset)) + if ctxt.LinkMode != LinkExternal { + addimports(ctxt, d) + addexports(ctxt) + addPEBaseReloc(ctxt) + } + pefile.writeSymbolTableAndStringTable(ctxt) + addpersrc(ctxt) + if ctxt.LinkMode == LinkExternal { + pefile.emitRelocations(ctxt) + } + + pewrite(ctxt) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/seh.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/seh.go new file mode 100644 index 0000000000000000000000000000000000000000..9f0f747c042b154a337e2eaecf4df58823213f2f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/seh.go @@ -0,0 +1,77 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ld + +import ( + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/sym" +) + +var sehp struct { + pdata []sym.LoaderSym + xdata []sym.LoaderSym +} + +func writeSEH(ctxt *Link) { + switch ctxt.Arch.Family { + case sys.AMD64: + writeSEHAMD64(ctxt) + } +} + +func writeSEHAMD64(ctxt *Link) { + ldr := ctxt.loader + mkSecSym := func(name string, kind sym.SymKind) *loader.SymbolBuilder { + s := ldr.CreateSymForUpdate(name, 0) + s.SetType(kind) + s.SetAlign(4) + return s + } + pdata := mkSecSym(".pdata", sym.SSEHSECT) + xdata := mkSecSym(".xdata", sym.SSEHSECT) + // The .xdata entries have very low cardinality + // as it only contains frame pointer operations, + // which are very similar across functions. + // These are referenced by .pdata entries using + // an RVA, so it is possible, and binary-size wise, + // to deduplicate .xdata entries. + uwcache := make(map[string]int64) // aux symbol name --> .xdata offset + for _, s := range ctxt.Textp { + if fi := ldr.FuncInfo(s); !fi.Valid() { + continue + } + uw := ldr.SEHUnwindSym(s) + if uw == 0 { + continue + } + name := ctxt.SymName(uw) + off, cached := uwcache[name] + if !cached { + off = xdata.Size() + uwcache[name] = off + xdata.AddBytes(ldr.Data(uw)) + // The SEH unwind data can contain relocations, + // make sure those are copied over. 
+ rels := ldr.Relocs(uw) + for i := 0; i < rels.Count(); i++ { + r := rels.At(i) + rel, _ := xdata.AddRel(r.Type()) + rel.SetOff(int32(off) + r.Off()) + rel.SetSiz(r.Siz()) + rel.SetSym(r.Sym()) + rel.SetAdd(r.Add()) + } + } + + // Reference: + // https://learn.microsoft.com/en-us/cpp/build/exception-handling-x64#struct-runtime_function + pdata.AddPEImageRelativeAddrPlus(ctxt.Arch, s, 0) + pdata.AddPEImageRelativeAddrPlus(ctxt.Arch, s, ldr.SymSize(s)) + pdata.AddPEImageRelativeAddrPlus(ctxt.Arch, xdata.Sym(), off) + } + sehp.pdata = append(sehp.pdata, pdata.Sym()) + sehp.xdata = append(sehp.xdata, xdata.Sym()) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/stackcheck.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/stackcheck.go new file mode 100644 index 0000000000000000000000000000000000000000..98e7edaeb1477b4d5c8136e51954f961b778639d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/stackcheck.go @@ -0,0 +1,421 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/link/internal/loader" + "fmt" + "internal/buildcfg" + "sort" + "strings" +) + +type stackCheck struct { + ctxt *Link + ldr *loader.Loader + morestack loader.Sym + callSize int // The number of bytes added by a CALL + + // height records the maximum number of bytes a function and + // its callees can add to the stack without a split check. + height map[loader.Sym]int16 + + // graph records the out-edges from each symbol. This is only + // populated on a second pass if the first pass reveals an + // over-limit function. 
+ graph map[loader.Sym][]stackCheckEdge +} + +type stackCheckEdge struct { + growth int // Stack growth in bytes at call to target + target loader.Sym // 0 for stack growth without a call +} + +// stackCheckCycle is a sentinel stored in the height map to detect if +// we've found a cycle. This is effectively an "infinite" stack +// height, so we use the closest value to infinity that we can. +const stackCheckCycle int16 = 1<<15 - 1 + +// stackCheckIndirect is a sentinel Sym value used to represent the +// target of an indirect/closure call. +const stackCheckIndirect loader.Sym = ^loader.Sym(0) + +// doStackCheck walks the call tree to check that there is always +// enough stack space for call frames, especially for a chain of +// nosplit functions. +// +// It walks all functions to accumulate the number of bytes they can +// grow the stack by without a split check and checks this against the +// limit. +func (ctxt *Link) doStackCheck() { + sc := newStackCheck(ctxt, false) + + // limit is number of bytes a splittable function ensures are + // available on the stack. If any call chain exceeds this + // depth, the stack check test fails. + // + // The call to morestack in every splittable function ensures + // that there are at least StackLimit bytes available below SP + // when morestack returns. + limit := objabi.StackNosplit(*flagRace) - sc.callSize + if buildcfg.GOARCH == "arm64" { + // Need an extra 8 bytes below SP to save FP. + limit -= 8 + } + + // Compute stack heights without any back-tracking information. + // This will almost certainly succeed and we can simply + // return. If it fails, we do a second pass with back-tracking + // to produce a good error message. + // + // This accumulates stack heights bottom-up so it only has to + // visit every function once. 
+ var failed []loader.Sym + for _, s := range ctxt.Textp { + if sc.check(s) > limit { + failed = append(failed, s) + } + } + + if len(failed) > 0 { + // Something was over-limit, so now we do the more + // expensive work to report a good error. First, for + // the over-limit functions, redo the stack check but + // record the graph this time. + sc = newStackCheck(ctxt, true) + for _, s := range failed { + sc.check(s) + } + + // Find the roots of the graph (functions that are not + // called by any other function). + roots := sc.findRoots() + + // Find and report all paths that go over the limit. + // This accumulates stack depths top-down. This is + // much less efficient because we may have to visit + // the same function multiple times at different + // depths, but lets us find all paths. + for _, root := range roots { + ctxt.Errorf(root, "nosplit stack over %d byte limit", limit) + chain := []stackCheckChain{{stackCheckEdge{0, root}, false}} + sc.report(root, limit, &chain) + } + } +} + +func newStackCheck(ctxt *Link, graph bool) *stackCheck { + sc := &stackCheck{ + ctxt: ctxt, + ldr: ctxt.loader, + morestack: ctxt.loader.Lookup("runtime.morestack", 0), + height: make(map[loader.Sym]int16, len(ctxt.Textp)), + } + // Compute stack effect of a CALL operation. 0 on LR machines. + // 1 register pushed on non-LR machines. + if !ctxt.Arch.HasLR { + sc.callSize = ctxt.Arch.RegSize + } + + if graph { + // We're going to record the call graph. + sc.graph = make(map[loader.Sym][]stackCheckEdge) + } + + return sc +} + +func (sc *stackCheck) symName(sym loader.Sym) string { + switch sym { + case stackCheckIndirect: + return "indirect" + case 0: + return "leaf" + } + return fmt.Sprintf("%s<%d>", sc.ldr.SymName(sym), sc.ldr.SymVersion(sym)) +} + +// check returns the stack height of sym. It populates sc.height and +// sc.graph for sym and every function in its call tree. 
+func (sc *stackCheck) check(sym loader.Sym) int { + if h, ok := sc.height[sym]; ok { + // We've already visited this symbol or we're in a cycle. + return int(h) + } + // Store the sentinel so we can detect cycles. + sc.height[sym] = stackCheckCycle + // Compute and record the height and optionally edges. + h, edges := sc.computeHeight(sym, *flagDebugNosplit || sc.graph != nil) + if h > int(stackCheckCycle) { // Prevent integer overflow + h = int(stackCheckCycle) + } + sc.height[sym] = int16(h) + if sc.graph != nil { + sc.graph[sym] = edges + } + + if *flagDebugNosplit { + for _, edge := range edges { + fmt.Printf("nosplit: %s +%d", sc.symName(sym), edge.growth) + if edge.target == 0 { + // Local stack growth or leaf function. + fmt.Printf("\n") + } else { + fmt.Printf(" -> %s\n", sc.symName(edge.target)) + } + } + } + + return h +} + +// computeHeight returns the stack height of sym. If graph is true, it +// also returns the out-edges of sym. +// +// Caching is applied to this in check. Call check instead of calling +// this directly. +func (sc *stackCheck) computeHeight(sym loader.Sym, graph bool) (int, []stackCheckEdge) { + ldr := sc.ldr + + // Check special cases. + if sym == sc.morestack { + // morestack looks like it calls functions, but they + // either happen only when already on the system stack + // (where there is ~infinite space), or after + // switching to the system stack. Hence, its stack + // height on the user stack is 0. + return 0, nil + } + if sym == stackCheckIndirect { + // Assume that indirect/closure calls are always to + // splittable functions, so they just need enough room + // to call morestack. + return sc.callSize, []stackCheckEdge{{sc.callSize, sc.morestack}} + } + + // Ignore calls to external functions. Assume that these calls + // are only ever happening on the system stack, where there's + // plenty of room. 
+ if ldr.AttrExternal(sym) { + return 0, nil + } + if info := ldr.FuncInfo(sym); !info.Valid() { // also external + return 0, nil + } + + // Track the maximum height of this function and, if we're + // recording the graph, its out-edges. + var edges []stackCheckEdge + maxHeight := 0 + ctxt := sc.ctxt + // addEdge adds a stack growth out of this function to + // function "target" or, if target == 0, a local stack growth + // within the function. + addEdge := func(growth int, target loader.Sym) { + if graph { + edges = append(edges, stackCheckEdge{growth, target}) + } + height := growth + if target != 0 { // Don't walk into the leaf "edge" + height += sc.check(target) + } + if height > maxHeight { + maxHeight = height + } + } + + if !ldr.IsNoSplit(sym) { + // Splittable functions start with a call to + // morestack, after which their height is 0. Account + // for the height of the call to morestack. + addEdge(sc.callSize, sc.morestack) + return maxHeight, edges + } + + // This function is nosplit, so it adjusts SP without a split + // check. + // + // Walk through SP adjustments in function, consuming relocs + // and following calls. + maxLocalHeight := 0 + relocs, ri := ldr.Relocs(sym), 0 + pcsp := obj.NewPCIter(uint32(ctxt.Arch.MinLC)) + for pcsp.Init(ldr.Data(ldr.Pcsp(sym))); !pcsp.Done; pcsp.Next() { + // pcsp.value is in effect for [pcsp.pc, pcsp.nextpc). + height := int(pcsp.Value) + if height > maxLocalHeight { + maxLocalHeight = height + } + + // Process calls in this span. 
+ for ; ri < relocs.Count(); ri++ { + r := relocs.At(ri) + if uint32(r.Off()) >= pcsp.NextPC { + break + } + t := r.Type() + if t.IsDirectCall() || t == objabi.R_CALLIND { + growth := height + sc.callSize + var target loader.Sym + if t == objabi.R_CALLIND { + target = stackCheckIndirect + } else { + target = r.Sym() + } + addEdge(growth, target) + } + } + } + if maxLocalHeight > maxHeight { + // This is either a leaf function, or the function + // grew its stack to larger than the maximum call + // height between calls. Either way, record that local + // stack growth. + addEdge(maxLocalHeight, 0) + } + + return maxHeight, edges +} + +func (sc *stackCheck) findRoots() []loader.Sym { + // Collect all nodes. + nodes := make(map[loader.Sym]struct{}) + for k := range sc.graph { + nodes[k] = struct{}{} + } + + // Start a DFS from each node and delete all reachable + // children. If we encounter an unrooted cycle, this will + // delete everything in that cycle, so we detect this case and + // track the lowest-numbered node encountered in the cycle and + // put that node back as a root. + var walk func(origin, sym loader.Sym) (cycle bool, lowest loader.Sym) + walk = func(origin, sym loader.Sym) (cycle bool, lowest loader.Sym) { + if _, ok := nodes[sym]; !ok { + // We already deleted this node. + return false, 0 + } + delete(nodes, sym) + + if origin == sym { + // We found an unrooted cycle. We already + // deleted all children of this node. Walk + // back up, tracking the lowest numbered + // symbol in this cycle. + return true, sym + } + + // Delete children of this node. + for _, out := range sc.graph[sym] { + if c, l := walk(origin, out.target); c { + cycle = true + if lowest == 0 { + // On first cycle detection, + // add sym to the set of + // lowest-numbered candidates. + lowest = sym + } + if l < lowest { + lowest = l + } + } + } + return + } + for k := range nodes { + // Delete all children of k. 
+ for _, out := range sc.graph[k] { + if cycle, lowest := walk(k, out.target); cycle { + // This is an unrooted cycle so we + // just deleted everything. Put back + // the lowest-numbered symbol. + nodes[lowest] = struct{}{} + } + } + } + + // Sort roots by height. This makes the result deterministic + // and also improves the error reporting. + var roots []loader.Sym + for k := range nodes { + roots = append(roots, k) + } + sort.Slice(roots, func(i, j int) bool { + h1, h2 := sc.height[roots[i]], sc.height[roots[j]] + if h1 != h2 { + return h1 > h2 + } + // Secondary sort by Sym. + return roots[i] < roots[j] + }) + return roots +} + +type stackCheckChain struct { + stackCheckEdge + printed bool +} + +func (sc *stackCheck) report(sym loader.Sym, depth int, chain *[]stackCheckChain) { + // Walk the out-edges of sym. We temporarily pull the edges + // out of the graph to detect cycles and prevent infinite + // recursion. + edges, ok := sc.graph[sym] + isCycle := !(ok || sym == 0) + delete(sc.graph, sym) + for _, out := range edges { + *chain = append(*chain, stackCheckChain{out, false}) + sc.report(out.target, depth-out.growth, chain) + *chain = (*chain)[:len(*chain)-1] + } + sc.graph[sym] = edges + + // If we've reached the end of a chain and it went over the + // stack limit or was a cycle that would eventually go over, + // print the whole chain. + // + // We should either be in morestack (which has no out-edges) + // or the sentinel 0 Sym "called" from a leaf function (which + // has no out-edges), or we came back around a cycle (possibly + // to ourselves) and edges was temporarily nil'd. + if len(edges) == 0 && (depth < 0 || isCycle) { + var indent string + for i := range *chain { + ent := &(*chain)[i] + if ent.printed { + // Already printed on an earlier part + // of this call tree. + continue + } + ent.printed = true + + if i == 0 { + // chain[0] is just the root function, + // not a stack growth. 
+ fmt.Printf("%s\n", sc.symName(ent.target)) + continue + } + + indent = strings.Repeat(" ", i) + fmt.Print(indent) + // Grows the stack X bytes and (maybe) calls Y. + fmt.Printf("grows %d bytes", ent.growth) + if ent.target == 0 { + // Not a call, just a leaf. Print nothing. + } else { + fmt.Printf(", calls %s", sc.symName(ent.target)) + } + fmt.Printf("\n") + } + // Print how far over this chain went. + if isCycle { + fmt.Printf("%sinfinite cycle\n", indent) + } else { + fmt.Printf("%s%d bytes over limit\n", indent, -depth) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/stackcheck_test.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/stackcheck_test.go new file mode 100644 index 0000000000000000000000000000000000000000..dd7e20528f095958b7e1d869641250378807f972 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/stackcheck_test.go @@ -0,0 +1,87 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "fmt" + "internal/testenv" + "os" + "regexp" + "strconv" + "testing" +) + +// See also $GOROOT/test/nosplit.go for multi-platform edge case tests. + +func TestStackCheckOutput(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", os.DevNull, "./testdata/stackcheck") + // The rules for computing frame sizes on all of the + // architectures are complicated, so just do this on amd64. + cmd.Env = append(os.Environ(), "GOARCH=amd64", "GOOS=linux") + outB, err := cmd.CombinedOutput() + + if err == nil { + t.Fatalf("expected link to fail") + } + out := string(outB) + + t.Logf("linker output:\n%s", out) + + // Get expected limit. 
+ limitRe := regexp.MustCompile(`nosplit stack over (\d+) byte limit`) + m := limitRe.FindStringSubmatch(out) + if m == nil { + t.Fatalf("no overflow errors in output") + } + limit, _ := strconv.Atoi(m[1]) + + wantMap := map[string]string{ + "main.startSelf": fmt.Sprintf( + `main.startSelf<0> + grows 1008 bytes + %d bytes over limit +`, 1008-limit), + "main.startChain": fmt.Sprintf( + `main.startChain<0> + grows 32 bytes, calls main.chain0<0> + grows 48 bytes, calls main.chainEnd<0> + grows 1008 bytes + %d bytes over limit + grows 32 bytes, calls main.chain2<0> + grows 80 bytes, calls main.chainEnd<0> + grows 1008 bytes + %d bytes over limit +`, 32+48+1008-limit, 32+80+1008-limit), + "main.startRec": `main.startRec<0> + grows 8 bytes, calls main.startRec0<0> + grows 8 bytes, calls main.startRec<0> + infinite cycle +`, + } + + // Parse stanzas + stanza := regexp.MustCompile(`^(.*): nosplit stack over \d+ byte limit\n(.*\n(?: .*\n)*)`) + // Strip comments from cmd/go + out = regexp.MustCompile(`(?m)^#.*\n`).ReplaceAllString(out, "") + for len(out) > 0 { + m := stanza.FindStringSubmatch(out) + if m == nil { + t.Fatalf("unexpected output:\n%s", out) + } + out = out[len(m[0]):] + fn := m[1] + got := m[2] + + want, ok := wantMap[fn] + if !ok { + t.Errorf("unexpected function: %s", fn) + } else if want != got { + t.Errorf("want:\n%sgot:\n%s", want, got) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/sym.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/sym.go new file mode 100644 index 0000000000000000000000000000000000000000..6ae110602e4946db0946d40a9fc170f1ce95a0a6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/sym.go @@ -0,0 +1,117 @@ +// Derived from Inferno utils/6l/obj.c and utils/6l/span.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/obj.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/span.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. 
All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package ld + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "internal/buildcfg" + "log" + "runtime" +) + +func linknew(arch *sys.Arch) *Link { + ler := loader.ErrorReporter{AfterErrorAction: afterErrorAction} + ctxt := &Link{ + Target: Target{Arch: arch}, + version: sym.SymVerStatic, + outSem: make(chan int, 2*runtime.GOMAXPROCS(0)), + Out: NewOutBuf(arch), + LibraryByPkg: make(map[string]*sym.Library), + numelfsym: 1, + ErrorReporter: ErrorReporter{ErrorReporter: ler}, + generatorSyms: make(map[loader.Sym]generatorFunc), + } + + if buildcfg.GOARCH != arch.Name { + log.Fatalf("invalid buildcfg.GOARCH %s (want %s)", buildcfg.GOARCH, arch.Name) + } + + AtExit(func() { + if nerrors > 0 { + ctxt.Out.ErrorClose() + mayberemoveoutfile() + } + }) + + return ctxt +} + +// computeTLSOffset records the thread-local storage offset. +// Not used for Android where the TLS offset is determined at runtime. +func (ctxt *Link) computeTLSOffset() { + switch ctxt.HeadType { + default: + log.Fatalf("unknown thread-local storage offset for %v", ctxt.HeadType) + + case objabi.Hplan9, objabi.Hwindows, objabi.Hjs, objabi.Hwasip1, objabi.Haix: + break + + case objabi.Hlinux, + objabi.Hfreebsd, + objabi.Hnetbsd, + objabi.Hopenbsd, + objabi.Hdragonfly, + objabi.Hsolaris: + /* + * ELF uses TLS offset negative from FS. + * Translate 0(FS) and 8(FS) into -16(FS) and -8(FS). + * Known to low-level assembly in package runtime and runtime/cgo. + */ + ctxt.Tlsoffset = -1 * ctxt.Arch.PtrSize + + case objabi.Hdarwin: + /* + * OS X system constants - offset from 0(GS) to our TLS. + */ + switch ctxt.Arch.Family { + default: + log.Fatalf("unknown thread-local storage offset for darwin/%s", ctxt.Arch.Name) + + /* + * For x86, Apple has reserved a slot in the TLS for Go. See issue 23617. + * That slot is at offset 0x30 on amd64. + * The slot will hold the G pointer. 
+ * These constants should match those in runtime/sys_darwin_amd64.s + * and runtime/cgo/gcc_darwin_amd64.c. + */ + case sys.AMD64: + ctxt.Tlsoffset = 0x30 + + case sys.ARM64: + ctxt.Tlsoffset = 0 // dummy value, not needed + } + } + +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/symtab.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/symtab.go new file mode 100644 index 0000000000000000000000000000000000000000..01f9780d8b44805e89f741a96f553a91f5dc934a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/symtab.go @@ -0,0 +1,906 @@ +// Inferno utils/6l/span.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/span.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package ld + +import ( + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" + "fmt" + "internal/buildcfg" + "path/filepath" + "strings" +) + +// Symbol table. + +func putelfstr(s string) int { + if len(elfstrdat) == 0 && s != "" { + // first entry must be empty string + putelfstr("") + } + + off := len(elfstrdat) + elfstrdat = append(elfstrdat, s...) + elfstrdat = append(elfstrdat, 0) + return off +} + +func putelfsyment(out *OutBuf, off int, addr int64, size int64, info uint8, shndx elf.SectionIndex, other int) { + if elf64 { + out.Write32(uint32(off)) + out.Write8(info) + out.Write8(uint8(other)) + out.Write16(uint16(shndx)) + out.Write64(uint64(addr)) + out.Write64(uint64(size)) + symSize += ELF64SYMSIZE + } else { + out.Write32(uint32(off)) + out.Write32(uint32(addr)) + out.Write32(uint32(size)) + out.Write8(info) + out.Write8(uint8(other)) + out.Write16(uint16(shndx)) + symSize += ELF32SYMSIZE + } +} + +func putelfsym(ctxt *Link, x loader.Sym, typ elf.SymType, curbind elf.SymBind) { + ldr := ctxt.loader + addr := ldr.SymValue(x) + size := ldr.SymSize(x) + + xo := x + if ldr.OuterSym(x) != 0 { + xo = ldr.OuterSym(x) + } + xot := ldr.SymType(xo) + xosect := ldr.SymSect(xo) + + var elfshnum elf.SectionIndex + if xot == sym.SDYNIMPORT || xot == sym.SHOSTOBJ || xot == sym.SUNDEFEXT { + elfshnum = elf.SHN_UNDEF + size = 0 + } else { + if xosect == nil { + ldr.Errorf(x, "missing section in putelfsym") + return + } + if 
xosect.Elfsect == nil { + ldr.Errorf(x, "missing ELF section in putelfsym") + return + } + elfshnum = xosect.Elfsect.(*ElfShdr).shnum + } + + sname := ldr.SymExtname(x) + sname = mangleABIName(ctxt, ldr, x, sname) + + // One pass for each binding: elf.STB_LOCAL, elf.STB_GLOBAL, + // maybe one day elf.STB_WEAK. + bind := elf.STB_GLOBAL + if ldr.IsFileLocal(x) && !isStaticTmp(sname) || ldr.AttrVisibilityHidden(x) || ldr.AttrLocal(x) { + // Static tmp is package local, but a package can be shared among multiple DSOs. + // They need to have a single view of the static tmp that are writable. + bind = elf.STB_LOCAL + } + + // In external linking mode, we have to invoke gcc with -rdynamic + // to get the exported symbols put into the dynamic symbol table. + // To avoid filling the dynamic table with lots of unnecessary symbols, + // mark all Go symbols local (not global) in the final executable. + // But when we're dynamically linking, we need all those global symbols. + if !ctxt.DynlinkingGo() && ctxt.IsExternal() && !ldr.AttrCgoExportStatic(x) && elfshnum != elf.SHN_UNDEF { + bind = elf.STB_LOCAL + } + + if ctxt.LinkMode == LinkExternal && elfshnum != elf.SHN_UNDEF { + addr -= int64(xosect.Vaddr) + } + other := int(elf.STV_DEFAULT) + if ldr.AttrVisibilityHidden(x) { + // TODO(mwhudson): We only set AttrVisibilityHidden in ldelf, i.e. when + // internally linking. But STV_HIDDEN visibility only matters in object + // files and shared libraries, and as we are a long way from implementing + // internal linking for shared libraries and only create object files when + // externally linking, I don't think this makes a lot of sense. + other = int(elf.STV_HIDDEN) + } + if ctxt.IsPPC64() && typ == elf.STT_FUNC && ldr.AttrShared(x) { + // On ppc64 the top three bits of the st_other field indicate how many + // bytes separate the global and local entry points. For non-PCrel shared + // symbols this is always 8 bytes except for some special functions. 
+ hasPCrel := buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux" + + // This should match the preprocessing behavior in cmd/internal/obj/ppc64/obj9.go + // where the distinct global entry is inserted. + if !hasPCrel && ldr.SymName(x) != "runtime.duffzero" && ldr.SymName(x) != "runtime.duffcopy" { + other |= 3 << 5 + } + } + + // When dynamically linking, we create Symbols by reading the names from + // the symbol tables of the shared libraries and so the names need to + // match exactly. Tools like DTrace will have to wait for now. + if !ctxt.DynlinkingGo() { + // Rewrite · to . for ASCII-only tools like DTrace (sigh) + sname = strings.Replace(sname, "·", ".", -1) + } + + if ctxt.DynlinkingGo() && bind == elf.STB_GLOBAL && curbind == elf.STB_LOCAL && ldr.SymType(x) == sym.STEXT { + // When dynamically linking, we want references to functions defined + // in this module to always be to the function object, not to the + // PLT. We force this by writing an additional local symbol for every + // global function symbol and making all relocations against the + // global symbol refer to this local symbol instead (see + // (*sym.Symbol).ElfsymForReloc). This is approximately equivalent to the + // ELF linker -Bsymbolic-functions option, but that is buggy on + // several platforms. 
+ putelfsyment(ctxt.Out, putelfstr("local."+sname), addr, size, elf.ST_INFO(elf.STB_LOCAL, typ), elfshnum, other) + ldr.SetSymLocalElfSym(x, int32(ctxt.numelfsym)) + ctxt.numelfsym++ + return + } else if bind != curbind { + return + } + + putelfsyment(ctxt.Out, putelfstr(sname), addr, size, elf.ST_INFO(bind, typ), elfshnum, other) + ldr.SetSymElfSym(x, int32(ctxt.numelfsym)) + ctxt.numelfsym++ +} + +func putelfsectionsym(ctxt *Link, out *OutBuf, s loader.Sym, shndx elf.SectionIndex) { + putelfsyment(out, 0, 0, 0, elf.ST_INFO(elf.STB_LOCAL, elf.STT_SECTION), shndx, 0) + ctxt.loader.SetSymElfSym(s, int32(ctxt.numelfsym)) + ctxt.numelfsym++ +} + +func genelfsym(ctxt *Link, elfbind elf.SymBind) { + ldr := ctxt.loader + + // runtime.text marker symbol(s). + s := ldr.Lookup("runtime.text", 0) + putelfsym(ctxt, s, elf.STT_FUNC, elfbind) + for k, sect := range Segtext.Sections[1:] { + n := k + 1 + if sect.Name != ".text" || (ctxt.IsAIX() && ctxt.IsExternal()) { + // On AIX, runtime.text.X are symbols already in the symtab. + break + } + s = ldr.Lookup(fmt.Sprintf("runtime.text.%d", n), 0) + if s == 0 { + break + } + if ldr.SymType(s) != sym.STEXT { + panic("unexpected type for runtime.text symbol") + } + putelfsym(ctxt, s, elf.STT_FUNC, elfbind) + } + + // Text symbols. + for _, s := range ctxt.Textp { + putelfsym(ctxt, s, elf.STT_FUNC, elfbind) + } + + // runtime.etext marker symbol. + s = ldr.Lookup("runtime.etext", 0) + if ldr.SymType(s) == sym.STEXT { + putelfsym(ctxt, s, elf.STT_FUNC, elfbind) + } + + shouldBeInSymbolTable := func(s loader.Sym) bool { + if ldr.AttrNotInSymbolTable(s) { + return false + } + // FIXME: avoid having to do name inspections here. + // NB: the restrictions below on file local symbols are a bit + // arbitrary -- if it turns out we need nameless static + // symbols they could be relaxed/removed. 
+ sn := ldr.SymName(s) + if (sn == "" || sn[0] == '.') && ldr.IsFileLocal(s) { + panic(fmt.Sprintf("unexpected file local symbol %d %s<%d>\n", + s, sn, ldr.SymVersion(s))) + } + if (sn == "" || sn[0] == '.') && !ldr.IsFileLocal(s) { + return false + } + return true + } + + // Data symbols. + for s := loader.Sym(1); s < loader.Sym(ldr.NSym()); s++ { + if !ldr.AttrReachable(s) { + continue + } + st := ldr.SymType(s) + if st >= sym.SELFRXSECT && st < sym.SXREF { + typ := elf.STT_OBJECT + if st == sym.STLSBSS { + if ctxt.IsInternal() { + continue + } + typ = elf.STT_TLS + } + if !shouldBeInSymbolTable(s) { + continue + } + putelfsym(ctxt, s, typ, elfbind) + continue + } + if st == sym.SHOSTOBJ || st == sym.SDYNIMPORT || st == sym.SUNDEFEXT { + putelfsym(ctxt, s, ldr.SymElfType(s), elfbind) + } + } +} + +func asmElfSym(ctxt *Link) { + + // the first symbol entry is reserved + putelfsyment(ctxt.Out, 0, 0, 0, elf.ST_INFO(elf.STB_LOCAL, elf.STT_NOTYPE), 0, 0) + + dwarfaddelfsectionsyms(ctxt) + + // Some linkers will add a FILE sym if one is not present. + // Avoid having the working directory inserted into the symbol table. + // It is added with a name to avoid problems with external linking + // encountered on some versions of Solaris. See issue #14957. 
+ putelfsyment(ctxt.Out, putelfstr("go.go"), 0, 0, elf.ST_INFO(elf.STB_LOCAL, elf.STT_FILE), elf.SHN_ABS, 0) + ctxt.numelfsym++ + + bindings := []elf.SymBind{elf.STB_LOCAL, elf.STB_GLOBAL} + for _, elfbind := range bindings { + if elfbind == elf.STB_GLOBAL { + elfglobalsymndx = ctxt.numelfsym + } + genelfsym(ctxt, elfbind) + } +} + +func putplan9sym(ctxt *Link, ldr *loader.Loader, s loader.Sym, char SymbolType) { + t := int(char) + if ldr.IsFileLocal(s) { + t += 'a' - 'A' + } + l := 4 + addr := ldr.SymValue(s) + if ctxt.IsAMD64() && !flag8 { + ctxt.Out.Write32b(uint32(addr >> 32)) + l = 8 + } + + ctxt.Out.Write32b(uint32(addr)) + ctxt.Out.Write8(uint8(t + 0x80)) /* 0x80 is variable length */ + + name := ldr.SymName(s) + name = mangleABIName(ctxt, ldr, s, name) + ctxt.Out.WriteString(name) + ctxt.Out.Write8(0) + + symSize += int32(l) + 1 + int32(len(name)) + 1 +} + +func asmbPlan9Sym(ctxt *Link) { + ldr := ctxt.loader + + // Add special runtime.text and runtime.etext symbols. + s := ldr.Lookup("runtime.text", 0) + if ldr.SymType(s) == sym.STEXT { + putplan9sym(ctxt, ldr, s, TextSym) + } + s = ldr.Lookup("runtime.etext", 0) + if ldr.SymType(s) == sym.STEXT { + putplan9sym(ctxt, ldr, s, TextSym) + } + + // Add text symbols. + for _, s := range ctxt.Textp { + putplan9sym(ctxt, ldr, s, TextSym) + } + + shouldBeInSymbolTable := func(s loader.Sym) bool { + if ldr.AttrNotInSymbolTable(s) { + return false + } + name := ldr.SymName(s) // TODO: try not to read the name + if name == "" || name[0] == '.' { + return false + } + return true + } + + // Add data symbols and external references. 
+ for s := loader.Sym(1); s < loader.Sym(ldr.NSym()); s++ { + if !ldr.AttrReachable(s) { + continue + } + t := ldr.SymType(s) + if t >= sym.SELFRXSECT && t < sym.SXREF { // data sections handled in dodata + if t == sym.STLSBSS { + continue + } + if !shouldBeInSymbolTable(s) { + continue + } + char := DataSym + if t == sym.SBSS || t == sym.SNOPTRBSS { + char = BSSSym + } + putplan9sym(ctxt, ldr, s, char) + } + } +} + +type byPkg []*sym.Library + +func (libs byPkg) Len() int { + return len(libs) +} + +func (libs byPkg) Less(a, b int) bool { + return libs[a].Pkg < libs[b].Pkg +} + +func (libs byPkg) Swap(a, b int) { + libs[a], libs[b] = libs[b], libs[a] +} + +// Create a table with information on the text sections. +// Return the symbol of the table, and number of sections. +func textsectionmap(ctxt *Link) (loader.Sym, uint32) { + ldr := ctxt.loader + t := ldr.CreateSymForUpdate("runtime.textsectionmap", 0) + t.SetType(sym.SRODATA) + nsections := int64(0) + + for _, sect := range Segtext.Sections { + if sect.Name == ".text" { + nsections++ + } else { + break + } + } + t.Grow(3 * nsections * int64(ctxt.Arch.PtrSize)) + + off := int64(0) + n := 0 + + // The vaddr for each text section is the difference between the section's + // Vaddr and the Vaddr for the first text section as determined at compile + // time. + + // The symbol for the first text section is named runtime.text as before. + // Additional text sections are named runtime.text.n where n is the + // order of creation starting with 1. These symbols provide the section's + // address after relocation by the linker. + + textbase := Segtext.Sections[0].Vaddr + for _, sect := range Segtext.Sections { + if sect.Name != ".text" { + break + } + // The fields written should match runtime/symtab.go:textsect. + // They are designed to minimize runtime calculations. 
+ vaddr := sect.Vaddr - textbase + off = t.SetUint(ctxt.Arch, off, vaddr) // field vaddr + end := vaddr + sect.Length + off = t.SetUint(ctxt.Arch, off, end) // field end + name := "runtime.text" + if n != 0 { + name = fmt.Sprintf("runtime.text.%d", n) + } + s := ldr.Lookup(name, 0) + if s == 0 { + ctxt.Errorf(s, "Unable to find symbol %s\n", name) + } + off = t.SetAddr(ctxt.Arch, off, s) // field baseaddr + n++ + } + return t.Sym(), uint32(n) +} + +func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { + ldr := ctxt.loader + + if !ctxt.IsAIX() { + switch ctxt.BuildMode { + case BuildModeCArchive, BuildModeCShared: + s := ldr.Lookup(*flagEntrySymbol, sym.SymVerABI0) + if s != 0 { + addinitarrdata(ctxt, ldr, s) + } + } + } + + // Define these so that they'll get put into the symbol table. + // data.c:/^address will provide the actual values. + ctxt.xdefine("runtime.rodata", sym.SRODATA, 0) + ctxt.xdefine("runtime.erodata", sym.SRODATA, 0) + ctxt.xdefine("runtime.types", sym.SRODATA, 0) + ctxt.xdefine("runtime.etypes", sym.SRODATA, 0) + ctxt.xdefine("runtime.noptrdata", sym.SNOPTRDATA, 0) + ctxt.xdefine("runtime.enoptrdata", sym.SNOPTRDATA, 0) + ctxt.xdefine("runtime.data", sym.SDATA, 0) + ctxt.xdefine("runtime.edata", sym.SDATA, 0) + ctxt.xdefine("runtime.bss", sym.SBSS, 0) + ctxt.xdefine("runtime.ebss", sym.SBSS, 0) + ctxt.xdefine("runtime.noptrbss", sym.SNOPTRBSS, 0) + ctxt.xdefine("runtime.enoptrbss", sym.SNOPTRBSS, 0) + ctxt.xdefine("runtime.covctrs", sym.SNOPTRBSS, 0) + ctxt.xdefine("runtime.ecovctrs", sym.SNOPTRBSS, 0) + ctxt.xdefine("runtime.end", sym.SBSS, 0) + ctxt.xdefine("runtime.epclntab", sym.SRODATA, 0) + ctxt.xdefine("runtime.esymtab", sym.SRODATA, 0) + + // garbage collection symbols + s := ldr.CreateSymForUpdate("runtime.gcdata", 0) + s.SetType(sym.SRODATA) + s.SetSize(0) + ctxt.xdefine("runtime.egcdata", sym.SRODATA, 0) + + s = ldr.CreateSymForUpdate("runtime.gcbss", 0) + s.SetType(sym.SRODATA) + s.SetSize(0) + ctxt.xdefine("runtime.egcbss", 
sym.SRODATA, 0) + + // pseudo-symbols to mark locations of type, string, and go string data. + var symtype, symtyperel loader.Sym + if !ctxt.DynlinkingGo() { + if ctxt.UseRelro() && (ctxt.BuildMode == BuildModeCArchive || ctxt.BuildMode == BuildModeCShared || ctxt.BuildMode == BuildModePIE) { + s = ldr.CreateSymForUpdate("type:*", 0) + s.SetType(sym.STYPE) + s.SetSize(0) + s.SetAlign(int32(ctxt.Arch.PtrSize)) + symtype = s.Sym() + + s = ldr.CreateSymForUpdate("typerel.*", 0) + s.SetType(sym.STYPERELRO) + s.SetSize(0) + s.SetAlign(int32(ctxt.Arch.PtrSize)) + symtyperel = s.Sym() + } else { + s = ldr.CreateSymForUpdate("type:*", 0) + s.SetType(sym.STYPE) + s.SetSize(0) + s.SetAlign(int32(ctxt.Arch.PtrSize)) + symtype = s.Sym() + symtyperel = s.Sym() + } + setCarrierSym(sym.STYPE, symtype) + setCarrierSym(sym.STYPERELRO, symtyperel) + } + + groupSym := func(name string, t sym.SymKind) loader.Sym { + s := ldr.CreateSymForUpdate(name, 0) + s.SetType(t) + s.SetSize(0) + s.SetAlign(int32(ctxt.Arch.PtrSize)) + s.SetLocal(true) + setCarrierSym(t, s.Sym()) + return s.Sym() + } + var ( + symgostring = groupSym("go:string.*", sym.SGOSTRING) + symgofunc = groupSym("go:func.*", sym.SGOFUNC) + symgcbits = groupSym("runtime.gcbits.*", sym.SGCBITS) + ) + + symgofuncrel := symgofunc + if ctxt.UseRelro() { + symgofuncrel = groupSym("go:funcrel.*", sym.SGOFUNCRELRO) + } + + symt := ldr.CreateSymForUpdate("runtime.symtab", 0) + symt.SetType(sym.SSYMTAB) + symt.SetSize(0) + symt.SetLocal(true) + + // assign specific types so that they sort together. + // within a type they sort by size, so the .* symbols + // just defined above will be first. + // hide the specific symbols. + // Some of these symbol section conditions are duplicated + // in cmd/internal/obj.contentHashSection. 
+ nsym := loader.Sym(ldr.NSym()) + symGroupType := make([]sym.SymKind, nsym) + for s := loader.Sym(1); s < nsym; s++ { + if (!ctxt.IsExternal() && ldr.IsFileLocal(s) && !ldr.IsFromAssembly(s) && ldr.SymPkg(s) != "") || (ctxt.LinkMode == LinkInternal && ldr.SymType(s) == sym.SCOVERAGE_COUNTER) { + ldr.SetAttrNotInSymbolTable(s, true) + } + if !ldr.AttrReachable(s) || ldr.AttrSpecial(s) || (ldr.SymType(s) != sym.SRODATA && ldr.SymType(s) != sym.SGOFUNC) { + continue + } + + name := ldr.SymName(s) + switch { + case strings.HasPrefix(name, "go:string."): + symGroupType[s] = sym.SGOSTRING + ldr.SetAttrNotInSymbolTable(s, true) + ldr.SetCarrierSym(s, symgostring) + + case strings.HasPrefix(name, "runtime.gcbits."), + strings.HasPrefix(name, "type:.gcprog."): + symGroupType[s] = sym.SGCBITS + ldr.SetAttrNotInSymbolTable(s, true) + ldr.SetCarrierSym(s, symgcbits) + + case strings.HasSuffix(name, "·f"): + if !ctxt.DynlinkingGo() { + ldr.SetAttrNotInSymbolTable(s, true) + } + if ctxt.UseRelro() { + symGroupType[s] = sym.SGOFUNCRELRO + if !ctxt.DynlinkingGo() { + ldr.SetCarrierSym(s, symgofuncrel) + } + } else { + symGroupType[s] = sym.SGOFUNC + ldr.SetCarrierSym(s, symgofunc) + } + + case strings.HasPrefix(name, "gcargs."), + strings.HasPrefix(name, "gclocals."), + strings.HasPrefix(name, "gclocals·"), + ldr.SymType(s) == sym.SGOFUNC && s != symgofunc, // inltree, see pcln.go + strings.HasSuffix(name, ".opendefer"), + strings.HasSuffix(name, ".arginfo0"), + strings.HasSuffix(name, ".arginfo1"), + strings.HasSuffix(name, ".argliveinfo"), + strings.HasSuffix(name, ".wrapinfo"), + strings.HasSuffix(name, ".args_stackmap"), + strings.HasSuffix(name, ".stkobj"): + ldr.SetAttrNotInSymbolTable(s, true) + symGroupType[s] = sym.SGOFUNC + ldr.SetCarrierSym(s, symgofunc) + if ctxt.Debugvlog != 0 { + align := ldr.SymAlign(s) + liveness += (ldr.SymSize(s) + int64(align) - 1) &^ (int64(align) - 1) + } + + // Note: Check for "type:" prefix after checking for .arginfo1 suffix. 
+ // That way symbols like "type:.eq.[2]interface {}.arginfo1" that belong + // in go:func.* end up there. + case strings.HasPrefix(name, "type:"): + if !ctxt.DynlinkingGo() { + ldr.SetAttrNotInSymbolTable(s, true) + } + if ctxt.UseRelro() { + symGroupType[s] = sym.STYPERELRO + if symtyperel != 0 { + ldr.SetCarrierSym(s, symtyperel) + } + } else { + symGroupType[s] = sym.STYPE + if symtyperel != 0 { + ldr.SetCarrierSym(s, symtype) + } + } + } + } + + if ctxt.BuildMode == BuildModeShared { + abihashgostr := ldr.CreateSymForUpdate("go:link.abihash."+filepath.Base(*flagOutfile), 0) + abihashgostr.SetType(sym.SRODATA) + hashsym := ldr.LookupOrCreateSym("go:link.abihashbytes", 0) + abihashgostr.AddAddr(ctxt.Arch, hashsym) + abihashgostr.AddUint(ctxt.Arch, uint64(ldr.SymSize(hashsym))) + } + if ctxt.BuildMode == BuildModePlugin || ctxt.CanUsePlugins() { + for _, l := range ctxt.Library { + s := ldr.CreateSymForUpdate("go:link.pkghashbytes."+l.Pkg, 0) + s.SetType(sym.SRODATA) + s.SetSize(int64(len(l.Fingerprint))) + s.SetData(l.Fingerprint[:]) + str := ldr.CreateSymForUpdate("go:link.pkghash."+l.Pkg, 0) + str.SetType(sym.SRODATA) + str.AddAddr(ctxt.Arch, s.Sym()) + str.AddUint(ctxt.Arch, uint64(len(l.Fingerprint))) + } + } + + textsectionmapSym, nsections := textsectionmap(ctxt) + + // Information about the layout of the executable image for the + // runtime to use. Any changes here must be matched by changes to + // the definition of moduledata in runtime/symtab.go. + // This code uses several global variables that are set by pcln.go:pclntab. 
+ moduledata := ldr.MakeSymbolUpdater(ctxt.Moduledata) + + slice := func(sym loader.Sym, len uint64) { + moduledata.AddAddr(ctxt.Arch, sym) + moduledata.AddUint(ctxt.Arch, len) + moduledata.AddUint(ctxt.Arch, len) + } + + sliceSym := func(sym loader.Sym) { + slice(sym, uint64(ldr.SymSize(sym))) + } + + nilSlice := func() { + moduledata.AddUint(ctxt.Arch, 0) + moduledata.AddUint(ctxt.Arch, 0) + moduledata.AddUint(ctxt.Arch, 0) + } + + // The pcHeader + moduledata.AddAddr(ctxt.Arch, pcln.pcheader) + + // The function name slice + sliceSym(pcln.funcnametab) + + // The cutab slice + sliceSym(pcln.cutab) + + // The filetab slice + sliceSym(pcln.filetab) + + // The pctab slice + sliceSym(pcln.pctab) + + // The pclntab slice + slice(pcln.pclntab, uint64(ldr.SymSize(pcln.pclntab))) + + // The ftab slice + slice(pcln.pclntab, uint64(pcln.nfunc+1)) + + // findfunctab + moduledata.AddAddr(ctxt.Arch, pcln.findfunctab) + // minpc, maxpc + moduledata.AddAddr(ctxt.Arch, pcln.firstFunc) + moduledata.AddAddrPlus(ctxt.Arch, pcln.lastFunc, ldr.SymSize(pcln.lastFunc)) + // pointers to specific parts of the module + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.text", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.etext", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.noptrdata", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.enoptrdata", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.data", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.edata", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.bss", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.ebss", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.noptrbss", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.enoptrbss", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.covctrs", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.ecovctrs", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.end", 0)) + moduledata.AddAddr(ctxt.Arch, 
ldr.Lookup("runtime.gcdata", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.gcbss", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.types", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.etypes", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("runtime.rodata", 0)) + moduledata.AddAddr(ctxt.Arch, ldr.Lookup("go:func.*", 0)) + + if ctxt.IsAIX() && ctxt.IsExternal() { + // Add R_XCOFFREF relocation to prevent ld's garbage collection of + // the following symbols. They might not be referenced in the program. + addRef := func(name string) { + s := ldr.Lookup(name, 0) + if s == 0 { + return + } + r, _ := moduledata.AddRel(objabi.R_XCOFFREF) + r.SetSym(s) + r.SetSiz(uint8(ctxt.Arch.PtrSize)) + } + addRef("runtime.rodata") + addRef("runtime.erodata") + addRef("runtime.epclntab") + // As we use relative addressing for text symbols in functab, it is + // important that the offsets we computed stay unchanged by the external + // linker, i.e. all symbols in Textp should not be removed. + // Most of them are actually referenced (our deadcode pass ensures that), + // except go:buildid which is generated late and not used by the program. 
+ addRef("go:buildid") + } + + // text section information + slice(textsectionmapSym, uint64(nsections)) + + // The typelinks slice + typelinkSym := ldr.Lookup("runtime.typelink", 0) + ntypelinks := uint64(ldr.SymSize(typelinkSym)) / 4 + slice(typelinkSym, ntypelinks) + + // The itablinks slice + itablinkSym := ldr.Lookup("runtime.itablink", 0) + nitablinks := uint64(ldr.SymSize(itablinkSym)) / uint64(ctxt.Arch.PtrSize) + slice(itablinkSym, nitablinks) + + // The ptab slice + if ptab := ldr.Lookup("go:plugin.tabs", 0); ptab != 0 && ldr.AttrReachable(ptab) { + ldr.SetAttrLocal(ptab, true) + if ldr.SymType(ptab) != sym.SRODATA { + panic(fmt.Sprintf("go:plugin.tabs is %v, not SRODATA", ldr.SymType(ptab))) + } + nentries := uint64(len(ldr.Data(ptab)) / 8) // sizeof(nameOff) + sizeof(typeOff) + slice(ptab, nentries) + } else { + nilSlice() + } + + if ctxt.BuildMode == BuildModePlugin { + addgostring(ctxt, ldr, moduledata, "go:link.thispluginpath", objabi.PathToPrefix(*flagPluginPath)) + + pkghashes := ldr.CreateSymForUpdate("go:link.pkghashes", 0) + pkghashes.SetLocal(true) + pkghashes.SetType(sym.SRODATA) + + for i, l := range ctxt.Library { + // pkghashes[i].name + addgostring(ctxt, ldr, pkghashes, fmt.Sprintf("go:link.pkgname.%d", i), l.Pkg) + // pkghashes[i].linktimehash + addgostring(ctxt, ldr, pkghashes, fmt.Sprintf("go:link.pkglinkhash.%d", i), string(l.Fingerprint[:])) + // pkghashes[i].runtimehash + hash := ldr.Lookup("go:link.pkghash."+l.Pkg, 0) + pkghashes.AddAddr(ctxt.Arch, hash) + } + slice(pkghashes.Sym(), uint64(len(ctxt.Library))) + } else { + moduledata.AddUint(ctxt.Arch, 0) // pluginpath + moduledata.AddUint(ctxt.Arch, 0) + nilSlice() // pkghashes slice + } + // Add inittasks slice + t := ctxt.mainInittasks + if t != 0 { + moduledata.AddAddr(ctxt.Arch, t) + moduledata.AddUint(ctxt.Arch, uint64(ldr.SymSize(t)/int64(ctxt.Arch.PtrSize))) + moduledata.AddUint(ctxt.Arch, uint64(ldr.SymSize(t)/int64(ctxt.Arch.PtrSize))) + } else { + // Some build modes have 
no inittasks, like a shared library. + // Its inittask list will be constructed by a higher-level + // linking step. + // This branch can also happen if there are no init tasks at all. + moduledata.AddUint(ctxt.Arch, 0) + moduledata.AddUint(ctxt.Arch, 0) + moduledata.AddUint(ctxt.Arch, 0) + } + + if len(ctxt.Shlibs) > 0 { + thismodulename := filepath.Base(*flagOutfile) + switch ctxt.BuildMode { + case BuildModeExe, BuildModePIE: + // When linking an executable, outfile is just "a.out". Make + // it something slightly more comprehensible. + thismodulename = "the executable" + } + addgostring(ctxt, ldr, moduledata, "go:link.thismodulename", thismodulename) + + modulehashes := ldr.CreateSymForUpdate("go:link.abihashes", 0) + modulehashes.SetLocal(true) + modulehashes.SetType(sym.SRODATA) + + for i, shlib := range ctxt.Shlibs { + // modulehashes[i].modulename + modulename := filepath.Base(shlib.Path) + addgostring(ctxt, ldr, modulehashes, fmt.Sprintf("go:link.libname.%d", i), modulename) + + // modulehashes[i].linktimehash + addgostring(ctxt, ldr, modulehashes, fmt.Sprintf("go:link.linkhash.%d", i), string(shlib.Hash)) + + // modulehashes[i].runtimehash + abihash := ldr.LookupOrCreateSym("go:link.abihash."+modulename, 0) + ldr.SetAttrReachable(abihash, true) + modulehashes.AddAddr(ctxt.Arch, abihash) + } + + slice(modulehashes.Sym(), uint64(len(ctxt.Shlibs))) + } else { + moduledata.AddUint(ctxt.Arch, 0) // modulename + moduledata.AddUint(ctxt.Arch, 0) + nilSlice() // modulehashes slice + } + + hasmain := ctxt.BuildMode == BuildModeExe || ctxt.BuildMode == BuildModePIE + if hasmain { + moduledata.AddUint8(1) + } else { + moduledata.AddUint8(0) + } + + // The rest of moduledata is zero initialized. + // When linking an object that does not contain the runtime we are + // creating the moduledata from scratch and it does not have a + // compiler-provided size, so read it from the type data. 
+ moduledatatype := ldr.Lookup("type:runtime.moduledata", 0) + moduledata.SetSize(decodetypeSize(ctxt.Arch, ldr.Data(moduledatatype))) + moduledata.Grow(moduledata.Size()) + + lastmoduledatap := ldr.CreateSymForUpdate("runtime.lastmoduledatap", 0) + if lastmoduledatap.Type() != sym.SDYNIMPORT { + lastmoduledatap.SetType(sym.SNOPTRDATA) + lastmoduledatap.SetSize(0) // overwrite existing value + lastmoduledatap.SetData(nil) + lastmoduledatap.AddAddr(ctxt.Arch, moduledata.Sym()) + } + return symGroupType +} + +// CarrierSymByType tracks carrier symbols and their sizes. +var CarrierSymByType [sym.SXREF]struct { + Sym loader.Sym + Size int64 +} + +func setCarrierSym(typ sym.SymKind, s loader.Sym) { + if CarrierSymByType[typ].Sym != 0 { + panic(fmt.Sprintf("carrier symbol for type %v already set", typ)) + } + CarrierSymByType[typ].Sym = s +} + +func setCarrierSize(typ sym.SymKind, sz int64) { + if CarrierSymByType[typ].Size != 0 { + panic(fmt.Sprintf("carrier symbol size for type %v already set", typ)) + } + CarrierSymByType[typ].Size = sz +} + +func isStaticTmp(name string) bool { + return strings.Contains(name, "."+obj.StaticNamePref) +} + +// Mangle function name with ABI information. +func mangleABIName(ctxt *Link, ldr *loader.Loader, x loader.Sym, name string) string { + // For functions with ABI wrappers, we have to make sure that we + // don't wind up with two symbol table entries with the same + // name (since this will generate an error from the external + // linker). If we have wrappers, keep the ABIInternal name + // unmangled since we want cross-load-module calls to target + // ABIInternal, and rename other symbols. + // + // TODO: avoid the ldr.Lookup calls below by instead using an aux + // sym or marker relocation to associate the wrapper with the + // wrapped function. 
+ if !buildcfg.Experiment.RegabiWrappers { + return name + } + + if ldr.SymType(x) == sym.STEXT && ldr.SymVersion(x) != sym.SymVerABIInternal && ldr.SymVersion(x) < sym.SymVerStatic { + if s2 := ldr.Lookup(name, sym.SymVerABIInternal); s2 != 0 && ldr.SymType(s2) == sym.STEXT { + name = fmt.Sprintf("%s.abi%d", name, ldr.SymVersion(x)) + } + } + + // When loading a shared library, if a symbol has only one ABI, + // and the name is not mangled, we don't know what ABI it is. + // So we always mangle ABIInternal function name in shared linkage, + // except symbols that are exported to C. Type symbols are always + // ABIInternal so they are not mangled. + if ctxt.IsShared() { + if ldr.SymType(x) == sym.STEXT && ldr.SymVersion(x) == sym.SymVerABIInternal && !ldr.AttrCgoExport(x) && !strings.HasPrefix(name, "type:") { + name = fmt.Sprintf("%s.abiinternal", name) + } + } + + return name +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/target.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/target.go new file mode 100644 index 0000000000000000000000000000000000000000..d0ce99f3e9f9efcde873b19ed6810adb1a37a2e0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/target.go @@ -0,0 +1,206 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "encoding/binary" +) + +// Target holds the configuration we're building for. 
+type Target struct { + Arch *sys.Arch + + HeadType objabi.HeadType + + LinkMode LinkMode + BuildMode BuildMode + + linkShared bool + canUsePlugins bool + IsELF bool +} + +// +// Target type functions +// + +func (t *Target) IsExe() bool { + return t.BuildMode == BuildModeExe +} + +func (t *Target) IsShared() bool { + return t.BuildMode == BuildModeShared +} + +func (t *Target) IsPlugin() bool { + return t.BuildMode == BuildModePlugin +} + +func (t *Target) IsInternal() bool { + return t.LinkMode == LinkInternal +} + +func (t *Target) IsExternal() bool { + return t.LinkMode == LinkExternal +} + +func (t *Target) IsPIE() bool { + return t.BuildMode == BuildModePIE +} + +func (t *Target) IsSharedGoLink() bool { + return t.linkShared +} + +func (t *Target) CanUsePlugins() bool { + return t.canUsePlugins +} + +func (t *Target) IsElf() bool { + t.mustSetHeadType() + return t.IsELF +} + +func (t *Target) IsDynlinkingGo() bool { + return t.IsShared() || t.IsSharedGoLink() || t.IsPlugin() || t.CanUsePlugins() +} + +// UseRelro reports whether to make use of "read only relocations" aka +// relro. +func (t *Target) UseRelro() bool { + switch t.BuildMode { + case BuildModeCArchive, BuildModeCShared, BuildModeShared, BuildModePIE, BuildModePlugin: + return t.IsELF || t.HeadType == objabi.Haix || t.HeadType == objabi.Hdarwin + default: + if t.HeadType == objabi.Hdarwin && t.IsARM64() { + // On darwin/ARM64, everything is PIE. 
+ return true + } + return t.linkShared || (t.HeadType == objabi.Haix && t.LinkMode == LinkExternal) + } +} + +// +// Processor functions +// + +func (t *Target) Is386() bool { + return t.Arch.Family == sys.I386 +} + +func (t *Target) IsARM() bool { + return t.Arch.Family == sys.ARM +} + +func (t *Target) IsARM64() bool { + return t.Arch.Family == sys.ARM64 +} + +func (t *Target) IsAMD64() bool { + return t.Arch.Family == sys.AMD64 +} + +func (t *Target) IsMIPS() bool { + return t.Arch.Family == sys.MIPS +} + +func (t *Target) IsMIPS64() bool { + return t.Arch.Family == sys.MIPS64 +} + +func (t *Target) IsLOONG64() bool { + return t.Arch.Family == sys.Loong64 +} + +func (t *Target) IsPPC64() bool { + return t.Arch.Family == sys.PPC64 +} + +func (t *Target) IsRISCV64() bool { + return t.Arch.Family == sys.RISCV64 +} + +func (t *Target) IsS390X() bool { + return t.Arch.Family == sys.S390X +} + +func (t *Target) IsWasm() bool { + return t.Arch.Family == sys.Wasm +} + +// +// OS Functions +// + +func (t *Target) IsLinux() bool { + t.mustSetHeadType() + return t.HeadType == objabi.Hlinux +} + +func (t *Target) IsDarwin() bool { + t.mustSetHeadType() + return t.HeadType == objabi.Hdarwin +} + +func (t *Target) IsWindows() bool { + t.mustSetHeadType() + return t.HeadType == objabi.Hwindows +} + +func (t *Target) IsPlan9() bool { + t.mustSetHeadType() + return t.HeadType == objabi.Hplan9 +} + +func (t *Target) IsAIX() bool { + t.mustSetHeadType() + return t.HeadType == objabi.Haix +} + +func (t *Target) IsSolaris() bool { + t.mustSetHeadType() + return t.HeadType == objabi.Hsolaris +} + +func (t *Target) IsNetbsd() bool { + t.mustSetHeadType() + return t.HeadType == objabi.Hnetbsd +} + +func (t *Target) IsOpenbsd() bool { + t.mustSetHeadType() + return t.HeadType == objabi.Hopenbsd +} + +func (t *Target) IsFreebsd() bool { + t.mustSetHeadType() + return t.HeadType == objabi.Hfreebsd +} + +func (t *Target) mustSetHeadType() { + if t.HeadType == objabi.Hunknown { + 
panic("HeadType is not set") + } +} + +// +// MISC +// + +func (t *Target) IsBigEndian() bool { + return t.Arch.ByteOrder == binary.BigEndian +} + +func (t *Target) UsesLibc() bool { + t.mustSetHeadType() + switch t.HeadType { + case objabi.Haix, objabi.Hdarwin, objabi.Hopenbsd, objabi.Hsolaris, objabi.Hwindows: + // platforms where we use libc for syscalls. + return true + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/globalmap.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/globalmap.go new file mode 100644 index 0000000000000000000000000000000000000000..35672fe7a392487edf5b4975cbc30fd3a05ca867 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/globalmap.go @@ -0,0 +1,26 @@ +package main + +import "os" + +// Too small to trigger deadcode (currently) +var small = map[string]int{"foo": 1} + +// Has side effects, which prevent deadcode +var effect = map[string]int{"foo": os.Getpid()} + +// Large and side-effect free +var large = map[string]int{ + "11": 1, "12": 2, "13": 3, "14": 4, "15": 5, "16": 6, "17": 7, "18": 8, "19": 9, "110": 10, + "21": 1, "22": 2, "23": 3, "24": 4, "25": 5, "26": 6, "27": 7, "28": 8, "29": 9, "210": 10, + "31": 1, "32": 2, "33": 3, "34": 4, "35": 5, "36": 6, "37": 7, "38": 8, "39": 9, "310": 10, + "41": 1, "42": 2, "43": 3, "44": 4, "45": 5, "46": 6, "47": 7, "48": 8, "49": 9, "410": 10, + "51": 1, "52": 2, "53": 3, "54": 4, "55": 5, "56": 6, "57": 7, "58": 8, "59": 9, "510": 10, + "61": 1, "62": 2, "63": 3, "64": 4, "65": 5, "66": 6, "67": 7, "68": 8, "69": 9, "610": 10, + "71": 1, "72": 2, "73": 3, "74": 4, "75": 5, "76": 6, "77": 7, "78": 8, "79": 9, "710": 10, + "81": 1, "82": 2, "83": 3, "84": 4, "85": 5, "86": 6, "87": 7, "88": 8, "89": 9, "810": 10, + "91": 1, "92": 2, "93": 3, "94": 4, "95": 5, "96": 6, "97": 7, "98": 8, "99": 9, "910": 10, + "101": 1, "102": 2, "103": 3, "104": 4, "105": 5, "106": 6, 
"107": 7, "108": 8, "109": 9, "1010": 10, "1021": 2, +} + +func main() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod.go new file mode 100644 index 0000000000000000000000000000000000000000..32a24cf6f00c9823f3c3dbb21c065c46f68a61ca --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that a method of a reachable type is not necessarily +// live even if it matches an interface method, as long as +// the type is never converted to an interface. + +package main + +type I interface{ M() } + +type T int + +func (T) M() { println("XXX") } + +var p *T +var e interface{} + +func main() { + p = new(T) // used T, but never converted to interface in any reachable code + e.(I).M() // used I and I.M +} + +func Unused() { // convert T to interface, but this function is not reachable + var i I = T(0) + i.M() +} + +var Unused2 interface{} = T(1) // convert T to interface, in an unreachable global initializer diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod2.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod2.go new file mode 100644 index 0000000000000000000000000000000000000000..48ba55d35e763078d16847ee540b4a437a050646 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod2.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Test that a method *is* live if it matches an interface +// method and the type is "indirectly" converted to an +// interface through reflection. + +package main + +import "reflect" + +type I interface{ M() } + +type T int + +func (T) M() { println("XXX") } + +func main() { + e := reflect.ValueOf([]T{1}).Index(0).Interface() + e.(I).M() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod3.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod3.go new file mode 100644 index 0000000000000000000000000000000000000000..37c89374cbebf8c2685fbec59411a9aa9be4eb11 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod3.go @@ -0,0 +1,29 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Like ifacemethod2.go, this tests that a method *is* live +// if the type is "indirectly" converted to an interface +// using reflection with a method descriptor as intermediate. + +package main + +import "reflect" + +type S int + +func (s S) M() { println("S.M") } + +type I interface{ M() } + +type T float64 + +func (t T) F(s S) {} + +func main() { + var t T + ft := reflect.TypeOf(t).Method(0).Type + at := ft.In(1) + v := reflect.New(at).Elem() + v.Interface().(I).M() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod4.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod4.go new file mode 100644 index 0000000000000000000000000000000000000000..4af47ad1fa53eccb792aff52ad9e9ea6a46ce77a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod4.go @@ -0,0 +1,25 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that a live type's method is not live even if +// it matches an interface method, as long as the interface +// method is not used. + +package main + +type T int + +//go:noinline +func (T) M() {} + +type I interface{ M() } + +var p *T +var pp *I + +func main() { + p = new(T) // use type T + pp = new(I) // use type I + *pp = *p // convert T to I, build itab +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod5.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod5.go new file mode 100644 index 0000000000000000000000000000000000000000..2f0bdcc745a1e9034bd61d1092315856a0ad6929 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod5.go @@ -0,0 +1,33 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Like ifacemethod2.go, this tests that a method *is* live +// if the type is "indirectly" converted to an interface +// using reflection with a method descriptor as intermediate. +// However, it uses MethodByName() with a constant name of +// a method to look up. This does not disable the DCE like +// Method(0) does. 
+ +package main + +import "reflect" + +type S int + +func (s S) M() { println("S.M") } + +type I interface{ M() } + +type T float64 + +func (t T) F(s S) {} + +func main() { + var t T + meth, _ := reflect.TypeOf(t).MethodByName("F") + ft := meth.Type + at := ft.In(1) + v := reflect.New(at).Elem() + v.Interface().(I).M() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod6.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod6.go new file mode 100644 index 0000000000000000000000000000000000000000..7eb9419ef192eb3d8ca6767b033a5d86685774fe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod6.go @@ -0,0 +1,31 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test only uses MethodByName() with constant names +// of methods to look up. These methods need to be kept, +// but other methods must be eliminated. + +package main + +import "reflect" + +type S int + +func (s S) M() { println("S.M") } + +func (s S) N() { println("S.N") } + +type T float64 + +func (t T) F(s S) {} + +func main() { + var t T + meth, _ := reflect.TypeOf(t).MethodByName("F") + ft := meth.Type + at := ft.In(1) + v := reflect.New(at).Elem() + methV := v.MethodByName("M") + methV.Call([]reflect.Value{v}) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/reflectcall.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/reflectcall.go new file mode 100644 index 0000000000000000000000000000000000000000..af95e466e8e9aaa6431608ad8b7beb01efcf42b8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/reflectcall.go @@ -0,0 +1,24 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This example uses reflect.Value.Call, but not +// reflect.{Value,Type}.Method. This should not +// need to bring all methods live. + +package main + +import "reflect" + +func f() { println("call") } + +type T int + +func (T) M() {} + +func main() { + v := reflect.ValueOf(f) + v.Call(nil) + i := interface{}(T(1)) + println(i) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/structof_funcof.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/structof_funcof.go new file mode 100644 index 0000000000000000000000000000000000000000..bec5f25fc0a7d3f0bf4cbef6ea100adc2e346a71 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/structof_funcof.go @@ -0,0 +1,52 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Methods of reflect.rtype use StructOf and FuncOf which in turn depend on +// reflect.Value.Method. StructOf and FuncOf must not disable the DCE. 
+ +package main + +import "reflect" + +type S int + +func (s S) M() { println("S.M") } + +func (s S) N() { println("S.N") } + +type T float64 + +func (t T) F(s S) {} + +func useStructOf() { + t := reflect.StructOf([]reflect.StructField{ + { + Name: "X", + Type: reflect.TypeOf(int(0)), + }, + }) + println(t.Name()) +} + +func useFuncOf() { + t := reflect.FuncOf( + []reflect.Type{reflect.TypeOf(int(0))}, + []reflect.Type{reflect.TypeOf(int(0))}, + false, + ) + println(t.Name()) +} + +func main() { + useStructOf() + useFuncOf() + + var t T + meth, _ := reflect.TypeOf(t).MethodByName("F") + ft := meth.Type + at := ft.In(1) + v := reflect.New(at).Elem() + methV := v.MethodByName("M") + methV.Call([]reflect.Value{v}) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/typedesc.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/typedesc.go new file mode 100644 index 0000000000000000000000000000000000000000..82460935e80b85ad421cfdeb4947796475f858c8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/deadcode/typedesc.go @@ -0,0 +1,16 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that a live variable doesn't bring its type +// descriptor live. + +package main + +type T [10]string + +var t T + +func main() { + println(t[8]) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/httptest/main/main.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/httptest/main/main.go new file mode 100644 index 0000000000000000000000000000000000000000..1bce30119afdcdbb094bf2acd471321c437b52a3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/httptest/main/main.go @@ -0,0 +1,22 @@ +// A small test program that uses the net/http package. 
There is +// nothing special about net/http here, this is just a convenient way +// to pull in a lot of code. + +package main + +import ( + "net/http" + "net/http/httptest" +) + +type statusHandler int + +func (h *statusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(int(*h)) +} + +func main() { + status := statusHandler(http.StatusNotFound) + s := httptest.NewServer(&status) + defer s.Close() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue10978/main.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue10978/main.go new file mode 100644 index 0000000000000000000000000000000000000000..5e8c09749f128f2a2438c42151f7e07432afeb32 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue10978/main.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func undefined() + +func defined1() int { + // To check multiple errors for a single symbol, + // reference undefined more than once. + undefined() + undefined() + return 0 +} + +func defined2() { + undefined() + undefined() +} + +func init() { + _ = defined1() + defined2() +} + +// The "main" function remains undeclared. diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue10978/main.s b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue10978/main.s new file mode 100644 index 0000000000000000000000000000000000000000..1d00e76c1dd31dc1067db7b9049613d1bd5e8e5d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue10978/main.s @@ -0,0 +1 @@ +// This file is needed to make "go build" work for package with external functions. 
diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue25459/a/a.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue25459/a/a.go new file mode 100644 index 0000000000000000000000000000000000000000..6032d76f49f89c9a4222e4f98ecc02a79cbdfb55 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue25459/a/a.go @@ -0,0 +1,27 @@ +package a + +const Always = true + +var Count int + +type FuncReturningInt func() int + +var PointerToConstIf FuncReturningInt + +func ConstIf() int { + if Always { + return 1 + } + var imdead [4]int + imdead[Count] = 1 + return imdead[0] +} + +func CallConstIf() int { + Count += 3 + return ConstIf() +} + +func Another() { + defer func() { PointerToConstIf = ConstIf; Count += 1 }() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue25459/main/main.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue25459/main/main.go new file mode 100644 index 0000000000000000000000000000000000000000..7b5796d7144b09a1f872b74961bac062ccd264f4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue25459/main/main.go @@ -0,0 +1,10 @@ +package main + +import "cmd/link/internal/ld/testdata/issue25459/a" + +var Glob int + +func main() { + a.Another() + Glob += a.ConstIf() + a.CallConstIf() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue26237/b.dir/b.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue26237/b.dir/b.go new file mode 100644 index 0000000000000000000000000000000000000000..ca577490bcbfb81e1700c88c994a683e6e8f58ef --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue26237/b.dir/b.go @@ -0,0 +1,16 @@ +package b + +var q int + +func Top(x int) int { + q += 1 + if q != x { + return 3 + } + return 4 +} + +func OOO(x int) int { + defer func() { q += x & 7 }() + return Top(x + 1) +} diff --git 
a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue26237/main/main.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue26237/main/main.go new file mode 100644 index 0000000000000000000000000000000000000000..fdb1223d86ae86984af318763f7d35c066981b71 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue26237/main/main.go @@ -0,0 +1,16 @@ +package main + +import ( + "fmt" + + b "cmd/link/internal/ld/testdata/issue26237/b.dir" +) + +var skyx int + +func main() { + skyx += b.OOO(skyx) + if b.Top(1) == 99 { + fmt.Printf("Beware the Jabberwock, my son!\n") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue32233/lib/ObjC.m b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue32233/lib/ObjC.m new file mode 100644 index 0000000000000000000000000000000000000000..946278803e2257f3a31c20f8102dfdefa8350e72 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue32233/lib/ObjC.m @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#import +#import + +BOOL function(void) { +#if defined(MAC_OS_X_VERSION_MIN_REQUIRED) && (MAC_OS_X_VERSION_MIN_REQUIRED > 101300) + NSAppearance *darkAppearance; + if (@available(macOS 10.14, *)) { + darkAppearance = [NSAppearance appearanceNamed:NSAppearanceNameDarkAqua]; + } +#endif + return NO; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue32233/lib/lib.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue32233/lib/lib.go new file mode 100644 index 0000000000000000000000000000000000000000..514b9b9a4ab7f466120e4955bf40686f2eb2e19a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue32233/lib/lib.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lib + +/* +#cgo darwin CFLAGS: -D__MAC_OS_X_VERSION_MAX_ALLOWED=101450 +#cgo darwin LDFLAGS: -framework Foundation -framework AppKit +#include "stdlib.h" +int function(void); +*/ +import "C" +import "fmt" + +func DoC() { + C.function() + fmt.Println("called c function") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue32233/main/main.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue32233/main/main.go new file mode 100644 index 0000000000000000000000000000000000000000..ed0838afabc7ae72a67fd8037e9b011d0081bc5d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue32233/main/main.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "cmd/link/internal/ld/testdata/issue32233/lib" + +func main() { + lib.DoC() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue38192/main.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue38192/main.go new file mode 100644 index 0000000000000000000000000000000000000000..3b7df60f1567015d7d5350fea797520ddecdf5ef --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue38192/main.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func singleInstruction() + +func main() { + singleInstruction() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue38192/oneline.s b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue38192/oneline.s new file mode 100644 index 0000000000000000000000000000000000000000..d059e7113a150b80bedd10a547d1985712c4c418 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue38192/oneline.s @@ -0,0 +1,8 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·singleInstruction(SB),NOSPLIT,$0 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue39256/x.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue39256/x.go new file mode 100644 index 0000000000000000000000000000000000000000..97bc1cc4078565c371a9ebb68faf08740f21dead --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue39256/x.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + _ "unsafe" +) + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" +//go:cgo_import_dynamic libc_kill kill "libc.so" +//go:cgo_import_dynamic libc_close close "libc.so" +//go:cgo_import_dynamic libc_open open "libc.so" + +//go:cgo_import_dynamic _ _ "libc.so" + +func trampoline() + +func main() { + trampoline() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue39256/x.s b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue39256/x.s new file mode 100644 index 0000000000000000000000000000000000000000..41a54b2e04384317fcd94725972582d541826e80 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue39256/x.s @@ -0,0 +1,10 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +TEXT ·trampoline(SB),0,$0 + CALL libc_getpid(SB) + CALL libc_kill(SB) + CALL libc_open(SB) + CALL libc_close(SB) + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue39757/issue39757main.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue39757/issue39757main.go new file mode 100644 index 0000000000000000000000000000000000000000..76e0ea1b08ce1557d47ee19d55645b9707457cea --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue39757/issue39757main.go @@ -0,0 +1,15 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +var G int + +func main() { + if G != 101 { + println("not 101") + } else { + println("well now that's interesting") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue42484/main.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue42484/main.go new file mode 100644 index 0000000000000000000000000000000000000000..60fc110ffaa5158e653be75e74034bec10be0a63 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/issue42484/main.go @@ -0,0 +1,16 @@ +package main + +import ( + "fmt" +) + +func main() { + a := 0 + a++ + b := 0 + f1(a, b) +} + +func f1(a, b int) { + fmt.Printf("%d %d\n", a, b) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/stackcheck/main.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/stackcheck/main.go new file mode 100644 index 0000000000000000000000000000000000000000..b708cc5e704d78febe13f8fcaadc6b546682cb8d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/stackcheck/main.go @@ -0,0 +1,20 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func main() { asmMain() } + +func asmMain() + +func startSelf() + +func startChain() +func chain0() +func chain1() +func chain2() +func chainEnd() + +func startRec() +func startRec0() diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/stackcheck/main.s b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/stackcheck/main.s new file mode 100644 index 0000000000000000000000000000000000000000..623fdda7020a48e12a02decb3b4191b19dd1940d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/testdata/stackcheck/main.s @@ -0,0 +1,41 @@ +// Copyright 2022 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#define NOSPLIT 7 +#define NOFRAME 512 + +TEXT ·asmMain(SB),0,$0-0 + CALL ·startSelf(SB) + CALL ·startChain(SB) + CALL ·startRec(SB) + RET + +// Test reporting of basic over-the-limit +TEXT ·startSelf(SB),NOSPLIT,$1000-0 + RET + +// Test reporting of multiple over-the-limit chains +TEXT ·startChain(SB),NOSPLIT,$16-0 + CALL ·chain0(SB) + CALL ·chain1(SB) + CALL ·chain2(SB) + RET +TEXT ·chain0(SB),NOSPLIT,$32-0 + CALL ·chainEnd(SB) + RET +TEXT ·chain1(SB),NOSPLIT,$48-0 // Doesn't go over + RET +TEXT ·chain2(SB),NOSPLIT,$64-0 + CALL ·chainEnd(SB) + RET +TEXT ·chainEnd(SB),NOSPLIT,$1000-0 // Should be reported twice + RET + +// Test reporting of rootless recursion +TEXT ·startRec(SB),NOSPLIT|NOFRAME,$0-0 + CALL ·startRec0(SB) + RET +TEXT ·startRec0(SB),NOSPLIT|NOFRAME,$0-0 + CALL ·startRec(SB) + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/typelink.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/typelink.go new file mode 100644 index 0000000000000000000000000000000000000000..5eca6e01817e185bdb870471cc8ef6857164db4f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/typelink.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "cmd/internal/objabi" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "sort" +) + +type byTypeStr []typelinkSortKey + +type typelinkSortKey struct { + TypeStr string + Type loader.Sym +} + +func (s byTypeStr) Less(i, j int) bool { return s[i].TypeStr < s[j].TypeStr } +func (s byTypeStr) Len() int { return len(s) } +func (s byTypeStr) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// typelink generates the typelink table which is used by reflect.typelinks(). 
+// Types that should be added to the typelinks table are marked with the +// MakeTypelink attribute by the compiler. +func (ctxt *Link) typelink() { + ldr := ctxt.loader + typelinks := byTypeStr{} + var itabs []loader.Sym + for s := loader.Sym(1); s < loader.Sym(ldr.NSym()); s++ { + if !ldr.AttrReachable(s) { + continue + } + if ldr.IsTypelink(s) { + typelinks = append(typelinks, typelinkSortKey{decodetypeStr(ldr, ctxt.Arch, s), s}) + } else if ldr.IsItab(s) { + itabs = append(itabs, s) + } + } + sort.Sort(typelinks) + + tl := ldr.CreateSymForUpdate("runtime.typelink", 0) + tl.SetType(sym.STYPELINK) + ldr.SetAttrLocal(tl.Sym(), true) + tl.SetSize(int64(4 * len(typelinks))) + tl.Grow(tl.Size()) + relocs := tl.AddRelocs(len(typelinks)) + for i, s := range typelinks { + r := relocs.At(i) + r.SetSym(s.Type) + r.SetOff(int32(i * 4)) + r.SetSiz(4) + r.SetType(objabi.R_ADDROFF) + } + + ptrsize := ctxt.Arch.PtrSize + il := ldr.CreateSymForUpdate("runtime.itablink", 0) + il.SetType(sym.SITABLINK) + ldr.SetAttrLocal(il.Sym(), true) + il.SetSize(int64(ptrsize * len(itabs))) + il.Grow(il.Size()) + relocs = il.AddRelocs(len(itabs)) + for i, s := range itabs { + r := relocs.At(i) + r.SetSym(s) + r.SetOff(int32(i * ptrsize)) + r.SetSiz(uint8(ptrsize)) + r.SetType(objabi.R_ADDR) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/util.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/util.go new file mode 100644 index 0000000000000000000000000000000000000000..63b7e0d3299da11b14e6a32a36a2466fb3390558 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/util.go @@ -0,0 +1,117 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ld + +import ( + "cmd/link/internal/loader" + "encoding/binary" + "fmt" + "os" +) + +var atExitFuncs []func() + +func AtExit(f func()) { + atExitFuncs = append(atExitFuncs, f) +} + +// runAtExitFuncs runs the queued set of AtExit functions. +func runAtExitFuncs() { + for i := len(atExitFuncs) - 1; i >= 0; i-- { + atExitFuncs[i]() + } + atExitFuncs = nil +} + +// Exit exits with code after executing all atExitFuncs. +func Exit(code int) { + runAtExitFuncs() + os.Exit(code) +} + +// Exitf logs an error message then calls Exit(2). +func Exitf(format string, a ...interface{}) { + fmt.Fprintf(os.Stderr, os.Args[0]+": "+format+"\n", a...) + nerrors++ + if *flagH { + panic("error") + } + Exit(2) +} + +// afterErrorAction updates 'nerrors' on error and invokes exit or +// panics in the proper circumstances. +func afterErrorAction() { + nerrors++ + if *flagH { + panic("error") + } + if nerrors > 20 { + Exitf("too many errors") + } +} + +// Errorf logs an error message. +// +// If more than 20 errors have been printed, exit with an error. +// +// Logging an error means that on exit cmd/link will delete any +// output file and return a non-zero error code. +// +// TODO: remove. Use ctxt.Errorf instead. +// All remaining calls use nil as first arg. +func Errorf(dummy *int, format string, args ...interface{}) { + format += "\n" + fmt.Fprintf(os.Stderr, format, args...) + afterErrorAction() +} + +// Errorf method logs an error message. +// +// If more than 20 errors have been printed, exit with an error. +// +// Logging an error means that on exit cmd/link will delete any +// output file and return a non-zero error code. +func (ctxt *Link) Errorf(s loader.Sym, format string, args ...interface{}) { + if ctxt.loader != nil { + ctxt.loader.Errorf(s, format, args...) + return + } + // Note: this is not expected to happen very often. + format = fmt.Sprintf("sym %d: %s", s, format) + format += "\n" + fmt.Fprintf(os.Stderr, format, args...) 
+ afterErrorAction() +} + +func artrim(x []byte) string { + i := 0 + j := len(x) + for i < len(x) && x[i] == ' ' { + i++ + } + for j > i && x[j-1] == ' ' { + j-- + } + return string(x[i:j]) +} + +func stringtouint32(x []uint32, s string) { + for i := 0; len(s) > 0; i++ { + var buf [4]byte + s = s[copy(buf[:], s):] + x[i] = binary.LittleEndian.Uint32(buf[:]) + } +} + +// contains reports whether v is in s. +func contains(s []string, v string) bool { + for _, x := range s { + if x == v { + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/xcoff.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/xcoff.go new file mode 100644 index 0000000000000000000000000000000000000000..d915ab393b541b5afed94caeddd9ab73230e4339 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ld/xcoff.go @@ -0,0 +1,1809 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/bits" + "os" + "path/filepath" + "sort" + "strings" + "sync" + + "cmd/internal/objabi" + "cmd/link/internal/loader" + "cmd/link/internal/sym" +) + +// This file handles all algorithms related to XCOFF files generation. +// Most of them are adaptations of the ones in cmd/link/internal/pe.go +// as PE and XCOFF are based on COFF files. +// XCOFF files generated are 64 bits. + +const ( + // Total amount of space to reserve at the start of the file + // for File Header, Auxiliary Header, and Section Headers. + // May waste some. + XCOFFHDRRESERVE = FILHSZ_64 + AOUTHSZ_EXEC64 + SCNHSZ_64*23 + + // base on dump -o, then rounded from 32B to 64B to + // match worst case elf text section alignment on ppc64. + XCOFFSECTALIGN int64 = 64 + + // XCOFF binaries should normally have all its sections position-independent. 
+ // However, this is not yet possible for .text because of some R_ADDR relocations + // inside RODATA symbols. + // .data and .bss are position-independent so their address start inside an unreachable + // segment during execution to force segfault if something is wrong. + XCOFFTEXTBASE = 0x100000000 // Start of text address + XCOFFDATABASE = 0x200000000 // Start of data address +) + +// File Header +type XcoffFileHdr64 struct { + Fmagic uint16 // Target machine + Fnscns uint16 // Number of sections + Ftimedat int32 // Time and date of file creation + Fsymptr uint64 // Byte offset to symbol table start + Fopthdr uint16 // Number of bytes in optional header + Fflags uint16 // Flags + Fnsyms int32 // Number of entries in symbol table +} + +const ( + U64_TOCMAGIC = 0767 // AIX 64-bit XCOFF +) + +// Flags that describe the type of the object file. +const ( + F_RELFLG = 0x0001 + F_EXEC = 0x0002 + F_LNNO = 0x0004 + F_FDPR_PROF = 0x0010 + F_FDPR_OPTI = 0x0020 + F_DSA = 0x0040 + F_VARPG = 0x0100 + F_DYNLOAD = 0x1000 + F_SHROBJ = 0x2000 + F_LOADONLY = 0x4000 +) + +// Auxiliary Header +type XcoffAoutHdr64 struct { + Omagic int16 // Flags - Ignored If Vstamp Is 1 + Ovstamp int16 // Version + Odebugger uint32 // Reserved For Debugger + Otextstart uint64 // Virtual Address Of Text + Odatastart uint64 // Virtual Address Of Data + Otoc uint64 // Toc Address + Osnentry int16 // Section Number For Entry Point + Osntext int16 // Section Number For Text + Osndata int16 // Section Number For Data + Osntoc int16 // Section Number For Toc + Osnloader int16 // Section Number For Loader + Osnbss int16 // Section Number For Bss + Oalgntext int16 // Max Text Alignment + Oalgndata int16 // Max Data Alignment + Omodtype [2]byte // Module Type Field + Ocpuflag uint8 // Bit Flags - Cputypes Of Objects + Ocputype uint8 // Reserved for CPU type + Otextpsize uint8 // Requested text page size + Odatapsize uint8 // Requested data page size + Ostackpsize uint8 // Requested stack page size + Oflags 
uint8 // Flags And TLS Alignment + Otsize uint64 // Text Size In Bytes + Odsize uint64 // Data Size In Bytes + Obsize uint64 // Bss Size In Bytes + Oentry uint64 // Entry Point Address + Omaxstack uint64 // Max Stack Size Allowed + Omaxdata uint64 // Max Data Size Allowed + Osntdata int16 // Section Number For Tdata Section + Osntbss int16 // Section Number For Tbss Section + Ox64flags uint16 // Additional Flags For 64-Bit Objects + Oresv3a int16 // Reserved + Oresv3 [2]int32 // Reserved +} + +// Section Header +type XcoffScnHdr64 struct { + Sname [8]byte // Section Name + Spaddr uint64 // Physical Address + Svaddr uint64 // Virtual Address + Ssize uint64 // Section Size + Sscnptr uint64 // File Offset To Raw Data + Srelptr uint64 // File Offset To Relocation + Slnnoptr uint64 // File Offset To Line Numbers + Snreloc uint32 // Number Of Relocation Entries + Snlnno uint32 // Number Of Line Number Entries + Sflags uint32 // flags +} + +// Flags defining the section type. +const ( + STYP_DWARF = 0x0010 + STYP_TEXT = 0x0020 + STYP_DATA = 0x0040 + STYP_BSS = 0x0080 + STYP_EXCEPT = 0x0100 + STYP_INFO = 0x0200 + STYP_TDATA = 0x0400 + STYP_TBSS = 0x0800 + STYP_LOADER = 0x1000 + STYP_DEBUG = 0x2000 + STYP_TYPCHK = 0x4000 + STYP_OVRFLO = 0x8000 +) +const ( + SSUBTYP_DWINFO = 0x10000 // DWARF info section + SSUBTYP_DWLINE = 0x20000 // DWARF line-number section + SSUBTYP_DWPBNMS = 0x30000 // DWARF public names section + SSUBTYP_DWPBTYP = 0x40000 // DWARF public types section + SSUBTYP_DWARNGE = 0x50000 // DWARF aranges section + SSUBTYP_DWABREV = 0x60000 // DWARF abbreviation section + SSUBTYP_DWSTR = 0x70000 // DWARF strings section + SSUBTYP_DWRNGES = 0x80000 // DWARF ranges section + SSUBTYP_DWLOC = 0x90000 // DWARF location lists section + SSUBTYP_DWFRAME = 0xA0000 // DWARF frames section + SSUBTYP_DWMAC = 0xB0000 // DWARF macros section +) + +// Headers size +const ( + FILHSZ_32 = 20 + FILHSZ_64 = 24 + AOUTHSZ_EXEC32 = 72 + AOUTHSZ_EXEC64 = 120 + SCNHSZ_32 = 40 + 
SCNHSZ_64 = 72 + LDHDRSZ_32 = 32 + LDHDRSZ_64 = 56 + LDSYMSZ_64 = 24 + RELSZ_64 = 14 +) + +// Type representing all XCOFF symbols. +type xcoffSym interface { +} + +// Symbol Table Entry +type XcoffSymEnt64 struct { + Nvalue uint64 // Symbol value + Noffset uint32 // Offset of the name in string table or .debug section + Nscnum int16 // Section number of symbol + Ntype uint16 // Basic and derived type specification + Nsclass uint8 // Storage class of symbol + Nnumaux int8 // Number of auxiliary entries +} + +const SYMESZ = 18 + +const ( + // Nscnum + N_DEBUG = -2 + N_ABS = -1 + N_UNDEF = 0 + + //Ntype + SYM_V_INTERNAL = 0x1000 + SYM_V_HIDDEN = 0x2000 + SYM_V_PROTECTED = 0x3000 + SYM_V_EXPORTED = 0x4000 + SYM_TYPE_FUNC = 0x0020 // is function +) + +// Storage Class. +const ( + C_NULL = 0 // Symbol table entry marked for deletion + C_EXT = 2 // External symbol + C_STAT = 3 // Static symbol + C_BLOCK = 100 // Beginning or end of inner block + C_FCN = 101 // Beginning or end of function + C_FILE = 103 // Source file name and compiler information + C_HIDEXT = 107 // Unnamed external symbol + C_BINCL = 108 // Beginning of include file + C_EINCL = 109 // End of include file + C_WEAKEXT = 111 // Weak external symbol + C_DWARF = 112 // DWARF symbol + C_GSYM = 128 // Global variable + C_LSYM = 129 // Automatic variable allocated on stack + C_PSYM = 130 // Argument to subroutine allocated on stack + C_RSYM = 131 // Register variable + C_RPSYM = 132 // Argument to function or procedure stored in register + C_STSYM = 133 // Statically allocated symbol + C_BCOMM = 135 // Beginning of common block + C_ECOML = 136 // Local member of common block + C_ECOMM = 137 // End of common block + C_DECL = 140 // Declaration of object + C_ENTRY = 141 // Alternate entry + C_FUN = 142 // Function or procedure + C_BSTAT = 143 // Beginning of static block + C_ESTAT = 144 // End of static block + C_GTLS = 145 // Global thread-local variable + C_STTLS = 146 // Static thread-local variable +) + +// 
File Auxiliary Entry +type XcoffAuxFile64 struct { + Xzeroes uint32 // The name is always in the string table + Xoffset uint32 // Offset in the string table + X_pad1 [6]byte + Xftype uint8 // Source file string type + X_pad2 [2]byte + Xauxtype uint8 // Type of auxiliary entry +} + +// Function Auxiliary Entry +type XcoffAuxFcn64 struct { + Xlnnoptr uint64 // File pointer to line number + Xfsize uint32 // Size of function in bytes + Xendndx uint32 // Symbol table index of next entry + Xpad uint8 // Unused + Xauxtype uint8 // Type of auxiliary entry +} + +// csect Auxiliary Entry. +type XcoffAuxCSect64 struct { + Xscnlenlo uint32 // Lower 4 bytes of length or symbol table index + Xparmhash uint32 // Offset of parameter type-check string + Xsnhash uint16 // .typchk section number + Xsmtyp uint8 // Symbol alignment and type + Xsmclas uint8 // Storage-mapping class + Xscnlenhi uint32 // Upper 4 bytes of length or symbol table index + Xpad uint8 // Unused + Xauxtype uint8 // Type of auxiliary entry +} + +// DWARF Auxiliary Entry +type XcoffAuxDWARF64 struct { + Xscnlen uint64 // Length of this symbol section + X_pad [9]byte + Xauxtype uint8 // Type of auxiliary entry +} + +// Auxiliary type +const ( + _AUX_EXCEPT = 255 + _AUX_FCN = 254 + _AUX_SYM = 253 + _AUX_FILE = 252 + _AUX_CSECT = 251 + _AUX_SECT = 250 +) + +// Xftype field +const ( + XFT_FN = 0 // Source File Name + XFT_CT = 1 // Compile Time Stamp + XFT_CV = 2 // Compiler Version Number + XFT_CD = 128 // Compiler Defined Information/ + +) + +// Symbol type field. +const ( + XTY_ER = 0 // External reference + XTY_SD = 1 // Section definition + XTY_LD = 2 // Label definition + XTY_CM = 3 // Common csect definition + XTY_WK = 0x8 // Weak symbol + XTY_EXP = 0x10 // Exported symbol + XTY_ENT = 0x20 // Entry point symbol + XTY_IMP = 0x40 // Imported symbol +) + +// Storage-mapping class. 
const (
	XMC_PR     = 0  // Program code
	XMC_RO     = 1  // Read-only constant
	XMC_DB     = 2  // Debug dictionary table
	XMC_TC     = 3  // TOC entry
	XMC_UA     = 4  // Unclassified
	XMC_RW     = 5  // Read/Write data
	XMC_GL     = 6  // Global linkage
	XMC_XO     = 7  // Extended operation
	XMC_SV     = 8  // 32-bit supervisor call descriptor
	XMC_BS     = 9  // BSS class
	XMC_DS     = 10 // Function descriptor
	XMC_UC     = 11 // Unnamed FORTRAN common
	XMC_TC0    = 15 // TOC anchor
	XMC_TD     = 16 // Scalar data entry in the TOC
	XMC_SV64   = 17 // 64-bit supervisor call descriptor
	XMC_SV3264 = 18 // Supervisor call descriptor for both 32-bit and 64-bit
	XMC_TL     = 20 // Read/Write thread-local data
	XMC_UL     = 21 // Read/Write thread-local data (.tbss)
	XMC_TE     = 22 // TOC entry
)

// Loader Header
type XcoffLdHdr64 struct {
	Lversion int32  // Loader section version number
	Lnsyms   int32  // Number of symbol table entries
	Lnreloc  int32  // Number of relocation table entries
	Listlen  uint32 // Length of import file ID string table
	Lnimpid  int32  // Number of import file IDs
	Lstlen   uint32 // Length of string table
	Limpoff  uint64 // Offset to start of import file IDs
	Lstoff   uint64 // Offset to start of string table
	Lsymoff  uint64 // Offset to start of symbol table
	Lrldoff  uint64 // Offset to start of relocation entries
}

// Loader Symbol
type XcoffLdSym64 struct {
	Lvalue  uint64 // Address field
	Loffset uint32 // Byte offset into string table of symbol name
	Lscnum  int16  // Section number containing symbol
	Lsmtype int8   // Symbol type, export, import flags
	Lsmclas int8   // Symbol storage class
	Lifile  int32  // Import file ID; ordinal of import file IDs
	Lparm   uint32 // Parameter type-check field
}

// xcoffLoaderSymbol describes a symbol that must appear in the
// .loader section symbol table.
type xcoffLoaderSymbol struct {
	sym    loader.Sym
	smtype int8
	smclas int8
}

type XcoffLdImportFile64 struct {
	Limpidpath string
	Limpidbase string
	Limpidmem  string
}

type XcoffLdRel64 struct {
	Lvaddr  uint64 // Address Field
	Lrtype  uint16 // Relocation Size and Type
	Lrsecnm int16  // Section Number being relocated
	Lsymndx int32  // Loader-Section symbol table index
}

// xcoffLoaderReloc holds information about a relocation made by the loader.
type xcoffLoaderReloc struct {
	sym    loader.Sym
	roff   int32
	rtype  uint16
	symndx int32
}

const (
	XCOFF_R_POS = 0x00 // A(sym) Positive Relocation
	XCOFF_R_NEG = 0x01 // -A(sym) Negative Relocation
	XCOFF_R_REL = 0x02 // A(sym-*) Relative to self
	XCOFF_R_TOC = 0x03 // A(sym-TOC) Relative to TOC
	XCOFF_R_TRL = 0x12 // A(sym-TOC) TOC Relative indirect load.

	XCOFF_R_TRLA = 0x13 // A(sym-TOC) TOC Rel load address. modifiable inst
	XCOFF_R_GL   = 0x05 // A(external TOC of sym) Global Linkage
	XCOFF_R_TCL  = 0x06 // A(local TOC of sym) Local object TOC address
	XCOFF_R_RL   = 0x0C // A(sym) Pos indirect load. modifiable instruction
	XCOFF_R_RLA  = 0x0D // A(sym) Pos Load Address. modifiable instruction
	XCOFF_R_REF  = 0x0F // AL0(sym) Non relocating ref. No garbage collect
	XCOFF_R_BA   = 0x08 // A(sym) Branch absolute. Cannot modify instruction
	XCOFF_R_RBA  = 0x18 // A(sym) Branch absolute. modifiable instruction
	XCOFF_R_BR   = 0x0A // A(sym-*) Branch rel to self. non modifiable
	XCOFF_R_RBR  = 0x1A // A(sym-*) Branch rel to self. modifiable instr

	XCOFF_R_TLS    = 0x20 // General-dynamic reference to TLS symbol
	XCOFF_R_TLS_IE = 0x21 // Initial-exec reference to TLS symbol
	XCOFF_R_TLS_LD = 0x22 // Local-dynamic reference to TLS symbol
	XCOFF_R_TLS_LE = 0x23 // Local-exec reference to TLS symbol
	XCOFF_R_TLSM   = 0x24 // Module reference to TLS symbol
	XCOFF_R_TLSML  = 0x25 // Module reference to local (own) module

	XCOFF_R_TOCU = 0x30 // Relative to TOC - high order bits
	XCOFF_R_TOCL = 0x31 // Relative to TOC - low order bits
)

// XcoffLdStr64 is an entry of the .loader string table: a 2-byte length
// followed by the (NUL-terminated) name.
type XcoffLdStr64 struct {
	size uint16
	name string
}

// xcoffFile is used to build XCOFF file.
type xcoffFile struct {
	xfhdr           XcoffFileHdr64
	xahdr           XcoffAoutHdr64
	sections        []*XcoffScnHdr64
	sectText        *XcoffScnHdr64
	sectData        *XcoffScnHdr64
	sectBss         *XcoffScnHdr64
	stringTable     xcoffStringTable
	sectNameToScnum map[string]int16
	loaderSize      uint64
	symtabOffset    int64                // offset to the start of symbol table
	symbolCount     uint32               // number of symbol table records written
	symtabSym       []xcoffSym           // XCOFF symbols for the symbol table
	dynLibraries    map[string]int       // Dynamic libraries in .loader section. The integer represents its import file number (- 1)
	loaderSymbols   []*xcoffLoaderSymbol // symbols inside .loader symbol table
	loaderReloc     []*xcoffLoaderReloc  // Reloc that must be made inside loader
	sync.Mutex                           // currently protect loaderReloc
}

// Var used by XCOFF Generation algorithms
var (
	xfile xcoffFile
)

// xcoffStringTable is a XCOFF string table.
type xcoffStringTable struct {
	strings    []string
	stringsLen int
}

// size returns size of string table t.
func (t *xcoffStringTable) size() int {
	// string table starts with 4-byte length at the beginning
	return t.stringsLen + 4
}

// add adds string str to string table t and returns the offset at
// which it will be written.
func (t *xcoffStringTable) add(str string) int {
	off := t.size()
	t.strings = append(t.strings, str)
	t.stringsLen += len(str) + 1 // each string will have 0 appended to it
	return off
}

// write writes string table t into the output file.
func (t *xcoffStringTable) write(out *OutBuf) {
	out.Write32(uint32(t.size()))
	for _, s := range t.strings {
		out.WriteString(s)
		out.Write8(0)
	}
}

// write writes XCOFF section sect into the output file.
func (sect *XcoffScnHdr64) write(ctxt *Link) {
	binary.Write(ctxt.Out, binary.BigEndian, sect)
	ctxt.Out.Write32(0) // Add 4 empty bytes at the end to match alignment
}

// addSection adds section to the XCOFF file f.
+func (f *xcoffFile) addSection(name string, addr uint64, size uint64, fileoff uint64, flags uint32) *XcoffScnHdr64 { + sect := &XcoffScnHdr64{ + Spaddr: addr, + Svaddr: addr, + Ssize: size, + Sscnptr: fileoff, + Sflags: flags, + } + copy(sect.Sname[:], name) // copy string to [8]byte + f.sections = append(f.sections, sect) + f.sectNameToScnum[name] = int16(len(f.sections)) + return sect +} + +// addDwarfSection adds a dwarf section to the XCOFF file f. +// This function is similar to addSection, but Dwarf section names +// must be modified to conventional names and they are various subtypes. +func (f *xcoffFile) addDwarfSection(s *sym.Section) *XcoffScnHdr64 { + newName, subtype := xcoffGetDwarfSubtype(s.Name) + return f.addSection(newName, 0, s.Length, s.Seg.Fileoff+s.Vaddr-s.Seg.Vaddr, STYP_DWARF|subtype) +} + +// xcoffGetDwarfSubtype returns the XCOFF name of the DWARF section str +// and its subtype constant. +func xcoffGetDwarfSubtype(str string) (string, uint32) { + switch str { + default: + Exitf("unknown DWARF section name for XCOFF: %s", str) + case ".debug_abbrev": + return ".dwabrev", SSUBTYP_DWABREV + case ".debug_info": + return ".dwinfo", SSUBTYP_DWINFO + case ".debug_frame": + return ".dwframe", SSUBTYP_DWFRAME + case ".debug_line": + return ".dwline", SSUBTYP_DWLINE + case ".debug_loc": + return ".dwloc", SSUBTYP_DWLOC + case ".debug_pubnames": + return ".dwpbnms", SSUBTYP_DWPBNMS + case ".debug_pubtypes": + return ".dwpbtyp", SSUBTYP_DWPBTYP + case ".debug_ranges": + return ".dwrnges", SSUBTYP_DWRNGES + } + // never used + return "", 0 +} + +// getXCOFFscnum returns the XCOFF section number of a Go section. 
func (f *xcoffFile) getXCOFFscnum(sect *sym.Section) int16 {
	switch sect.Seg {
	case &Segtext:
		return f.sectNameToScnum[".text"]
	case &Segdata:
		if sect.Name == ".noptrbss" || sect.Name == ".bss" {
			return f.sectNameToScnum[".bss"]
		}
		if sect.Name == ".tbss" {
			return f.sectNameToScnum[".tbss"]
		}
		return f.sectNameToScnum[".data"]
	case &Segdwarf:
		name, _ := xcoffGetDwarfSubtype(sect.Name)
		return f.sectNameToScnum[name]
	case &Segrelrodata:
		// relro data is merged into the .data section on AIX.
		return f.sectNameToScnum[".data"]
	}
	Errorf(nil, "getXCOFFscnum not implemented for section %s", sect.Name)
	return -1
}

// Xcoffinit initializes some internal values and sets up
// already known header information.
func Xcoffinit(ctxt *Link) {
	xfile.dynLibraries = make(map[string]int)

	HEADR = int32(Rnd(XCOFFHDRRESERVE, XCOFFSECTALIGN))
	if *FlagRound != -1 {
		Errorf(nil, "-R not available on AIX")
	}
	*FlagRound = XCOFFSECTALIGN
	if *FlagTextAddr != -1 {
		Errorf(nil, "-T not available on AIX")
	}
	*FlagTextAddr = Rnd(XCOFFTEXTBASE, *FlagRound) + int64(HEADR)
}

// SYMBOL TABLE

// xcoffSymSrcFile records C_FILE information needed for genasmsym in XCOFF.
type xcoffSymSrcFile struct {
	name         string
	file         *XcoffSymEnt64   // Symbol of this C_FILE
	csectAux     *XcoffAuxCSect64 // Symbol for the current .csect
	csectSymNb   uint64           // Symbol number for the current .csect
	csectVAStart int64
	csectVAEnd   int64
}

var (
	currDwscnoff   = make(map[string]uint64) // Needed to create C_DWARF symbols
	currSymSrcFile xcoffSymSrcFile
	outerSymSize   = make(map[string]int64)
)

// xcoffUpdateOuterSize stores the size of outer symbols in order to have it
// in the symbol table.
func xcoffUpdateOuterSize(ctxt *Link, size int64, stype sym.SymKind) {
	if size == 0 {
		return
	}
	// TODO: use CarrierSymByType

	ldr := ctxt.loader
	switch stype {
	default:
		Errorf(nil, "unknown XCOFF outer symbol for type %s", stype.String())
	case sym.SRODATA, sym.SRODATARELRO, sym.SFUNCTAB, sym.SSTRING:
		// Nothing to do
	case sym.STYPERELRO:
		if ctxt.UseRelro() && (ctxt.BuildMode == BuildModeCArchive || ctxt.BuildMode == BuildModeCShared || ctxt.BuildMode == BuildModePIE) {
			// runtime.types size must be removed, as it's a real symbol.
			tsize := ldr.SymSize(ldr.Lookup("runtime.types", 0))
			outerSymSize["typerel.*"] = size - tsize
			return
		}
		fallthrough
	case sym.STYPE:
		if !ctxt.DynlinkingGo() {
			// runtime.types size must be removed, as it's a real symbol.
			tsize := ldr.SymSize(ldr.Lookup("runtime.types", 0))
			outerSymSize["type:*"] = size - tsize
		}
	case sym.SGOSTRING:
		outerSymSize["go:string.*"] = size
	case sym.SGOFUNC:
		if !ctxt.DynlinkingGo() {
			outerSymSize["go:func.*"] = size
		}
	case sym.SGOFUNCRELRO:
		outerSymSize["go:funcrel.*"] = size
	case sym.SGCBITS:
		outerSymSize["runtime.gcbits.*"] = size
	case sym.SPCLNTAB:
		outerSymSize["runtime.pclntab"] = size
	}
}

// addSymbol stores a symbol or an auxiliary entry that will later be
// written to the symbol table, and bumps the record count.
func (f *xcoffFile) addSymbol(sym xcoffSym) {
	f.symtabSym = append(f.symtabSym, sym)
	f.symbolCount++
}

// xcoffAlign returns the log base 2 of the symbol's alignment.
func xcoffAlign(ldr *loader.Loader, x loader.Sym, t SymbolType) uint8 {
	align := ldr.SymAlign(x)
	if align == 0 {
		// No explicit alignment recorded: fall back to the default for
		// the symbol's kind.
		if t == TextSym {
			align = int32(Funcalign)
		} else {
			align = symalign(ldr, x)
		}
	}
	return logBase2(int(align))
}

// logBase2 returns the log in base 2 of a.
func logBase2(a int) uint8 {
	return uint8(bits.Len(uint(a)) - 1)
}

// Write symbols needed when a new file appeared:
// - a C_FILE with one auxiliary entry for its name
// - C_DWARF symbols to provide debug information
// - a C_HIDEXT which will be a csect containing all of its functions
// It needs several parameters to create .csect symbols such as its entry point and its section number.
//
// Currently, a new file is in fact a new package. It seems to be OK, but it might change
// in the future.
func (f *xcoffFile) writeSymbolNewFile(ctxt *Link, name string, firstEntry uint64, extnum int16) {
	ldr := ctxt.loader
	/* C_FILE */
	s := &XcoffSymEnt64{
		Noffset: uint32(f.stringTable.add(".file")),
		Nsclass: C_FILE,
		Nscnum:  N_DEBUG,
		Ntype:   0, // Go isn't inside predefined language.
		Nnumaux: 1,
	}
	f.addSymbol(s)
	currSymSrcFile.file = s

	// Auxiliary entry for file name.
	auxf := &XcoffAuxFile64{
		Xoffset:  uint32(f.stringTable.add(name)),
		Xftype:   XFT_FN,
		Xauxtype: _AUX_FILE,
	}
	f.addSymbol(auxf)

	/* Dwarf */
	for _, sect := range Segdwarf.Sections {
		var dwsize uint64
		if ctxt.LinkMode == LinkInternal {
			// Find the size of this corresponding package DWARF compilation unit.
			// This size is set during DWARF generation (see dwarf.go).
			dwsize = getDwsectCUSize(sect.Name, name)
			// .debug_abbrev is common to all packages and not found with the previous function
			if sect.Name == ".debug_abbrev" {
				dwsize = uint64(ldr.SymSize(loader.Sym(sect.Sym)))
			}
		} else {
			// There is only one .FILE with external linking.
			dwsize = sect.Length
		}

		// get XCOFF name
		name, _ := xcoffGetDwarfSubtype(sect.Name)
		s := &XcoffSymEnt64{
			Nvalue:  currDwscnoff[sect.Name],
			Noffset: uint32(f.stringTable.add(name)),
			Nsclass: C_DWARF,
			Nscnum:  f.getXCOFFscnum(sect),
			Nnumaux: 1,
		}

		if currSymSrcFile.csectAux == nil {
			// Dwarf relocations need the symbol number of .dw* symbols.
			// It doesn't need to know it for each package, one is enough.
			// currSymSrcFile.csectAux == nil means first package.
			ldr.SetSymDynid(loader.Sym(sect.Sym), int32(f.symbolCount))

			if sect.Name == ".debug_frame" && ctxt.LinkMode != LinkExternal {
				// CIE size must be added to the first package.
				dwsize += 48
			}
		}

		f.addSymbol(s)

		// update the DWARF section offset in this file
		if sect.Name != ".debug_abbrev" {
			currDwscnoff[sect.Name] += dwsize
		}

		// Auxiliary dwarf section
		auxd := &XcoffAuxDWARF64{
			Xscnlen:  dwsize,
			Xauxtype: _AUX_SECT,
		}

		f.addSymbol(auxd)
	}

	/* .csect */
	// Check if extnum is in text.
	// This is temporary and only here to check if this algorithm is correct.
	if extnum != 1 {
		Exitf("XCOFF symtab: A new file was detected with its first symbol not in .text")
	}

	currSymSrcFile.csectSymNb = uint64(f.symbolCount)

	// No offset because no name
	s = &XcoffSymEnt64{
		Nvalue:  firstEntry,
		Nscnum:  extnum,
		Nsclass: C_HIDEXT,
		Ntype:   0, // check visibility ?
		Nnumaux: 1,
	}
	f.addSymbol(s)

	aux := &XcoffAuxCSect64{
		Xsmclas:  XMC_PR,
		Xsmtyp:   XTY_SD | logBase2(Funcalign)<<3,
		Xauxtype: _AUX_CSECT,
	}
	f.addSymbol(aux)

	currSymSrcFile.csectAux = aux
	currSymSrcFile.csectVAStart = int64(firstEntry)
	currSymSrcFile.csectVAEnd = int64(firstEntry)
}

// Update values for the previous package.
//   - Svalue of the C_FILE symbol: if it is the last one, this Svalue must be -1
//   - Xsclen of the csect symbol.
func (f *xcoffFile) updatePreviousFile(ctxt *Link, last bool) {
	// first file
	if currSymSrcFile.file == nil {
		return
	}

	// Update C_FILE
	cfile := currSymSrcFile.file
	if last {
		// -1 marks the final C_FILE entry.
		cfile.Nvalue = 0xFFFFFFFFFFFFFFFF
	} else {
		cfile.Nvalue = uint64(f.symbolCount)
	}

	// update csect scnlen in this auxiliary entry
	aux := currSymSrcFile.csectAux
	csectSize := currSymSrcFile.csectVAEnd - currSymSrcFile.csectVAStart
	aux.Xscnlenlo = uint32(csectSize & 0xFFFFFFFF)
	aux.Xscnlenhi = uint32(csectSize >> 32)
}

// Write symbol representing a .text function.
// The symbol table is split with C_FILE corresponding to each package
// and not to each source file as it should be.
func (f *xcoffFile) writeSymbolFunc(ctxt *Link, x loader.Sym) []xcoffSym {
	// New XCOFF symbols which will be written.
	syms := []xcoffSym{}

	// Check if a new file is detected.
	ldr := ctxt.loader
	name := ldr.SymName(x)
	if strings.Contains(name, "-tramp") || strings.HasPrefix(name, "runtime.text.") {
		// Trampolines don't have a FILE so they are considered
		// to be in the current file.
		// Same goes for runtime.text.X symbols.
	} else if ldr.SymPkg(x) == "" { // Undefined global symbol
		// If this happens, the algorithm must be redone.
		if currSymSrcFile.name != "" {
			Exitf("undefined global symbol found inside another file")
		}
	} else {
		// Current file has changed. New C_FILE, C_DWARF, etc must be generated.
		if currSymSrcFile.name != ldr.SymPkg(x) {
			if ctxt.LinkMode == LinkInternal {
				// update previous file values
				xfile.updatePreviousFile(ctxt, false)
				currSymSrcFile.name = ldr.SymPkg(x)
				f.writeSymbolNewFile(ctxt, ldr.SymPkg(x), uint64(ldr.SymValue(x)), xfile.getXCOFFscnum(ldr.SymSect(x)))
			} else {
				// With external linking, ld will crash if there are several
				// .FILE symbols and DWARF debugging is enabled, somewhere
				// during the relocation phase.
				// Therefore, all packages are merged under a fake .FILE
				// "go_functions".
				// TODO(aix); remove once ld has been fixed or the triggering
				// relocation has been found and fixed.
				if currSymSrcFile.name == "" {
					currSymSrcFile.name = ldr.SymPkg(x)
					f.writeSymbolNewFile(ctxt, "go_functions", uint64(ldr.SymValue(x)), xfile.getXCOFFscnum(ldr.SymSect(x)))
				}
			}

		}
	}

	name = ldr.SymExtname(x)
	name = mangleABIName(ctxt, ldr, x, name)

	s := &XcoffSymEnt64{
		Nsclass: C_EXT,
		Noffset: uint32(xfile.stringTable.add(name)),
		Nvalue:  uint64(ldr.SymValue(x)),
		Nscnum:  f.getXCOFFscnum(ldr.SymSect(x)),
		Ntype:   SYM_TYPE_FUNC,
		Nnumaux: 2,
	}

	if ldr.IsFileLocal(x) || ldr.AttrVisibilityHidden(x) || ldr.AttrLocal(x) {
		s.Nsclass = C_HIDEXT
	}

	ldr.SetSymDynid(x, int32(xfile.symbolCount))
	syms = append(syms, s)

	// Keep track of the section size by tracking the VA range. Individual
	// alignment differences may introduce a few extra bytes of padding
	// which are not fully accounted for by ldr.SymSize(x).
	sv := ldr.SymValue(x) + ldr.SymSize(x)
	if currSymSrcFile.csectVAEnd < sv {
		currSymSrcFile.csectVAEnd = sv
	}

	// create auxiliary entries
	a2 := &XcoffAuxFcn64{
		Xfsize:   uint32(ldr.SymSize(x)),
		Xlnnoptr: 0,                     // TODO
		Xendndx:  xfile.symbolCount + 3, // this symbol + 2 aux entries
		Xauxtype: _AUX_FCN,
	}
	syms = append(syms, a2)

	a4 := &XcoffAuxCSect64{
		Xscnlenlo: uint32(currSymSrcFile.csectSymNb & 0xFFFFFFFF),
		Xscnlenhi: uint32(currSymSrcFile.csectSymNb >> 32),
		Xsmclas:   XMC_PR, // Program Code
		Xsmtyp:    XTY_LD, // label definition (based on C)
		Xauxtype:  _AUX_CSECT,
	}
	a4.Xsmtyp |= uint8(xcoffAlign(ldr, x, TextSym) << 3)

	syms = append(syms, a4)
	return syms
}

// putaixsym is used by genasmsym to write the symbol table.
func putaixsym(ctxt *Link, x loader.Sym, t SymbolType) {
	// All XCOFF symbols generated for this Go symbol.
	// Can be a symbol entry or an auxiliary entry.
	syms := []xcoffSym{}

	ldr := ctxt.loader
	name := ldr.SymName(x)
	if t == UndefinedSym {
		name = ldr.SymExtname(x)
	}

	switch t {
	default:
		return

	case TextSym:
		if ldr.SymPkg(x) != "" || strings.Contains(name, "-tramp") || strings.HasPrefix(name, "runtime.text.") {
			// Function within a file
			syms = xfile.writeSymbolFunc(ctxt, x)
		} else {
			// Only runtime.text and runtime.etext come through this way
			if name != "runtime.text" && name != "runtime.etext" && name != "go:buildid" {
				Exitf("putaixsym: unknown text symbol %s", name)
			}
			s := &XcoffSymEnt64{
				Nsclass: C_HIDEXT,
				Noffset: uint32(xfile.stringTable.add(name)),
				Nvalue:  uint64(ldr.SymValue(x)),
				Nscnum:  xfile.getXCOFFscnum(ldr.SymSect(x)),
				Ntype:   SYM_TYPE_FUNC,
				Nnumaux: 1,
			}
			ldr.SetSymDynid(x, int32(xfile.symbolCount))
			syms = append(syms, s)

			size := uint64(ldr.SymSize(x))
			a4 := &XcoffAuxCSect64{
				Xauxtype:  _AUX_CSECT,
				Xscnlenlo: uint32(size & 0xFFFFFFFF),
				Xscnlenhi: uint32(size >> 32),
				Xsmclas:   XMC_PR,
				Xsmtyp:    XTY_SD,
			}
			a4.Xsmtyp |= uint8(xcoffAlign(ldr, x, TextSym) << 3)
			syms = append(syms, a4)
		}

	case DataSym, BSSSym:
		s := &XcoffSymEnt64{
			Nsclass: C_EXT,
			Noffset: uint32(xfile.stringTable.add(name)),
			Nvalue:  uint64(ldr.SymValue(x)),
			Nscnum:  xfile.getXCOFFscnum(ldr.SymSect(x)),
			Nnumaux: 1,
		}

		if ldr.IsFileLocal(x) || ldr.AttrVisibilityHidden(x) || ldr.AttrLocal(x) {
			// There are more symbols in the case of a global data
			// which are related to the assembly generated
			// to access such symbols.
			// But as Go has its own way to check if a symbol is
			// global or local (the capital letter), we don't need to
			// implement them yet.
			s.Nsclass = C_HIDEXT
		}

		ldr.SetSymDynid(x, int32(xfile.symbolCount))
		syms = append(syms, s)

		// Create auxiliary entry

		// Normally, size should be the size of the csect containing all
		// the data and bss symbols of one file/package.
		// However, it's easier to just have a csect for each symbol.
		// It might change.
		size := uint64(ldr.SymSize(x))
		a4 := &XcoffAuxCSect64{
			Xauxtype:  _AUX_CSECT,
			Xscnlenlo: uint32(size & 0xFFFFFFFF),
			Xscnlenhi: uint32(size >> 32),
		}

		if ty := ldr.SymType(x); ty >= sym.STYPE && ty <= sym.SPCLNTAB {
			if ctxt.IsExternal() && strings.HasPrefix(ldr.SymSect(x).Name, ".data.rel.ro") {
				// During external linking, read-only datas with relocation
				// must be in .data.
				a4.Xsmclas = XMC_RW
			} else {
				// Read only data
				a4.Xsmclas = XMC_RO
			}
		} else if /*ty == sym.SDATA &&*/ strings.HasPrefix(ldr.SymName(x), "TOC.") && ctxt.IsExternal() {
			a4.Xsmclas = XMC_TC
		} else if ldr.SymName(x) == "TOC" {
			a4.Xsmclas = XMC_TC0
		} else {
			a4.Xsmclas = XMC_RW
		}
		if t == DataSym {
			a4.Xsmtyp |= XTY_SD
		} else {
			a4.Xsmtyp |= XTY_CM
		}

		a4.Xsmtyp |= uint8(xcoffAlign(ldr, x, t) << 3)

		syms = append(syms, a4)

	case UndefinedSym:
		if ty := ldr.SymType(x); ty != sym.SDYNIMPORT && ty != sym.SHOSTOBJ && ty != sym.SUNDEFEXT {
			return
		}
		s := &XcoffSymEnt64{
			Nsclass: C_EXT,
			Noffset: uint32(xfile.stringTable.add(name)),
			Nnumaux: 1,
		}
		ldr.SetSymDynid(x, int32(xfile.symbolCount))
		syms = append(syms, s)

		a4 := &XcoffAuxCSect64{
			Xauxtype: _AUX_CSECT,
			Xsmclas:  XMC_DS,
			Xsmtyp:   XTY_ER | XTY_IMP,
		}

		if ldr.SymName(x) == "__n_pthreads" {
			// Currently, all imported symbols made by cgo_import_dynamic are
			// syscall functions, except __n_pthreads which is a variable.
			// TODO(aix): Find a way to detect variables imported by cgo.
			a4.Xsmclas = XMC_RW
		}

		syms = append(syms, a4)

	case TLSSym:
		s := &XcoffSymEnt64{
			Nsclass: C_EXT,
			Noffset: uint32(xfile.stringTable.add(name)),
			Nscnum:  xfile.getXCOFFscnum(ldr.SymSect(x)),
			Nvalue:  uint64(ldr.SymValue(x)),
			Nnumaux: 1,
		}

		ldr.SetSymDynid(x, int32(xfile.symbolCount))
		syms = append(syms, s)

		size := uint64(ldr.SymSize(x))
		a4 := &XcoffAuxCSect64{
			Xauxtype:  _AUX_CSECT,
			Xsmclas:   XMC_UL,
			Xsmtyp:    XTY_CM,
			Xscnlenlo: uint32(size & 0xFFFFFFFF),
			Xscnlenhi: uint32(size >> 32),
		}

		syms = append(syms, a4)
	}

	for _, s := range syms {
		xfile.addSymbol(s)
	}
}

// Generate XCOFF Symbol table.
// It will be written in out file in Asmbxcoff, because it must be
// at the very end, especially after relocation sections which need symbols' index.
func (f *xcoffFile) asmaixsym(ctxt *Link) {
	ldr := ctxt.loader
	// Get correct size for symbols wrapping other symbols like go.string.*
	// sym.Size can be used directly as the symbols have already been written.
	for name, size := range outerSymSize {
		sym := ldr.Lookup(name, 0)
		if sym == 0 {
			Errorf(nil, "unknown outer symbol with name %s", name)
		} else {
			s := ldr.MakeSymbolUpdater(sym)
			s.SetSize(size)
		}
	}

	// These symbols won't show up in the first loop below because we
	// skip sym.STEXT symbols. Normal sym.STEXT symbols are emitted by walking textp.
	s := ldr.Lookup("runtime.text", 0)
	if ldr.SymType(s) == sym.STEXT {
		// We've already included this symbol in ctxt.Textp on AIX with external linker.
		// See data.go:/textaddress
		if !ctxt.IsExternal() {
			putaixsym(ctxt, s, TextSym)
		}
	}

	n := 1
	// Generate base addresses for all text sections if there are multiple
	for _, sect := range Segtext.Sections[1:] {
		if sect.Name != ".text" || ctxt.IsExternal() {
			// On AIX, runtime.text.X are symbols already in the symtab.
			break
		}
		s = ldr.Lookup(fmt.Sprintf("runtime.text.%d", n), 0)
		if s == 0 {
			break
		}
		if ldr.SymType(s) == sym.STEXT {
			putaixsym(ctxt, s, TextSym)
		}
		n++
	}

	s = ldr.Lookup("runtime.etext", 0)
	if ldr.SymType(s) == sym.STEXT {
		// We've already included this symbol in ctxt.Textp
		// on AIX with external linker.
		// See data.go:/textaddress
		if !ctxt.IsExternal() {
			putaixsym(ctxt, s, TextSym)
		}
	}

	shouldBeInSymbolTable := func(s loader.Sym, name string) bool {
		if ldr.AttrNotInSymbolTable(s) {
			return false
		}
		if (name == "" || name[0] == '.') && !ldr.IsFileLocal(s) && name != ".TOC." {
			return false
		}
		return true
	}

	for s, nsym := loader.Sym(1), loader.Sym(ldr.NSym()); s < nsym; s++ {
		if !shouldBeInSymbolTable(s, ldr.SymName(s)) {
			continue
		}
		st := ldr.SymType(s)
		switch {
		case st == sym.STLSBSS:
			if ctxt.IsExternal() {
				putaixsym(ctxt, s, TLSSym)
			}

		case st == sym.SBSS, st == sym.SNOPTRBSS, st == sym.SLIBFUZZER_8BIT_COUNTER, st == sym.SCOVERAGE_COUNTER:
			if ldr.AttrReachable(s) {
				data := ldr.Data(s)
				if len(data) > 0 {
					ldr.Errorf(s, "should not be bss (size=%d type=%v special=%v)", len(data), ldr.SymType(s), ldr.AttrSpecial(s))
				}
				putaixsym(ctxt, s, BSSSym)
			}

		case st >= sym.SELFRXSECT && st < sym.SXREF: // data sections handled in dodata
			if ldr.AttrReachable(s) {
				putaixsym(ctxt, s, DataSym)
			}

		case st == sym.SUNDEFEXT:
			putaixsym(ctxt, s, UndefinedSym)

		case st == sym.SDYNIMPORT:
			if ldr.AttrReachable(s) {
				putaixsym(ctxt, s, UndefinedSym)
			}
		}
	}

	for _, s := range ctxt.Textp {
		putaixsym(ctxt, s, TextSym)
	}

	if ctxt.Debugvlog != 0 {
		ctxt.Logf("symsize = %d\n", uint32(symSize))
	}
	xfile.updatePreviousFile(ctxt, true)
}

// genDynSym collects every reachable dynamic-import symbol, registers it
// in the .loader section and records its library's import file number.
func (f *xcoffFile) genDynSym(ctxt *Link) {
	ldr := ctxt.loader
	var dynsyms []loader.Sym
	for s := loader.Sym(1); s < loader.Sym(ldr.NSym()); s++ {
		if !ldr.AttrReachable(s) {
			continue
		}
		if t := ldr.SymType(s); t != sym.SHOSTOBJ && t != sym.SDYNIMPORT {
			continue
		}
		dynsyms = append(dynsyms, s)
	}

	for _, s := range dynsyms {
		f.adddynimpsym(ctxt, s)

		if _, ok := f.dynLibraries[ldr.SymDynimplib(s)]; !ok {
			f.dynLibraries[ldr.SymDynimplib(s)] = len(f.dynLibraries)
		}
	}
}

// (*xcoffFile)adddynimpsym adds the dynamic symbol "s" to a XCOFF file.
// A new symbol named s.Extname() is created to be the actual dynamic symbol
// in the .loader section and in the symbol table as an External Reference.
// The symbol "s" is transformed to SXCOFFTOC to end up in .data section.
// However, there is no writing protection on those symbols and
// it might need to be added.
// TODO(aix): Handles dynamic symbols without library.
func (f *xcoffFile) adddynimpsym(ctxt *Link, s loader.Sym) {
	// Check that library name is given.
	// Pattern is already checked when compiling.
	ldr := ctxt.loader
	if ctxt.IsInternal() && ldr.SymDynimplib(s) == "" {
		ctxt.Errorf(s, "imported symbol must have a given library")
	}

	sb := ldr.MakeSymbolUpdater(s)
	sb.SetReachable(true)
	sb.SetType(sym.SXCOFFTOC)

	// Create new dynamic symbol
	extsym := ldr.CreateSymForUpdate(ldr.SymExtname(s), 0)
	extsym.SetType(sym.SDYNIMPORT)
	extsym.SetDynimplib(ldr.SymDynimplib(s))
	extsym.SetExtname(ldr.SymExtname(s))
	extsym.SetDynimpvers(ldr.SymDynimpvers(s))

	// Add loader symbol
	lds := &xcoffLoaderSymbol{
		sym:    extsym.Sym(),
		smtype: XTY_IMP,
		smclas: XMC_DS,
	}
	if ldr.SymName(s) == "__n_pthreads" {
		// Currently, all imported symbols made by cgo_import_dynamic are
		// syscall functions, except __n_pthreads which is a variable.
		// TODO(aix): Find a way to detect variables imported by cgo.
		lds.smclas = XMC_RW
	}
	f.loaderSymbols = append(f.loaderSymbols, lds)

	// Relocation to retrieve the external address
	sb.AddBytes(make([]byte, 8))
	r, _ := sb.AddRel(objabi.R_ADDR)
	r.SetSym(extsym.Sym())
	r.SetSiz(uint8(ctxt.Arch.PtrSize))
	// TODO: maybe this could be
	// sb.SetSize(0)
	// sb.SetData(nil)
	// sb.AddAddr(ctxt.Arch, extsym.Sym())
	// If the size is not 0 to begin with, I don't think the added 8 bytes
	// of zeros are necessary.
}

// Xcoffadddynrel adds a dynamic relocation in a XCOFF file.
// This relocation will be made by the loader.
func Xcoffadddynrel(target *Target, ldr *loader.Loader, syms *ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool {
	if target.IsExternal() {
		return true
	}
	if ldr.SymType(s) <= sym.SPCLNTAB {
		ldr.Errorf(s, "cannot have a relocation to %s in a text section symbol", ldr.SymName(r.Sym()))
		return false
	}

	xldr := &xcoffLoaderReloc{
		sym:  s,
		roff: r.Off(),
	}
	targ := r.Sym()
	var targType sym.SymKind
	if targ != 0 {
		targType = ldr.SymType(targ)
	}

	switch r.Type() {
	default:
		ldr.Errorf(s, "unexpected .loader relocation to symbol: %s (type: %s)", ldr.SymName(targ), r.Type().String())
		return false
	case objabi.R_ADDR:
		if ldr.SymType(s) == sym.SXCOFFTOC && targType == sym.SDYNIMPORT {
			// Imported symbol relocation
			for i, dynsym := range xfile.loaderSymbols {
				if ldr.SymName(dynsym.sym) == ldr.SymName(targ) {
					xldr.symndx = int32(i + 3) // +3 because of 3 section symbols
					break
				}
			}
		} else if t := ldr.SymType(s); t == sym.SDATA || t == sym.SNOPTRDATA || t == sym.SBUILDINFO || t == sym.SXCOFFTOC {
			switch ldr.SymSect(targ).Seg {
			default:
				ldr.Errorf(s, "unknown segment for .loader relocation with symbol %s", ldr.SymName(targ))
			case &Segtext:
			case &Segrodata:
				xldr.symndx = 0 // .text
			case &Segdata:
				if targType == sym.SBSS || targType == sym.SNOPTRBSS {
					xldr.symndx = 2 // .bss
				} else {
					xldr.symndx = 1 // .data
				}
			}

		} else {
			ldr.Errorf(s, "unexpected type for .loader relocation R_ADDR for symbol %s: %s to %s", ldr.SymName(targ), ldr.SymType(s), ldr.SymType(targ))
			return false
		}

		// 0x3F<<8: 64-bit relocation length field; XCOFF_R_POS: positive reloc.
		xldr.rtype = 0x3F<<8 + XCOFF_R_POS
	}

	// loaderReloc may be appended to concurrently; the embedded mutex
	// protects it (see xcoffFile).
	xfile.Lock()
	xfile.loaderReloc = append(xfile.loaderReloc, xldr)
	xfile.Unlock()
	return true
}

// doxcoff prepares XCOFF-specific symbols before assembly: the TOC symbol,
// the .loader entry point, dynamic imports, and (with external linking)
// the double .text/.data symbols required for exported functions.
func (ctxt *Link) doxcoff() {
	ldr := ctxt.loader

	// TOC
	toc := ldr.CreateSymForUpdate("TOC", 0)
	toc.SetType(sym.SXCOFFTOC)
	toc.SetVisibilityHidden(true)

	// Add entry point to .loader symbols.
	ep := ldr.Lookup(*flagEntrySymbol, 0)
	if ep == 0 || !ldr.AttrReachable(ep) {
		Exitf("wrong entry point")
	}

	xfile.loaderSymbols = append(xfile.loaderSymbols, &xcoffLoaderSymbol{
		sym:    ep,
		smtype: XTY_ENT | XTY_SD,
		smclas: XMC_DS,
	})

	xfile.genDynSym(ctxt)

	for s := loader.Sym(1); s < loader.Sym(ldr.NSym()); s++ {
		if strings.HasPrefix(ldr.SymName(s), "TOC.") {
			sb := ldr.MakeSymbolUpdater(s)
			sb.SetType(sym.SXCOFFTOC)
		}
	}

	if ctxt.IsExternal() {
		// Change rt0_go name to match name in runtime/cgo:main().
		rt0 := ldr.Lookup("runtime.rt0_go", 0)
		ldr.SetSymExtname(rt0, "runtime_rt0_go")

		nsym := loader.Sym(ldr.NSym())
		for s := loader.Sym(1); s < nsym; s++ {
			if !ldr.AttrCgoExport(s) {
				continue
			}
			if ldr.IsFileLocal(s) {
				panic("cgo_export on static symbol")
			}

			if ldr.SymType(s) == sym.STEXT {
				// On AIX, an exported function must have two symbols:
				// - a .text symbol which must start with a ".".
				// - a .data symbol which is a function descriptor.
				name := ldr.SymExtname(s)
				ldr.SetSymExtname(s, "."+name)

				desc := ldr.MakeSymbolUpdater(ldr.CreateExtSym(name, 0))
				desc.SetReachable(true)
				desc.SetType(sym.SNOPTRDATA)
				desc.AddAddr(ctxt.Arch, s)
				desc.AddAddr(ctxt.Arch, toc.Sym())
				desc.AddUint64(ctxt.Arch, 0)
			}
		}
	}
}

// Loader section
// Currently, this section is created from scratch when assembling the XCOFF file
// according to information retrieved in xfile object.

// Create loader section and returns its size.
func Loaderblk(ctxt *Link, off uint64) {
	xfile.writeLdrScn(ctxt, off)
}

// writeLdrScn builds the .loader section in memory (header, symbol table,
// relocations, import file IDs, string table) and writes it to the output
// file at offset globalOff, recording the total size in f.loaderSize.
func (f *xcoffFile) writeLdrScn(ctxt *Link, globalOff uint64) {
	var symtab []*XcoffLdSym64
	var strtab []*XcoffLdStr64
	var importtab []*XcoffLdImportFile64
	var reloctab []*XcoffLdRel64
	var dynimpreloc []*XcoffLdRel64

	// As the string table is updated in any loader subsection,
	// its length must be computed at the same time.
	stlen := uint32(0)

	// Loader Header
	hdr := &XcoffLdHdr64{
		Lversion: 2,
		Lsymoff:  LDHDRSZ_64,
	}

	ldr := ctxt.loader
	/* Symbol table */
	for _, s := range f.loaderSymbols {
		lds := &XcoffLdSym64{
			Loffset: uint32(stlen + 2),
			Lsmtype: s.smtype,
			Lsmclas: s.smclas,
		}
		sym := s.sym
		switch s.smtype {
		default:
			ldr.Errorf(sym, "unexpected loader symbol type: 0x%x", s.smtype)
		case XTY_ENT | XTY_SD:
			lds.Lvalue = uint64(ldr.SymValue(sym))
			lds.Lscnum = f.getXCOFFscnum(ldr.SymSect(sym))
		case XTY_IMP:
			lds.Lifile = int32(f.dynLibraries[ldr.SymDynimplib(sym)] + 1)
		}
		ldstr := &XcoffLdStr64{
			size: uint16(len(ldr.SymName(sym)) + 1), // + null terminator
			name: ldr.SymName(sym),
		}
		stlen += uint32(2 + ldstr.size) // 2 = sizeof ldstr.size
		symtab = append(symtab, lds)
		strtab = append(strtab, ldstr)

	}

	hdr.Lnsyms = int32(len(symtab))
	hdr.Lrldoff = hdr.Lsymoff + uint64(24*hdr.Lnsyms) // 24 = sizeof one symbol
	off := hdr.Lrldoff                                // current offset is the same as the reloc offset

	/* Reloc */
	// Ensure deterministic order
	sort.Slice(f.loaderReloc, func(i, j int) bool {
		r1, r2 := f.loaderReloc[i], f.loaderReloc[j]
		if r1.sym != r2.sym {
			return r1.sym < r2.sym
		}
		if r1.roff != r2.roff {
			return r1.roff < r2.roff
		}
		if r1.rtype != r2.rtype {
			return r1.rtype < r2.rtype
		}
		return r1.symndx < r2.symndx
	})

	// First relocation entry: the entry point.
	ep := ldr.Lookup(*flagEntrySymbol, 0)
	xldr := &XcoffLdRel64{
		Lvaddr:  uint64(ldr.SymValue(ep)),
		Lrtype:  0x3F00,
		Lrsecnm: f.getXCOFFscnum(ldr.SymSect(ep)),
		Lsymndx: 0,
	}
	off += 16
	reloctab = append(reloctab, xldr)

	off += uint64(16 * len(f.loaderReloc))
	for _, r := range f.loaderReloc {
		symp := r.sym
		if symp == 0 {
			panic("unexpected 0 sym value")
		}
		xldr = &XcoffLdRel64{
			Lvaddr:  uint64(ldr.SymValue(symp) + int64(r.roff)),
			Lrtype:  r.rtype,
			Lsymndx: r.symndx,
		}

		if ldr.SymSect(symp) != nil {
			xldr.Lrsecnm = f.getXCOFFscnum(ldr.SymSect(symp))
		}

		reloctab = append(reloctab, xldr)
	}

	off += uint64(16 * len(dynimpreloc))
	reloctab = append(reloctab, dynimpreloc...)

	hdr.Lnreloc = int32(len(reloctab))
	hdr.Limpoff = off

	/* Import */
	// Default import: /usr/lib:/lib
	ldimpf := &XcoffLdImportFile64{
		Limpidpath: "/usr/lib:/lib",
	}
	off += uint64(len(ldimpf.Limpidpath) + len(ldimpf.Limpidbase) + len(ldimpf.Limpidmem) + 3) // + null delimiter
	importtab = append(importtab, ldimpf)

	// The map created by adddynimpsym associates the name to a number.
	// This number represents the library index (- 1) in this import files section.
	// Therefore, they must be sorted before being put inside the section.
	libsOrdered := make([]string, len(f.dynLibraries))
	for key, val := range f.dynLibraries {
		if libsOrdered[val] != "" {
			continue
		}
		libsOrdered[val] = key
	}

	for _, lib := range libsOrdered {
		// lib string is defined as base.a/mem.o or path/base.a/mem.o
		n := strings.Split(lib, "/")
		path := ""
		base := n[len(n)-2]
		mem := n[len(n)-1]
		if len(n) > 2 {
			path = lib[:len(lib)-len(base)-len(mem)-2]

		}
		ldimpf = &XcoffLdImportFile64{
			Limpidpath: path,
			Limpidbase: base,
			Limpidmem:  mem,
		}
		off += uint64(len(ldimpf.Limpidpath) + len(ldimpf.Limpidbase) + len(ldimpf.Limpidmem) + 3) // + null delimiter
		importtab = append(importtab, ldimpf)
	}

	hdr.Lnimpid = int32(len(importtab))
	hdr.Listlen = uint32(off - hdr.Limpoff)
	hdr.Lstoff = off
	hdr.Lstlen = stlen

	/* Writing */
	ctxt.Out.SeekSet(int64(globalOff))
	binary.Write(ctxt.Out, ctxt.Arch.ByteOrder, hdr)

	for _, s := range symtab {
		binary.Write(ctxt.Out, ctxt.Arch.ByteOrder, s)

	}
	for _, r := range reloctab {
		binary.Write(ctxt.Out, ctxt.Arch.ByteOrder, r)
	}
	for _, f := range importtab {
		ctxt.Out.WriteString(f.Limpidpath)
		ctxt.Out.Write8(0)
		ctxt.Out.WriteString(f.Limpidbase)
		ctxt.Out.Write8(0)
		ctxt.Out.WriteString(f.Limpidmem)
		ctxt.Out.Write8(0)
	}
	for _, s := range strtab {
		ctxt.Out.Write16(s.size)
		ctxt.Out.WriteString(s.name)
		ctxt.Out.Write8(0) // null terminator
	}

	f.loaderSize = off + uint64(stlen)
}

// XCOFF assembling and writing file

// writeFileHeader writes the XCOFF file header and, for internally linked
// executables, the auxiliary a.out header, at the current output position.
func (f *xcoffFile) writeFileHeader(ctxt *Link) {
	// File header
	f.xfhdr.Fmagic = U64_TOCMAGIC
	f.xfhdr.Fnscns = uint16(len(f.sections))
	f.xfhdr.Ftimedat = 0

	if !*FlagS {
		f.xfhdr.Fsymptr = uint64(f.symtabOffset)
		f.xfhdr.Fnsyms = int32(f.symbolCount)
	}

	if ctxt.BuildMode == BuildModeExe && ctxt.LinkMode == LinkInternal {
		ldr := ctxt.loader
		f.xfhdr.Fopthdr = AOUTHSZ_EXEC64
		f.xfhdr.Fflags = F_EXEC

		// auxiliary header
		f.xahdr.Ovstamp = 1 // based on dump -o
		f.xahdr.Omagic = 0x10b
		copy(f.xahdr.Omodtype[:], "1L")
		entry := ldr.Lookup(*flagEntrySymbol, 0)
		f.xahdr.Oentry = uint64(ldr.SymValue(entry))
		f.xahdr.Osnentry = f.getXCOFFscnum(ldr.SymSect(entry))
		toc := ldr.Lookup("TOC", 0)
		f.xahdr.Otoc = uint64(ldr.SymValue(toc))
		f.xahdr.Osntoc = f.getXCOFFscnum(ldr.SymSect(toc))

		f.xahdr.Oalgntext = int16(logBase2(int(XCOFFSECTALIGN)))
		f.xahdr.Oalgndata = 0x5

		binary.Write(ctxt.Out, binary.BigEndian, &f.xfhdr)
		binary.Write(ctxt.Out, binary.BigEndian, &f.xahdr)
	} else {
		f.xfhdr.Fopthdr = 0
		binary.Write(ctxt.Out, binary.BigEndian, &f.xfhdr)
	}

}

// xcoffwrite writes the file header and every section header at the
// beginning of the output file.
func xcoffwrite(ctxt *Link) {
	ctxt.Out.SeekSet(0)

	xfile.writeFileHeader(ctxt)

	for _, sect := range xfile.sections {
		sect.write(ctxt)
	}
}

// Generate XCOFF assembly file.
+// asmbXcoff lays out the XCOFF output: it registers the .text/.data/.bss
+// (and, for external linking, .tbss) sections, the dwarf sections and —
+// for an internally linked executable — the loader section, then emits
+// external-link relocations when needed and writes the symbol table,
+// string table and file headers.
+func asmbXcoff(ctxt *Link) {
+	ctxt.Out.SeekSet(0)
+	fileoff := int64(Segdwarf.Fileoff + Segdwarf.Filelen)
+	fileoff = int64(Rnd(int64(fileoff), *FlagRound))
+
+	xfile.sectNameToScnum = make(map[string]int16)
+
+	// Add sections
+	s := xfile.addSection(".text", Segtext.Vaddr, Segtext.Length, Segtext.Fileoff, STYP_TEXT)
+	xfile.xahdr.Otextstart = s.Svaddr
+	xfile.xahdr.Osntext = xfile.sectNameToScnum[".text"]
+	xfile.xahdr.Otsize = s.Ssize
+	xfile.sectText = s
+
+	segdataVaddr := Segdata.Vaddr
+	segdataFilelen := Segdata.Filelen
+	segdataFileoff := Segdata.Fileoff
+	segbssFilelen := Segdata.Length - Segdata.Filelen
+	if len(Segrelrodata.Sections) > 0 {
+		// Merge relro segment to data segment as
+		// relro data are inside data segment on AIX.
+		segdataVaddr = Segrelrodata.Vaddr
+		segdataFileoff = Segrelrodata.Fileoff
+		segdataFilelen = Segdata.Vaddr + Segdata.Filelen - Segrelrodata.Vaddr
+	}
+
+	s = xfile.addSection(".data", segdataVaddr, segdataFilelen, segdataFileoff, STYP_DATA)
+	xfile.xahdr.Odatastart = s.Svaddr
+	xfile.xahdr.Osndata = xfile.sectNameToScnum[".data"]
+	xfile.xahdr.Odsize = s.Ssize
+	xfile.sectData = s
+
+	// .bss follows .data in the virtual address space and has no file data.
+	s = xfile.addSection(".bss", segdataVaddr+segdataFilelen, segbssFilelen, 0, STYP_BSS)
+	xfile.xahdr.Osnbss = xfile.sectNameToScnum[".bss"]
+	xfile.xahdr.Obsize = s.Ssize
+	xfile.sectBss = s
+
+	if ctxt.LinkMode == LinkExternal {
+		var tbss *sym.Section
+		for _, s := range Segdata.Sections {
+			if s.Name == ".tbss" {
+				tbss = s
+				break
+			}
+		}
+		// NOTE(review): tbss stays nil if no .tbss section was found above;
+		// tbss.Vaddr would then panic — confirm .tbss always exists when
+		// linking externally.
+		s = xfile.addSection(".tbss", tbss.Vaddr, tbss.Length, 0, STYP_TBSS)
+	}
+
+	// add dwarf sections
+	for _, sect := range Segdwarf.Sections {
+		xfile.addDwarfSection(sect)
+	}
+
+	// add and write remaining sections
+	if ctxt.LinkMode == LinkInternal {
+		// Loader section
+		if ctxt.BuildMode == BuildModeExe {
+			Loaderblk(ctxt, uint64(fileoff))
+			s = xfile.addSection(".loader", 0, xfile.loaderSize, uint64(fileoff), STYP_LOADER)
+			xfile.xahdr.Osnloader = xfile.sectNameToScnum[".loader"]
+
+			// Update fileoff for symbol table
+			fileoff += int64(xfile.loaderSize)
+		}
+	}
+
+	// Create Symbol table
+	xfile.asmaixsym(ctxt)
+
+	if ctxt.LinkMode == LinkExternal {
+		xfile.emitRelocations(ctxt, fileoff)
+	}
+
+	// Write Symbol table
+	xfile.symtabOffset = ctxt.Out.Offset()
+	for _, s := range xfile.symtabSym {
+		binary.Write(ctxt.Out, ctxt.Arch.ByteOrder, s)
+	}
+	// write string table
+	xfile.stringTable.write(ctxt.Out)
+
+	// write headers
+	xcoffwrite(ctxt)
+}
+
+// emitRelocations emits relocation entries for go.o in external linking.
+func (f *xcoffFile) emitRelocations(ctxt *Link, fileoff int64) {
+	ctxt.Out.SeekSet(fileoff)
+	// Pad the output to an 8-byte boundary before emitting relocations.
+	for ctxt.Out.Offset()&7 != 0 {
+		ctxt.Out.Write8(0)
+	}
+
+	ldr := ctxt.loader
+	// relocsect relocates symbols from first in section sect, and returns
+	// the total number of relocations emitted.
+	relocsect := func(sect *sym.Section, syms []loader.Sym, base uint64) uint32 {
+		// ctxt.Logf("%s 0x%x\n", sect.Name, sect.Vaddr)
+		// If main section has no bits, nothing to relocate.
+		if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
+			return 0
+		}
+		sect.Reloff = uint64(ctxt.Out.Offset())
+		// Skip symbols that lie before this section's start address.
+		for i, s := range syms {
+			if !ldr.AttrReachable(s) {
+				continue
+			}
+			if uint64(ldr.SymValue(s)) >= sect.Vaddr {
+				syms = syms[i:]
+				break
+			}
+		}
+		eaddr := int64(sect.Vaddr + sect.Length)
+		for _, s := range syms {
+			if !ldr.AttrReachable(s) {
+				continue
+			}
+			// (int64(eaddr) below is a redundant conversion; eaddr is already int64.)
+			if ldr.SymValue(s) >= int64(eaddr) {
+				break
+			}
+
+			// Compute external relocations on the go, and pass to Xcoffreloc1 to stream out.
+			// Relocation must be ordered by address, so create a list of sorted indices.
+			relocs := ldr.Relocs(s)
+			sorted := make([]int, relocs.Count())
+			for i := 0; i < relocs.Count(); i++ {
+				sorted[i] = i
+			}
+			sort.Slice(sorted, func(i, j int) bool {
+				return relocs.At(sorted[i]).Off() < relocs.At(sorted[j]).Off()
+			})
+
+			for _, ri := range sorted {
+				r := relocs.At(ri)
+				rr, ok := extreloc(ctxt, ldr, s, r)
+				if !ok {
+					continue
+				}
+				if rr.Xsym == 0 {
+					ldr.Errorf(s, "missing xsym in relocation")
+					continue
+				}
+				if ldr.SymDynid(rr.Xsym) < 0 {
+					ldr.Errorf(s, "reloc %s to non-coff symbol %s (outer=%s) %d %d", r.Type(), ldr.SymName(r.Sym()), ldr.SymName(rr.Xsym), ldr.SymType(r.Sym()), ldr.SymDynid(rr.Xsym))
+				}
+				if !thearch.Xcoffreloc1(ctxt.Arch, ctxt.Out, ldr, s, rr, int64(uint64(ldr.SymValue(s)+int64(r.Off()))-base)) {
+					ldr.Errorf(s, "unsupported obj reloc %d(%s)/%d to %s", r.Type(), r.Type(), r.Siz(), ldr.SymName(r.Sym()))
+				}
+			}
+		}
+		sect.Rellen = uint64(ctxt.Out.Offset()) - sect.Reloff
+		return uint32(sect.Rellen) / RELSZ_64
+	}
+	// Emit relocations for the text and data sections; relro sections are
+	// folded into the data section (see asmbXcoff).
+	sects := []struct {
+		xcoffSect *XcoffScnHdr64
+		segs      []*sym.Segment
+	}{
+		{f.sectText, []*sym.Segment{&Segtext}},
+		{f.sectData, []*sym.Segment{&Segrelrodata, &Segdata}},
+	}
+	for _, s := range sects {
+		s.xcoffSect.Srelptr = uint64(ctxt.Out.Offset())
+		n := uint32(0)
+		for _, seg := range s.segs {
+			for _, sect := range seg.Sections {
+				if sect.Name == ".text" {
+					n += relocsect(sect, ctxt.Textp, 0)
+				} else {
+					n += relocsect(sect, ctxt.datap, 0)
+				}
+			}
+		}
+		s.xcoffSect.Snreloc += n
+	}
+
+	// Match each dwarf section with its XCOFF section (by dwarf subtype
+	// flag) and emit its relocations.
+dwarfLoop:
+	for i := 0; i < len(Segdwarf.Sections); i++ {
+		sect := Segdwarf.Sections[i]
+		si := dwarfp[i]
+		if si.secSym() != loader.Sym(sect.Sym) ||
+			ldr.SymSect(si.secSym()) != sect {
+			panic("inconsistency between dwarfp and Segdwarf")
+		}
+		for _, xcoffSect := range f.sections {
+			_, subtyp := xcoffGetDwarfSubtype(sect.Name)
+			if xcoffSect.Sflags&0xF0000 == subtyp {
+				xcoffSect.Srelptr = uint64(ctxt.Out.Offset())
+				xcoffSect.Snreloc = relocsect(sect, si.syms, sect.Vaddr)
+				continue dwarfLoop
+			}
+		}
+		Errorf(nil, "emitRelocations: could not find %q section", sect.Name)
+	}
+}
+
+// xcoffCreateExportFile creates a file with exported symbols for
+// -Wl,-bE option.
+// ld won't export symbols unless they are listed in an export file.
+func xcoffCreateExportFile(ctxt *Link) (fname string) {
+	fname = filepath.Join(*flagTmpdir, "export_file.exp")
+	var buf bytes.Buffer
+
+	ldr := ctxt.loader
+	for s, nsym := loader.Sym(1), loader.Sym(ldr.NSym()); s < nsym; s++ {
+		if !ldr.AttrCgoExport(s) {
+			continue
+		}
+		extname := ldr.SymExtname(s)
+		if !strings.HasPrefix(extname, "._cgoexp_") {
+			continue
+		}
+		if ldr.IsFileLocal(s) {
+			continue // Only export non-static symbols
+		}
+
+		// Retrieve the name of the initial symbol
+		// exported by cgo.
+		// The corresponding Go symbol is:
+		// _cgoexp_hashcode_symname.
+		name := strings.SplitN(extname, "_", 4)[3]
+
+		buf.Write([]byte(name + "\n"))
+	}
+
+	err := os.WriteFile(fname, buf.Bytes(), 0666)
+	if err != nil {
+		Errorf(nil, "WriteFile %s failed: %v", fname, err)
+	}
+
+	return fname
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/loadelf/ldelf.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/loadelf/ldelf.go
new file mode 100644
index 0000000000000000000000000000000000000000..82e7dc30b72912b6573275ebfb5e749a41f96c42
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/loadelf/ldelf.go
@@ -0,0 +1,1194 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package loadelf implements an ELF file reader.
+package loadelf + +import ( + "bytes" + "cmd/internal/bio" + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" + "encoding/binary" + "fmt" + "io" + "log" + "strings" +) + +/* +Derived from Plan 9 from User Space's src/libmach/elf.h, elf.c +https://github.com/9fans/plan9port/tree/master/src/libmach/ + + Copyright © 2004 Russ Cox. + Portions Copyright © 2008-2010 Google Inc. + Portions Copyright © 2010 The Go Authors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +const ( + SHT_ARM_ATTRIBUTES = 0x70000003 +) + +type ElfSect struct { + name string + nameoff uint32 + type_ elf.SectionType + flags elf.SectionFlag + addr uint64 + off uint64 + size uint64 + link uint32 + info uint32 + align uint64 + entsize uint64 + base []byte + readOnlyMem bool // Is this section in readonly memory? 
+ sym loader.Sym +} + +type ElfObj struct { + f *bio.Reader + base int64 // offset in f where ELF begins + length int64 // length of ELF + is64 int + name string + e binary.ByteOrder + sect []ElfSect + nsect uint + nsymtab int + symtab *ElfSect + symstr *ElfSect + type_ uint32 + machine uint32 + version uint32 + entry uint64 + phoff uint64 + shoff uint64 + flags uint32 + ehsize uint32 + phentsize uint32 + phnum uint32 + shentsize uint32 + shnum uint32 + shstrndx uint32 +} + +type ElfSym struct { + name string + value uint64 + size uint64 + bind elf.SymBind + type_ elf.SymType + other uint8 + shndx elf.SectionIndex + sym loader.Sym +} + +const ( + TagFile = 1 + TagCPUName = 4 + TagCPURawName = 5 + TagCompatibility = 32 + TagNoDefaults = 64 + TagAlsoCompatibleWith = 65 + TagABIVFPArgs = 28 +) + +type elfAttribute struct { + tag uint64 + sval string + ival uint64 +} + +type elfAttributeList struct { + data []byte + err error +} + +func (a *elfAttributeList) string() string { + if a.err != nil { + return "" + } + nul := bytes.IndexByte(a.data, 0) + if nul < 0 { + a.err = io.EOF + return "" + } + s := string(a.data[:nul]) + a.data = a.data[nul+1:] + return s +} + +func (a *elfAttributeList) uleb128() uint64 { + if a.err != nil { + return 0 + } + v, size := binary.Uvarint(a.data) + a.data = a.data[size:] + return v +} + +// Read an elfAttribute from the list following the rules used on ARM systems. +func (a *elfAttributeList) armAttr() elfAttribute { + attr := elfAttribute{tag: a.uleb128()} + switch { + case attr.tag == TagCompatibility: + attr.ival = a.uleb128() + attr.sval = a.string() + + case attr.tag == TagNoDefaults: // Tag_nodefaults has no argument + + case attr.tag == TagAlsoCompatibleWith: + // Not really, but we don't actually care about this tag. 
+ attr.sval = a.string() + + // Tag with string argument + case attr.tag == TagCPUName || attr.tag == TagCPURawName || (attr.tag >= 32 && attr.tag&1 != 0): + attr.sval = a.string() + + default: // Tag with integer argument + attr.ival = a.uleb128() + } + return attr +} + +func (a *elfAttributeList) done() bool { + if a.err != nil || len(a.data) == 0 { + return true + } + return false +} + +// Look for the attribute that indicates the object uses the hard-float ABI (a +// file-level attribute with tag Tag_VFP_arch and value 1). Unfortunately the +// format used means that we have to parse all of the file-level attributes to +// find the one we are looking for. This format is slightly documented in "ELF +// for the ARM Architecture" but mostly this is derived from reading the source +// to gold and readelf. +func parseArmAttributes(e binary.ByteOrder, data []byte) (found bool, ehdrFlags uint32, err error) { + found = false + if data[0] != 'A' { + return false, 0, fmt.Errorf(".ARM.attributes has unexpected format %c\n", data[0]) + } + data = data[1:] + for len(data) != 0 { + sectionlength := e.Uint32(data) + sectiondata := data[4:sectionlength] + data = data[sectionlength:] + + nulIndex := bytes.IndexByte(sectiondata, 0) + if nulIndex < 0 { + return false, 0, fmt.Errorf("corrupt .ARM.attributes (section name not NUL-terminated)\n") + } + name := string(sectiondata[:nulIndex]) + sectiondata = sectiondata[nulIndex+1:] + + if name != "aeabi" { + continue + } + for len(sectiondata) != 0 { + subsectiontag, sz := binary.Uvarint(sectiondata) + subsectionsize := e.Uint32(sectiondata[sz:]) + subsectiondata := sectiondata[sz+4 : subsectionsize] + sectiondata = sectiondata[subsectionsize:] + + if subsectiontag != TagFile { + continue + } + attrList := elfAttributeList{data: subsectiondata} + for !attrList.done() { + attr := attrList.armAttr() + if attr.tag == TagABIVFPArgs && attr.ival == 1 { + found = true + ehdrFlags = 0x5000402 // has entry point, Version5 EABI, hard-float 
ABI + } + } + if attrList.err != nil { + return false, 0, fmt.Errorf("could not parse .ARM.attributes\n") + } + } + } + return found, ehdrFlags, nil +} + +// Load loads the ELF file pn from f. +// Symbols are installed into the loader, and a slice of the text symbols is returned. +// +// On ARM systems, Load will attempt to determine what ELF header flags to +// emit by scanning the attributes in the ELF file being loaded. The +// parameter initEhdrFlags contains the current header flags for the output +// object, and the returned ehdrFlags contains what this Load function computes. +// TODO: find a better place for this logic. +func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, pkg string, length int64, pn string, initEhdrFlags uint32) (textp []loader.Sym, ehdrFlags uint32, err error) { + newSym := func(name string, version int) loader.Sym { + return l.CreateStaticSym(name) + } + lookup := l.LookupOrCreateCgoExport + errorf := func(str string, args ...interface{}) ([]loader.Sym, uint32, error) { + return nil, 0, fmt.Errorf("loadelf: %s: %v", pn, fmt.Sprintf(str, args...)) + } + + ehdrFlags = initEhdrFlags + + base := f.Offset() + + var hdrbuf [64]byte + if _, err := io.ReadFull(f, hdrbuf[:]); err != nil { + return errorf("malformed elf file: %v", err) + } + + var e binary.ByteOrder + switch elf.Data(hdrbuf[elf.EI_DATA]) { + case elf.ELFDATA2LSB: + e = binary.LittleEndian + + case elf.ELFDATA2MSB: + e = binary.BigEndian + + default: + return errorf("malformed elf file, unknown header") + } + + hdr := new(elf.Header32) + binary.Read(bytes.NewReader(hdrbuf[:]), e, hdr) + + if string(hdr.Ident[:elf.EI_CLASS]) != elf.ELFMAG { + return errorf("malformed elf file, bad header") + } + + // read header + elfobj := new(ElfObj) + + elfobj.e = e + elfobj.f = f + elfobj.base = base + elfobj.length = length + elfobj.name = pn + + is64 := 0 + class := elf.Class(hdrbuf[elf.EI_CLASS]) + if class == elf.ELFCLASS64 { + is64 = 1 + hdr := new(elf.Header64) 
+ binary.Read(bytes.NewReader(hdrbuf[:]), e, hdr) + elfobj.type_ = uint32(hdr.Type) + elfobj.machine = uint32(hdr.Machine) + elfobj.version = hdr.Version + elfobj.entry = hdr.Entry + elfobj.phoff = hdr.Phoff + elfobj.shoff = hdr.Shoff + elfobj.flags = hdr.Flags + elfobj.ehsize = uint32(hdr.Ehsize) + elfobj.phentsize = uint32(hdr.Phentsize) + elfobj.phnum = uint32(hdr.Phnum) + elfobj.shentsize = uint32(hdr.Shentsize) + elfobj.shnum = uint32(hdr.Shnum) + elfobj.shstrndx = uint32(hdr.Shstrndx) + } else { + elfobj.type_ = uint32(hdr.Type) + elfobj.machine = uint32(hdr.Machine) + elfobj.version = hdr.Version + elfobj.entry = uint64(hdr.Entry) + elfobj.phoff = uint64(hdr.Phoff) + elfobj.shoff = uint64(hdr.Shoff) + elfobj.flags = hdr.Flags + elfobj.ehsize = uint32(hdr.Ehsize) + elfobj.phentsize = uint32(hdr.Phentsize) + elfobj.phnum = uint32(hdr.Phnum) + elfobj.shentsize = uint32(hdr.Shentsize) + elfobj.shnum = uint32(hdr.Shnum) + elfobj.shstrndx = uint32(hdr.Shstrndx) + } + + elfobj.is64 = is64 + + if v := uint32(hdrbuf[elf.EI_VERSION]); v != elfobj.version { + return errorf("malformed elf version: got %d, want %d", v, elfobj.version) + } + + if elf.Type(elfobj.type_) != elf.ET_REL { + return errorf("elf but not elf relocatable object") + } + + mach := elf.Machine(elfobj.machine) + switch arch.Family { + default: + return errorf("elf %s unimplemented", arch.Name) + + case sys.MIPS: + if mach != elf.EM_MIPS || class != elf.ELFCLASS32 { + return errorf("elf object but not mips") + } + + case sys.MIPS64: + if mach != elf.EM_MIPS || class != elf.ELFCLASS64 { + return errorf("elf object but not mips64") + } + case sys.Loong64: + if mach != elf.EM_LOONGARCH || class != elf.ELFCLASS64 { + return errorf("elf object but not loong64") + } + + case sys.ARM: + if e != binary.LittleEndian || mach != elf.EM_ARM || class != elf.ELFCLASS32 { + return errorf("elf object but not arm") + } + + case sys.AMD64: + if e != binary.LittleEndian || mach != elf.EM_X86_64 || class != elf.ELFCLASS64 
{ + return errorf("elf object but not amd64") + } + + case sys.ARM64: + if e != binary.LittleEndian || mach != elf.EM_AARCH64 || class != elf.ELFCLASS64 { + return errorf("elf object but not arm64") + } + + case sys.I386: + if e != binary.LittleEndian || mach != elf.EM_386 || class != elf.ELFCLASS32 { + return errorf("elf object but not 386") + } + + case sys.PPC64: + if mach != elf.EM_PPC64 || class != elf.ELFCLASS64 { + return errorf("elf object but not ppc64") + } + + case sys.RISCV64: + if mach != elf.EM_RISCV || class != elf.ELFCLASS64 { + return errorf("elf object but not riscv64") + } + + case sys.S390X: + if mach != elf.EM_S390 || class != elf.ELFCLASS64 { + return errorf("elf object but not s390x") + } + } + + // load section list into memory. + elfobj.sect = make([]ElfSect, elfobj.shnum) + + elfobj.nsect = uint(elfobj.shnum) + for i := 0; uint(i) < elfobj.nsect; i++ { + f.MustSeek(int64(uint64(base)+elfobj.shoff+uint64(int64(i)*int64(elfobj.shentsize))), 0) + sect := &elfobj.sect[i] + if is64 != 0 { + var b elf.Section64 + if err := binary.Read(f, e, &b); err != nil { + return errorf("malformed elf file: %v", err) + } + + sect.nameoff = b.Name + sect.type_ = elf.SectionType(b.Type) + sect.flags = elf.SectionFlag(b.Flags) + sect.addr = b.Addr + sect.off = b.Off + sect.size = b.Size + sect.link = b.Link + sect.info = b.Info + sect.align = b.Addralign + sect.entsize = b.Entsize + } else { + var b elf.Section32 + + if err := binary.Read(f, e, &b); err != nil { + return errorf("malformed elf file: %v", err) + } + sect.nameoff = b.Name + sect.type_ = elf.SectionType(b.Type) + sect.flags = elf.SectionFlag(b.Flags) + sect.addr = uint64(b.Addr) + sect.off = uint64(b.Off) + sect.size = uint64(b.Size) + sect.link = b.Link + sect.info = b.Info + sect.align = uint64(b.Addralign) + sect.entsize = uint64(b.Entsize) + } + } + + // read section string table and translate names + if elfobj.shstrndx >= uint32(elfobj.nsect) { + return errorf("malformed elf file: shstrndx out 
of range %d >= %d", elfobj.shstrndx, elfobj.nsect) + } + + sect := &elfobj.sect[elfobj.shstrndx] + if err := elfmap(elfobj, sect); err != nil { + return errorf("malformed elf file: %v", err) + } + for i := 0; uint(i) < elfobj.nsect; i++ { + if elfobj.sect[i].nameoff != 0 { + elfobj.sect[i].name = cstring(sect.base[elfobj.sect[i].nameoff:]) + } + } + + // load string table for symbols into memory. + elfobj.symtab = section(elfobj, ".symtab") + + if elfobj.symtab == nil { + // our work is done here - no symbols means nothing can refer to this file + return + } + + if elfobj.symtab.link <= 0 || elfobj.symtab.link >= uint32(elfobj.nsect) { + return errorf("elf object has symbol table with invalid string table link") + } + + elfobj.symstr = &elfobj.sect[elfobj.symtab.link] + if is64 != 0 { + elfobj.nsymtab = int(elfobj.symtab.size / elf.Sym64Size) + } else { + elfobj.nsymtab = int(elfobj.symtab.size / elf.Sym32Size) + } + + if err := elfmap(elfobj, elfobj.symtab); err != nil { + return errorf("malformed elf file: %v", err) + } + if err := elfmap(elfobj, elfobj.symstr); err != nil { + return errorf("malformed elf file: %v", err) + } + + // load text and data segments into memory. + // they are not as small as the section lists, but we'll need + // the memory anyway for the symbol images, so we might + // as well use one large chunk. + + // create symbols for elfmapped sections + sectsymNames := make(map[string]bool) + counter := 0 + for i := 0; uint(i) < elfobj.nsect; i++ { + sect = &elfobj.sect[i] + if sect.type_ == SHT_ARM_ATTRIBUTES && sect.name == ".ARM.attributes" { + if err := elfmap(elfobj, sect); err != nil { + return errorf("%s: malformed elf file: %v", pn, err) + } + // We assume the soft-float ABI unless we see a tag indicating otherwise. 
+ if initEhdrFlags == 0x5000002 { + ehdrFlags = 0x5000202 + } else { + ehdrFlags = initEhdrFlags + } + found, newEhdrFlags, err := parseArmAttributes(e, sect.base[:sect.size]) + if err != nil { + // TODO(dfc) should this return an error? + log.Printf("%s: %v", pn, err) + } + if found { + ehdrFlags = newEhdrFlags + } + } + if (sect.type_ != elf.SHT_PROGBITS && sect.type_ != elf.SHT_NOBITS) || sect.flags&elf.SHF_ALLOC == 0 { + continue + } + if sect.type_ != elf.SHT_NOBITS { + if err := elfmap(elfobj, sect); err != nil { + return errorf("%s: malformed elf file: %v", pn, err) + } + } + + name := fmt.Sprintf("%s(%s)", pkg, sect.name) + for sectsymNames[name] { + counter++ + name = fmt.Sprintf("%s(%s%d)", pkg, sect.name, counter) + } + sectsymNames[name] = true + + sb := l.MakeSymbolUpdater(lookup(name, localSymVersion)) + + switch sect.flags & (elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_EXECINSTR) { + default: + return errorf("%s: unexpected flags for ELF section %s", pn, sect.name) + + case elf.SHF_ALLOC: + sb.SetType(sym.SRODATA) + + case elf.SHF_ALLOC + elf.SHF_WRITE: + if sect.type_ == elf.SHT_NOBITS { + sb.SetType(sym.SNOPTRBSS) + } else { + sb.SetType(sym.SNOPTRDATA) + } + + case elf.SHF_ALLOC + elf.SHF_EXECINSTR: + sb.SetType(sym.STEXT) + } + + if sect.name == ".got" || sect.name == ".toc" { + sb.SetType(sym.SELFGOT) + } + if sect.type_ == elf.SHT_PROGBITS { + sb.SetData(sect.base[:sect.size]) + sb.SetExternal(true) + } + + sb.SetSize(int64(sect.size)) + sb.SetAlign(int32(sect.align)) + sb.SetReadOnly(sect.readOnlyMem) + + sect.sym = sb.Sym() + } + + // enter sub-symbols into symbol table. + // symbol 0 is the null symbol. 
+ symbols := make([]loader.Sym, elfobj.nsymtab) + + for i := 1; i < elfobj.nsymtab; i++ { + var elfsym ElfSym + if err := readelfsym(newSym, lookup, l, arch, elfobj, i, &elfsym, 1, localSymVersion); err != nil { + return errorf("%s: malformed elf file: %v", pn, err) + } + symbols[i] = elfsym.sym + if elfsym.type_ != elf.STT_FUNC && elfsym.type_ != elf.STT_OBJECT && elfsym.type_ != elf.STT_NOTYPE && elfsym.type_ != elf.STT_COMMON { + continue + } + if elfsym.shndx == elf.SHN_COMMON || elfsym.type_ == elf.STT_COMMON { + sb := l.MakeSymbolUpdater(elfsym.sym) + if uint64(sb.Size()) < elfsym.size { + sb.SetSize(int64(elfsym.size)) + } + if sb.Type() == 0 || sb.Type() == sym.SXREF { + sb.SetType(sym.SNOPTRBSS) + } + continue + } + + if uint(elfsym.shndx) >= elfobj.nsect || elfsym.shndx == 0 { + continue + } + + // even when we pass needSym == 1 to readelfsym, it might still return nil to skip some unwanted symbols + if elfsym.sym == 0 { + continue + } + sect = &elfobj.sect[elfsym.shndx] + if sect.sym == 0 { + if elfsym.type_ == 0 { + if strings.HasPrefix(sect.name, ".debug_") && elfsym.name == "" { + // clang on arm and riscv64. + // This reportedly happens with clang 3.7 on ARM. + // See issue 13139. + continue + } + if strings.HasPrefix(elfsym.name, ".Ldebug_") || elfsym.name == ".L0 " { + // gcc on riscv64. + continue + } + if elfsym.name == ".Lline_table_start0" { + // clang on riscv64. + continue + } + + if strings.HasPrefix(elfsym.name, "$d") && sect.name == ".debug_frame" { + // "$d" is a marker, not a real symbol. + // This happens with gcc on ARM64. + // See https://sourceware.org/bugzilla/show_bug.cgi?id=21809 + continue + } + } + + if strings.HasPrefix(elfsym.name, ".Linfo_string") { + // clang does this + continue + } + + if strings.HasPrefix(elfsym.name, ".LASF") || strings.HasPrefix(elfsym.name, ".LLRL") || strings.HasPrefix(elfsym.name, ".LLST") { + // gcc on s390x and riscv64 does this. 
+ continue + } + + return errorf("%v: sym#%d (%q): ignoring symbol in section %d (%q) (type %d)", elfsym.sym, i, elfsym.name, elfsym.shndx, sect.name, elfsym.type_) + } + + s := elfsym.sym + if l.OuterSym(s) != 0 { + if l.AttrDuplicateOK(s) { + continue + } + return errorf("duplicate symbol reference: %s in both %s and %s", + l.SymName(s), l.SymName(l.OuterSym(s)), l.SymName(sect.sym)) + } + + sectsb := l.MakeSymbolUpdater(sect.sym) + sb := l.MakeSymbolUpdater(s) + + sb.SetType(sectsb.Type()) + sectsb.AddInteriorSym(s) + if !l.AttrCgoExportDynamic(s) { + sb.SetDynimplib("") // satisfy dynimport + } + sb.SetValue(int64(elfsym.value)) + sb.SetSize(int64(elfsym.size)) + if sectsb.Type() == sym.STEXT { + if l.AttrExternal(s) && !l.AttrDuplicateOK(s) { + return errorf("%s: duplicate symbol definition", sb.Name()) + } + l.SetAttrExternal(s, true) + } + + if elf.Machine(elfobj.machine) == elf.EM_PPC64 { + flag := int(elfsym.other) >> 5 + switch flag { + case 0: + // No local entry. R2 is preserved. + case 1: + // This is kind of a hack, but pass the hint about this symbol's + // usage of R2 (R2 is a caller-save register not a TOC pointer, and + // this function does not have a distinct local entry) by setting + // its SymLocalentry to 1. + l.SetSymLocalentry(s, 1) + case 7: + return errorf("%s: invalid sym.other 0x%x", sb.Name(), elfsym.other) + default: + // Convert the word sized offset into bytes. 
+ l.SetSymLocalentry(s, 4<= uint32(elfobj.nsect) || elfobj.sect[rsect.info].base == nil { + continue + } + sect = &elfobj.sect[rsect.info] + if err := elfmap(elfobj, rsect); err != nil { + return errorf("malformed elf file: %v", err) + } + rela := 0 + if rsect.type_ == elf.SHT_RELA { + rela = 1 + } + n := int(rsect.size / uint64(4+4*is64) / uint64(2+rela)) + p := rsect.base + sb := l.MakeSymbolUpdater(sect.sym) + for j := 0; j < n; j++ { + var add uint64 + var symIdx int + var relocType uint64 + var rOff int32 + var rAdd int64 + var rSym loader.Sym + + if is64 != 0 { + // 64-bit rel/rela + rOff = int32(e.Uint64(p)) + + p = p[8:] + switch arch.Family { + case sys.MIPS64: + // https://www.linux-mips.org/pub/linux/mips/doc/ABI/elf64-2.4.pdf + // The doc shows it's different with general Linux ELF + symIdx = int(e.Uint32(p)) + relocType = uint64(p[7]) + default: + info := e.Uint64(p) + relocType = info & 0xffffffff + symIdx = int(info >> 32) + } + p = p[8:] + if rela != 0 { + add = e.Uint64(p) + p = p[8:] + } + } else { + // 32-bit rel/rela + rOff = int32(e.Uint32(p)) + + p = p[4:] + info := e.Uint32(p) + relocType = uint64(info & 0xff) + symIdx = int(info >> 8) + p = p[4:] + if rela != 0 { + add = uint64(e.Uint32(p)) + p = p[4:] + } + } + + if relocType == 0 { // skip R_*_NONE relocation + j-- + n-- + continue + } + + if symIdx == 0 { // absolute relocation, don't bother reading the null symbol + rSym = 0 + } else { + var elfsym ElfSym + if err := readelfsym(newSym, lookup, l, arch, elfobj, int(symIdx), &elfsym, 0, 0); err != nil { + return errorf("malformed elf file: %v", err) + } + elfsym.sym = symbols[symIdx] + if elfsym.sym == 0 { + return errorf("malformed elf file: %s#%d: reloc of invalid sym #%d %s shndx=%d type=%d", l.SymName(sect.sym), j, int(symIdx), elfsym.name, elfsym.shndx, elfsym.type_) + } + + rSym = elfsym.sym + } + + rType := objabi.ElfRelocOffset + objabi.RelocType(relocType) + rSize, addendSize, err := relSize(arch, pn, uint32(relocType)) + if err 
!= nil {
	return nil, 0, err
}
if rela != 0 {
	// RELA relocation: explicit addend from the record.
	rAdd = int64(add)
} else {
	// load addend from image
	if rSize == 4 {
		rAdd = int64(e.Uint32(sect.base[rOff:]))
	} else if rSize == 8 {
		rAdd = int64(e.Uint64(sect.base[rOff:]))
	} else {
		return errorf("invalid rela size %d", rSize)
	}
}

// Sign-extend addends that are narrower than 8 bytes.
if addendSize == 2 {
	rAdd = int64(int16(rAdd))
}
if addendSize == 4 {
	rAdd = int64(int32(rAdd))
}

r, _ := sb.AddRel(rType)
r.SetOff(rOff)
r.SetSiz(rSize)
r.SetSym(rSym)
r.SetAdd(rAdd)
}

sb.SortRelocs() // just in case
}

return textp, ehdrFlags, nil
}

// section returns the ELF section in elfobj with the given (non-empty)
// name, or nil if no such section exists.
func section(elfobj *ElfObj, name string) *ElfSect {
	for i := 0; uint(i) < elfobj.nsect; i++ {
		if elfobj.sect[i].name != "" && name != "" && elfobj.sect[i].name == name {
			return &elfobj.sect[i]
		}
	}
	return nil
}

// elfmap lazily maps the contents of sect into sect.base by slicing the
// underlying file. It is a no-op if the section is already mapped.
func elfmap(elfobj *ElfObj, sect *ElfSect) (err error) {
	if sect.base != nil {
		return nil
	}

	if sect.off+sect.size > uint64(elfobj.length) {
		err = fmt.Errorf("elf section past end of file")
		return err
	}

	elfobj.f.MustSeek(int64(uint64(elfobj.base)+sect.off), 0)
	sect.base, sect.readOnlyMem, err = elfobj.f.Slice(uint64(sect.size))
	if err != nil {
		return fmt.Errorf("short read: %v", err)
	}

	return nil
}

// readelfsym decodes the i-th symbol from elfobj's symbol table into
// elfsym and, when needSym is non-zero, resolves (or creates) the
// corresponding loader symbol, storing it in elfsym.sym. Index 0 (the
// ELF null symbol) is rejected.
func readelfsym(newSym, lookup func(string, int) loader.Sym, l *loader.Loader, arch *sys.Arch, elfobj *ElfObj, i int, elfsym *ElfSym, needSym int, localSymVersion int) (err error) {
	if i >= elfobj.nsymtab || i < 0 {
		err = fmt.Errorf("invalid elf symbol index")
		return err
	}

	if i == 0 {
		// Fixed typo: message previously said "readym".
		return fmt.Errorf("readelfsym: read null symbol!")
	}

	if elfobj.is64 != 0 {
		b := new(elf.Sym64)
		binary.Read(bytes.NewReader(elfobj.symtab.base[i*elf.Sym64Size:(i+1)*elf.Sym64Size]), elfobj.e, b)
		elfsym.name = cstring(elfobj.symstr.base[b.Name:])
		elfsym.value = b.Value
		elfsym.size = b.Size
		elfsym.shndx = elf.SectionIndex(b.Shndx)
		elfsym.bind = elf.ST_BIND(b.Info)
		elfsym.type_ = elf.ST_TYPE(b.Info)
		elfsym.other = b.Other
	} else {
		b := new(elf.Sym32)
		binary.Read(bytes.NewReader(elfobj.symtab.base[i*elf.Sym32Size:(i+1)*elf.Sym32Size]), elfobj.e, b)
		elfsym.name = cstring(elfobj.symstr.base[b.Name:])
		elfsym.value = uint64(b.Value)
		elfsym.size = uint64(b.Size)
		elfsym.shndx = elf.SectionIndex(b.Shndx)
		elfsym.bind = elf.ST_BIND(b.Info)
		elfsym.type_ = elf.ST_TYPE(b.Info)
		elfsym.other = b.Other
	}

	var s loader.Sym

	if elfsym.name == "_GLOBAL_OFFSET_TABLE_" {
		elfsym.name = ".got"
	}
	if elfsym.name == ".TOC." {
		// Magic symbol on ppc64. Will be set to this object
		// file's .got+0x8000.
		elfsym.bind = elf.STB_LOCAL
	}

	switch elfsym.type_ {
	case elf.STT_SECTION:
		s = elfobj.sect[elfsym.shndx].sym

	case elf.STT_OBJECT, elf.STT_FUNC, elf.STT_NOTYPE, elf.STT_COMMON:
		switch elfsym.bind {
		case elf.STB_GLOBAL:
			if needSym != 0 {
				s = lookup(elfsym.name, 0)

				// for global scoped hidden symbols we should insert it into
				// symbol hash table, but mark them as hidden.
				// __i686.get_pc_thunk.bx is allowed to be duplicated, to
				// workaround that we set dupok.
				// TODO(minux): correctly handle __i686.get_pc_thunk.bx without
				// set dupok generally. See https://golang.org/cl/5823055
				// comment #5 for details.
				if s != 0 && elfsym.other == 2 {
					if !l.IsExternal(s) {
						l.MakeSymbolUpdater(s)
					}
					l.SetAttrDuplicateOK(s, true)
					l.SetAttrVisibilityHidden(s, true)
				}
			}

		case elf.STB_LOCAL:
			if (arch.Family == sys.ARM || arch.Family == sys.ARM64) && (strings.HasPrefix(elfsym.name, "$a") || strings.HasPrefix(elfsym.name, "$d") || strings.HasPrefix(elfsym.name, "$x")) {
				// binutils for arm and arm64 generate these mapping
				// symbols, ignore these
				break
			}

			if elfsym.name == ".TOC." {
				// We need to be able to look this up,
				// so put it in the hash table.
				if needSym != 0 {
					s = lookup(elfsym.name, localSymVersion)
					l.SetAttrVisibilityHidden(s, true)
				}
				break
			}

			if needSym != 0 {
				// local names and hidden global names are unique
				// and should only be referenced by their index, not name, so we
				// don't bother to add them into the hash table
				// FIXME: pass empty string here for name? This would
				// reduce mem use, but also (possibly) make it harder
				// to debug problems.
				s = newSym(elfsym.name, localSymVersion)
				l.SetAttrVisibilityHidden(s, true)
			}

		case elf.STB_WEAK:
			if needSym != 0 {
				s = lookup(elfsym.name, 0)
				if elfsym.other == 2 {
					l.SetAttrVisibilityHidden(s, true)
				}

				// Allow weak symbols to be duplicated when already defined.
				if l.OuterSym(s) != 0 {
					l.SetAttrDuplicateOK(s, true)
				}
			}

		default:
			err = fmt.Errorf("%s: invalid symbol binding %d", elfsym.name, elfsym.bind)
			return err
		}
	}

	// An undefined reference: mark it SXREF so the linker knows it still
	// needs a definition.
	if s != 0 && l.SymType(s) == 0 && elfsym.type_ != elf.STT_SECTION {
		sb := l.MakeSymbolUpdater(s)
		sb.SetType(sym.SXREF)
	}
	elfsym.sym = s

	return nil
}

// Return the size of the relocated field, and the size of the addend as the first
// and second values. Note, the addend may be larger than the relocation field in
// some cases when a relocated value is split across multiple relocations.
func relSize(arch *sys.Arch, pn string, elftype uint32) (uint8, uint8, error) {
	// TODO(mdempsky): Replace this with a struct-valued switch statement
	// once golang.org/issue/15164 is fixed or found to not impair cmd/link
	// performance.
	// Numeric arch-family codes, so each case arm below can be written as
	// family | reloc-type<<16 to match the switch expression.
	const (
		AMD64   = uint32(sys.AMD64)
		ARM     = uint32(sys.ARM)
		ARM64   = uint32(sys.ARM64)
		I386    = uint32(sys.I386)
		LOONG64 = uint32(sys.Loong64)
		MIPS    = uint32(sys.MIPS)
		MIPS64  = uint32(sys.MIPS64)
		PPC64   = uint32(sys.PPC64)
		RISCV64 = uint32(sys.RISCV64)
		S390X   = uint32(sys.S390X)
	)

	switch uint32(arch.Family) | elftype<<16 {
	default:
		return 0, 0, fmt.Errorf("%s: unknown relocation type %d; compiled without -fpic?", pn, elftype)

	case MIPS | uint32(elf.R_MIPS_HI16)<<16,
		MIPS | uint32(elf.R_MIPS_LO16)<<16,
		MIPS | uint32(elf.R_MIPS_GOT16)<<16,
		MIPS | uint32(elf.R_MIPS_GOT_HI16)<<16,
		MIPS | uint32(elf.R_MIPS_GOT_LO16)<<16,
		MIPS | uint32(elf.R_MIPS_GPREL16)<<16,
		MIPS | uint32(elf.R_MIPS_GOT_PAGE)<<16,
		MIPS | uint32(elf.R_MIPS_JALR)<<16,
		MIPS | uint32(elf.R_MIPS_GOT_OFST)<<16,
		MIPS64 | uint32(elf.R_MIPS_HI16)<<16,
		MIPS64 | uint32(elf.R_MIPS_LO16)<<16,
		MIPS64 | uint32(elf.R_MIPS_GOT16)<<16,
		MIPS64 | uint32(elf.R_MIPS_GOT_HI16)<<16,
		MIPS64 | uint32(elf.R_MIPS_GOT_LO16)<<16,
		MIPS64 | uint32(elf.R_MIPS_GPREL16)<<16,
		MIPS64 | uint32(elf.R_MIPS_GOT_PAGE)<<16,
		MIPS64 | uint32(elf.R_MIPS_JALR)<<16,
		MIPS64 | uint32(elf.R_MIPS_GOT_OFST)<<16,
		MIPS64 | uint32(elf.R_MIPS_CALL16)<<16,
		MIPS64 | uint32(elf.R_MIPS_GPREL32)<<16,
		MIPS64 | uint32(elf.R_MIPS_64)<<16,
		MIPS64 | uint32(elf.R_MIPS_GOT_DISP)<<16,
		MIPS64 | uint32(elf.R_MIPS_PC32)<<16:
		return 4, 4, nil

	case LOONG64 | uint32(elf.R_LARCH_ADD8)<<16,
		LOONG64 | uint32(elf.R_LARCH_SUB8)<<16:
		return 1, 1, nil

	case LOONG64 | uint32(elf.R_LARCH_ADD16)<<16,
		LOONG64 | uint32(elf.R_LARCH_SUB16)<<16:
		return 2, 2, nil

	case LOONG64 | uint32(elf.R_LARCH_SOP_PUSH_PCREL)<<16,
		LOONG64 | uint32(elf.R_LARCH_SOP_PUSH_GPREL)<<16,
		LOONG64 | uint32(elf.R_LARCH_SOP_PUSH_ABSOLUTE)<<16,
		LOONG64 | uint32(elf.R_LARCH_MARK_LA)<<16,
		LOONG64 | uint32(elf.R_LARCH_SOP_POP_32_S_0_10_10_16_S2)<<16,
		LOONG64 | uint32(elf.R_LARCH_MARK_PCREL)<<16,
		LOONG64 | uint32(elf.R_LARCH_ADD24)<<16,
		LOONG64 | uint32(elf.R_LARCH_ADD32)<<16,
		LOONG64 | uint32(elf.R_LARCH_SUB24)<<16,
		LOONG64 | uint32(elf.R_LARCH_SUB32)<<16,
		LOONG64 | uint32(elf.R_LARCH_B26)<<16,
		LOONG64 | uint32(elf.R_LARCH_32_PCREL)<<16:
		return 4, 4, nil

	case LOONG64 | uint32(elf.R_LARCH_64)<<16,
		LOONG64 | uint32(elf.R_LARCH_ADD64)<<16,
		LOONG64 | uint32(elf.R_LARCH_SUB64)<<16,
		LOONG64 | uint32(elf.R_LARCH_64_PCREL)<<16:
		return 8, 8, nil

	case S390X | uint32(elf.R_390_8)<<16:
		return 1, 1, nil

	case PPC64 | uint32(elf.R_PPC64_TOC16)<<16,
		S390X | uint32(elf.R_390_16)<<16,
		S390X | uint32(elf.R_390_GOT16)<<16,
		S390X | uint32(elf.R_390_PC16)<<16,
		S390X | uint32(elf.R_390_PC16DBL)<<16,
		S390X | uint32(elf.R_390_PLT16DBL)<<16:
		return 2, 2, nil

	case ARM | uint32(elf.R_ARM_ABS32)<<16,
		ARM | uint32(elf.R_ARM_GOT32)<<16,
		ARM | uint32(elf.R_ARM_PLT32)<<16,
		ARM | uint32(elf.R_ARM_GOTOFF)<<16,
		ARM | uint32(elf.R_ARM_GOTPC)<<16,
		ARM | uint32(elf.R_ARM_THM_PC22)<<16,
		ARM | uint32(elf.R_ARM_REL32)<<16,
		ARM | uint32(elf.R_ARM_CALL)<<16,
		ARM | uint32(elf.R_ARM_V4BX)<<16,
		ARM | uint32(elf.R_ARM_GOT_PREL)<<16,
		ARM | uint32(elf.R_ARM_PC24)<<16,
		ARM | uint32(elf.R_ARM_JUMP24)<<16,
		ARM64 | uint32(elf.R_AARCH64_CALL26)<<16,
		ARM64 | uint32(elf.R_AARCH64_ADR_GOT_PAGE)<<16,
		ARM64 | uint32(elf.R_AARCH64_LD64_GOT_LO12_NC)<<16,
		ARM64 | uint32(elf.R_AARCH64_ADR_PREL_PG_HI21)<<16,
		ARM64 | uint32(elf.R_AARCH64_ADD_ABS_LO12_NC)<<16,
		ARM64 | uint32(elf.R_AARCH64_LDST8_ABS_LO12_NC)<<16,
		ARM64 | uint32(elf.R_AARCH64_LDST16_ABS_LO12_NC)<<16,
		ARM64 | uint32(elf.R_AARCH64_LDST32_ABS_LO12_NC)<<16,
		ARM64 | uint32(elf.R_AARCH64_LDST64_ABS_LO12_NC)<<16,
		ARM64 | uint32(elf.R_AARCH64_LDST128_ABS_LO12_NC)<<16,
		ARM64 | uint32(elf.R_AARCH64_PREL32)<<16,
		ARM64 | uint32(elf.R_AARCH64_JUMP26)<<16,
		AMD64 | uint32(elf.R_X86_64_PC32)<<16,
		AMD64 | uint32(elf.R_X86_64_PLT32)<<16,
		AMD64 | uint32(elf.R_X86_64_GOTPCREL)<<16,
		AMD64 | uint32(elf.R_X86_64_GOTPCRELX)<<16,
		AMD64 | uint32(elf.R_X86_64_REX_GOTPCRELX)<<16,
		I386 | uint32(elf.R_386_32)<<16,
		I386 | uint32(elf.R_386_PC32)<<16,
		I386 | uint32(elf.R_386_GOT32)<<16,
		I386 | uint32(elf.R_386_PLT32)<<16,
		I386 | uint32(elf.R_386_GOTOFF)<<16,
		I386 | uint32(elf.R_386_GOTPC)<<16,
		I386 | uint32(elf.R_386_GOT32X)<<16,
		PPC64 | uint32(elf.R_PPC64_REL24)<<16,
		PPC64 | uint32(elf.R_PPC64_REL24_NOTOC)<<16,
		PPC64 | uint32(elf.R_PPC64_REL24_P9NOTOC)<<16,
		PPC64 | uint32(elf.R_PPC_REL32)<<16,
		S390X | uint32(elf.R_390_32)<<16,
		S390X | uint32(elf.R_390_PC32)<<16,
		S390X | uint32(elf.R_390_GOT32)<<16,
		S390X | uint32(elf.R_390_PLT32)<<16,
		S390X | uint32(elf.R_390_PC32DBL)<<16,
		S390X | uint32(elf.R_390_PLT32DBL)<<16,
		S390X | uint32(elf.R_390_GOTPCDBL)<<16,
		S390X | uint32(elf.R_390_GOTENT)<<16:
		return 4, 4, nil

	case AMD64 | uint32(elf.R_X86_64_64)<<16,
		AMD64 | uint32(elf.R_X86_64_PC64)<<16,
		ARM64 | uint32(elf.R_AARCH64_ABS64)<<16,
		ARM64 | uint32(elf.R_AARCH64_PREL64)<<16,
		PPC64 | uint32(elf.R_PPC64_ADDR64)<<16,
		PPC64 | uint32(elf.R_PPC64_PCREL34)<<16,
		PPC64 | uint32(elf.R_PPC64_GOT_PCREL34)<<16,
		PPC64 | uint32(elf.R_PPC64_PLT_PCREL34_NOTOC)<<16,
		S390X | uint32(elf.R_390_GLOB_DAT)<<16,
		S390X | uint32(elf.R_390_RELATIVE)<<16,
		S390X | uint32(elf.R_390_GOTOFF)<<16,
		S390X | uint32(elf.R_390_GOTPC)<<16,
		S390X | uint32(elf.R_390_64)<<16,
		S390X | uint32(elf.R_390_PC64)<<16,
		S390X | uint32(elf.R_390_GOT64)<<16,
		S390X | uint32(elf.R_390_PLT64)<<16:
		return 8, 8, nil

	case RISCV64 | uint32(elf.R_RISCV_SET6)<<16,
		RISCV64 | uint32(elf.R_RISCV_SUB6)<<16,
		RISCV64 | uint32(elf.R_RISCV_SET8)<<16,
		RISCV64 | uint32(elf.R_RISCV_SUB8)<<16:
		return 1, 1, nil

	case RISCV64 | uint32(elf.R_RISCV_RVC_BRANCH)<<16,
		RISCV64 | uint32(elf.R_RISCV_RVC_JUMP)<<16,
		RISCV64 | uint32(elf.R_RISCV_SET16)<<16,
		RISCV64 | uint32(elf.R_RISCV_SUB16)<<16:
		return 2, 2, nil

	case RISCV64 | uint32(elf.R_RISCV_32)<<16,
		RISCV64 | uint32(elf.R_RISCV_BRANCH)<<16,
		RISCV64 | uint32(elf.R_RISCV_HI20)<<16,
		RISCV64 | uint32(elf.R_RISCV_LO12_I)<<16,
		RISCV64 | uint32(elf.R_RISCV_LO12_S)<<16,
		RISCV64 | uint32(elf.R_RISCV_GOT_HI20)<<16,
		RISCV64 | uint32(elf.R_RISCV_PCREL_HI20)<<16,
		RISCV64 | uint32(elf.R_RISCV_PCREL_LO12_I)<<16,
		RISCV64 | uint32(elf.R_RISCV_PCREL_LO12_S)<<16,
		RISCV64 | uint32(elf.R_RISCV_ADD32)<<16,
		RISCV64 | uint32(elf.R_RISCV_SET32)<<16,
		RISCV64 | uint32(elf.R_RISCV_SUB32)<<16,
		RISCV64 | uint32(elf.R_RISCV_32_PCREL)<<16,
		RISCV64 | uint32(elf.R_RISCV_RELAX)<<16:
		return 4, 4, nil

	case RISCV64 | uint32(elf.R_RISCV_64)<<16,
		RISCV64 | uint32(elf.R_RISCV_CALL)<<16,
		RISCV64 | uint32(elf.R_RISCV_CALL_PLT)<<16:
		return 8, 8, nil

	// PPC64 half-word relocations: a 2-byte field relocated with a
	// 4-byte addend (the split-value case described in the doc comment).
	case PPC64 | uint32(elf.R_PPC64_TOC16_LO)<<16,
		PPC64 | uint32(elf.R_PPC64_TOC16_HI)<<16,
		PPC64 | uint32(elf.R_PPC64_TOC16_HA)<<16,
		PPC64 | uint32(elf.R_PPC64_TOC16_DS)<<16,
		PPC64 | uint32(elf.R_PPC64_TOC16_LO_DS)<<16,
		PPC64 | uint32(elf.R_PPC64_REL16_LO)<<16,
		PPC64 | uint32(elf.R_PPC64_REL16_HI)<<16,
		PPC64 | uint32(elf.R_PPC64_REL16_HA)<<16,
		PPC64 | uint32(elf.R_PPC64_PLT16_HA)<<16,
		PPC64 | uint32(elf.R_PPC64_PLT16_LO_DS)<<16:
		return 2, 4, nil

	// PPC64 inline PLT sequence hint relocations (-fno-plt)
	// These are informational annotations to assist linker optimizations.
	case PPC64 | uint32(elf.R_PPC64_PLTSEQ)<<16,
		PPC64 | uint32(elf.R_PPC64_PLTCALL)<<16,
		PPC64 | uint32(elf.R_PPC64_PLTCALL_NOTOC)<<16,
		PPC64 | uint32(elf.R_PPC64_PLTSEQ_NOTOC)<<16:
		return 0, 0, nil

	}
}

// cstring returns the NUL-terminated string at the start of x
// (or all of x if it contains no NUL byte).
func cstring(x []byte) string {
	i := bytes.IndexByte(x, '\x00')
	if i >= 0 {
		x = x[:i]
	}
	return string(x)
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/loader/loader.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/loader/loader.go
new file mode 100644
index 0000000000000000000000000000000000000000..3edb5e2f6f16474d8f7db96dbcf40bc6088eb996
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/loader/loader.go
@@ -0,0 +1,2670 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package loader

import (
	"bytes"
	"cmd/internal/bio"
	"cmd/internal/goobj"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/sys"
	"cmd/link/internal/sym"
	"debug/elf"
	"fmt"
	"internal/abi"
	"io"
	"log"
	"math/bits"
	"os"
	"sort"
	"strings"
)

var _ = fmt.Print

// Sym encapsulates a global symbol index, used to identify a specific
// Go symbol. The 0-valued Sym is corresponds to an invalid symbol.
type Sym = sym.LoaderSym

// Relocs encapsulates the set of relocations on a given symbol; an
// instance of this type is returned by the Loader Relocs() method.
type Relocs struct {
	rs []goobj.Reloc

	li uint32  // local index of symbol whose relocs we're examining
	r  *oReader // object reader for containing package
	l  *Loader  // loader
}

// ExtReloc contains the payload for an external relocation.
type ExtReloc struct {
	Xsym Sym
	Xadd int64
	Type objabi.RelocType
	Size uint8
}

// Reloc holds a "handle" to access a relocation record from an
// object file.
type Reloc struct {
	*goobj.Reloc
	r *oReader
	l *Loader
}

// Accessors that translate between the raw goobj record and loader-level
// types: Type/Weak strip or test the R_WEAK bit; Sym/SetSym resolve
// between local SymRefs and global Sym indices; IsMarker reports a
// zero-size (informational) relocation.
func (rel Reloc) Type() objabi.RelocType     { return objabi.RelocType(rel.Reloc.Type()) &^ objabi.R_WEAK }
func (rel Reloc) Weak() bool                 { return objabi.RelocType(rel.Reloc.Type())&objabi.R_WEAK != 0 }
func (rel Reloc) SetType(t objabi.RelocType) { rel.Reloc.SetType(uint16(t)) }
func (rel Reloc) Sym() Sym                   { return rel.l.resolve(rel.r, rel.Reloc.Sym()) }
func (rel Reloc) SetSym(s Sym)               { rel.Reloc.SetSym(goobj.SymRef{PkgIdx: 0, SymIdx: uint32(s)}) }
func (rel Reloc) IsMarker() bool             { return rel.Siz() == 0 }

// Aux holds a "handle" to access an aux symbol record from an
// object file.
type Aux struct {
	*goobj.Aux
	r *oReader
	l *Loader
}

// Sym resolves the aux record's symbol reference to a global index.
func (a Aux) Sym() Sym { return a.l.resolve(a.r, a.Aux.Sym()) }

// oReader is a wrapper type of obj.Reader, along with some
// extra information.
type oReader struct {
	*goobj.Reader
	unit         *sym.CompilationUnit
	version      int // version of static symbol
	pkgprefix    string
	syms         []Sym    // Sym's global index, indexed by local index
	pkg          []uint32 // indices of referenced package by PkgIdx (index into loader.objs array)
	ndef         int      // cache goobj.Reader.NSym()
	nhashed64def int      // cache goobj.Reader.NHashed64Def()
	nhasheddef   int      // cache goobj.Reader.NHashedDef()
	objidx       uint32   // index of this reader in the objs slice
}

// Total number of defined symbols (package symbols, hashed symbols, and
// non-package symbols).
func (r *oReader) NAlldef() int { return r.ndef + r.nhashed64def + r.nhasheddef + r.NNonpkgdef() }

// objIdx pairs an object reader with the global index of its first symbol.
type objIdx struct {
	r *oReader
	i Sym // start index
}

// objSym represents a symbol in an object file. It is a tuple of
// the object and the symbol's local index.
// For external symbols, objidx is the index of l.extReader (extObj),
// s is its index into the payload array.
// {0, 0} represents the nil symbol.
+type objSym struct { + objidx uint32 // index of the object (in l.objs array) + s uint32 // local index +} + +type nameVer struct { + name string + v int +} + +type Bitmap []uint32 + +// set the i-th bit. +func (bm Bitmap) Set(i Sym) { + n, r := uint(i)/32, uint(i)%32 + bm[n] |= 1 << r +} + +// unset the i-th bit. +func (bm Bitmap) Unset(i Sym) { + n, r := uint(i)/32, uint(i)%32 + bm[n] &^= (1 << r) +} + +// whether the i-th bit is set. +func (bm Bitmap) Has(i Sym) bool { + n, r := uint(i)/32, uint(i)%32 + return bm[n]&(1< curLen { + b = append(b, MakeBitmap(reqLen+1-curLen)...) + } + return b +} + +type symAndSize struct { + sym Sym + size uint32 +} + +// A Loader loads new object files and resolves indexed symbol references. +// +// Notes on the layout of global symbol index space: +// +// - Go object files are read before host object files; each Go object +// read adds its defined package symbols to the global index space. +// Nonpackage symbols are not yet added. +// +// - In loader.LoadNonpkgSyms, add non-package defined symbols and +// references in all object files to the global index space. +// +// - Host object file loading happens; the host object loader does a +// name/version lookup for each symbol it finds; this can wind up +// extending the external symbol index space range. The host object +// loader stores symbol payloads in loader.payloads using SymbolBuilder. +// +// - Each symbol gets a unique global index. For duplicated and +// overwriting/overwritten symbols, the second (or later) appearance +// of the symbol gets the same global index as the first appearance. +type Loader struct { + start map[*oReader]Sym // map from object file to its start index + objs []objIdx // sorted by start index (i.e. 
objIdx.i)
	extStart    Sym   // from this index on, the symbols are externally defined
	builtinSyms []Sym // global index of builtin symbols

	objSyms []objSym // global index mapping to local index

	symsByName    [2]map[string]Sym // map symbol name to index, two maps are for ABI0 and ABIInternal
	extStaticSyms map[nameVer]Sym   // externally defined static symbols, keyed by name

	extReader    *oReader         // a dummy oReader, for external symbols
	payloadBatch []extSymPayload
	payloads     []*extSymPayload // contents of linker-materialized external syms
	values       []int64          // symbol values, indexed by global sym index

	sects    []*sym.Section // sections
	symSects []uint16       // symbol's section, index to sects array

	align []uint8 // symbol 2^N alignment, indexed by global index

	deferReturnTramp map[Sym]bool // whether the symbol is a trampoline of a deferreturn call

	objByPkg map[string]uint32 // map package path to the index of its Go object reader

	anonVersion int // most recently assigned ext static sym pseudo-version

	// Bitmaps and other side structures used to store data used to store
	// symbol flags/attributes; these are to be accessed via the
	// corresponding loader "AttrXXX" and "SetAttrXXX" methods. Please
	// visit the comments on these methods for more details on the
	// semantics / interpretation of the specific flags or attribute.
	attrReachable        Bitmap // reachable symbols, indexed by global index
	attrOnList           Bitmap // "on list" symbols, indexed by global index
	attrLocal            Bitmap // "local" symbols, indexed by global index
	attrNotInSymbolTable Bitmap // "not in symtab" symbols, indexed by global idx
	attrUsedInIface      Bitmap // "used in interface" symbols, indexed by global idx
	attrSpecial          Bitmap // "special" frame symbols, indexed by global idx
	attrVisibilityHidden Bitmap // hidden symbols, indexed by ext sym index
	attrDuplicateOK      Bitmap // dupOK symbols, indexed by ext sym index
	attrShared           Bitmap // shared symbols, indexed by ext sym index
	attrExternal         Bitmap // external symbols, indexed by ext sym index
	generatedSyms        Bitmap // symbols that generate their content, indexed by ext sym idx

	attrReadOnly         map[Sym]bool     // readonly data for this sym
	attrCgoExportDynamic map[Sym]struct{} // "cgo_export_dynamic" symbols
	attrCgoExportStatic  map[Sym]struct{} // "cgo_export_static" symbols

	// Outer and Sub relations for symbols.
	outer []Sym // indexed by global index
	sub   map[Sym]Sym

	dynimplib   map[Sym]string      // stores Dynimplib symbol attribute
	dynimpvers  map[Sym]string      // stores Dynimpvers symbol attribute
	localentry  map[Sym]uint8       // stores Localentry symbol attribute
	extname     map[Sym]string      // stores Extname symbol attribute
	elfType     map[Sym]elf.SymType // stores elf type symbol property
	elfSym      map[Sym]int32       // stores elf sym symbol property
	localElfSym map[Sym]int32       // stores "local" elf sym symbol property
	symPkg      map[Sym]string      // stores package for symbol, or library for shlib-derived syms
	plt         map[Sym]int32       // stores dynimport for pe objects
	got         map[Sym]int32       // stores got for pe objects
	dynid       map[Sym]int32       // stores Dynid for symbol

	relocVariant map[relocId]sym.RelocVariant // stores variant relocs

	// Used to implement field tracking; created during deadcode if
	// field tracking is enabled. Reachparent[K] contains the index of
	// the symbol that triggered the marking of symbol K as live.
	Reachparent []Sym

	// CgoExports records cgo-exported symbols by SymName.
	CgoExports map[string]Sym

	flags uint32

	strictDupMsgs int // number of strict-dup warning/errors, when FlagStrictDups is enabled

	errorReporter *ErrorReporter

	npkgsyms    int // number of package symbols, for accounting
	nhashedsyms int // number of hashed symbols, for accounting
}

// Symbol-definition kinds, in the order the loader processes them.
const (
	pkgDef = iota
	hashed64Def
	hashedDef
	nonPkgDef
	nonPkgRef
)

// objidx
const (
	nilObj = iota
	extObj
	goObjStart
)

// extSymPayload holds the payload (data + relocations) for linker-synthesized
// external symbols (note that symbol value is stored in a separate slice).
type extSymPayload struct {
	name   string // TODO: would this be better as offset into str table?
	size   int64
	ver    int
	kind   sym.SymKind
	objidx uint32 // index of original object if sym made by cloneToExternal
	relocs []goobj.Reloc
	data   []byte
	auxs   []goobj.Aux
}

const (
	// Loader.flags
	FlagStrictDups = 1 << iota
)

// NewLoader constructs an empty Loader with index 0 reserved for the nil
// symbol and index 1 for the external-symbol pseudo-object.
func NewLoader(flags uint32, reporter *ErrorReporter) *Loader {
	nbuiltin := goobj.NBuiltin()
	extReader := &oReader{objidx: extObj}
	ldr := &Loader{
		start:   make(map[*oReader]Sym),
		objs:    []objIdx{{}, {extReader, 0}}, // reserve index 0 for nil symbol, 1 for external symbols
		objSyms: make([]objSym, 1, 1),         // This will get overwritten later.
		extReader:            extReader,
		symsByName:           [2]map[string]Sym{make(map[string]Sym, 80000), make(map[string]Sym, 50000)}, // preallocate ~2MB for ABI0 and ~1MB for ABI1 symbols
		objByPkg:             make(map[string]uint32),
		sub:                  make(map[Sym]Sym),
		dynimplib:            make(map[Sym]string),
		dynimpvers:           make(map[Sym]string),
		localentry:           make(map[Sym]uint8),
		extname:              make(map[Sym]string),
		attrReadOnly:         make(map[Sym]bool),
		elfType:              make(map[Sym]elf.SymType),
		elfSym:               make(map[Sym]int32),
		localElfSym:          make(map[Sym]int32),
		symPkg:               make(map[Sym]string),
		plt:                  make(map[Sym]int32),
		got:                  make(map[Sym]int32),
		dynid:                make(map[Sym]int32),
		attrCgoExportDynamic: make(map[Sym]struct{}),
		attrCgoExportStatic:  make(map[Sym]struct{}),
		deferReturnTramp:     make(map[Sym]bool),
		extStaticSyms:        make(map[nameVer]Sym),
		builtinSyms:          make([]Sym, nbuiltin),
		flags:                flags,
		errorReporter:        reporter,
		sects:                []*sym.Section{nil}, // reserve index 0 for nil section
	}
	reporter.ldr = ldr
	return ldr
}

// Add object file r, return the start index.
func (l *Loader) addObj(pkg string, r *oReader) Sym {
	if _, ok := l.start[r]; ok {
		panic("already added")
	}
	pkg = objabi.PathToPrefix(pkg) // the object file contains escaped package path
	if _, ok := l.objByPkg[pkg]; !ok {
		l.objByPkg[pkg] = r.objidx
	}
	i := Sym(len(l.objSyms))
	l.start[r] = i
	l.objs = append(l.objs, objIdx{r, i})
	return i
}

// Add a symbol from an object file, return the global index.
// If the symbol already exist, it returns the index of that symbol.
func (st *loadState) addSym(name string, ver int, r *oReader, li uint32, kind int, osym *goobj.Sym) Sym {
	l := st.l
	if l.extStart != 0 {
		panic("addSym called after external symbol is created")
	}
	i := Sym(len(l.objSyms))
	if int(i) != len(l.objSyms) { // overflow
		panic("too many symbols")
	}
	// addToGlobal appends the (object, local-index) pair, making i the
	// symbol's global index.
	addToGlobal := func() {
		l.objSyms = append(l.objSyms, objSym{r.objidx, li})
	}
	if name == "" && kind != hashed64Def && kind != hashedDef {
		addToGlobal()
		return i // unnamed aux symbol
	}
	if ver == r.version {
		// Static symbol. Add its global index but don't
		// add to name lookup table, as it cannot be
		// referenced by name.
		addToGlobal()
		return i
	}
	switch kind {
	case pkgDef:
		// Defined package symbols cannot be dup to each other.
		// We load all the package symbols first, so we don't need
		// to check dup here.
		// We still add it to the lookup table, as it may still be
		// referenced by name (e.g. through linkname).
		l.symsByName[ver][name] = i
		addToGlobal()
		return i
	case hashed64Def, hashedDef:
		// Hashed (content-addressable) symbol. Check the hash
		// but don't add to name lookup table, as they are not
		// referenced by name. Also no need to do overwriting
		// check, as same hash indicates same content.
		var checkHash func() (symAndSize, bool)
		var addToHashMap func(symAndSize)
		var h64 uint64        // only used for hashed64Def
		var h *goobj.HashType // only used for hashedDef
		if kind == hashed64Def {
			checkHash = func() (symAndSize, bool) {
				h64 = r.Hash64(li - uint32(r.ndef))
				s, existed := st.hashed64Syms[h64]
				return s, existed
			}
			addToHashMap = func(ss symAndSize) { st.hashed64Syms[h64] = ss }
		} else {
			checkHash = func() (symAndSize, bool) {
				h = r.Hash(li - uint32(r.ndef+r.nhashed64def))
				s, existed := st.hashedSyms[*h]
				return s, existed
			}
			addToHashMap = func(ss symAndSize) { st.hashedSyms[*h] = ss }
		}
		siz := osym.Siz()
		if s, existed := checkHash(); existed {
			// The content hash is built from symbol data and relocations. In the
			// object file, the symbol data may not always contain trailing zeros,
			// e.g. for [5]int{1,2,3} and [100]int{1,2,3}, the data is same
			// (although the size is different).
			// Also, for short symbols, the content hash is the identity function of
			// the 8 bytes, and trailing zeros doesn't change the hash value, e.g.
			// hash("A") == hash("A\0\0\0").
			// So when two symbols have the same hash, we need to use the one with
			// larger size.
			if siz > s.size {
				// New symbol has larger size, use the new one. Rewrite the index mapping.
				l.objSyms[s.sym] = objSym{r.objidx, li}
				addToHashMap(symAndSize{s.sym, siz})
			}
			return s.sym
		}
		addToHashMap(symAndSize{i, siz})
		addToGlobal()
		return i
	}

	// Non-package (named) symbol. Check if it already exists.
	oldi, existed := l.symsByName[ver][name]
	if !existed {
		l.symsByName[ver][name] = i
		addToGlobal()
		return i
	}
	// symbol already exists
	if osym.Dupok() {
		if l.flags&FlagStrictDups != 0 {
			l.checkdup(name, r, li, oldi)
		}
		// Fix for issue #47185 -- given two dupok symbols with
		// different sizes, favor symbol with larger size. See
		// also issue #46653.
		szdup := l.SymSize(oldi)
		sz := int64(r.Sym(li).Siz())
		if szdup < sz {
			// new symbol overwrites old symbol.
			l.objSyms[oldi] = objSym{r.objidx, li}
		}
		return oldi
	}
	oldr, oldli := l.toLocal(oldi)
	oldsym := oldr.Sym(oldli)
	if oldsym.Dupok() {
		return oldi
	}
	// Neither is dupok: a definition (non-empty data) may overwrite a
	// zero-size data placeholder; anything else is a duplicate error.
	overwrite := r.DataSize(li) != 0
	if overwrite {
		// new symbol overwrites old symbol.
		oldtyp := sym.AbiSymKindToSymKind[objabi.SymKind(oldsym.Type())]
		if !(oldtyp.IsData() && oldr.DataSize(oldli) == 0) {
			log.Fatalf("duplicated definition of symbol %s, from %s and %s", name, r.unit.Lib.Pkg, oldr.unit.Lib.Pkg)
		}
		l.objSyms[oldi] = objSym{r.objidx, li}
	} else {
		// old symbol overwrites new symbol.
		typ := sym.AbiSymKindToSymKind[objabi.SymKind(oldsym.Type())]
		if !typ.IsData() { // only allow overwriting data symbol
			log.Fatalf("duplicated definition of symbol %s, from %s and %s", name, r.unit.Lib.Pkg, oldr.unit.Lib.Pkg)
		}
	}
	return oldi
}

// newExtSym creates a new external sym with the specified
// name/version.
func (l *Loader) newExtSym(name string, ver int) Sym {
	i := Sym(len(l.objSyms))
	if int(i) != len(l.objSyms) { // overflow
		panic("too many symbols")
	}
	if l.extStart == 0 {
		l.extStart = i
	}
	l.growValues(int(i) + 1)
	l.growOuter(int(i) + 1)
	l.growAttrBitmaps(int(i) + 1)
	pi := l.newPayload(name, ver)
	l.objSyms = append(l.objSyms, objSym{l.extReader.objidx, uint32(pi)})
	l.extReader.syms = append(l.extReader.syms, i)
	return i
}

// LookupOrCreateSym looks up the symbol with the specified name/version,
// returning its Sym index if found. If the lookup fails, a new external
// Sym will be created, entered into the lookup tables, and returned.
func (l *Loader) LookupOrCreateSym(name string, ver int) Sym {
	i := l.Lookup(name, ver)
	if i != 0 {
		return i
	}
	i = l.newExtSym(name, ver)
	// Static (or negative pseudo-version) symbols go in the per-(name,ver)
	// static table; others in the ABI-indexed name table.
	static := ver >= sym.SymVerStatic || ver < 0
	if static {
		l.extStaticSyms[nameVer{name, ver}] = i
	} else {
		l.symsByName[ver][name] = i
	}
	return i
}

// AddCgoExport records a cgo-exported symbol in l.CgoExports.
// This table is used to identify the correct Go symbol ABI to use
// to resolve references from host objects (which don't have ABIs).
func (l *Loader) AddCgoExport(s Sym) {
	if l.CgoExports == nil {
		l.CgoExports = make(map[string]Sym)
	}
	l.CgoExports[l.SymName(s)] = s
}

// LookupOrCreateCgoExport is like LookupOrCreateSym, but if ver
// indicates a global symbol, it uses the CgoExport table to determine
// the appropriate symbol version (ABI) to use. ver must be either 0
// or a static symbol version.
func (l *Loader) LookupOrCreateCgoExport(name string, ver int) Sym {
	if ver >= sym.SymVerStatic {
		return l.LookupOrCreateSym(name, ver)
	}
	if ver != 0 {
		panic("ver must be 0 or a static version")
	}
	// Look for a cgo-exported symbol from Go.
	if s, ok := l.CgoExports[name]; ok {
		return s
	}
	// Otherwise, this must just be a symbol in the host object.
	// Create a version 0 symbol for it.
	return l.LookupOrCreateSym(name, 0)
}

// IsExternal reports whether i is a linker-materialized external symbol.
func (l *Loader) IsExternal(i Sym) bool {
	r, _ := l.toLocal(i)
	return l.isExtReader(r)
}

func (l *Loader) isExtReader(r *oReader) bool {
	return r == l.extReader
}

// For external symbol, return its index in the payloads array.
// XXX result is actually not a global index. We (ab)use the Sym type
// so we don't need conversion for accessing bitmaps.
func (l *Loader) extIndex(i Sym) Sym {
	_, li := l.toLocal(i)
	return Sym(li)
}

// Get a new payload for external symbol, return its index in
// the payloads array.
func (l *Loader) newPayload(name string, ver int) int {
	pi := len(l.payloads)
	pp := l.allocPayload()
	pp.name = name
	pp.ver = ver
	l.payloads = append(l.payloads, pp)
	l.growExtAttrBitmaps()
	return pi
}

// getPayload returns a pointer to the extSymPayload struct for an
// external symbol if the symbol has a payload. Will panic if the
// symbol in question is bogus (zero or not an external sym).
func (l *Loader) getPayload(i Sym) *extSymPayload {
	if !l.IsExternal(i) {
		panic(fmt.Sprintf("bogus symbol index %d in getPayload", i))
	}
	pi := l.extIndex(i)
	return l.payloads[pi]
}

// allocPayload allocates a new payload. Payloads are batch-allocated
// (1000 at a time) to reduce allocation overhead.
func (l *Loader) allocPayload() *extSymPayload {
	batch := l.payloadBatch
	if len(batch) == 0 {
		batch = make([]extSymPayload, 1000)
	}
	p := &batch[0]
	l.payloadBatch = batch[1:]
	return p
}

// Grow extends the payload's data to siz bytes, zero-filling the new tail.
func (ms *extSymPayload) Grow(siz int64) {
	if int64(int(siz)) != siz {
		log.Fatalf("symgrow size %d too long", siz)
	}
	if int64(len(ms.data)) >= siz {
		return
	}
	if cap(ms.data) < int(siz) {
		cl := len(ms.data)
		ms.data = append(ms.data, make([]byte, int(siz)+1-cl)...)
		ms.data = ms.data[0:cl]
	}
	ms.data = ms.data[:siz]
}

// Convert a local index to a global index.
func (l *Loader) toGlobal(r *oReader, i uint32) Sym {
	return r.syms[i]
}

// Convert a global index to a local index.
func (l *Loader) toLocal(i Sym) (*oReader, uint32) {
	return l.objs[l.objSyms[i].objidx].r, l.objSyms[i].s
}

// Resolve a local symbol reference. Return global index.
func (l *Loader) resolve(r *oReader, s goobj.SymRef) Sym {
	var rr *oReader
	switch p := s.PkgIdx; p {
	case goobj.PkgIdxInvalid:
		// {0, X} with non-zero X is never a valid sym reference from a Go object.
		// We steal this space for symbol references from external objects.
		// In this case, X is just the global index.
		if l.isExtReader(r) {
			return Sym(s.SymIdx)
		}
		if s.SymIdx != 0 {
			panic("bad sym ref")
		}
		return 0
	case goobj.PkgIdxHashed64:
		i := int(s.SymIdx) + r.ndef
		return r.syms[i]
	case goobj.PkgIdxHashed:
		i := int(s.SymIdx) + r.ndef + r.nhashed64def
		return r.syms[i]
	case goobj.PkgIdxNone:
		i := int(s.SymIdx) + r.ndef + r.nhashed64def + r.nhasheddef
		return r.syms[i]
	case goobj.PkgIdxBuiltin:
		if bi := l.builtinSyms[s.SymIdx]; bi != 0 {
			return bi
		}
		l.reportMissingBuiltin(int(s.SymIdx), r.unit.Lib.Pkg)
		return 0
	case goobj.PkgIdxSelf:
		rr = r
	default:
		rr = l.objs[r.pkg[p]].r
	}
	return l.toGlobal(rr, s.SymIdx)
}

// reportMissingBuiltin issues an error in the case where we have a
// relocation against a runtime builtin whose definition is not found
// when the runtime package is built. The canonical example is
// "runtime.racefuncenter" -- currently if you do something like
//
//	go build -gcflags=-race myprogram.go
//
// the compiler will insert calls to the builtin runtime.racefuncenter,
// but the version of the runtime used for linkage won't actually contain
// definitions of that symbol. See issue #42396 for details.
//
// As currently implemented, this is a fatal error. This has drawbacks
// in that if there are multiple missing builtins, the error will only
// cite the first one. On the plus side, terminating the link here has
// advantages in that we won't run the risk of panics or crashes later
// on in the linker due to R_CALL relocations with 0-valued target
// symbols.
func (l *Loader) reportMissingBuiltin(bsym int, reflib string) {
	bname, _ := goobj.BuiltinName(bsym)
	log.Fatalf("reference to undefined builtin %q from package %q",
		bname, reflib)
}

// Look up a symbol by name, return global index, or 0 if not found.
// This is more like Syms.ROLookup than Lookup -- it doesn't create
// new symbol.
+func (l *Loader) Lookup(name string, ver int) Sym { + if ver >= sym.SymVerStatic || ver < 0 { + return l.extStaticSyms[nameVer{name, ver}] + } + return l.symsByName[ver][name] +} + +// Check that duplicate symbols have same contents. +func (l *Loader) checkdup(name string, r *oReader, li uint32, dup Sym) { + p := r.Data(li) + rdup, ldup := l.toLocal(dup) + pdup := rdup.Data(ldup) + reason := "same length but different contents" + if len(p) != len(pdup) { + reason = fmt.Sprintf("new length %d != old length %d", len(p), len(pdup)) + } else if bytes.Equal(p, pdup) { + // For BSS symbols, we need to check size as well, see issue 46653. + szdup := l.SymSize(dup) + sz := int64(r.Sym(li).Siz()) + if szdup == sz { + return + } + reason = fmt.Sprintf("different sizes: new size %d != old size %d", + sz, szdup) + } + fmt.Fprintf(os.Stderr, "cmd/link: while reading object for '%v': duplicate symbol '%s', previous def at '%v', with mismatched payload: %s\n", r.unit.Lib, name, rdup.unit.Lib, reason) + + // For the moment, allow DWARF subprogram DIEs for + // auto-generated wrapper functions. What seems to happen + // here is that we get different line numbers on formal + // params; I am guessing that the pos is being inherited + // from the spot where the wrapper is needed. + allowed := strings.HasPrefix(name, "go:info.go.interface") || + strings.HasPrefix(name, "go:info.go.builtin") || + strings.HasPrefix(name, "go:debuglines") + if !allowed { + l.strictDupMsgs++ + } +} + +func (l *Loader) NStrictDupMsgs() int { return l.strictDupMsgs } + +// Number of total symbols. +func (l *Loader) NSym() int { + return len(l.objSyms) +} + +// Number of defined Go symbols. +func (l *Loader) NDef() int { + return int(l.extStart) +} + +// Number of reachable symbols. +func (l *Loader) NReachableSym() int { + return l.attrReachable.Count() +} + +// Returns the name of the i-th symbol. 
func (l *Loader) SymName(i Sym) string {
	if l.IsExternal(i) {
		pp := l.getPayload(i)
		return pp.name
	}
	r, li := l.toLocal(i)
	if r == nil {
		// Tolerate invalid/unknown symbols here rather than panicking.
		return "?"
	}
	return r.Sym(li).Name(r.Reader)
}

// Returns the version of the i-th symbol.
func (l *Loader) SymVersion(i Sym) int {
	if l.IsExternal(i) {
		pp := l.getPayload(i)
		return pp.ver
	}
	// NOTE(review): unlike SymName, there is no nil check on r here;
	// this appears to assume i is always a valid object symbol — confirm.
	r, li := l.toLocal(i)
	return int(abiToVer(r.Sym(li).ABI(), r.version))
}

// IsFileLocal reports whether the i-th symbol has a static (file-local) version.
func (l *Loader) IsFileLocal(i Sym) bool {
	return l.SymVersion(i) >= sym.SymVerStatic
}

// IsFromAssembly returns true if this symbol is derived from an
// object file generated by the Go assembler.
func (l *Loader) IsFromAssembly(i Sym) bool {
	if l.IsExternal(i) {
		return false
	}
	r, _ := l.toLocal(i)
	return r.FromAssembly()
}

// Returns the type of the i-th symbol.
func (l *Loader) SymType(i Sym) sym.SymKind {
	if l.IsExternal(i) {
		pp := l.getPayload(i)
		if pp != nil {
			return pp.kind
		}
		return 0
	}
	r, li := l.toLocal(i)
	return sym.AbiSymKindToSymKind[objabi.SymKind(r.Sym(li).Type())]
}

// Returns the attributes of the i-th symbol.
func (l *Loader) SymAttr(i Sym) uint8 {
	if l.IsExternal(i) {
		// TODO: do something? External symbols have different representation of attributes.
		// For now, ReflectMethod, NoSplit, GoType, and Typelink are used and they cannot be
		// set by external symbol.
		return 0
	}
	r, li := l.toLocal(i)
	return r.Sym(li).Flag()
}

// Returns the size of the i-th symbol.
func (l *Loader) SymSize(i Sym) int64 {
	if l.IsExternal(i) {
		pp := l.getPayload(i)
		return pp.size
	}
	r, li := l.toLocal(i)
	return int64(r.Sym(li).Siz())
}

// AttrReachable returns true for symbols that are transitively
// referenced from the entry points. Unreachable symbols are not
// written to the output.
func (l *Loader) AttrReachable(i Sym) bool {
	return l.attrReachable.Has(i)
}

// SetAttrReachable sets the reachability property for a symbol (see
// AttrReachable).
+func (l *Loader) SetAttrReachable(i Sym, v bool) { + if v { + l.attrReachable.Set(i) + } else { + l.attrReachable.Unset(i) + } +} + +// AttrOnList returns true for symbols that are on some list (such as +// the list of all text symbols, or one of the lists of data symbols) +// and is consulted to avoid bugs where a symbol is put on a list +// twice. +func (l *Loader) AttrOnList(i Sym) bool { + return l.attrOnList.Has(i) +} + +// SetAttrOnList sets the "on list" property for a symbol (see +// AttrOnList). +func (l *Loader) SetAttrOnList(i Sym, v bool) { + if v { + l.attrOnList.Set(i) + } else { + l.attrOnList.Unset(i) + } +} + +// AttrLocal returns true for symbols that are only visible within the +// module (executable or shared library) being linked. This attribute +// is applied to thunks and certain other linker-generated symbols. +func (l *Loader) AttrLocal(i Sym) bool { + return l.attrLocal.Has(i) +} + +// SetAttrLocal the "local" property for a symbol (see AttrLocal above). +func (l *Loader) SetAttrLocal(i Sym, v bool) { + if v { + l.attrLocal.Set(i) + } else { + l.attrLocal.Unset(i) + } +} + +// AttrUsedInIface returns true for a type symbol that is used in +// an interface. +func (l *Loader) AttrUsedInIface(i Sym) bool { + return l.attrUsedInIface.Has(i) +} + +func (l *Loader) SetAttrUsedInIface(i Sym, v bool) { + if v { + l.attrUsedInIface.Set(i) + } else { + l.attrUsedInIface.Unset(i) + } +} + +// SymAddr checks that a symbol is reachable, and returns its value. +func (l *Loader) SymAddr(i Sym) int64 { + if !l.AttrReachable(i) { + panic("unreachable symbol in symaddr") + } + return l.values[i] +} + +// AttrNotInSymbolTable returns true for symbols that should not be +// added to the symbol table of the final generated load module. +func (l *Loader) AttrNotInSymbolTable(i Sym) bool { + return l.attrNotInSymbolTable.Has(i) +} + +// SetAttrNotInSymbolTable the "not in symtab" property for a symbol +// (see AttrNotInSymbolTable above). 
+func (l *Loader) SetAttrNotInSymbolTable(i Sym, v bool) { + if v { + l.attrNotInSymbolTable.Set(i) + } else { + l.attrNotInSymbolTable.Unset(i) + } +} + +// AttrVisibilityHidden symbols returns true for ELF symbols with +// visibility set to STV_HIDDEN. They become local symbols in +// the final executable. Only relevant when internally linking +// on an ELF platform. +func (l *Loader) AttrVisibilityHidden(i Sym) bool { + if !l.IsExternal(i) { + return false + } + return l.attrVisibilityHidden.Has(l.extIndex(i)) +} + +// SetAttrVisibilityHidden sets the "hidden visibility" property for a +// symbol (see AttrVisibilityHidden). +func (l *Loader) SetAttrVisibilityHidden(i Sym, v bool) { + if !l.IsExternal(i) { + panic("tried to set visibility attr on non-external symbol") + } + if v { + l.attrVisibilityHidden.Set(l.extIndex(i)) + } else { + l.attrVisibilityHidden.Unset(l.extIndex(i)) + } +} + +// AttrDuplicateOK returns true for a symbol that can be present in +// multiple object files. +func (l *Loader) AttrDuplicateOK(i Sym) bool { + if !l.IsExternal(i) { + // TODO: if this path winds up being taken frequently, it + // might make more sense to copy the flag value out of the object + // into a larger bitmap during preload. + r, li := l.toLocal(i) + return r.Sym(li).Dupok() + } + return l.attrDuplicateOK.Has(l.extIndex(i)) +} + +// SetAttrDuplicateOK sets the "duplicate OK" property for an external +// symbol (see AttrDuplicateOK). +func (l *Loader) SetAttrDuplicateOK(i Sym, v bool) { + if !l.IsExternal(i) { + panic("tried to set dupok attr on non-external symbol") + } + if v { + l.attrDuplicateOK.Set(l.extIndex(i)) + } else { + l.attrDuplicateOK.Unset(l.extIndex(i)) + } +} + +// AttrShared returns true for symbols compiled with the -shared option. 
+func (l *Loader) AttrShared(i Sym) bool { + if !l.IsExternal(i) { + // TODO: if this path winds up being taken frequently, it + // might make more sense to copy the flag value out of the + // object into a larger bitmap during preload. + r, _ := l.toLocal(i) + return r.Shared() + } + return l.attrShared.Has(l.extIndex(i)) +} + +// SetAttrShared sets the "shared" property for an external +// symbol (see AttrShared). +func (l *Loader) SetAttrShared(i Sym, v bool) { + if !l.IsExternal(i) { + panic(fmt.Sprintf("tried to set shared attr on non-external symbol %d %s", i, l.SymName(i))) + } + if v { + l.attrShared.Set(l.extIndex(i)) + } else { + l.attrShared.Unset(l.extIndex(i)) + } +} + +// AttrExternal returns true for function symbols loaded from host +// object files. +func (l *Loader) AttrExternal(i Sym) bool { + if !l.IsExternal(i) { + return false + } + return l.attrExternal.Has(l.extIndex(i)) +} + +// SetAttrExternal sets the "external" property for a host object +// symbol (see AttrExternal). +func (l *Loader) SetAttrExternal(i Sym, v bool) { + if !l.IsExternal(i) { + panic(fmt.Sprintf("tried to set external attr on non-external symbol %q", l.SymName(i))) + } + if v { + l.attrExternal.Set(l.extIndex(i)) + } else { + l.attrExternal.Unset(l.extIndex(i)) + } +} + +// AttrSpecial returns true for a symbols that do not have their +// address (i.e. Value) computed by the usual mechanism of +// data.go:dodata() & data.go:address(). +func (l *Loader) AttrSpecial(i Sym) bool { + return l.attrSpecial.Has(i) +} + +// SetAttrSpecial sets the "special" property for a symbol (see +// AttrSpecial). +func (l *Loader) SetAttrSpecial(i Sym, v bool) { + if v { + l.attrSpecial.Set(i) + } else { + l.attrSpecial.Unset(i) + } +} + +// AttrCgoExportDynamic returns true for a symbol that has been +// specially marked via the "cgo_export_dynamic" compiler directive +// written by cgo (in response to //export directives in the source). 
+func (l *Loader) AttrCgoExportDynamic(i Sym) bool { + _, ok := l.attrCgoExportDynamic[i] + return ok +} + +// SetAttrCgoExportDynamic sets the "cgo_export_dynamic" for a symbol +// (see AttrCgoExportDynamic). +func (l *Loader) SetAttrCgoExportDynamic(i Sym, v bool) { + if v { + l.attrCgoExportDynamic[i] = struct{}{} + } else { + delete(l.attrCgoExportDynamic, i) + } +} + +// ForAllCgoExportDynamic calls f for every symbol that has been +// marked with the "cgo_export_dynamic" compiler directive. +func (l *Loader) ForAllCgoExportDynamic(f func(Sym)) { + for s := range l.attrCgoExportDynamic { + f(s) + } +} + +// AttrCgoExportStatic returns true for a symbol that has been +// specially marked via the "cgo_export_static" directive +// written by cgo. +func (l *Loader) AttrCgoExportStatic(i Sym) bool { + _, ok := l.attrCgoExportStatic[i] + return ok +} + +// SetAttrCgoExportStatic sets the "cgo_export_static" for a symbol +// (see AttrCgoExportStatic). +func (l *Loader) SetAttrCgoExportStatic(i Sym, v bool) { + if v { + l.attrCgoExportStatic[i] = struct{}{} + } else { + delete(l.attrCgoExportStatic, i) + } +} + +// IsGeneratedSym returns true if a symbol's been previously marked as a +// generator symbol through the SetIsGeneratedSym. The functions for generator +// symbols are kept in the Link context. +func (l *Loader) IsGeneratedSym(i Sym) bool { + if !l.IsExternal(i) { + return false + } + return l.generatedSyms.Has(l.extIndex(i)) +} + +// SetIsGeneratedSym marks symbols as generated symbols. Data shouldn't be +// stored in generated symbols, and a function is registered and called for +// each of these symbols. 
func (l *Loader) SetIsGeneratedSym(i Sym, v bool) {
	if !l.IsExternal(i) {
		panic("only external symbols can be generated")
	}
	if v {
		l.generatedSyms.Set(l.extIndex(i))
	} else {
		l.generatedSyms.Unset(l.extIndex(i))
	}
}

// AttrCgoExport reports whether the symbol is exported via either the
// dynamic or the static cgo export mechanism.
func (l *Loader) AttrCgoExport(i Sym) bool {
	return l.AttrCgoExportDynamic(i) || l.AttrCgoExportStatic(i)
}

// AttrReadOnly returns true for a symbol whose underlying data
// is stored via a read-only mmap.
func (l *Loader) AttrReadOnly(i Sym) bool {
	// An explicit override (set via SetAttrReadOnly) takes precedence.
	if v, ok := l.attrReadOnly[i]; ok {
		return v
	}
	if l.IsExternal(i) {
		pp := l.getPayload(i)
		if pp.objidx != 0 {
			return l.objs[pp.objidx].r.ReadOnly()
		}
		return false
	}
	r, _ := l.toLocal(i)
	return r.ReadOnly()
}

// SetAttrReadOnly sets the "data is read only" property for a symbol
// (see AttrReadOnly).
func (l *Loader) SetAttrReadOnly(i Sym, v bool) {
	l.attrReadOnly[i] = v
}

// AttrSubSymbol returns true for symbols that are listed as a
// sub-symbol of some other outer symbol. The sub/outer mechanism is
// used when loading host objects (sections from the host object
// become regular linker symbols and symbols go on the Sub list of
// their section) and for constructing the global offset table when
// internally linking a dynamic executable.
//
// Note that in later stages of the linker, we set Outer(S) to some
// container symbol C, but don't set Sub(C). Thus we have two
// distinct scenarios:
//
// - Outer symbol covers the address ranges of its sub-symbols.
//   Outer.Sub is set in this case.
// - Outer symbol doesn't cover the address ranges. It is zero-sized
//   and doesn't have sub-symbols. In the case, the inner symbol is
//   not actually a "SubSymbol". (Tricky!)
//
// This method returns TRUE only for sub-symbols in the first scenario.
//
// FIXME: would be better to do away with this and have a better way
// to represent container symbols.

func (l *Loader) AttrSubSymbol(i Sym) bool {
	// we don't explicitly store this attribute any more -- return
	// a value based on the sub-symbol setting.
	o := l.OuterSym(i)
	if o == 0 {
		return false
	}
	return l.SubSym(o) != 0
}

// Note that we don't have a 'SetAttrSubSymbol' method in the loader;
// clients should instead use the AddInteriorSym method to establish
// containment relationships for host object symbols.

// Returns whether the i-th symbol has ReflectMethod attribute set.
func (l *Loader) IsReflectMethod(i Sym) bool {
	return l.SymAttr(i)&goobj.SymFlagReflectMethod != 0
}

// Returns whether the i-th symbol is nosplit.
func (l *Loader) IsNoSplit(i Sym) bool {
	return l.SymAttr(i)&goobj.SymFlagNoSplit != 0
}

// Returns whether this is a Go type symbol.
func (l *Loader) IsGoType(i Sym) bool {
	return l.SymAttr(i)&goobj.SymFlagGoType != 0
}

// Returns whether this symbol should be included in typelink.
func (l *Loader) IsTypelink(i Sym) bool {
	return l.SymAttr(i)&goobj.SymFlagTypelink != 0
}

// Returns whether this symbol is an itab symbol.
// Note: external symbols never carry this flag.
func (l *Loader) IsItab(i Sym) bool {
	if l.IsExternal(i) {
		return false
	}
	r, li := l.toLocal(i)
	return r.Sym(li).IsItab()
}

// Returns whether this symbol is a dictionary symbol.
func (l *Loader) IsDict(i Sym) bool {
	if l.IsExternal(i) {
		return false
	}
	r, li := l.toLocal(i)
	return r.Sym(li).IsDict()
}

// Returns whether this symbol is a compiler-generated package init func.
func (l *Loader) IsPkgInit(i Sym) bool {
	if l.IsExternal(i) {
		return false
	}
	r, li := l.toLocal(i)
	return r.Sym(li).IsPkgInit()
}

// Return whether this is a trampoline of a deferreturn call.
func (l *Loader) IsDeferReturnTramp(i Sym) bool {
	return l.deferReturnTramp[i]
}

// Set that i is a trampoline of a deferreturn call.
+func (l *Loader) SetIsDeferReturnTramp(i Sym, v bool) { + l.deferReturnTramp[i] = v +} + +// growValues grows the slice used to store symbol values. +func (l *Loader) growValues(reqLen int) { + curLen := len(l.values) + if reqLen > curLen { + l.values = append(l.values, make([]int64, reqLen+1-curLen)...) + } +} + +// SymValue returns the value of the i-th symbol. i is global index. +func (l *Loader) SymValue(i Sym) int64 { + return l.values[i] +} + +// SetSymValue sets the value of the i-th symbol. i is global index. +func (l *Loader) SetSymValue(i Sym, val int64) { + l.values[i] = val +} + +// AddToSymValue adds to the value of the i-th symbol. i is the global index. +func (l *Loader) AddToSymValue(i Sym, val int64) { + l.values[i] += val +} + +// Returns the symbol content of the i-th symbol. i is global index. +func (l *Loader) Data(i Sym) []byte { + if l.IsExternal(i) { + pp := l.getPayload(i) + if pp != nil { + return pp.data + } + return nil + } + r, li := l.toLocal(i) + return r.Data(li) +} + +// Returns the symbol content of the i-th symbol as a string. i is global index. +func (l *Loader) DataString(i Sym) string { + if l.IsExternal(i) { + pp := l.getPayload(i) + return string(pp.data) + } + r, li := l.toLocal(i) + return r.DataString(li) +} + +// FreeData clears the symbol data of an external symbol, allowing the memory +// to be freed earlier. No-op for non-external symbols. +// i is global index. +func (l *Loader) FreeData(i Sym) { + if l.IsExternal(i) { + pp := l.getPayload(i) + if pp != nil { + pp.data = nil + } + } +} + +// SymAlign returns the alignment for a symbol. +func (l *Loader) SymAlign(i Sym) int32 { + if int(i) >= len(l.align) { + // align is extended lazily -- it the sym in question is + // outside the range of the existing slice, then we assume its + // alignment has not yet been set. + return 0 + } + // TODO: would it make sense to return an arch-specific + // alignment depending on section type? E.g. STEXT => 32, + // SDATA => 1, etc? 
+ abits := l.align[i] + if abits == 0 { + return 0 + } + return int32(1 << (abits - 1)) +} + +// SetSymAlign sets the alignment for a symbol. +func (l *Loader) SetSymAlign(i Sym, align int32) { + // Reject nonsense alignments. + if align < 0 || align&(align-1) != 0 { + panic("bad alignment value") + } + if int(i) >= len(l.align) { + l.align = append(l.align, make([]uint8, l.NSym()-len(l.align))...) + } + if align == 0 { + l.align[i] = 0 + } + l.align[i] = uint8(bits.Len32(uint32(align))) +} + +// SymSect returns the section of the i-th symbol. i is global index. +func (l *Loader) SymSect(i Sym) *sym.Section { + if int(i) >= len(l.symSects) { + // symSects is extended lazily -- it the sym in question is + // outside the range of the existing slice, then we assume its + // section has not yet been set. + return nil + } + return l.sects[l.symSects[i]] +} + +// SetSymSect sets the section of the i-th symbol. i is global index. +func (l *Loader) SetSymSect(i Sym, sect *sym.Section) { + if int(i) >= len(l.symSects) { + l.symSects = append(l.symSects, make([]uint16, l.NSym()-len(l.symSects))...) + } + l.symSects[i] = sect.Index +} + +// NewSection creates a new (output) section. +func (l *Loader) NewSection() *sym.Section { + sect := new(sym.Section) + idx := len(l.sects) + if idx != int(uint16(idx)) { + panic("too many sections created") + } + sect.Index = uint16(idx) + l.sects = append(l.sects, sect) + return sect +} + +// SymDynimplib returns the "dynimplib" attribute for the specified +// symbol, making up a portion of the info for a symbol specified +// on a "cgo_import_dynamic" compiler directive. +func (l *Loader) SymDynimplib(i Sym) string { + return l.dynimplib[i] +} + +// SetSymDynimplib sets the "dynimplib" attribute for a symbol. 
+func (l *Loader) SetSymDynimplib(i Sym, value string) { + // reject bad symbols + if i >= Sym(len(l.objSyms)) || i == 0 { + panic("bad symbol index in SetDynimplib") + } + if value == "" { + delete(l.dynimplib, i) + } else { + l.dynimplib[i] = value + } +} + +// SymDynimpvers returns the "dynimpvers" attribute for the specified +// symbol, making up a portion of the info for a symbol specified +// on a "cgo_import_dynamic" compiler directive. +func (l *Loader) SymDynimpvers(i Sym) string { + return l.dynimpvers[i] +} + +// SetSymDynimpvers sets the "dynimpvers" attribute for a symbol. +func (l *Loader) SetSymDynimpvers(i Sym, value string) { + // reject bad symbols + if i >= Sym(len(l.objSyms)) || i == 0 { + panic("bad symbol index in SetDynimpvers") + } + if value == "" { + delete(l.dynimpvers, i) + } else { + l.dynimpvers[i] = value + } +} + +// SymExtname returns the "extname" value for the specified +// symbol. +func (l *Loader) SymExtname(i Sym) string { + if s, ok := l.extname[i]; ok { + return s + } + return l.SymName(i) +} + +// SetSymExtname sets the "extname" attribute for a symbol. +func (l *Loader) SetSymExtname(i Sym, value string) { + // reject bad symbols + if i >= Sym(len(l.objSyms)) || i == 0 { + panic("bad symbol index in SetExtname") + } + if value == "" { + delete(l.extname, i) + } else { + l.extname[i] = value + } +} + +// SymElfType returns the previously recorded ELF type for a symbol +// (used only for symbols read from shared libraries by ldshlibsyms). +// It is not set for symbols defined by the packages being linked or +// by symbols read by ldelf (and so is left as elf.STT_NOTYPE). +func (l *Loader) SymElfType(i Sym) elf.SymType { + if et, ok := l.elfType[i]; ok { + return et + } + return elf.STT_NOTYPE +} + +// SetSymElfType sets the elf type attribute for a symbol. 
func (l *Loader) SetSymElfType(i Sym, et elf.SymType) {
	// reject bad symbols
	if i >= Sym(len(l.objSyms)) || i == 0 {
		panic("bad symbol index in SetSymElfType")
	}
	// STT_NOTYPE is the default; represent it as map absence.
	if et == elf.STT_NOTYPE {
		delete(l.elfType, i)
	} else {
		l.elfType[i] = et
	}
}

// SymElfSym returns the ELF symbol index for a given loader
// symbol, assigned during ELF symtab generation.
func (l *Loader) SymElfSym(i Sym) int32 {
	return l.elfSym[i]
}

// SetSymElfSym sets the elf symbol index for a symbol.
func (l *Loader) SetSymElfSym(i Sym, es int32) {
	if i == 0 {
		panic("bad sym index")
	}
	if es == 0 {
		delete(l.elfSym, i)
	} else {
		l.elfSym[i] = es
	}
}

// SymLocalElfSym returns the "local" ELF symbol index for a given loader
// symbol, assigned during ELF symtab generation.
func (l *Loader) SymLocalElfSym(i Sym) int32 {
	return l.localElfSym[i]
}

// SetSymLocalElfSym sets the "local" elf symbol index for a symbol.
func (l *Loader) SetSymLocalElfSym(i Sym, es int32) {
	if i == 0 {
		panic("bad sym index")
	}
	if es == 0 {
		delete(l.localElfSym, i)
	} else {
		l.localElfSym[i] = es
	}
}

// SymPlt returns the PLT offset of symbol s.
// Returns -1 when no PLT offset has been assigned.
func (l *Loader) SymPlt(s Sym) int32 {
	if v, ok := l.plt[s]; ok {
		return v
	}
	return -1
}

// SetPlt sets the PLT offset of symbol i.
func (l *Loader) SetPlt(i Sym, v int32) {
	if i >= Sym(len(l.objSyms)) || i == 0 {
		panic("bad symbol for SetPlt")
	}
	// -1 is the "unset" sentinel; represent it as map absence.
	if v == -1 {
		delete(l.plt, i)
	} else {
		l.plt[i] = v
	}
}

// SymGot returns the GOT offset of symbol s.
// Returns -1 when no GOT offset has been assigned.
func (l *Loader) SymGot(s Sym) int32 {
	if v, ok := l.got[s]; ok {
		return v
	}
	return -1
}

// SetGot sets the GOT offset of symbol i.
func (l *Loader) SetGot(i Sym, v int32) {
	if i >= Sym(len(l.objSyms)) || i == 0 {
		panic("bad symbol for SetGot")
	}
	if v == -1 {
		delete(l.got, i)
	} else {
		l.got[i] = v
	}
}

// SymDynid returns the "dynid" property for the specified symbol.
func (l *Loader) SymDynid(i Sym) int32 {
	if s, ok := l.dynid[i]; ok {
		return s
	}
	// -1 means "no dynid assigned".
	return -1
}

// SetSymDynid sets the "dynid" property for a symbol.
func (l *Loader) SetSymDynid(i Sym, val int32) {
	// reject bad symbols
	if i >= Sym(len(l.objSyms)) || i == 0 {
		panic("bad symbol index in SetSymDynid")
	}
	if val == -1 {
		delete(l.dynid, i)
	} else {
		l.dynid[i] = val
	}
}

// DynidSyms returns the set of symbols for which dynID is set to an
// interesting (non-default) value. This is expected to be a fairly
// small set.
func (l *Loader) DynidSyms() []Sym {
	sl := make([]Sym, 0, len(l.dynid))
	for s := range l.dynid {
		sl = append(sl, s)
	}
	// Sort for deterministic output despite random map iteration order.
	sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] })
	return sl
}

// SymGoType returns the 'Gotype' property for a given symbol (set by
// the Go compiler for variable symbols). This version relies on
// reading aux symbols for the target sym -- it could be that a faster
// approach would be to check for gotype during preload and copy the
// results in to a map (might want to try this at some point and see
// if it helps speed things up).
func (l *Loader) SymGoType(i Sym) Sym { return l.aux1(i, goobj.AuxGotype) }

// SymUnit returns the compilation unit for a given symbol (which will
// typically be nil for external or linker-manufactured symbols).
func (l *Loader) SymUnit(i Sym) *sym.CompilationUnit {
	if l.IsExternal(i) {
		pp := l.getPayload(i)
		if pp.objidx != 0 {
			r := l.objs[pp.objidx].r
			return r.unit
		}
		return nil
	}
	r, _ := l.toLocal(i)
	return r.unit
}

// SymPkg returns the package where the symbol came from (for
// regular compiler-generated Go symbols), but in the case of
// building with "-linkshared" (when a symbol is read from a
// shared library), will hold the library name.
// NOTE: this corresponds to sym.Symbol.File field.
func (l *Loader) SymPkg(i Sym) string {
	// An explicit override (set via SetSymPkg) takes precedence.
	if f, ok := l.symPkg[i]; ok {
		return f
	}
	if l.IsExternal(i) {
		pp := l.getPayload(i)
		if pp.objidx != 0 {
			r := l.objs[pp.objidx].r
			return r.unit.Lib.Pkg
		}
		return ""
	}
	r, _ := l.toLocal(i)
	return r.unit.Lib.Pkg
}

// SetSymPkg sets the package/library for a symbol. This is
// needed mainly for external symbols, specifically those imported
// from shared libraries.
func (l *Loader) SetSymPkg(i Sym, pkg string) {
	// reject bad symbols
	if i >= Sym(len(l.objSyms)) || i == 0 {
		panic("bad symbol index in SetSymPkg")
	}
	l.symPkg[i] = pkg
}

// SymLocalentry returns an offset in bytes of the "local entry" of a symbol.
//
// On PPC64, a value of 1 indicates the symbol does not use or preserve a TOC
// pointer in R2, nor does it have a distinct local entry.
func (l *Loader) SymLocalentry(i Sym) uint8 {
	return l.localentry[i]
}

// SetSymLocalentry sets the "local entry" offset attribute for a symbol.
func (l *Loader) SetSymLocalentry(i Sym, value uint8) {
	// reject bad symbols
	if i >= Sym(len(l.objSyms)) || i == 0 {
		panic("bad symbol index in SetSymLocalentry")
	}
	if value == 0 {
		delete(l.localentry, i)
	} else {
		l.localentry[i] = value
	}
}

// Returns the number of aux symbols given a global index.
// External symbols have no aux symbols.
func (l *Loader) NAux(i Sym) int {
	if l.IsExternal(i) {
		return 0
	}
	r, li := l.toLocal(i)
	return r.NAux(li)
}

// Returns the "handle" to the j-th aux symbol of the i-th symbol.
func (l *Loader) Aux(i Sym, j int) Aux {
	if l.IsExternal(i) {
		return Aux{}
	}
	r, li := l.toLocal(i)
	if j >= r.NAux(li) {
		return Aux{}
	}
	return Aux{r.Aux(li, j), r, l}
}

// WasmImportSym returns the auxiliary WebAssembly import symbol associated with
// a given function symbol. The aux sym only exists for Go function stubs that
// have been annotated with the //go:wasmimport directive. The aux sym
// contains the information necessary for the linker to add a WebAssembly
// import statement.
// (https://webassembly.github.io/spec/core/syntax/modules.html#imports)
func (l *Loader) WasmImportSym(fnSymIdx Sym) (Sym, bool) {
	if l.SymType(fnSymIdx) != sym.STEXT {
		log.Fatalf("error: non-function sym %d/%s t=%s passed to WasmImportSym", fnSymIdx, l.SymName(fnSymIdx), l.SymType(fnSymIdx).String())
	}
	r, li := l.toLocal(fnSymIdx)
	auxs := r.Auxs(li)
	for i := range auxs {
		a := &auxs[i]
		switch a.Type() {
		case goobj.AuxWasmImport:
			return l.resolve(r, a.Sym()), true
		}
	}

	// No AuxWasmImport found: not a wasmimport stub.
	return 0, false
}

// SEHUnwindSym returns the auxiliary SEH unwind symbol associated with
// a given function symbol.
func (l *Loader) SEHUnwindSym(fnSymIdx Sym) Sym {
	if l.SymType(fnSymIdx) != sym.STEXT {
		log.Fatalf("error: non-function sym %d/%s t=%s passed to SEHUnwindSym", fnSymIdx, l.SymName(fnSymIdx), l.SymType(fnSymIdx).String())
	}

	return l.aux1(fnSymIdx, goobj.AuxSehUnwindInfo)
}

// GetFuncDwarfAuxSyms collects and returns the auxiliary DWARF
// symbols associated with a given function symbol. Prior to the
// introduction of the loader, this was done purely using name
// lookups, e.g. for function with name XYZ we would then look up
// go.info.XYZ, etc.
func (l *Loader) GetFuncDwarfAuxSyms(fnSymIdx Sym) (auxDwarfInfo, auxDwarfLoc, auxDwarfRanges, auxDwarfLines Sym) {
	if l.SymType(fnSymIdx) != sym.STEXT {
		log.Fatalf("error: non-function sym %d/%s t=%s passed to GetFuncDwarfAuxSyms", fnSymIdx, l.SymName(fnSymIdx), l.SymType(fnSymIdx).String())
	}
	r, auxs := l.auxs(fnSymIdx)

	for i := range auxs {
		a := &auxs[i]
		switch a.Type() {
		case goobj.AuxDwarfInfo:
			auxDwarfInfo = l.resolve(r, a.Sym())
			if l.SymType(auxDwarfInfo) != sym.SDWARFFCN {
				panic("aux dwarf info sym with wrong type")
			}
		case goobj.AuxDwarfLoc:
			auxDwarfLoc = l.resolve(r, a.Sym())
			if l.SymType(auxDwarfLoc) != sym.SDWARFLOC {
				panic("aux dwarf loc sym with wrong type")
			}
		case goobj.AuxDwarfRanges:
			auxDwarfRanges = l.resolve(r, a.Sym())
			if l.SymType(auxDwarfRanges) != sym.SDWARFRANGE {
				panic("aux dwarf ranges sym with wrong type")
			}
		case goobj.AuxDwarfLines:
			auxDwarfLines = l.resolve(r, a.Sym())
			if l.SymType(auxDwarfLines) != sym.SDWARFLINES {
				panic("aux dwarf lines sym with wrong type")
			}
		}
	}
	return
}

// GetVarDwarfAuxSym returns the DWARF info aux symbol for a variable
// symbol, or 0 if it has none.
func (l *Loader) GetVarDwarfAuxSym(i Sym) Sym {
	aux := l.aux1(i, goobj.AuxDwarfInfo)
	if aux != 0 && l.SymType(aux) != sym.SDWARFVAR {
		fmt.Println(l.SymName(i), l.SymType(i), l.SymType(aux), sym.SDWARFVAR)
		panic("aux dwarf info sym with wrong type")
	}
	return aux
}

// AddInteriorSym sets up 'interior' as an interior symbol of
// container/payload symbol 'container'. An interior symbol does not
// itself have data, but gives a name to a subrange of the data in its
// container symbol. The container itself may or may not have a name.
// This method is intended primarily for use in the host object
// loaders, to capture the semantics of symbols and sections in an
// object file. When reading a host object file, we'll typically
// encounter a static section symbol (ex: ".text") containing content
// for a collection of functions, then a series of ELF (or macho, etc)
// symbol table entries each of which points into a sub-section
// (offset and length) of its corresponding container symbol. Within
// the go linker we create a loader.Sym for the container (which is
// expected to have the actual content/payload) and then a set of
// interior loader.Sym's that point into a portion of the container.
func (l *Loader) AddInteriorSym(container Sym, interior Sym) {
	// Container symbols are expected to have content/data.
	// NB: this restriction may turn out to be too strict (it's possible
	// to imagine a zero-sized container with an interior symbol pointing
	// into it); it's ok to relax or remove it if we counter an
	// oddball host object that triggers this.
	if l.SymSize(container) == 0 && len(l.Data(container)) == 0 {
		panic("unexpected empty container symbol")
	}
	// The interior symbols for a container are not expected to have
	// content/data or relocations.
	if len(l.Data(interior)) != 0 {
		panic("unexpected non-empty interior symbol")
	}
	// Interior symbol is expected to be in the symbol table.
	if l.AttrNotInSymbolTable(interior) {
		panic("interior symbol must be in symtab")
	}
	// Only a single level of containment is allowed.
	if l.OuterSym(container) != 0 {
		panic("outer has outer itself")
	}
	// Interior sym should not already have a sibling.
	if l.SubSym(interior) != 0 {
		panic("sub set for subsym")
	}
	// Interior sym should not already point at a container.
	if l.OuterSym(interior) != 0 {
		panic("outer already set for subsym")
	}
	// Push interior onto the front of container's sub list.
	l.sub[interior] = l.sub[container]
	l.sub[container] = interior
	l.outer[interior] = container
}

// OuterSym gets the outer/container symbol.
func (l *Loader) OuterSym(i Sym) Sym {
	return l.outer[i]
}

// SubSym gets the subsymbol for host object loaded symbols.
func (l *Loader) SubSym(i Sym) Sym {
	return l.sub[i]
}

// growOuter grows the slice used to store outer symbol.
func (l *Loader) growOuter(reqLen int) {
	curLen := len(l.outer)
	if reqLen > curLen {
		l.outer = append(l.outer, make([]Sym, reqLen-curLen)...)
	}
}

// SetCarrierSym declares that 'c' is the carrier or container symbol
// for 's'. Carrier symbols are used in the linker to as a container
// for a collection of sub-symbols where the content of the
// sub-symbols is effectively concatenated to form the content of the
// carrier. The carrier is given a name in the output symbol table
// while the sub-symbol names are not. For example, the Go compiler
// emits named string symbols (type SGOSTRING) when compiling a
// package; after being deduplicated, these symbols are collected into
// a single unit by assigning them a new carrier symbol named
// "go:string.*" (which appears in the final symbol table for the
// output load module).
func (l *Loader) SetCarrierSym(s Sym, c Sym) {
	if c == 0 {
		panic("invalid carrier in SetCarrierSym")
	}
	if s == 0 {
		panic("invalid sub-symbol in SetCarrierSym")
	}
	// Carrier symbols are not expected to have content/data. It is
	// ok for them to have non-zero size (to allow for use of generator
	// symbols).
	if len(l.Data(c)) != 0 {
		panic("unexpected non-empty carrier symbol")
	}
	l.outer[s] = c
	// relocsym's foldSubSymbolOffset requires that we only
	// have a single level of containment-- enforce here.
	if l.outer[c] != 0 {
		panic("invalid nested carrier sym")
	}
}

// Initialize Reachable bitmap and its siblings for running deadcode pass.
func (l *Loader) InitReachable() {
	l.growAttrBitmaps(l.NSym() + 1)
}

// symWithVal pairs a symbol with its value for sorting.
type symWithVal struct {
	s Sym
	v int64
}

// bySymValue implements sort.Interface, ordering by symbol value.
type bySymValue []symWithVal

func (s bySymValue) Len() int           { return len(s) }
func (s bySymValue) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s bySymValue) Less(i, j int) bool { return s[i].v < s[j].v }

// SortSub walks through the sub-symbols for 's' and sorts them
// in place by increasing value. Return value is the new
// sub symbol for the specified outer symbol.
func (l *Loader) SortSub(s Sym) Sym {

	if s == 0 || l.sub[s] == 0 {
		return s
	}

	// Sort symbols using a slice first. Use a stable sort on the off
	// chance that there's more than once symbol with the same value,
	// so as to preserve reproducible builds.
	sl := []symWithVal{}
	for ss := l.sub[s]; ss != 0; ss = l.sub[ss] {
		sl = append(sl, symWithVal{s: ss, v: l.SymValue(ss)})
	}
	sort.Stable(bySymValue(sl))

	// Then apply any changes needed to the sub map.
	// Rebuild the linked list back-to-front so each entry points
	// at its successor in sorted order.
	ns := Sym(0)
	for i := len(sl) - 1; i >= 0; i-- {
		s := sl[i].s
		l.sub[s] = ns
		ns = s
	}

	// Update sub for outer symbol, then return
	l.sub[s] = sl[0].s
	return sl[0].s
}

// SortSyms sorts a list of symbols by their value.
func (l *Loader) SortSyms(ss []Sym) {
	sort.SliceStable(ss, func(i, j int) bool { return l.SymValue(ss[i]) < l.SymValue(ss[j]) })
}

// Ensure that reachable bitmap and its siblings have enough size.
+func (l *Loader) growAttrBitmaps(reqLen int) {
+	if reqLen > l.attrReachable.Len() {
+		// These are indexed by global symbol
+		l.attrReachable = growBitmap(reqLen, l.attrReachable)
+		l.attrOnList = growBitmap(reqLen, l.attrOnList)
+		l.attrLocal = growBitmap(reqLen, l.attrLocal)
+		l.attrNotInSymbolTable = growBitmap(reqLen, l.attrNotInSymbolTable)
+		l.attrUsedInIface = growBitmap(reqLen, l.attrUsedInIface)
+		l.attrSpecial = growBitmap(reqLen, l.attrSpecial)
+	}
+	l.growExtAttrBitmaps()
+}
+
+func (l *Loader) growExtAttrBitmaps() {
+	// These are indexed by external symbol index (e.g. l.extIndex(i))
+	extReqLen := len(l.payloads)
+	if extReqLen > l.attrVisibilityHidden.Len() {
+		l.attrVisibilityHidden = growBitmap(extReqLen, l.attrVisibilityHidden)
+		l.attrDuplicateOK = growBitmap(extReqLen, l.attrDuplicateOK)
+		l.attrShared = growBitmap(extReqLen, l.attrShared)
+		l.attrExternal = growBitmap(extReqLen, l.attrExternal)
+		l.generatedSyms = growBitmap(extReqLen, l.generatedSyms)
+	}
+}
+
+func (relocs *Relocs) Count() int { return len(relocs.rs) }
+
+// At returns the j-th reloc for a global symbol.
+func (relocs *Relocs) At(j int) Reloc {
+	// External and object-file symbols both keep their relocations in
+	// relocs.rs, so one construction serves either case; the former
+	// isExtReader branch returned an identical value and was dead code.
+	return Reloc{&relocs.rs[j], relocs.r, relocs.l}
+}
+
+// Relocs returns a Relocs object for the given global sym.
+func (l *Loader) Relocs(i Sym) Relocs {
+	r, li := l.toLocal(i)
+	if r == nil {
+		panic(fmt.Sprintf("trying to get oreader for invalid sym %d\n\n", i))
+	}
+	return l.relocs(r, li)
+}
+
+// relocs returns a Relocs object given a local sym index and reader.
+func (l *Loader) relocs(r *oReader, li uint32) Relocs { + var rs []goobj.Reloc + if l.isExtReader(r) { + pp := l.payloads[li] + rs = pp.relocs + } else { + rs = r.Relocs(li) + } + return Relocs{ + rs: rs, + li: li, + r: r, + l: l, + } +} + +func (l *Loader) auxs(i Sym) (*oReader, []goobj.Aux) { + if l.IsExternal(i) { + pp := l.getPayload(i) + return l.objs[pp.objidx].r, pp.auxs + } else { + r, li := l.toLocal(i) + return r, r.Auxs(li) + } +} + +// Returns a specific aux symbol of type t for symbol i. +func (l *Loader) aux1(i Sym, t uint8) Sym { + r, auxs := l.auxs(i) + for j := range auxs { + a := &auxs[j] + if a.Type() == t { + return l.resolve(r, a.Sym()) + } + } + return 0 +} + +func (l *Loader) Pcsp(i Sym) Sym { return l.aux1(i, goobj.AuxPcsp) } + +// Returns all aux symbols of per-PC data for symbol i. +// tmp is a scratch space for the pcdata slice. +func (l *Loader) PcdataAuxs(i Sym, tmp []Sym) (pcsp, pcfile, pcline, pcinline Sym, pcdata []Sym) { + pcdata = tmp[:0] + r, auxs := l.auxs(i) + for j := range auxs { + a := &auxs[j] + switch a.Type() { + case goobj.AuxPcsp: + pcsp = l.resolve(r, a.Sym()) + case goobj.AuxPcline: + pcline = l.resolve(r, a.Sym()) + case goobj.AuxPcfile: + pcfile = l.resolve(r, a.Sym()) + case goobj.AuxPcinline: + pcinline = l.resolve(r, a.Sym()) + case goobj.AuxPcdata: + pcdata = append(pcdata, l.resolve(r, a.Sym())) + } + } + return +} + +// Returns the number of pcdata for symbol i. +func (l *Loader) NumPcdata(i Sym) int { + n := 0 + _, auxs := l.auxs(i) + for j := range auxs { + a := &auxs[j] + if a.Type() == goobj.AuxPcdata { + n++ + } + } + return n +} + +// Returns all funcdata symbols of symbol i. +// tmp is a scratch space. +func (l *Loader) Funcdata(i Sym, tmp []Sym) []Sym { + fd := tmp[:0] + r, auxs := l.auxs(i) + for j := range auxs { + a := &auxs[j] + if a.Type() == goobj.AuxFuncdata { + fd = append(fd, l.resolve(r, a.Sym())) + } + } + return fd +} + +// Returns the number of funcdata for symbol i. 
+func (l *Loader) NumFuncdata(i Sym) int { + n := 0 + _, auxs := l.auxs(i) + for j := range auxs { + a := &auxs[j] + if a.Type() == goobj.AuxFuncdata { + n++ + } + } + return n +} + +// FuncInfo provides hooks to access goobj.FuncInfo in the objects. +type FuncInfo struct { + l *Loader + r *oReader + data []byte + lengths goobj.FuncInfoLengths +} + +func (fi *FuncInfo) Valid() bool { return fi.r != nil } + +func (fi *FuncInfo) Args() int { + return int((*goobj.FuncInfo)(nil).ReadArgs(fi.data)) +} + +func (fi *FuncInfo) Locals() int { + return int((*goobj.FuncInfo)(nil).ReadLocals(fi.data)) +} + +func (fi *FuncInfo) FuncID() abi.FuncID { + return (*goobj.FuncInfo)(nil).ReadFuncID(fi.data) +} + +func (fi *FuncInfo) FuncFlag() abi.FuncFlag { + return (*goobj.FuncInfo)(nil).ReadFuncFlag(fi.data) +} + +func (fi *FuncInfo) StartLine() int32 { + return (*goobj.FuncInfo)(nil).ReadStartLine(fi.data) +} + +// Preload has to be called prior to invoking the various methods +// below related to pcdata, funcdataoff, files, and inltree nodes. +func (fi *FuncInfo) Preload() { + fi.lengths = (*goobj.FuncInfo)(nil).ReadFuncInfoLengths(fi.data) +} + +func (fi *FuncInfo) NumFile() uint32 { + if !fi.lengths.Initialized { + panic("need to call Preload first") + } + return fi.lengths.NumFile +} + +func (fi *FuncInfo) File(k int) goobj.CUFileIndex { + if !fi.lengths.Initialized { + panic("need to call Preload first") + } + return (*goobj.FuncInfo)(nil).ReadFile(fi.data, fi.lengths.FileOff, uint32(k)) +} + +// TopFrame returns true if the function associated with this FuncInfo +// is an entry point, meaning that unwinders should stop when they hit +// this function. 
+func (fi *FuncInfo) TopFrame() bool { + return (fi.FuncFlag() & abi.FuncFlagTopFrame) != 0 +} + +type InlTreeNode struct { + Parent int32 + File goobj.CUFileIndex + Line int32 + Func Sym + ParentPC int32 +} + +func (fi *FuncInfo) NumInlTree() uint32 { + if !fi.lengths.Initialized { + panic("need to call Preload first") + } + return fi.lengths.NumInlTree +} + +func (fi *FuncInfo) InlTree(k int) InlTreeNode { + if !fi.lengths.Initialized { + panic("need to call Preload first") + } + node := (*goobj.FuncInfo)(nil).ReadInlTree(fi.data, fi.lengths.InlTreeOff, uint32(k)) + return InlTreeNode{ + Parent: node.Parent, + File: node.File, + Line: node.Line, + Func: fi.l.resolve(fi.r, node.Func), + ParentPC: node.ParentPC, + } +} + +func (l *Loader) FuncInfo(i Sym) FuncInfo { + r, auxs := l.auxs(i) + for j := range auxs { + a := &auxs[j] + if a.Type() == goobj.AuxFuncInfo { + b := r.Data(a.Sym().SymIdx) + return FuncInfo{l, r, b, goobj.FuncInfoLengths{}} + } + } + return FuncInfo{} +} + +// Preload a package: adds autolib. +// Does not add defined package or non-packaged symbols to the symbol table. +// These are done in LoadSyms. +// Does not read symbol data. +// Returns the fingerprint of the object. +func (l *Loader) Preload(localSymVersion int, f *bio.Reader, lib *sym.Library, unit *sym.CompilationUnit, length int64) goobj.FingerprintType { + roObject, readonly, err := f.Slice(uint64(length)) // TODO: no need to map blocks that are for tools only (e.g. RefName) + if err != nil { + log.Fatal("cannot read object file:", err) + } + r := goobj.NewReaderFromBytes(roObject, readonly) + if r == nil { + if len(roObject) >= 8 && bytes.Equal(roObject[:8], []byte("\x00go114ld")) { + log.Fatalf("found object file %s in old format", f.File().Name()) + } + panic("cannot read object file") + } + pkgprefix := objabi.PathToPrefix(lib.Pkg) + "." 
+ ndef := r.NSym() + nhashed64def := r.NHashed64def() + nhasheddef := r.NHasheddef() + or := &oReader{ + Reader: r, + unit: unit, + version: localSymVersion, + pkgprefix: pkgprefix, + syms: make([]Sym, ndef+nhashed64def+nhasheddef+r.NNonpkgdef()+r.NNonpkgref()), + ndef: ndef, + nhasheddef: nhasheddef, + nhashed64def: nhashed64def, + objidx: uint32(len(l.objs)), + } + + if r.Unlinkable() { + log.Fatalf("link: unlinkable object (from package %s) - compiler requires -p flag", lib.Pkg) + } + + // Autolib + lib.Autolib = append(lib.Autolib, r.Autolib()...) + + // DWARF file table + nfile := r.NFile() + unit.FileTable = make([]string, nfile) + for i := range unit.FileTable { + unit.FileTable[i] = r.File(i) + } + + l.addObj(lib.Pkg, or) + + // The caller expects us consuming all the data + f.MustSeek(length, io.SeekCurrent) + + return r.Fingerprint() +} + +// Holds the loader along with temporary states for loading symbols. +type loadState struct { + l *Loader + hashed64Syms map[uint64]symAndSize // short hashed (content-addressable) symbols, keyed by content hash + hashedSyms map[goobj.HashType]symAndSize // hashed (content-addressable) symbols, keyed by content hash +} + +// Preload symbols of given kind from an object. 
+func (st *loadState) preloadSyms(r *oReader, kind int) { + l := st.l + var start, end uint32 + switch kind { + case pkgDef: + start = 0 + end = uint32(r.ndef) + case hashed64Def: + start = uint32(r.ndef) + end = uint32(r.ndef + r.nhashed64def) + case hashedDef: + start = uint32(r.ndef + r.nhashed64def) + end = uint32(r.ndef + r.nhashed64def + r.nhasheddef) + case nonPkgDef: + start = uint32(r.ndef + r.nhashed64def + r.nhasheddef) + end = uint32(r.ndef + r.nhashed64def + r.nhasheddef + r.NNonpkgdef()) + default: + panic("preloadSyms: bad kind") + } + l.growAttrBitmaps(len(l.objSyms) + int(end-start)) + loadingRuntimePkg := r.unit.Lib.Pkg == "runtime" + for i := start; i < end; i++ { + osym := r.Sym(i) + var name string + var v int + if kind != hashed64Def && kind != hashedDef { // we don't need the name, etc. for hashed symbols + name = osym.Name(r.Reader) + v = abiToVer(osym.ABI(), r.version) + } + gi := st.addSym(name, v, r, i, kind, osym) + r.syms[i] = gi + if osym.Local() { + l.SetAttrLocal(gi, true) + } + if osym.UsedInIface() { + l.SetAttrUsedInIface(gi, true) + } + if strings.HasPrefix(name, "runtime.") || + (loadingRuntimePkg && strings.HasPrefix(name, "type:")) { + if bi := goobj.BuiltinIdx(name, int(osym.ABI())); bi != -1 { + // This is a definition of a builtin symbol. Record where it is. + l.builtinSyms[bi] = gi + } + } + if a := int32(osym.Align()); a != 0 && a > l.SymAlign(gi) { + l.SetSymAlign(gi, a) + } + } +} + +// Add syms, hashed (content-addressable) symbols, non-package symbols, and +// references to external symbols (which are always named). +func (l *Loader) LoadSyms(arch *sys.Arch) { + // Allocate space for symbols, making a guess as to how much space we need. + // This function was determined empirically by looking at the cmd/compile on + // Darwin, and picking factors for hashed and hashed64 syms. 
+ var symSize, hashedSize, hashed64Size int + for _, o := range l.objs[goObjStart:] { + symSize += o.r.ndef + o.r.nhasheddef/2 + o.r.nhashed64def/2 + o.r.NNonpkgdef() + hashedSize += o.r.nhasheddef / 2 + hashed64Size += o.r.nhashed64def / 2 + } + // Index 0 is invalid for symbols. + l.objSyms = make([]objSym, 1, symSize) + + st := loadState{ + l: l, + hashed64Syms: make(map[uint64]symAndSize, hashed64Size), + hashedSyms: make(map[goobj.HashType]symAndSize, hashedSize), + } + + for _, o := range l.objs[goObjStart:] { + st.preloadSyms(o.r, pkgDef) + } + l.npkgsyms = l.NSym() + for _, o := range l.objs[goObjStart:] { + st.preloadSyms(o.r, hashed64Def) + st.preloadSyms(o.r, hashedDef) + st.preloadSyms(o.r, nonPkgDef) + } + l.nhashedsyms = len(st.hashed64Syms) + len(st.hashedSyms) + for _, o := range l.objs[goObjStart:] { + loadObjRefs(l, o.r, arch) + } + l.values = make([]int64, l.NSym(), l.NSym()+1000) // +1000 make some room for external symbols + l.outer = make([]Sym, l.NSym(), l.NSym()+1000) +} + +func loadObjRefs(l *Loader, r *oReader, arch *sys.Arch) { + // load non-package refs + ndef := uint32(r.NAlldef()) + for i, n := uint32(0), uint32(r.NNonpkgref()); i < n; i++ { + osym := r.Sym(ndef + i) + name := osym.Name(r.Reader) + v := abiToVer(osym.ABI(), r.version) + r.syms[ndef+i] = l.LookupOrCreateSym(name, v) + gi := r.syms[ndef+i] + if osym.Local() { + l.SetAttrLocal(gi, true) + } + if osym.UsedInIface() { + l.SetAttrUsedInIface(gi, true) + } + } + + // referenced packages + npkg := r.NPkg() + r.pkg = make([]uint32, npkg) + for i := 1; i < npkg; i++ { // PkgIdx 0 is a dummy invalid package + pkg := r.Pkg(i) + objidx, ok := l.objByPkg[pkg] + if !ok { + log.Fatalf("%v: reference to nonexistent package %s", r.unit.Lib, pkg) + } + r.pkg[i] = objidx + } + + // load flags of package refs + for i, n := 0, r.NRefFlags(); i < n; i++ { + rf := r.RefFlags(i) + gi := l.resolve(r, rf.Sym()) + if rf.Flag2()&goobj.SymFlagUsedInIface != 0 { + l.SetAttrUsedInIface(gi, true) + } 
+ } +} + +func abiToVer(abi uint16, localSymVersion int) int { + var v int + if abi == goobj.SymABIstatic { + // Static + v = localSymVersion + } else if abiver := sym.ABIToVersion(obj.ABI(abi)); abiver != -1 { + // Note that data symbols are "ABI0", which maps to version 0. + v = abiver + } else { + log.Fatalf("invalid symbol ABI: %d", abi) + } + return v +} + +// TopLevelSym tests a symbol (by name and kind) to determine whether +// the symbol first class sym (participating in the link) or is an +// anonymous aux or sub-symbol containing some sub-part or payload of +// another symbol. +func (l *Loader) TopLevelSym(s Sym) bool { + return topLevelSym(l.SymName(s), l.SymType(s)) +} + +// topLevelSym tests a symbol name and kind to determine whether +// the symbol first class sym (participating in the link) or is an +// anonymous aux or sub-symbol containing some sub-part or payload of +// another symbol. +func topLevelSym(sname string, skind sym.SymKind) bool { + if sname != "" { + return true + } + switch skind { + case sym.SDWARFFCN, sym.SDWARFABSFCN, sym.SDWARFTYPE, sym.SDWARFCONST, sym.SDWARFCUINFO, sym.SDWARFRANGE, sym.SDWARFLOC, sym.SDWARFLINES, sym.SGOFUNC: + return true + default: + return false + } +} + +// cloneToExternal takes the existing object file symbol (symIdx) +// and creates a new external symbol payload that is a clone with +// respect to name, version, type, relocations, etc. The idea here +// is that if the linker decides it wants to update the contents of +// a symbol originally discovered as part of an object file, it's +// easier to do this if we make the updates to an external symbol +// payload. +func (l *Loader) cloneToExternal(symIdx Sym) { + if l.IsExternal(symIdx) { + panic("sym is already external, no need for clone") + } + + // Read the particulars from object. 
+ r, li := l.toLocal(symIdx) + osym := r.Sym(li) + sname := osym.Name(r.Reader) + sver := abiToVer(osym.ABI(), r.version) + skind := sym.AbiSymKindToSymKind[objabi.SymKind(osym.Type())] + + // Create new symbol, update version and kind. + pi := l.newPayload(sname, sver) + pp := l.payloads[pi] + pp.kind = skind + pp.ver = sver + pp.size = int64(osym.Siz()) + pp.objidx = r.objidx + + // If this is a def, then copy the guts. We expect this case + // to be very rare (one case it may come up is with -X). + if li < uint32(r.NAlldef()) { + + // Copy relocations + relocs := l.Relocs(symIdx) + pp.relocs = make([]goobj.Reloc, relocs.Count()) + for i := range pp.relocs { + // Copy the relocs slice. + // Convert local reference to global reference. + rel := relocs.At(i) + pp.relocs[i].Set(rel.Off(), rel.Siz(), uint16(rel.Type()), rel.Add(), goobj.SymRef{PkgIdx: 0, SymIdx: uint32(rel.Sym())}) + } + + // Copy data + pp.data = r.Data(li) + } + + // If we're overriding a data symbol, collect the associated + // Gotype, so as to propagate it to the new symbol. + auxs := r.Auxs(li) + pp.auxs = auxs + + // Install new payload to global index space. + // (This needs to happen at the end, as the accessors above + // need to access the old symbol content.) + l.objSyms[symIdx] = objSym{l.extReader.objidx, uint32(pi)} + l.extReader.syms = append(l.extReader.syms, symIdx) + + // Some attributes were encoded in the object file. Copy them over. + l.SetAttrDuplicateOK(symIdx, r.Sym(li).Dupok()) + l.SetAttrShared(symIdx, r.Shared()) +} + +// Copy the payload of symbol src to dst. Both src and dst must be external +// symbols. +// The intended use case is that when building/linking against a shared library, +// where we do symbol name mangling, the Go object file may have reference to +// the original symbol name whereas the shared library provides a symbol with +// the mangled name. When we do mangling, we copy payload of mangled to original. 
+func (l *Loader) CopySym(src, dst Sym) { + if !l.IsExternal(dst) { + panic("dst is not external") //l.newExtSym(l.SymName(dst), l.SymVersion(dst)) + } + if !l.IsExternal(src) { + panic("src is not external") //l.cloneToExternal(src) + } + l.payloads[l.extIndex(dst)] = l.payloads[l.extIndex(src)] + l.SetSymPkg(dst, l.SymPkg(src)) + // TODO: other attributes? +} + +// CreateExtSym creates a new external symbol with the specified name +// without adding it to any lookup tables, returning a Sym index for it. +func (l *Loader) CreateExtSym(name string, ver int) Sym { + return l.newExtSym(name, ver) +} + +// CreateStaticSym creates a new static symbol with the specified name +// without adding it to any lookup tables, returning a Sym index for it. +func (l *Loader) CreateStaticSym(name string) Sym { + // Assign a new unique negative version -- this is to mark the + // symbol so that it is not included in the name lookup table. + l.anonVersion-- + return l.newExtSym(name, l.anonVersion) +} + +func (l *Loader) FreeSym(i Sym) { + if l.IsExternal(i) { + pp := l.getPayload(i) + *pp = extSymPayload{} + } +} + +// relocId is essentially a tuple identifying the Rth +// relocation of symbol S. +type relocId struct { + sym Sym + ridx int +} + +// SetRelocVariant sets the 'variant' property of a relocation on +// some specific symbol. +func (l *Loader) SetRelocVariant(s Sym, ri int, v sym.RelocVariant) { + // sanity check + if relocs := l.Relocs(s); ri >= relocs.Count() { + panic("invalid relocation ID") + } + if l.relocVariant == nil { + l.relocVariant = make(map[relocId]sym.RelocVariant) + } + if v != 0 { + l.relocVariant[relocId{s, ri}] = v + } else { + delete(l.relocVariant, relocId{s, ri}) + } +} + +// RelocVariant returns the 'variant' property of a relocation on +// some specific symbol. 
+func (l *Loader) RelocVariant(s Sym, ri int) sym.RelocVariant { + return l.relocVariant[relocId{s, ri}] +} + +// UndefinedRelocTargets iterates through the global symbol index +// space, looking for symbols with relocations targeting undefined +// references. The linker's loadlib method uses this to determine if +// there are unresolved references to functions in system libraries +// (for example, libgcc.a), presumably due to CGO code. Return value +// is a pair of lists of loader.Sym's. First list corresponds to the +// corresponding to the undefined symbols themselves, the second list +// is the symbol that is making a reference to the undef. The "limit" +// param controls the maximum number of results returned; if "limit" +// is -1, then all undefs are returned. +func (l *Loader) UndefinedRelocTargets(limit int) ([]Sym, []Sym) { + result, fromr := []Sym{}, []Sym{} +outerloop: + for si := Sym(1); si < Sym(len(l.objSyms)); si++ { + relocs := l.Relocs(si) + for ri := 0; ri < relocs.Count(); ri++ { + r := relocs.At(ri) + rs := r.Sym() + if rs != 0 && l.SymType(rs) == sym.SXREF && l.SymName(rs) != ".got" { + result = append(result, rs) + fromr = append(fromr, si) + if limit != -1 && len(result) >= limit { + break outerloop + } + } + } + } + return result, fromr +} + +// AssignTextSymbolOrder populates the Textp slices within each +// library and compilation unit, insuring that packages are laid down +// in dependency order (internal first, then everything else). Return value +// is a slice of all text syms. +func (l *Loader) AssignTextSymbolOrder(libs []*sym.Library, intlibs []bool, extsyms []Sym) []Sym { + + // Library Textp lists should be empty at this point. + for _, lib := range libs { + if len(lib.Textp) != 0 { + panic("expected empty Textp slice for library") + } + if len(lib.DupTextSyms) != 0 { + panic("expected empty DupTextSyms slice for library") + } + } + + // Used to record which dupok symbol we've assigned to a unit. 
+ // Can't use the onlist attribute here because it will need to + // clear for the later assignment of the sym.Symbol to a unit. + // NB: we can convert to using onList once we no longer have to + // call the regular addToTextp. + assignedToUnit := MakeBitmap(l.NSym() + 1) + + // Start off textp with reachable external syms. + textp := []Sym{} + for _, sym := range extsyms { + if !l.attrReachable.Has(sym) { + continue + } + textp = append(textp, sym) + } + + // Walk through all text symbols from Go object files and append + // them to their corresponding library's textp list. + for _, o := range l.objs[goObjStart:] { + r := o.r + lib := r.unit.Lib + for i, n := uint32(0), uint32(r.NAlldef()); i < n; i++ { + gi := l.toGlobal(r, i) + if !l.attrReachable.Has(gi) { + continue + } + osym := r.Sym(i) + st := sym.AbiSymKindToSymKind[objabi.SymKind(osym.Type())] + if st != sym.STEXT { + continue + } + dupok := osym.Dupok() + if r2, i2 := l.toLocal(gi); r2 != r || i2 != i { + // A dupok text symbol is resolved to another package. + // We still need to record its presence in the current + // package, as the trampoline pass expects packages + // are laid out in dependency order. + lib.DupTextSyms = append(lib.DupTextSyms, sym.LoaderSym(gi)) + continue // symbol in different object + } + if dupok { + lib.DupTextSyms = append(lib.DupTextSyms, sym.LoaderSym(gi)) + continue + } + + lib.Textp = append(lib.Textp, sym.LoaderSym(gi)) + } + } + + // Now assemble global textp, and assign text symbols to units. 
+ for _, doInternal := range [2]bool{true, false} { + for idx, lib := range libs { + if intlibs[idx] != doInternal { + continue + } + lists := [2][]sym.LoaderSym{lib.Textp, lib.DupTextSyms} + for i, list := range lists { + for _, s := range list { + sym := Sym(s) + if !assignedToUnit.Has(sym) { + textp = append(textp, sym) + unit := l.SymUnit(sym) + if unit != nil { + unit.Textp = append(unit.Textp, s) + assignedToUnit.Set(sym) + } + // Dupok symbols may be defined in multiple packages; the + // associated package for a dupok sym is chosen sort of + // arbitrarily (the first containing package that the linker + // loads). Canonicalizes its Pkg to the package with which + // it will be laid down in text. + if i == 1 /* DupTextSyms2 */ && l.SymPkg(sym) != lib.Pkg { + l.SetSymPkg(sym, lib.Pkg) + } + } + } + } + lib.Textp = nil + lib.DupTextSyms = nil + } + } + + return textp +} + +// ErrorReporter is a helper class for reporting errors. +type ErrorReporter struct { + ldr *Loader + AfterErrorAction func() +} + +// Errorf method logs an error message. +// +// After each error, the error actions function will be invoked; this +// will either terminate the link immediately (if -h option given) +// or it will keep a count and exit if more than 20 errors have been printed. +// +// Logging an error means that on exit cmd/link will delete any +// output file and return a non-zero error code. +func (reporter *ErrorReporter) Errorf(s Sym, format string, args ...interface{}) { + if s != 0 && reporter.ldr.SymName(s) != "" { + // Note: Replace is needed here because symbol names might have % in them, + // due to the use of LinkString for names of instantiating types. + format = strings.Replace(reporter.ldr.SymName(s), "%", "%%", -1) + ": " + format + } else { + format = fmt.Sprintf("sym %d: %s", s, format) + } + format += "\n" + fmt.Fprintf(os.Stderr, format, args...) + reporter.AfterErrorAction() +} + +// GetErrorReporter returns the loader's associated error reporter. 
+func (l *Loader) GetErrorReporter() *ErrorReporter { + return l.errorReporter +} + +// Errorf method logs an error message. See ErrorReporter.Errorf for details. +func (l *Loader) Errorf(s Sym, format string, args ...interface{}) { + l.errorReporter.Errorf(s, format, args...) +} + +// Symbol statistics. +func (l *Loader) Stat() string { + s := fmt.Sprintf("%d symbols, %d reachable\n", l.NSym(), l.NReachableSym()) + s += fmt.Sprintf("\t%d package symbols, %d hashed symbols, %d non-package symbols, %d external symbols\n", + l.npkgsyms, l.nhashedsyms, int(l.extStart)-l.npkgsyms-l.nhashedsyms, l.NSym()-int(l.extStart)) + return s +} + +// For debugging. +func (l *Loader) Dump() { + fmt.Println("objs") + for _, obj := range l.objs[goObjStart:] { + if obj.r != nil { + fmt.Println(obj.i, obj.r.unit.Lib) + } + } + fmt.Println("extStart:", l.extStart) + fmt.Println("Nsyms:", len(l.objSyms)) + fmt.Println("syms") + for i := Sym(1); i < Sym(len(l.objSyms)); i++ { + pi := "" + if l.IsExternal(i) { + pi = fmt.Sprintf("", l.extIndex(i)) + } + sect := "" + if l.SymSect(i) != nil { + sect = l.SymSect(i).Name + } + fmt.Printf("%v %v %v %v %x %v\n", i, l.SymName(i), l.SymType(i), pi, l.SymValue(i), sect) + } + fmt.Println("symsByName") + for name, i := range l.symsByName[0] { + fmt.Println(i, name, 0) + } + for name, i := range l.symsByName[1] { + fmt.Println(i, name, 1) + } + fmt.Println("payloads:") + for i := range l.payloads { + pp := l.payloads[i] + fmt.Println(i, pp.name, pp.ver, pp.kind) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/loader/loader_test.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/loader/loader_test.go new file mode 100644 index 0000000000000000000000000000000000000000..32ff2586cebfb71f5a9f77c8a9912d71915825c0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/loader/loader_test.go @@ -0,0 +1,465 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package loader + +import ( + "bytes" + "cmd/internal/goobj" + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/sym" + "fmt" + "testing" +) + +// dummyAddSym adds the named symbol to the loader as if it had been +// read from a Go object file. Note that it allocates a global +// index without creating an associated object reader, so one can't +// do anything interesting with this symbol (such as look at its +// data or relocations). +func addDummyObjSym(t *testing.T, ldr *Loader, or *oReader, name string) Sym { + idx := uint32(len(ldr.objSyms)) + st := loadState{l: ldr} + return st.addSym(name, 0, or, idx, nonPkgDef, &goobj.Sym{}) +} + +func mkLoader() *Loader { + er := ErrorReporter{} + ldr := NewLoader(0, &er) + er.ldr = ldr + return ldr +} + +func TestAddMaterializedSymbol(t *testing.T) { + ldr := mkLoader() + dummyOreader := oReader{version: -1, syms: make([]Sym, 100)} + or := &dummyOreader + + // Create some syms from a dummy object file symbol to get things going. + ts1 := addDummyObjSym(t, ldr, or, "type:uint8") + ts2 := addDummyObjSym(t, ldr, or, "mumble") + ts3 := addDummyObjSym(t, ldr, or, "type:string") + + // Create some external symbols. 
+ es1 := ldr.LookupOrCreateSym("extnew1", 0) + if es1 == 0 { + t.Fatalf("LookupOrCreateSym failed for extnew1") + } + es1x := ldr.LookupOrCreateSym("extnew1", 0) + if es1x != es1 { + t.Fatalf("LookupOrCreateSym lookup: expected %d got %d for second lookup", es1, es1x) + } + es2 := ldr.LookupOrCreateSym("go:info.type.uint8", 0) + if es2 == 0 { + t.Fatalf("LookupOrCreateSym failed for go.info.type.uint8") + } + // Create a nameless symbol + es3 := ldr.CreateStaticSym("") + if es3 == 0 { + t.Fatalf("CreateStaticSym failed for nameless sym") + } + + // Grab symbol builder pointers + sb1 := ldr.MakeSymbolUpdater(es1) + sb2 := ldr.MakeSymbolUpdater(es2) + sb3 := ldr.MakeSymbolUpdater(es3) + + // Suppose we create some more symbols, which triggers a grow. + // Make sure the symbol builder's payload pointer is valid, + // even across a grow. + for i := 0; i < 9999; i++ { + ldr.CreateStaticSym("dummy") + } + + // Check get/set symbol type + es3typ := sb3.Type() + if es3typ != sym.Sxxx { + t.Errorf("SymType(es3): expected %v, got %v", sym.Sxxx, es3typ) + } + sb3.SetType(sym.SRODATA) + es3typ = sb3.Type() + if es3typ != sym.SRODATA { + t.Errorf("SymType(es3): expected %v, got %v", sym.SRODATA, es3typ) + } + es3typ = ldr.SymType(es3) + if es3typ != sym.SRODATA { + t.Errorf("SymType(es3): expected %v, got %v", sym.SRODATA, es3typ) + } + + // New symbols should not initially be reachable. + if ldr.AttrReachable(es1) || ldr.AttrReachable(es2) || ldr.AttrReachable(es3) { + t.Errorf("newly materialized symbols should not be reachable") + } + + // ... however it should be possible to set/unset their reachability. 
+ ldr.SetAttrReachable(es3, true) + if !ldr.AttrReachable(es3) { + t.Errorf("expected reachable symbol after update") + } + ldr.SetAttrReachable(es3, false) + if ldr.AttrReachable(es3) { + t.Errorf("expected unreachable symbol after update") + } + + // Test expansion of attr bitmaps + for idx := 0; idx < 36; idx++ { + es := ldr.LookupOrCreateSym(fmt.Sprintf("zext%d", idx), 0) + if ldr.AttrOnList(es) { + t.Errorf("expected OnList after creation") + } + ldr.SetAttrOnList(es, true) + if !ldr.AttrOnList(es) { + t.Errorf("expected !OnList after update") + } + if ldr.AttrDuplicateOK(es) { + t.Errorf("expected DupOK after creation") + } + ldr.SetAttrDuplicateOK(es, true) + if !ldr.AttrDuplicateOK(es) { + t.Errorf("expected !DupOK after update") + } + } + + sb1 = ldr.MakeSymbolUpdater(es1) + sb2 = ldr.MakeSymbolUpdater(es2) + + // Get/set a few other attributes + if ldr.AttrVisibilityHidden(es3) { + t.Errorf("expected initially not hidden") + } + ldr.SetAttrVisibilityHidden(es3, true) + if !ldr.AttrVisibilityHidden(es3) { + t.Errorf("expected hidden after update") + } + + // Test get/set symbol value. + toTest := []Sym{ts2, es3} + for i, s := range toTest { + if v := ldr.SymValue(s); v != 0 { + t.Errorf("ldr.Value(%d): expected 0 got %d\n", s, v) + } + nv := int64(i + 101) + ldr.SetSymValue(s, nv) + if v := ldr.SymValue(s); v != nv { + t.Errorf("ldr.SetValue(%d,%d): expected %d got %d\n", s, nv, nv, v) + } + } + + // Check/set alignment + es3al := ldr.SymAlign(es3) + if es3al != 0 { + t.Errorf("SymAlign(es3): expected 0, got %d", es3al) + } + ldr.SetSymAlign(es3, 128) + es3al = ldr.SymAlign(es3) + if es3al != 128 { + t.Errorf("SymAlign(es3): expected 128, got %d", es3al) + } + + // Add some relocations to the new symbols. 
+ r1, _ := sb1.AddRel(objabi.R_ADDR) + r1.SetOff(0) + r1.SetSiz(1) + r1.SetSym(ts1) + r2, _ := sb1.AddRel(objabi.R_CALL) + r2.SetOff(3) + r2.SetSiz(8) + r2.SetSym(ts2) + r3, _ := sb2.AddRel(objabi.R_USETYPE) + r3.SetOff(7) + r3.SetSiz(1) + r3.SetSym(ts3) + + // Add some data to the symbols. + d1 := []byte{1, 2, 3} + d2 := []byte{4, 5, 6, 7} + sb1.AddBytes(d1) + sb2.AddBytes(d2) + + // Now invoke the usual loader interfaces to make sure + // we're getting the right things back for these symbols. + // First relocations... + expRel := [][]Reloc{{r1, r2}, {r3}} + for k, sb := range []*SymbolBuilder{sb1, sb2} { + rsl := sb.Relocs() + exp := expRel[k] + if !sameRelocSlice(&rsl, exp) { + t.Errorf("expected relocs %v, got %v", exp, rsl) + } + } + + // ... then data. + dat := sb2.Data() + if bytes.Compare(dat, d2) != 0 { + t.Errorf("expected es2 data %v, got %v", d2, dat) + } + + // Nameless symbol should still be nameless. + es3name := ldr.SymName(es3) + if "" != es3name { + t.Errorf("expected es3 name of '', got '%s'", es3name) + } + + // Read value of materialized symbol. 
+ es1val := sb1.Value() + if 0 != es1val { + t.Errorf("expected es1 value of 0, got %v", es1val) + } + + // Test other misc methods + irm := ldr.IsReflectMethod(es1) + if 0 != es1val { + t.Errorf("expected IsReflectMethod(es1) value of 0, got %v", irm) + } +} + +func sameRelocSlice(s1 *Relocs, s2 []Reloc) bool { + if s1.Count() != len(s2) { + return false + } + for i := 0; i < s1.Count(); i++ { + r1 := s1.At(i) + r2 := &s2[i] + if r1.Sym() != r2.Sym() || + r1.Type() != r2.Type() || + r1.Off() != r2.Off() || + r1.Add() != r2.Add() || + r1.Siz() != r2.Siz() { + return false + } + } + return true +} + +type addFunc func(l *Loader, s Sym, s2 Sym) Sym + +func mkReloc(l *Loader, typ objabi.RelocType, off int32, siz uint8, add int64, sym Sym) Reloc { + r := Reloc{&goobj.Reloc{}, l.extReader, l} + r.SetType(typ) + r.SetOff(off) + r.SetSiz(siz) + r.SetAdd(add) + r.SetSym(sym) + return r +} + +func TestAddDataMethods(t *testing.T) { + ldr := mkLoader() + dummyOreader := oReader{version: -1, syms: make([]Sym, 100)} + or := &dummyOreader + + // Populate loader with some symbols. 
+ addDummyObjSym(t, ldr, or, "type:uint8") + ldr.LookupOrCreateSym("hello", 0) + + arch := sys.ArchAMD64 + var testpoints = []struct { + which string + addDataFunc addFunc + expData []byte + expKind sym.SymKind + expRel []Reloc + }{ + { + which: "AddUint8", + addDataFunc: func(l *Loader, s Sym, _ Sym) Sym { + sb := l.MakeSymbolUpdater(s) + sb.AddUint8('a') + return s + }, + expData: []byte{'a'}, + expKind: sym.SDATA, + }, + { + which: "AddUintXX", + addDataFunc: func(l *Loader, s Sym, _ Sym) Sym { + sb := l.MakeSymbolUpdater(s) + sb.AddUintXX(arch, 25185, 2) + return s + }, + expData: []byte{'a', 'b'}, + expKind: sym.SDATA, + }, + { + which: "SetUint8", + addDataFunc: func(l *Loader, s Sym, _ Sym) Sym { + sb := l.MakeSymbolUpdater(s) + sb.AddUint8('a') + sb.AddUint8('b') + sb.SetUint8(arch, 1, 'c') + return s + }, + expData: []byte{'a', 'c'}, + expKind: sym.SDATA, + }, + { + which: "AddString", + addDataFunc: func(l *Loader, s Sym, _ Sym) Sym { + sb := l.MakeSymbolUpdater(s) + sb.Addstring("hello") + return s + }, + expData: []byte{'h', 'e', 'l', 'l', 'o', 0}, + expKind: sym.SNOPTRDATA, + }, + { + which: "AddAddrPlus", + addDataFunc: func(l *Loader, s Sym, s2 Sym) Sym { + sb := l.MakeSymbolUpdater(s) + sb.AddAddrPlus(arch, s2, 3) + return s + }, + expData: []byte{0, 0, 0, 0, 0, 0, 0, 0}, + expKind: sym.SDATA, + expRel: []Reloc{mkReloc(ldr, objabi.R_ADDR, 0, 8, 3, 6)}, + }, + { + which: "AddAddrPlus4", + addDataFunc: func(l *Loader, s Sym, s2 Sym) Sym { + sb := l.MakeSymbolUpdater(s) + sb.AddAddrPlus4(arch, s2, 3) + return s + }, + expData: []byte{0, 0, 0, 0}, + expKind: sym.SDATA, + expRel: []Reloc{mkReloc(ldr, objabi.R_ADDR, 0, 4, 3, 7)}, + }, + { + which: "AddCURelativeAddrPlus", + addDataFunc: func(l *Loader, s Sym, s2 Sym) Sym { + sb := l.MakeSymbolUpdater(s) + sb.AddCURelativeAddrPlus(arch, s2, 7) + return s + }, + expData: []byte{0, 0, 0, 0, 0, 0, 0, 0}, + expKind: sym.SDATA, + expRel: []Reloc{mkReloc(ldr, objabi.R_ADDRCUOFF, 0, 8, 7, 8)}, + }, + { + which: 
"AddPEImageRelativeAddrPlus", + addDataFunc: func(l *Loader, s Sym, s2 Sym) Sym { + sb := l.MakeSymbolUpdater(s) + sb.AddPEImageRelativeAddrPlus(arch, s2, 3) + return s + }, + expData: []byte{0, 0, 0, 0}, + expKind: sym.SDATA, + expRel: []Reloc{mkReloc(ldr, objabi.R_PEIMAGEOFF, 0, 4, 3, 9)}, + }, + } + + var pmi Sym + for k, tp := range testpoints { + name := fmt.Sprintf("new%d", k+1) + mi := ldr.LookupOrCreateSym(name, 0) + if mi == 0 { + t.Fatalf("LookupOrCreateSym failed for '" + name + "'") + } + mi = tp.addDataFunc(ldr, mi, pmi) + if ldr.SymType(mi) != tp.expKind { + t.Errorf("testing Loader.%s: expected kind %s got %s", + tp.which, tp.expKind, ldr.SymType(mi)) + } + if bytes.Compare(ldr.Data(mi), tp.expData) != 0 { + t.Errorf("testing Loader.%s: expected data %v got %v", + tp.which, tp.expData, ldr.Data(mi)) + } + relocs := ldr.Relocs(mi) + if !sameRelocSlice(&relocs, tp.expRel) { + t.Fatalf("testing Loader.%s: got relocslice %+v wanted %+v", + tp.which, relocs, tp.expRel) + } + pmi = mi + } +} + +func TestOuterSub(t *testing.T) { + ldr := mkLoader() + dummyOreader := oReader{version: -1, syms: make([]Sym, 100)} + or := &dummyOreader + + // Populate loader with some symbols. 
+ addDummyObjSym(t, ldr, or, "type:uint8") + es1 := ldr.LookupOrCreateSym("outer", 0) + ldr.MakeSymbolUpdater(es1).SetSize(101) + es2 := ldr.LookupOrCreateSym("sub1", 0) + es3 := ldr.LookupOrCreateSym("sub2", 0) + es4 := ldr.LookupOrCreateSym("sub3", 0) + es5 := ldr.LookupOrCreateSym("sub4", 0) + es6 := ldr.LookupOrCreateSym("sub5", 0) + + // Should not have an outer sym initially + if ldr.OuterSym(es1) != 0 { + t.Errorf("es1 outer sym set ") + } + if ldr.SubSym(es2) != 0 { + t.Errorf("es2 outer sym set ") + } + + // Establish first outer/sub relationship + ldr.AddInteriorSym(es1, es2) + if ldr.OuterSym(es1) != 0 { + t.Errorf("ldr.OuterSym(es1) got %d wanted %d", ldr.OuterSym(es1), 0) + } + if ldr.OuterSym(es2) != es1 { + t.Errorf("ldr.OuterSym(es2) got %d wanted %d", ldr.OuterSym(es2), es1) + } + if ldr.SubSym(es1) != es2 { + t.Errorf("ldr.SubSym(es1) got %d wanted %d", ldr.SubSym(es1), es2) + } + if ldr.SubSym(es2) != 0 { + t.Errorf("ldr.SubSym(es2) got %d wanted %d", ldr.SubSym(es2), 0) + } + + // Establish second outer/sub relationship + ldr.AddInteriorSym(es1, es3) + if ldr.OuterSym(es1) != 0 { + t.Errorf("ldr.OuterSym(es1) got %d wanted %d", ldr.OuterSym(es1), 0) + } + if ldr.OuterSym(es2) != es1 { + t.Errorf("ldr.OuterSym(es2) got %d wanted %d", ldr.OuterSym(es2), es1) + } + if ldr.OuterSym(es3) != es1 { + t.Errorf("ldr.OuterSym(es3) got %d wanted %d", ldr.OuterSym(es3), es1) + } + if ldr.SubSym(es1) != es3 { + t.Errorf("ldr.SubSym(es1) got %d wanted %d", ldr.SubSym(es1), es3) + } + if ldr.SubSym(es3) != es2 { + t.Errorf("ldr.SubSym(es3) got %d wanted %d", ldr.SubSym(es3), es2) + } + + // Some more + ldr.AddInteriorSym(es1, es4) + ldr.AddInteriorSym(es1, es5) + ldr.AddInteriorSym(es1, es6) + + // Set values. 
+ ldr.SetSymValue(es2, 7) + ldr.SetSymValue(es3, 1) + ldr.SetSymValue(es4, 13) + ldr.SetSymValue(es5, 101) + ldr.SetSymValue(es6, 3) + + // Sort + news := ldr.SortSub(es1) + if news != es3 { + t.Errorf("ldr.SortSub leader got %d wanted %d", news, es3) + } + pv := int64(-1) + count := 0 + for ss := ldr.SubSym(es1); ss != 0; ss = ldr.SubSym(ss) { + v := ldr.SymValue(ss) + if v <= pv { + t.Errorf("ldr.SortSub sortfail at %d: val %d >= prev val %d", + ss, v, pv) + } + pv = v + count++ + } + if count != 5 { + t.Errorf("expected %d in sub list got %d", 5, count) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/loader/symbolbuilder.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/loader/symbolbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..b9eaca7fb6fd61c9e41b6d5d1b90220dbb1831ea --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/loader/symbolbuilder.go @@ -0,0 +1,442 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package loader + +import ( + "cmd/internal/goobj" + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/sym" + "sort" +) + +// SymbolBuilder is a helper designed to help with the construction +// of new symbol contents. +type SymbolBuilder struct { + *extSymPayload // points to payload being updated + symIdx Sym // index of symbol being updated/constructed + l *Loader // loader +} + +// MakeSymbolBuilder creates a symbol builder for use in constructing +// an entirely new symbol. +func (l *Loader) MakeSymbolBuilder(name string) *SymbolBuilder { + // for now assume that any new sym is intended to be static + symIdx := l.CreateStaticSym(name) + sb := &SymbolBuilder{l: l, symIdx: symIdx} + sb.extSymPayload = l.getPayload(symIdx) + return sb +} + +// MakeSymbolUpdater creates a symbol builder helper for an existing +// symbol 'symIdx'. 
If 'symIdx' is not an external symbol, then create +// a clone of it (copy name, properties, etc) fix things up so that +// the lookup tables and caches point to the new version, not the old +// version. +func (l *Loader) MakeSymbolUpdater(symIdx Sym) *SymbolBuilder { + if symIdx == 0 { + panic("can't update the null symbol") + } + if !l.IsExternal(symIdx) { + // Create a clone with the same name/version/kind etc. + l.cloneToExternal(symIdx) + } + + // Construct updater and return. + sb := &SymbolBuilder{l: l, symIdx: symIdx} + sb.extSymPayload = l.getPayload(symIdx) + return sb +} + +// CreateSymForUpdate creates a symbol with given name and version, +// returns a CreateSymForUpdate for update. If the symbol already +// exists, it will update in-place. +func (l *Loader) CreateSymForUpdate(name string, version int) *SymbolBuilder { + s := l.LookupOrCreateSym(name, version) + l.SetAttrReachable(s, true) + return l.MakeSymbolUpdater(s) +} + +// Getters for properties of the symbol we're working on. 
+
+// Getters: read-side accessors for the symbol under construction.
+// Name/Version/Type/Size/Data read the extSymPayload fields directly;
+// the remainder delegate to the Loader's per-symbol side tables.
+func (sb *SymbolBuilder) Sym() Sym { return sb.symIdx }
+func (sb *SymbolBuilder) Name() string { return sb.name }
+func (sb *SymbolBuilder) Version() int { return sb.ver }
+func (sb *SymbolBuilder) Type() sym.SymKind { return sb.kind }
+func (sb *SymbolBuilder) Size() int64 { return sb.size }
+func (sb *SymbolBuilder) Data() []byte { return sb.data }
+func (sb *SymbolBuilder) Value() int64 { return sb.l.SymValue(sb.symIdx) }
+func (sb *SymbolBuilder) Align() int32 { return sb.l.SymAlign(sb.symIdx) }
+func (sb *SymbolBuilder) Localentry() uint8 { return sb.l.SymLocalentry(sb.symIdx) }
+func (sb *SymbolBuilder) OnList() bool { return sb.l.AttrOnList(sb.symIdx) }
+func (sb *SymbolBuilder) External() bool { return sb.l.AttrExternal(sb.symIdx) }
+func (sb *SymbolBuilder) Extname() string { return sb.l.SymExtname(sb.symIdx) }
+func (sb *SymbolBuilder) CgoExportDynamic() bool { return sb.l.AttrCgoExportDynamic(sb.symIdx) }
+func (sb *SymbolBuilder) Dynimplib() string { return sb.l.SymDynimplib(sb.symIdx) }
+func (sb *SymbolBuilder) Dynimpvers() string { return sb.l.SymDynimpvers(sb.symIdx) }
+func (sb *SymbolBuilder) SubSym() Sym { return sb.l.SubSym(sb.symIdx) }
+func (sb *SymbolBuilder) GoType() Sym { return sb.l.SymGoType(sb.symIdx) }
+func (sb *SymbolBuilder) VisibilityHidden() bool { return sb.l.AttrVisibilityHidden(sb.symIdx) }
+func (sb *SymbolBuilder) Sect() *sym.Section { return sb.l.SymSect(sb.symIdx) }
+
+// Setters for symbol properties.
+
+// Setters: SetType/SetSize/SetData mutate the extSymPayload fields
+// directly; the remainder delegate to the Loader's per-symbol side tables.
+func (sb *SymbolBuilder) SetType(kind sym.SymKind) { sb.kind = kind }
+func (sb *SymbolBuilder) SetSize(size int64) { sb.size = size }
+func (sb *SymbolBuilder) SetData(data []byte) { sb.data = data }
+func (sb *SymbolBuilder) SetOnList(v bool) { sb.l.SetAttrOnList(sb.symIdx, v) }
+func (sb *SymbolBuilder) SetExternal(v bool) { sb.l.SetAttrExternal(sb.symIdx, v) }
+func (sb *SymbolBuilder) SetValue(v int64) { sb.l.SetSymValue(sb.symIdx, v) }
+func (sb *SymbolBuilder) SetAlign(align int32) { sb.l.SetSymAlign(sb.symIdx, align) }
+func (sb *SymbolBuilder) SetLocalentry(value uint8) { sb.l.SetSymLocalentry(sb.symIdx, value) }
+func (sb *SymbolBuilder) SetExtname(value string) { sb.l.SetSymExtname(sb.symIdx, value) }
+func (sb *SymbolBuilder) SetDynimplib(value string) { sb.l.SetSymDynimplib(sb.symIdx, value) }
+func (sb *SymbolBuilder) SetDynimpvers(value string) { sb.l.SetSymDynimpvers(sb.symIdx, value) }
+func (sb *SymbolBuilder) SetPlt(value int32) { sb.l.SetPlt(sb.symIdx, value) }
+func (sb *SymbolBuilder) SetGot(value int32) { sb.l.SetGot(sb.symIdx, value) }
+func (sb *SymbolBuilder) SetSpecial(value bool) { sb.l.SetAttrSpecial(sb.symIdx, value) }
+func (sb *SymbolBuilder) SetLocal(value bool) { sb.l.SetAttrLocal(sb.symIdx, value) }
+func (sb *SymbolBuilder) SetVisibilityHidden(value bool) {
+	sb.l.SetAttrVisibilityHidden(sb.symIdx, value)
+}
+func (sb *SymbolBuilder) SetNotInSymbolTable(value bool) {
+	sb.l.SetAttrNotInSymbolTable(sb.symIdx, value)
+}
+func (sb *SymbolBuilder) SetSect(sect *sym.Section) { sb.l.SetSymSect(sb.symIdx, sect) }
+
+// AddBytes appends data to the symbol's payload and updates its size
+// to match. A symbol whose kind is still unset (0) defaults to SDATA.
+func (sb *SymbolBuilder) AddBytes(data []byte) {
+	if sb.kind == 0 {
+		sb.kind = sym.SDATA
+	}
+	sb.data = append(sb.data, data...)
+	sb.size = int64(len(sb.data))
+}
+
+// Relocs returns the symbol's current relocations.
+func (sb *SymbolBuilder) Relocs() Relocs {
+	return sb.l.Relocs(sb.symIdx)
+}
+
+// ResetRelocs removes all relocations on this symbol.
+func (sb *SymbolBuilder) ResetRelocs() {
+	// Truncate in place so the backing array can be reused by later appends.
+	sb.relocs = sb.relocs[:0]
+}
+
+// SetRelocType sets the type of the 'i'-th relocation on this sym to 't'
+func (sb *SymbolBuilder) SetRelocType(i int, t objabi.RelocType) {
+	sb.relocs[i].SetType(uint16(t))
+}
+
+// SetRelocSym sets the target sym of the 'i'-th relocation on this sym to 's'
+func (sb *SymbolBuilder) SetRelocSym(i int, tgt Sym) {
+	// NOTE(review): PkgIdx 0 appears to mean the SymIdx is a global loader
+	// index rather than a package-local one — confirm against cmd/internal/goobj.
+	sb.relocs[i].SetSym(goobj.SymRef{PkgIdx: 0, SymIdx: uint32(tgt)})
+}
+
+// SetRelocAdd sets the addend of the 'i'-th relocation on this sym to 'a'
+func (sb *SymbolBuilder) SetRelocAdd(i int, a int64) {
+	sb.relocs[i].SetAdd(a)
+}
+
+// Add n relocations, return a handle to the relocations.
+func (sb *SymbolBuilder) AddRelocs(n int) Relocs {
+	sb.relocs = append(sb.relocs, make([]goobj.Reloc, n)...)
+	return sb.l.Relocs(sb.symIdx)
+}
+
+// Add a relocation with given type, return its handle and index
+// (to set other fields).
+func (sb *SymbolBuilder) AddRel(typ objabi.RelocType) (Reloc, int) {
+	j := len(sb.relocs)
+	sb.relocs = append(sb.relocs, goobj.Reloc{})
+	sb.relocs[j].SetType(uint16(typ))
+	// Re-fetch the Relocs view after the append so the returned handle
+	// refers to the (possibly reallocated) slice.
+	relocs := sb.Relocs()
+	return relocs.At(j), j
+}
+
+// Sort relocations by offset.
+func (sb *SymbolBuilder) SortRelocs() {
+	sort.Sort((*relocsByOff)(sb.extSymPayload))
+}
+
+// Implement sort.Interface (orders the payload's relocations by offset).
+type relocsByOff extSymPayload
+
+func (p *relocsByOff) Len() int { return len(p.relocs) }
+func (p *relocsByOff) Less(i, j int) bool { return p.relocs[i].Off() < p.relocs[j].Off() }
+func (p *relocsByOff) Swap(i, j int) {
+	p.relocs[i], p.relocs[j] = p.relocs[j], p.relocs[i]
+}
+
+// Attribute accessors below delegate to the Loader's attribute tables.
+
+func (sb *SymbolBuilder) Reachable() bool {
+	return sb.l.AttrReachable(sb.symIdx)
+}
+
+func (sb *SymbolBuilder) SetReachable(v bool) {
+	sb.l.SetAttrReachable(sb.symIdx, v)
+}
+
+func (sb *SymbolBuilder) ReadOnly() bool {
+	return sb.l.AttrReadOnly(sb.symIdx)
+}
+
+func (sb *SymbolBuilder) SetReadOnly(v bool) {
+	sb.l.SetAttrReadOnly(sb.symIdx, v)
+}
+
+func (sb *SymbolBuilder) DuplicateOK() bool {
+	return sb.l.AttrDuplicateOK(sb.symIdx)
+}
+
+func (sb *SymbolBuilder) SetDuplicateOK(v bool) {
+	sb.l.SetAttrDuplicateOK(sb.symIdx, v)
+}
+
+func (sb *SymbolBuilder) Outer() Sym {
+	return sb.l.OuterSym(sb.symIdx)
+}
+
+func (sb *SymbolBuilder) Sub() Sym {
+	return sb.l.SubSym(sb.symIdx)
+}
+
+func (sb *SymbolBuilder) SortSub() {
+	sb.l.SortSub(sb.symIdx)
+}
+
+func (sb *SymbolBuilder) AddInteriorSym(sub Sym) {
+	sb.l.AddInteriorSym(sb.symIdx, sub)
+}
+
+// AddUint8 appends a single byte to the symbol's data, returning the
+// offset at which it was written. Kind defaults to SDATA if unset.
+func (sb *SymbolBuilder) AddUint8(v uint8) int64 {
+	off := sb.size
+	if sb.kind == 0 {
+		sb.kind = sym.SDATA
+	}
+	sb.size++
+	sb.data = append(sb.data, v)
+	return off
+}
+
+// AddUintXX appends an unsigned integer of width 'wid' bytes at the end
+// of the symbol's data, returning the offset at which it was written.
+func (sb *SymbolBuilder) AddUintXX(arch *sys.Arch, v uint64, wid int) int64 {
+	off := sb.size
+	sb.setUintXX(arch, off, v, int64(wid))
+	return off
+}
+
+// setUintXX writes v with width wid (1, 2, 4 or 8 bytes) at offset off in
+// the target byte order, growing data/size as needed; returns off+wid.
+func (sb *SymbolBuilder) setUintXX(arch *sys.Arch, off int64, v uint64, wid int64) int64 {
+	if sb.kind == 0 {
+		sb.kind = sym.SDATA
+	}
+	if sb.size < off+wid {
+		sb.size = off + wid
+		sb.Grow(sb.size)
+	}
+
+	switch wid {
+	case 1:
+		sb.data[off] = uint8(v)
+	case 2:
+		arch.ByteOrder.PutUint16(sb.data[off:], uint16(v))
+	case 4:
+		arch.ByteOrder.PutUint32(sb.data[off:], uint32(v))
+	case 8:
+		arch.ByteOrder.PutUint64(sb.data[off:], v)
+	}
+
+	return off + wid
+}
+
+func (sb *SymbolBuilder) AddUint16(arch *sys.Arch, v uint16) int64 {
+	return sb.AddUintXX(arch, uint64(v), 2)
+}
+
+func (sb *SymbolBuilder) AddUint32(arch *sys.Arch, v uint32) int64 {
+	return sb.AddUintXX(arch, uint64(v), 4)
+}
+
+func (sb *SymbolBuilder) AddUint64(arch *sys.Arch, v uint64) int64 {
+	return sb.AddUintXX(arch, v, 8)
+}
+
+// AddUint appends a pointer-sized value for the target architecture.
+func (sb *SymbolBuilder) AddUint(arch *sys.Arch, v uint64) int64 {
+	return sb.AddUintXX(arch, v, arch.PtrSize)
+}
+
+// SetUint* overwrite existing data at offset 'r' rather than appending.
+
+func (sb *SymbolBuilder) SetUint8(arch *sys.Arch, r int64, v uint8) int64 {
+	return sb.setUintXX(arch, r, uint64(v), 1)
+}
+
+func (sb *SymbolBuilder) SetUint16(arch *sys.Arch, r int64, v uint16) int64 {
+	return sb.setUintXX(arch, r, uint64(v), 2)
+}
+
+func (sb *SymbolBuilder) SetUint32(arch *sys.Arch, r int64, v uint32) int64 {
+	return sb.setUintXX(arch, r, uint64(v), 4)
+}
+
+func (sb *SymbolBuilder) SetUint(arch *sys.Arch, r int64, v uint64) int64 {
+	return sb.setUintXX(arch, r, v, int64(arch.PtrSize))
+}
+
+func (sb *SymbolBuilder) SetUintptr(arch *sys.Arch, r int64, v uintptr) int64 {
+	return sb.setUintXX(arch, r, uint64(v), int64(arch.PtrSize))
+}
+
+// SetAddrPlus installs a pointer-sized R_ADDR relocation targeting 'tgt'
+// (with addend 'add') at offset 'off', growing the symbol if needed;
+// returns the offset just past the relocated field.
+func (sb *SymbolBuilder) SetAddrPlus(arch *sys.Arch, off int64, tgt Sym, add int64) int64 {
+	if sb.Type() == 0 {
+		sb.SetType(sym.SDATA)
+	}
+	if off+int64(arch.PtrSize) > sb.size {
+		sb.size = off + int64(arch.PtrSize)
+		sb.Grow(sb.size)
+	}
+	r, _ := sb.AddRel(objabi.R_ADDR)
+	r.SetSym(tgt)
+	r.SetOff(int32(off))
+	r.SetSiz(uint8(arch.PtrSize))
+	r.SetAdd(add)
+	return off + int64(r.Siz())
+}
+
+func (sb *SymbolBuilder) SetAddr(arch *sys.Arch, off int64, tgt Sym) int64 {
+	return sb.SetAddrPlus(arch, off, tgt, 0)
+}
+
+// AddStringAt writes str into existing data at 'off' (no terminating NUL);
+// panics rather than growing if the write would run past the end.
+func (sb *SymbolBuilder) AddStringAt(off int64, str string) int64 {
+	strLen := int64(len(str))
+	if off+strLen > int64(len(sb.data)) {
+		panic("attempt to write past end of buffer")
+	}
+	copy(sb.data[off:off+strLen], str)
+	return off + strLen
+}
+
+// AddCStringAt adds str plus a null terminating byte.
+func (sb *SymbolBuilder) AddCStringAt(off int64, str string) int64 {
+	strLen := int64(len(str))
+	if off+strLen+1 > int64(len(sb.data)) {
+		panic("attempt to write past end of buffer")
+	}
+	copy(sb.data[off:off+strLen], str)
+	sb.data[off+strLen] = 0
+	return off + strLen + 1
+}
+
+// Addstring appends a NUL-terminated string, defaulting the symbol kind
+// to SNOPTRDATA; returns the offset of the start of the string.
+func (sb *SymbolBuilder) Addstring(str string) int64 {
+	if sb.kind == 0 {
+		sb.kind = sym.SNOPTRDATA
+	}
+	r := sb.size
+	sb.data = append(sb.data, str...)
+	sb.data = append(sb.data, 0)
+	sb.size = int64(len(sb.data))
+	return r
+}
+
+// SetBytesAt overwrites existing data at 'off' with b; panics rather than
+// growing if the write would run past the end.
+func (sb *SymbolBuilder) SetBytesAt(off int64, b []byte) int64 {
+	datLen := int64(len(b))
+	if off+datLen > int64(len(sb.data)) {
+		panic("attempt to write past end of buffer")
+	}
+	copy(sb.data[off:off+datLen], b)
+	return off + datLen
+}
+
+// addSymRef reserves 'rsize' bytes of zeroed data at the end of the symbol
+// and attaches a relocation of the given type/addend targeting 'tgt' at
+// that offset; returns the offset just past the relocated field.
+func (sb *SymbolBuilder) addSymRef(tgt Sym, add int64, typ objabi.RelocType, rsize int) int64 {
+	if sb.kind == 0 {
+		sb.kind = sym.SDATA
+	}
+	i := sb.size
+
+	sb.size += int64(rsize)
+	sb.Grow(sb.size)
+
+	r, _ := sb.AddRel(typ)
+	r.SetSym(tgt)
+	r.SetOff(int32(i))
+	r.SetSiz(uint8(rsize))
+	r.SetAdd(add)
+
+	return i + int64(rsize)
+}
+
+// Add a symbol reference (relocation) with given type, addend, and size
+// (the most generic form).
+func (sb *SymbolBuilder) AddSymRef(arch *sys.Arch, tgt Sym, add int64, typ objabi.RelocType, rsize int) int64 {
+	// NOTE(review): 'arch' is unused here; kept for signature symmetry with
+	// the other Add* helpers, presumably.
+	return sb.addSymRef(tgt, add, typ, rsize)
+}
+
+// AddAddrPlus appends a pointer-sized R_ADDR reference to tgt+add.
+func (sb *SymbolBuilder) AddAddrPlus(arch *sys.Arch, tgt Sym, add int64) int64 {
+	return sb.addSymRef(tgt, add, objabi.R_ADDR, arch.PtrSize)
+}
+
+// AddAddrPlus4 appends a 4-byte R_ADDR reference to tgt+add.
+func (sb *SymbolBuilder) AddAddrPlus4(arch *sys.Arch, tgt Sym, add int64) int64 {
+	return sb.addSymRef(tgt, add, objabi.R_ADDR, 4)
+}
+
+func (sb *SymbolBuilder) AddAddr(arch *sys.Arch, tgt Sym) int64 {
+	return sb.AddAddrPlus(arch, tgt, 0)
+}
+
+// AddPEImageRelativeAddrPlus appends a 4-byte R_PEIMAGEOFF reference.
+func (sb *SymbolBuilder) AddPEImageRelativeAddrPlus(arch *sys.Arch, tgt Sym, add int64) int64 {
+	return sb.addSymRef(tgt, add, objabi.R_PEIMAGEOFF, 4)
+}
+
+// AddPCRelPlus appends a 4-byte PC-relative (R_PCREL) reference.
+func (sb *SymbolBuilder) AddPCRelPlus(arch *sys.Arch, tgt Sym, add int64) int64 {
+	return sb.addSymRef(tgt, add, objabi.R_PCREL, 4)
+}
+
+// AddCURelativeAddrPlus appends a pointer-sized R_ADDRCUOFF reference.
+func (sb *SymbolBuilder) AddCURelativeAddrPlus(arch *sys.Arch, tgt Sym, add int64) int64 {
+	return sb.addSymRef(tgt, add, objabi.R_ADDRCUOFF, arch.PtrSize)
+}
+
+// AddSize appends a pointer-sized R_SIZE reference (addend 0).
+func (sb *SymbolBuilder) AddSize(arch *sys.Arch, tgt Sym) int64 {
+	return sb.addSymRef(tgt, 0, objabi.R_SIZE, arch.PtrSize)
+}
+
+// GenAddAddrPlusFunc returns a function to be called when capturing
+// a function symbol's address. In later stages of the link (when
+// address assignment is done) when doing internal linking and
+// targeting an executable, we can just emit the address of a function
+// directly instead of generating a relocation. Clients can call
+// this function (setting 'internalExec' based on build mode and target)
+// and then invoke the returned function in roughly the same way that
+// loader.*SymbolBuilder.AddAddrPlus would be used.
+func GenAddAddrPlusFunc(internalExec bool) func(s *SymbolBuilder, arch *sys.Arch, tgt Sym, add int64) int64 { + if internalExec { + return func(s *SymbolBuilder, arch *sys.Arch, tgt Sym, add int64) int64 { + if v := s.l.SymValue(tgt); v != 0 { + return s.AddUint(arch, uint64(v+add)) + } + return s.AddAddrPlus(arch, tgt, add) + } + } else { + return (*SymbolBuilder).AddAddrPlus + } +} + +func (sb *SymbolBuilder) MakeWritable() { + if sb.ReadOnly() { + sb.data = append([]byte(nil), sb.data...) + sb.l.SetAttrReadOnly(sb.symIdx, false) + } +} + +func (sb *SymbolBuilder) AddUleb(v uint64) { + if v < 128 { // common case: 1 byte + sb.AddUint8(uint8(v)) + return + } + for { + c := uint8(v & 0x7f) + v >>= 7 + if v != 0 { + c |= 0x80 + } + sb.AddUint8(c) + if c&0x80 == 0 { + break + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/loadmacho/ldmacho.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/loadmacho/ldmacho.go new file mode 100644 index 0000000000000000000000000000000000000000..6e783929e32b945a34928cfc5f3fcd6c1bae79e2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/loadmacho/ldmacho.go @@ -0,0 +1,806 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loadmacho implements a Mach-O file reader. +package loadmacho + +import ( + "bytes" + "cmd/internal/bio" + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "encoding/binary" + "fmt" +) + +/* +Derived from Plan 9 from User Space's src/libmach/elf.h, elf.c +https://github.com/9fans/plan9port/tree/master/src/libmach/ + + Copyright © 2004 Russ Cox. + Portions Copyright © 2008-2010 Google Inc. + Portions Copyright © 2010 The Go Authors. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+*/ + +// TODO(crawshaw): de-duplicate these symbols with cmd/link/internal/ld +const ( + MACHO_X86_64_RELOC_UNSIGNED = 0 + MACHO_X86_64_RELOC_SIGNED = 1 + MACHO_ARM64_RELOC_ADDEND = 10 +) + +type ldMachoObj struct { + f *bio.Reader + base int64 // off in f where Mach-O begins + length int64 // length of Mach-O + is64 bool + name string + e binary.ByteOrder + cputype uint + subcputype uint + filetype uint32 + flags uint32 + cmd []ldMachoCmd + ncmd uint +} + +type ldMachoCmd struct { + type_ int + off uint32 + size uint32 + seg ldMachoSeg + sym ldMachoSymtab + dsym ldMachoDysymtab +} + +type ldMachoSeg struct { + name string + vmaddr uint64 + vmsize uint64 + fileoff uint32 + filesz uint32 + maxprot uint32 + initprot uint32 + nsect uint32 + flags uint32 + sect []ldMachoSect +} + +type ldMachoSect struct { + name string + segname string + addr uint64 + size uint64 + off uint32 + align uint32 + reloff uint32 + nreloc uint32 + flags uint32 + res1 uint32 + res2 uint32 + sym loader.Sym + rel []ldMachoRel +} + +type ldMachoRel struct { + addr uint32 + symnum uint32 + pcrel uint8 + length uint8 + extrn uint8 + type_ uint8 + scattered uint8 + value uint32 +} + +type ldMachoSymtab struct { + symoff uint32 + nsym uint32 + stroff uint32 + strsize uint32 + str []byte + sym []ldMachoSym +} + +type ldMachoSym struct { + name string + type_ uint8 + sectnum uint8 + desc uint16 + kind int8 + value uint64 + sym loader.Sym +} + +type ldMachoDysymtab struct { + ilocalsym uint32 + nlocalsym uint32 + iextdefsym uint32 + nextdefsym uint32 + iundefsym uint32 + nundefsym uint32 + tocoff uint32 + ntoc uint32 + modtaboff uint32 + nmodtab uint32 + extrefsymoff uint32 + nextrefsyms uint32 + indirectsymoff uint32 + nindirectsyms uint32 + extreloff uint32 + nextrel uint32 + locreloff uint32 + nlocrel uint32 + indir []uint32 +} + +// ldMachoSym.type_ +const ( + N_EXT = 0x01 + N_TYPE = 0x1e + N_STAB = 0xe0 +) + +// ldMachoSym.desc +const ( + N_WEAK_REF = 0x40 + N_WEAK_DEF = 0x80 +) + +const ( + 
LdMachoCpuVax = 1 + LdMachoCpu68000 = 6 + LdMachoCpu386 = 7 + LdMachoCpuAmd64 = 1<<24 | 7 + LdMachoCpuMips = 8 + LdMachoCpu98000 = 10 + LdMachoCpuHppa = 11 + LdMachoCpuArm = 12 + LdMachoCpuArm64 = 1<<24 | 12 + LdMachoCpu88000 = 13 + LdMachoCpuSparc = 14 + LdMachoCpu860 = 15 + LdMachoCpuAlpha = 16 + LdMachoCpuPower = 18 + LdMachoCmdSegment = 1 + LdMachoCmdSymtab = 2 + LdMachoCmdSymseg = 3 + LdMachoCmdThread = 4 + LdMachoCmdDysymtab = 11 + LdMachoCmdSegment64 = 25 + LdMachoFileObject = 1 + LdMachoFileExecutable = 2 + LdMachoFileFvmlib = 3 + LdMachoFileCore = 4 + LdMachoFilePreload = 5 +) + +func unpackcmd(p []byte, m *ldMachoObj, c *ldMachoCmd, type_ uint, sz uint) int { + e4 := m.e.Uint32 + e8 := m.e.Uint64 + + c.type_ = int(type_) + c.size = uint32(sz) + switch type_ { + default: + return -1 + + case LdMachoCmdSegment: + if sz < 56 { + return -1 + } + c.seg.name = cstring(p[8:24]) + c.seg.vmaddr = uint64(e4(p[24:])) + c.seg.vmsize = uint64(e4(p[28:])) + c.seg.fileoff = e4(p[32:]) + c.seg.filesz = e4(p[36:]) + c.seg.maxprot = e4(p[40:]) + c.seg.initprot = e4(p[44:]) + c.seg.nsect = e4(p[48:]) + c.seg.flags = e4(p[52:]) + c.seg.sect = make([]ldMachoSect, c.seg.nsect) + if uint32(sz) < 56+c.seg.nsect*68 { + return -1 + } + p = p[56:] + var s *ldMachoSect + for i := 0; uint32(i) < c.seg.nsect; i++ { + s = &c.seg.sect[i] + s.name = cstring(p[0:16]) + s.segname = cstring(p[16:32]) + s.addr = uint64(e4(p[32:])) + s.size = uint64(e4(p[36:])) + s.off = e4(p[40:]) + s.align = e4(p[44:]) + s.reloff = e4(p[48:]) + s.nreloc = e4(p[52:]) + s.flags = e4(p[56:]) + s.res1 = e4(p[60:]) + s.res2 = e4(p[64:]) + p = p[68:] + } + + case LdMachoCmdSegment64: + if sz < 72 { + return -1 + } + c.seg.name = cstring(p[8:24]) + c.seg.vmaddr = e8(p[24:]) + c.seg.vmsize = e8(p[32:]) + c.seg.fileoff = uint32(e8(p[40:])) + c.seg.filesz = uint32(e8(p[48:])) + c.seg.maxprot = e4(p[56:]) + c.seg.initprot = e4(p[60:]) + c.seg.nsect = e4(p[64:]) + c.seg.flags = e4(p[68:]) + c.seg.sect = 
make([]ldMachoSect, c.seg.nsect) + if uint32(sz) < 72+c.seg.nsect*80 { + return -1 + } + p = p[72:] + var s *ldMachoSect + for i := 0; uint32(i) < c.seg.nsect; i++ { + s = &c.seg.sect[i] + s.name = cstring(p[0:16]) + s.segname = cstring(p[16:32]) + s.addr = e8(p[32:]) + s.size = e8(p[40:]) + s.off = e4(p[48:]) + s.align = e4(p[52:]) + s.reloff = e4(p[56:]) + s.nreloc = e4(p[60:]) + s.flags = e4(p[64:]) + s.res1 = e4(p[68:]) + s.res2 = e4(p[72:]) + + // p+76 is reserved + p = p[80:] + } + + case LdMachoCmdSymtab: + if sz < 24 { + return -1 + } + c.sym.symoff = e4(p[8:]) + c.sym.nsym = e4(p[12:]) + c.sym.stroff = e4(p[16:]) + c.sym.strsize = e4(p[20:]) + + case LdMachoCmdDysymtab: + if sz < 80 { + return -1 + } + c.dsym.ilocalsym = e4(p[8:]) + c.dsym.nlocalsym = e4(p[12:]) + c.dsym.iextdefsym = e4(p[16:]) + c.dsym.nextdefsym = e4(p[20:]) + c.dsym.iundefsym = e4(p[24:]) + c.dsym.nundefsym = e4(p[28:]) + c.dsym.tocoff = e4(p[32:]) + c.dsym.ntoc = e4(p[36:]) + c.dsym.modtaboff = e4(p[40:]) + c.dsym.nmodtab = e4(p[44:]) + c.dsym.extrefsymoff = e4(p[48:]) + c.dsym.nextrefsyms = e4(p[52:]) + c.dsym.indirectsymoff = e4(p[56:]) + c.dsym.nindirectsyms = e4(p[60:]) + c.dsym.extreloff = e4(p[64:]) + c.dsym.nextrel = e4(p[68:]) + c.dsym.locreloff = e4(p[72:]) + c.dsym.nlocrel = e4(p[76:]) + } + + return 0 +} + +func macholoadrel(m *ldMachoObj, sect *ldMachoSect) int { + if sect.rel != nil || sect.nreloc == 0 { + return 0 + } + rel := make([]ldMachoRel, sect.nreloc) + m.f.MustSeek(m.base+int64(sect.reloff), 0) + buf, _, err := m.f.Slice(uint64(sect.nreloc * 8)) + if err != nil { + return -1 + } + for i := uint32(0); i < sect.nreloc; i++ { + r := &rel[i] + p := buf[i*8:] + r.addr = m.e.Uint32(p) + + // TODO(rsc): Wrong interpretation for big-endian bitfields? 
+ if r.addr&0x80000000 != 0 { + // scatterbrained relocation + r.scattered = 1 + + v := r.addr >> 24 + r.addr &= 0xFFFFFF + r.type_ = uint8(v & 0xF) + v >>= 4 + r.length = 1 << (v & 3) + v >>= 2 + r.pcrel = uint8(v & 1) + r.value = m.e.Uint32(p[4:]) + } else { + v := m.e.Uint32(p[4:]) + r.symnum = v & 0xFFFFFF + v >>= 24 + r.pcrel = uint8(v & 1) + v >>= 1 + r.length = 1 << (v & 3) + v >>= 2 + r.extrn = uint8(v & 1) + v >>= 1 + r.type_ = uint8(v) + } + } + + sect.rel = rel + return 0 +} + +func macholoaddsym(m *ldMachoObj, d *ldMachoDysymtab) int { + n := int(d.nindirectsyms) + m.f.MustSeek(m.base+int64(d.indirectsymoff), 0) + p, _, err := m.f.Slice(uint64(n * 4)) + if err != nil { + return -1 + } + + d.indir = make([]uint32, n) + for i := 0; i < n; i++ { + d.indir[i] = m.e.Uint32(p[4*i:]) + } + return 0 +} + +func macholoadsym(m *ldMachoObj, symtab *ldMachoSymtab) int { + if symtab.sym != nil { + return 0 + } + + m.f.MustSeek(m.base+int64(symtab.stroff), 0) + strbuf, _, err := m.f.Slice(uint64(symtab.strsize)) + if err != nil { + return -1 + } + + symsize := 12 + if m.is64 { + symsize = 16 + } + n := int(symtab.nsym * uint32(symsize)) + m.f.MustSeek(m.base+int64(symtab.symoff), 0) + symbuf, _, err := m.f.Slice(uint64(n)) + if err != nil { + return -1 + } + sym := make([]ldMachoSym, symtab.nsym) + p := symbuf + for i := uint32(0); i < symtab.nsym; i++ { + s := &sym[i] + v := m.e.Uint32(p) + if v >= symtab.strsize { + return -1 + } + s.name = cstring(strbuf[v:]) + s.type_ = p[4] + s.sectnum = p[5] + s.desc = m.e.Uint16(p[6:]) + if m.is64 { + s.value = m.e.Uint64(p[8:]) + } else { + s.value = uint64(m.e.Uint32(p[8:])) + } + p = p[symsize:] + } + + symtab.str = strbuf + symtab.sym = sym + return 0 +} + +// Load the Mach-O file pn from f. +// Symbols are written into syms, and a slice of the text symbols is returned. 
+func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, pkg string, length int64, pn string) (textp []loader.Sym, err error) { + errorf := func(str string, args ...interface{}) ([]loader.Sym, error) { + return nil, fmt.Errorf("loadmacho: %v: %v", pn, fmt.Sprintf(str, args...)) + } + + base := f.Offset() + + hdr, _, err := f.Slice(7 * 4) + if err != nil { + return errorf("reading hdr: %v", err) + } + + var e binary.ByteOrder + if binary.BigEndian.Uint32(hdr[:])&^1 == 0xFEEDFACE { + e = binary.BigEndian + } else if binary.LittleEndian.Uint32(hdr[:])&^1 == 0xFEEDFACE { + e = binary.LittleEndian + } else { + return errorf("bad magic - not mach-o file") + } + + is64 := e.Uint32(hdr[:]) == 0xFEEDFACF + ncmd := e.Uint32(hdr[4*4:]) + cmdsz := e.Uint32(hdr[5*4:]) + if ncmd > 0x10000 || cmdsz >= 0x01000000 { + return errorf("implausible mach-o header ncmd=%d cmdsz=%d", ncmd, cmdsz) + } + + if is64 { + f.MustSeek(4, 1) // skip reserved word in header + } + + m := &ldMachoObj{ + f: f, + e: e, + cputype: uint(e.Uint32(hdr[1*4:])), + subcputype: uint(e.Uint32(hdr[2*4:])), + filetype: e.Uint32(hdr[3*4:]), + ncmd: uint(ncmd), + flags: e.Uint32(hdr[6*4:]), + is64: is64, + base: base, + length: length, + name: pn, + } + + switch arch.Family { + default: + return errorf("mach-o %s unimplemented", arch.Name) + case sys.AMD64: + if e != binary.LittleEndian || m.cputype != LdMachoCpuAmd64 { + return errorf("mach-o object but not amd64") + } + case sys.ARM64: + if e != binary.LittleEndian || m.cputype != LdMachoCpuArm64 { + return errorf("mach-o object but not arm64") + } + } + + m.cmd = make([]ldMachoCmd, ncmd) + cmdp, _, err := f.Slice(uint64(cmdsz)) + if err != nil { + return errorf("reading cmds: %v", err) + } + + // read and parse load commands + var c *ldMachoCmd + + var symtab *ldMachoSymtab + var dsymtab *ldMachoDysymtab + + off := uint32(len(hdr)) + for i := uint32(0); i < ncmd; i++ { + ty := e.Uint32(cmdp) + sz := e.Uint32(cmdp[4:]) + m.cmd[i].off = off 
+ unpackcmd(cmdp, m, &m.cmd[i], uint(ty), uint(sz)) + cmdp = cmdp[sz:] + off += sz + if ty == LdMachoCmdSymtab { + if symtab != nil { + return errorf("multiple symbol tables") + } + + symtab = &m.cmd[i].sym + macholoadsym(m, symtab) + } + + if ty == LdMachoCmdDysymtab { + dsymtab = &m.cmd[i].dsym + macholoaddsym(m, dsymtab) + } + + if (is64 && ty == LdMachoCmdSegment64) || (!is64 && ty == LdMachoCmdSegment) { + if c != nil { + return errorf("multiple load commands") + } + + c = &m.cmd[i] + } + } + + // load text and data segments into memory. + // they are not as small as the load commands, but we'll need + // the memory anyway for the symbol images, so we might + // as well use one large chunk. + if c == nil { + return errorf("no load command") + } + + if symtab == nil { + // our work is done here - no symbols means nothing can refer to this file + return + } + + if int64(c.seg.fileoff+c.seg.filesz) >= length { + return errorf("load segment out of range") + } + + f.MustSeek(m.base+int64(c.seg.fileoff), 0) + dat, readOnly, err := f.Slice(uint64(c.seg.filesz)) + if err != nil { + return errorf("cannot load object data: %v", err) + } + + for i := uint32(0); i < c.seg.nsect; i++ { + sect := &c.seg.sect[i] + if sect.segname != "__TEXT" && sect.segname != "__DATA" { + continue + } + if sect.name == "__eh_frame" { + continue + } + name := fmt.Sprintf("%s(%s/%s)", pkg, sect.segname, sect.name) + s := l.LookupOrCreateSym(name, localSymVersion) + bld := l.MakeSymbolUpdater(s) + if bld.Type() != 0 { + return errorf("duplicate %s/%s", sect.segname, sect.name) + } + + if sect.flags&0xff == 1 { // S_ZEROFILL + bld.SetData(make([]byte, sect.size)) + } else { + bld.SetReadOnly(readOnly) + bld.SetData(dat[sect.addr-c.seg.vmaddr:][:sect.size]) + } + bld.SetSize(int64(len(bld.Data()))) + + if sect.segname == "__TEXT" { + if sect.name == "__text" { + bld.SetType(sym.STEXT) + } else { + bld.SetType(sym.SRODATA) + } + } else { + if sect.name == "__bss" { + bld.SetType(sym.SNOPTRBSS) + 
bld.SetData(nil) + } else { + bld.SetType(sym.SNOPTRDATA) + } + } + + sect.sym = s + } + + // enter sub-symbols into symbol table. + // have to guess sizes from next symbol. + for i := uint32(0); i < symtab.nsym; i++ { + machsym := &symtab.sym[i] + if machsym.type_&N_STAB != 0 { + continue + } + + // TODO: check sym->type against outer->type. + name := machsym.name + + if name[0] == '_' && name[1] != '\x00' { + name = name[1:] + } + v := 0 + if machsym.type_&N_EXT == 0 { + v = localSymVersion + } + s := l.LookupOrCreateCgoExport(name, v) + if machsym.type_&N_EXT == 0 { + l.SetAttrDuplicateOK(s, true) + } + if machsym.desc&(N_WEAK_REF|N_WEAK_DEF) != 0 { + l.SetAttrDuplicateOK(s, true) + } + machsym.sym = s + if machsym.sectnum == 0 { // undefined + continue + } + if uint32(machsym.sectnum) > c.seg.nsect { + return errorf("reference to invalid section %d", machsym.sectnum) + } + + sect := &c.seg.sect[machsym.sectnum-1] + bld := l.MakeSymbolUpdater(s) + outer := sect.sym + if outer == 0 { + continue // ignore reference to invalid section + } + + if osym := l.OuterSym(s); osym != 0 { + if l.AttrDuplicateOK(s) { + continue + } + return errorf("duplicate symbol reference: %s in both %s and %s", l.SymName(s), l.SymName(osym), l.SymName(sect.sym)) + } + + bld.SetType(l.SymType(outer)) + if l.SymSize(outer) != 0 { // skip empty section (0-sized symbol) + l.AddInteriorSym(outer, s) + } + + bld.SetValue(int64(machsym.value - sect.addr)) + if !l.AttrCgoExportDynamic(s) { + bld.SetDynimplib("") // satisfy dynimport + } + if l.SymType(outer) == sym.STEXT { + if bld.External() && !bld.DuplicateOK() { + return errorf("%v: duplicate symbol definition", s) + } + bld.SetExternal(true) + } + } + + // Sort outer lists by address, adding to textp. + // This keeps textp in increasing address order. 
+	for i := 0; uint32(i) < c.seg.nsect; i++ {
+		sect := &c.seg.sect[i]
+		s := sect.sym
+		if s == 0 {
+			continue
+		}
+		bld := l.MakeSymbolUpdater(s)
+		if bld.SubSym() != 0 {
+
+			bld.SortSub()
+
+			// assign sizes, now that we know symbols in sorted order.
+			for s1 := bld.Sub(); s1 != 0; s1 = l.SubSym(s1) {
+				s1Bld := l.MakeSymbolUpdater(s1)
+				if sub := l.SubSym(s1); sub != 0 {
+					s1Bld.SetSize(l.SymValue(sub) - l.SymValue(s1))
+				} else {
+					dlen := int64(len(l.Data(s)))
+					s1Bld.SetSize(l.SymValue(s) + dlen - l.SymValue(s1))
+				}
+			}
+		}
+
+		if bld.Type() == sym.STEXT {
+			if bld.OnList() {
+				return errorf("symbol %s listed multiple times", bld.Name())
+			}
+			bld.SetOnList(true)
+			textp = append(textp, s)
+			for s1 := bld.Sub(); s1 != 0; s1 = l.SubSym(s1) {
+				if l.AttrOnList(s1) {
+					return errorf("symbol %s listed multiple times", l.SymName(s1))
+				}
+				l.SetAttrOnList(s1, true)
+				textp = append(textp, s1)
+			}
+		}
+	}
+
+	// load relocations
+	for i := 0; uint32(i) < c.seg.nsect; i++ {
+		sect := &c.seg.sect[i]
+		s := sect.sym
+		if s == 0 {
+			continue
+		}
+		macholoadrel(m, sect)
+		if sect.rel == nil {
+			continue
+		}
+
+		sb := l.MakeSymbolUpdater(sect.sym)
+		var rAdd int64
+		for j := uint32(0); j < sect.nreloc; j++ {
+			var (
+				rOff  int32
+				rSize uint8
+				rType objabi.RelocType
+				rSym  loader.Sym
+			)
+			rel := &sect.rel[j]
+			if rel.scattered != 0 {
+				// mach-o only uses scattered relocation on 32-bit platforms,
+				// which are no longer supported.
+				return errorf("%v: unexpected scattered relocation", s)
+			}
+
+			if arch.Family == sys.ARM64 && rel.type_ == MACHO_ARM64_RELOC_ADDEND {
+				// Two relocations. This addend will be applied to the next one.
+				rAdd = int64(rel.symnum) << 40 >> 40 // convert unsigned 24-bit to signed 24-bit
+				continue
+			}
+
+			rSize = rel.length
+			rType = objabi.MachoRelocOffset + (objabi.RelocType(rel.type_) << 1) + objabi.RelocType(rel.pcrel)
+			rOff = int32(rel.addr)
+
+			// Handle X86_64_RELOC_SIGNED referencing a section (rel.extrn == 0).
+ p := l.Data(s) + if arch.Family == sys.AMD64 { + if rel.extrn == 0 && rel.type_ == MACHO_X86_64_RELOC_SIGNED { + // Calculate the addend as the offset into the section. + // + // The rip-relative offset stored in the object file is encoded + // as follows: + // + // movsd 0x00000360(%rip),%xmm0 + // + // To get the absolute address of the value this rip-relative address is pointing + // to, we must add the address of the next instruction to it. This is done by + // taking the address of the relocation and adding 4 to it (since the rip-relative + // offset can at most be 32 bits long). To calculate the offset into the section the + // relocation is referencing, we subtract the vaddr of the start of the referenced + // section found in the original object file. + // + // [For future reference, see Darwin's /usr/include/mach-o/x86_64/reloc.h] + secaddr := c.seg.sect[rel.symnum-1].addr + rAdd = int64(uint64(int64(int32(e.Uint32(p[rOff:])))+int64(rOff)+4) - secaddr) + } else { + rAdd = int64(int32(e.Uint32(p[rOff:]))) + } + } + + // An unsigned internal relocation has a value offset + // by the section address. 
+ if arch.Family == sys.AMD64 && rel.extrn == 0 && rel.type_ == MACHO_X86_64_RELOC_UNSIGNED { + secaddr := c.seg.sect[rel.symnum-1].addr + rAdd -= int64(secaddr) + } + + if rel.extrn == 0 { + if rel.symnum < 1 || rel.symnum > c.seg.nsect { + return errorf("invalid relocation: section reference out of range %d vs %d", rel.symnum, c.seg.nsect) + } + + rSym = c.seg.sect[rel.symnum-1].sym + if rSym == 0 { + return errorf("invalid relocation: %s", c.seg.sect[rel.symnum-1].name) + } + } else { + if rel.symnum >= symtab.nsym { + return errorf("invalid relocation: symbol reference out of range") + } + + rSym = symtab.sym[rel.symnum].sym + } + + r, _ := sb.AddRel(rType) + r.SetOff(rOff) + r.SetSiz(rSize) + r.SetSym(rSym) + r.SetAdd(rAdd) + + rAdd = 0 // clear rAdd for next iteration + } + + sb.SortRelocs() + } + + return textp, nil +} + +func cstring(x []byte) string { + i := bytes.IndexByte(x, '\x00') + if i >= 0 { + x = x[:i] + } + return string(x) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/loadpe/ldpe.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/loadpe/ldpe.go new file mode 100644 index 0000000000000000000000000000000000000000..1ba6debb4e67b874b267d5743d0c33b4ceb08643 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/loadpe/ldpe.go @@ -0,0 +1,822 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loadpe implements a PE/COFF file reader. 
+package loadpe + +import ( + "bytes" + "cmd/internal/bio" + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/pe" + "encoding/binary" + "errors" + "fmt" + "io" + "strings" +) + +const ( + IMAGE_SYM_UNDEFINED = 0 + IMAGE_SYM_ABSOLUTE = -1 + IMAGE_SYM_DEBUG = -2 + IMAGE_SYM_TYPE_NULL = 0 + IMAGE_SYM_TYPE_VOID = 1 + IMAGE_SYM_TYPE_CHAR = 2 + IMAGE_SYM_TYPE_SHORT = 3 + IMAGE_SYM_TYPE_INT = 4 + IMAGE_SYM_TYPE_LONG = 5 + IMAGE_SYM_TYPE_FLOAT = 6 + IMAGE_SYM_TYPE_DOUBLE = 7 + IMAGE_SYM_TYPE_STRUCT = 8 + IMAGE_SYM_TYPE_UNION = 9 + IMAGE_SYM_TYPE_ENUM = 10 + IMAGE_SYM_TYPE_MOE = 11 + IMAGE_SYM_TYPE_BYTE = 12 + IMAGE_SYM_TYPE_WORD = 13 + IMAGE_SYM_TYPE_UINT = 14 + IMAGE_SYM_TYPE_DWORD = 15 + IMAGE_SYM_TYPE_PCODE = 32768 + IMAGE_SYM_DTYPE_NULL = 0 + IMAGE_SYM_DTYPE_POINTER = 1 + IMAGE_SYM_DTYPE_FUNCTION = 2 + IMAGE_SYM_DTYPE_ARRAY = 3 + IMAGE_SYM_CLASS_END_OF_FUNCTION = -1 + IMAGE_SYM_CLASS_NULL = 0 + IMAGE_SYM_CLASS_AUTOMATIC = 1 + IMAGE_SYM_CLASS_EXTERNAL = 2 + IMAGE_SYM_CLASS_STATIC = 3 + IMAGE_SYM_CLASS_REGISTER = 4 + IMAGE_SYM_CLASS_EXTERNAL_DEF = 5 + IMAGE_SYM_CLASS_LABEL = 6 + IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7 + IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8 + IMAGE_SYM_CLASS_ARGUMENT = 9 + IMAGE_SYM_CLASS_STRUCT_TAG = 10 + IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11 + IMAGE_SYM_CLASS_UNION_TAG = 12 + IMAGE_SYM_CLASS_TYPE_DEFINITION = 13 + IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14 + IMAGE_SYM_CLASS_ENUM_TAG = 15 + IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16 + IMAGE_SYM_CLASS_REGISTER_PARAM = 17 + IMAGE_SYM_CLASS_BIT_FIELD = 18 + IMAGE_SYM_CLASS_FAR_EXTERNAL = 68 /* Not in PECOFF v8 spec */ + IMAGE_SYM_CLASS_BLOCK = 100 + IMAGE_SYM_CLASS_FUNCTION = 101 + IMAGE_SYM_CLASS_END_OF_STRUCT = 102 + IMAGE_SYM_CLASS_FILE = 103 + IMAGE_SYM_CLASS_SECTION = 104 + IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105 + IMAGE_SYM_CLASS_CLR_TOKEN = 107 + IMAGE_REL_I386_ABSOLUTE = 0x0000 + IMAGE_REL_I386_DIR16 = 0x0001 + IMAGE_REL_I386_REL16 = 0x0002 + IMAGE_REL_I386_DIR32 
= 0x0006 + IMAGE_REL_I386_DIR32NB = 0x0007 + IMAGE_REL_I386_SEG12 = 0x0009 + IMAGE_REL_I386_SECTION = 0x000A + IMAGE_REL_I386_SECREL = 0x000B + IMAGE_REL_I386_TOKEN = 0x000C + IMAGE_REL_I386_SECREL7 = 0x000D + IMAGE_REL_I386_REL32 = 0x0014 + IMAGE_REL_AMD64_ABSOLUTE = 0x0000 + IMAGE_REL_AMD64_ADDR64 = 0x0001 + IMAGE_REL_AMD64_ADDR32 = 0x0002 + IMAGE_REL_AMD64_ADDR32NB = 0x0003 + IMAGE_REL_AMD64_REL32 = 0x0004 + IMAGE_REL_AMD64_REL32_1 = 0x0005 + IMAGE_REL_AMD64_REL32_2 = 0x0006 + IMAGE_REL_AMD64_REL32_3 = 0x0007 + IMAGE_REL_AMD64_REL32_4 = 0x0008 + IMAGE_REL_AMD64_REL32_5 = 0x0009 + IMAGE_REL_AMD64_SECTION = 0x000A + IMAGE_REL_AMD64_SECREL = 0x000B + IMAGE_REL_AMD64_SECREL7 = 0x000C + IMAGE_REL_AMD64_TOKEN = 0x000D + IMAGE_REL_AMD64_SREL32 = 0x000E + IMAGE_REL_AMD64_PAIR = 0x000F + IMAGE_REL_AMD64_SSPAN32 = 0x0010 + IMAGE_REL_ARM_ABSOLUTE = 0x0000 + IMAGE_REL_ARM_ADDR32 = 0x0001 + IMAGE_REL_ARM_ADDR32NB = 0x0002 + IMAGE_REL_ARM_BRANCH24 = 0x0003 + IMAGE_REL_ARM_BRANCH11 = 0x0004 + IMAGE_REL_ARM_SECTION = 0x000E + IMAGE_REL_ARM_SECREL = 0x000F + IMAGE_REL_ARM_MOV32 = 0x0010 + IMAGE_REL_THUMB_MOV32 = 0x0011 + IMAGE_REL_THUMB_BRANCH20 = 0x0012 + IMAGE_REL_THUMB_BRANCH24 = 0x0014 + IMAGE_REL_THUMB_BLX23 = 0x0015 + IMAGE_REL_ARM_PAIR = 0x0016 + IMAGE_REL_ARM64_ABSOLUTE = 0x0000 + IMAGE_REL_ARM64_ADDR32 = 0x0001 + IMAGE_REL_ARM64_ADDR32NB = 0x0002 + IMAGE_REL_ARM64_BRANCH26 = 0x0003 + IMAGE_REL_ARM64_PAGEBASE_REL21 = 0x0004 + IMAGE_REL_ARM64_REL21 = 0x0005 + IMAGE_REL_ARM64_PAGEOFFSET_12A = 0x0006 + IMAGE_REL_ARM64_PAGEOFFSET_12L = 0x0007 + IMAGE_REL_ARM64_SECREL = 0x0008 + IMAGE_REL_ARM64_SECREL_LOW12A = 0x0009 + IMAGE_REL_ARM64_SECREL_HIGH12A = 0x000A + IMAGE_REL_ARM64_SECREL_LOW12L = 0x000B + IMAGE_REL_ARM64_TOKEN = 0x000C + IMAGE_REL_ARM64_SECTION = 0x000D + IMAGE_REL_ARM64_ADDR64 = 0x000E + IMAGE_REL_ARM64_BRANCH19 = 0x000F + IMAGE_REL_ARM64_BRANCH14 = 0x0010 + IMAGE_REL_ARM64_REL32 = 0x0011 +) + +const ( + // When stored into the PLT value for a symbol, this token 
tells + // windynrelocsym to redirect direct references to this symbol to a stub + // that loads from the corresponding import symbol and then does + // a jump to the loaded value. + CreateImportStubPltToken = -2 + + // When stored into the GOT value for an import symbol __imp_X this + // token tells windynrelocsym to redirect references to the + // underlying DYNIMPORT symbol X. + RedirectToDynImportGotToken = -2 +) + +// TODO(brainman): maybe just add ReadAt method to bio.Reader instead of creating peBiobuf + +// peBiobuf makes bio.Reader look like io.ReaderAt. +type peBiobuf bio.Reader + +func (f *peBiobuf) ReadAt(p []byte, off int64) (int, error) { + ret := ((*bio.Reader)(f)).MustSeek(off, 0) + if ret < 0 { + return 0, errors.New("fail to seek") + } + n, err := f.Read(p) + if err != nil { + return 0, err + } + return n, nil +} + +// makeUpdater creates a loader.SymbolBuilder if one hasn't been created previously. +// We use this to lazily make SymbolBuilders as we don't always need a builder, and creating them for all symbols might be an error. +func makeUpdater(l *loader.Loader, bld *loader.SymbolBuilder, s loader.Sym) *loader.SymbolBuilder { + if bld != nil { + return bld + } + bld = l.MakeSymbolUpdater(s) + return bld +} + +// peImportSymsState tracks the set of DLL import symbols we've seen +// while reading host objects. We create a singleton instance of this +// type, which will persist across multiple host objects. +type peImportSymsState struct { + + // Text and non-text sections read in by the host object loader. + secSyms []loader.Sym + + // Loader and arch, for use in postprocessing. 
+ l *loader.Loader + arch *sys.Arch +} + +var importSymsState *peImportSymsState + +func createImportSymsState(l *loader.Loader, arch *sys.Arch) { + if importSymsState != nil { + return + } + importSymsState = &peImportSymsState{ + l: l, + arch: arch, + } +} + +// peLoaderState holds various bits of useful state information needed +// while loading a single PE object file. +type peLoaderState struct { + l *loader.Loader + arch *sys.Arch + f *pe.File + pn string + sectsyms map[*pe.Section]loader.Sym + comdats map[uint16]int64 // key is section index, val is size + sectdata map[*pe.Section][]byte + localSymVersion int +} + +// comdatDefinitions records the names of symbols for which we've +// previously seen a definition in COMDAT. Key is symbol name, value +// is symbol size (or -1 if we're using the "any" strategy). +var comdatDefinitions map[string]int64 + +// Symbols contains the symbols that can be loaded from a PE file. +type Symbols struct { + Textp []loader.Sym // text symbols + Resources []loader.Sym // .rsrc section or set of .rsrc$xx sections + PData loader.Sym + XData loader.Sym +} + +// Load loads the PE file pn from input. +// Symbols from the object file are created via the loader 'l'. +func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Reader, pkg string, length int64, pn string) (*Symbols, error) { + state := &peLoaderState{ + l: l, + arch: arch, + sectsyms: make(map[*pe.Section]loader.Sym), + sectdata: make(map[*pe.Section][]byte), + localSymVersion: localSymVersion, + pn: pn, + } + createImportSymsState(state.l, state.arch) + if comdatDefinitions == nil { + comdatDefinitions = make(map[string]int64) + } + + // Some input files are archives containing multiple of + // object files, and pe.NewFile seeks to the start of + // input file and get confused. Create section reader + // to stop pe.NewFile looking before current position. 
+ sr := io.NewSectionReader((*peBiobuf)(input), input.Offset(), 1<<63-1) + + // TODO: replace pe.NewFile with pe.Load (grep for "add Load function" in debug/pe for details) + f, err := pe.NewFile(sr) + if err != nil { + return nil, err + } + defer f.Close() + state.f = f + + var ls Symbols + + // TODO return error if found .cormeta + + // create symbols for mapped sections + for _, sect := range f.Sections { + if sect.Characteristics&pe.IMAGE_SCN_MEM_DISCARDABLE != 0 { + continue + } + + if sect.Characteristics&(pe.IMAGE_SCN_CNT_CODE|pe.IMAGE_SCN_CNT_INITIALIZED_DATA|pe.IMAGE_SCN_CNT_UNINITIALIZED_DATA) == 0 { + // This has been seen for .idata sections, which we + // want to ignore. See issues 5106 and 5273. + continue + } + + name := fmt.Sprintf("%s(%s)", pkg, sect.Name) + s := state.l.LookupOrCreateCgoExport(name, localSymVersion) + bld := l.MakeSymbolUpdater(s) + + switch sect.Characteristics & (pe.IMAGE_SCN_CNT_UNINITIALIZED_DATA | pe.IMAGE_SCN_CNT_INITIALIZED_DATA | pe.IMAGE_SCN_MEM_READ | pe.IMAGE_SCN_MEM_WRITE | pe.IMAGE_SCN_CNT_CODE | pe.IMAGE_SCN_MEM_EXECUTE) { + case pe.IMAGE_SCN_CNT_INITIALIZED_DATA | pe.IMAGE_SCN_MEM_READ: //.rdata + if issehsect(arch, sect) { + bld.SetType(sym.SSEHSECT) + bld.SetAlign(4) + } else { + bld.SetType(sym.SRODATA) + } + + case pe.IMAGE_SCN_CNT_UNINITIALIZED_DATA | pe.IMAGE_SCN_MEM_READ | pe.IMAGE_SCN_MEM_WRITE: //.bss + bld.SetType(sym.SNOPTRBSS) + + case pe.IMAGE_SCN_CNT_INITIALIZED_DATA | pe.IMAGE_SCN_MEM_READ | pe.IMAGE_SCN_MEM_WRITE: //.data + bld.SetType(sym.SNOPTRDATA) + + case pe.IMAGE_SCN_CNT_CODE | pe.IMAGE_SCN_MEM_EXECUTE | pe.IMAGE_SCN_MEM_READ: //.text + bld.SetType(sym.STEXT) + + default: + return nil, fmt.Errorf("unexpected flags %#06x for PE section %s", sect.Characteristics, sect.Name) + } + + if bld.Type() != sym.SNOPTRBSS { + data, err := sect.Data() + if err != nil { + return nil, err + } + state.sectdata[sect] = data + bld.SetData(data) + } + bld.SetSize(int64(sect.Size)) + state.sectsyms[sect] = s + if 
sect.Name == ".rsrc" || strings.HasPrefix(sect.Name, ".rsrc$") { + ls.Resources = append(ls.Resources, s) + } else if bld.Type() == sym.SSEHSECT { + if sect.Name == ".pdata" { + ls.PData = s + } else if sect.Name == ".xdata" { + ls.XData = s + } + } + } + + // Make a prepass over the symbols to collect info about COMDAT symbols. + if err := state.preprocessSymbols(); err != nil { + return nil, err + } + + // load relocations + for _, rsect := range f.Sections { + if _, found := state.sectsyms[rsect]; !found { + continue + } + if rsect.NumberOfRelocations == 0 { + continue + } + if rsect.Characteristics&pe.IMAGE_SCN_MEM_DISCARDABLE != 0 { + continue + } + if rsect.Characteristics&(pe.IMAGE_SCN_CNT_CODE|pe.IMAGE_SCN_CNT_INITIALIZED_DATA|pe.IMAGE_SCN_CNT_UNINITIALIZED_DATA) == 0 { + // This has been seen for .idata sections, which we + // want to ignore. See issues 5106 and 5273. + continue + } + + splitResources := strings.HasPrefix(rsect.Name, ".rsrc$") + issehsect := issehsect(arch, rsect) + sb := l.MakeSymbolUpdater(state.sectsyms[rsect]) + for j, r := range rsect.Relocs { + if int(r.SymbolTableIndex) >= len(f.COFFSymbols) { + return nil, fmt.Errorf("relocation number %d symbol index idx=%d cannot be large then number of symbols %d", j, r.SymbolTableIndex, len(f.COFFSymbols)) + } + pesym := &f.COFFSymbols[r.SymbolTableIndex] + _, gosym, err := state.readpesym(pesym) + if err != nil { + return nil, err + } + if gosym == 0 { + name, err := pesym.FullName(f.StringTable) + if err != nil { + name = string(pesym.Name[:]) + } + return nil, fmt.Errorf("reloc of invalid sym %s idx=%d type=%d", name, r.SymbolTableIndex, pesym.Type) + } + + rSym := gosym + rSize := uint8(4) + rOff := int32(r.VirtualAddress) + var rAdd int64 + var rType objabi.RelocType + switch arch.Family { + default: + return nil, fmt.Errorf("%s: unsupported arch %v", pn, arch.Family) + case sys.I386, sys.AMD64: + switch r.Type { + default: + return nil, fmt.Errorf("%s: %v: unknown relocation type %v", pn, 
state.sectsyms[rsect], r.Type) + + case IMAGE_REL_I386_REL32, IMAGE_REL_AMD64_REL32, + IMAGE_REL_AMD64_ADDR32, // R_X86_64_PC32 + IMAGE_REL_AMD64_ADDR32NB: + if r.Type == IMAGE_REL_AMD64_ADDR32NB { + rType = objabi.R_PEIMAGEOFF + } else { + rType = objabi.R_PCREL + } + + rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:]))) + + case IMAGE_REL_I386_DIR32NB, IMAGE_REL_I386_DIR32: + if r.Type == IMAGE_REL_I386_DIR32NB { + rType = objabi.R_PEIMAGEOFF + } else { + rType = objabi.R_ADDR + } + + // load addend from image + rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:]))) + + case IMAGE_REL_AMD64_ADDR64: // R_X86_64_64 + rSize = 8 + + rType = objabi.R_ADDR + + // load addend from image + rAdd = int64(binary.LittleEndian.Uint64(state.sectdata[rsect][rOff:])) + } + + case sys.ARM: + switch r.Type { + default: + return nil, fmt.Errorf("%s: %v: unknown ARM relocation type %v", pn, state.sectsyms[rsect], r.Type) + + case IMAGE_REL_ARM_SECREL: + rType = objabi.R_PCREL + + rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:]))) + + case IMAGE_REL_ARM_ADDR32, IMAGE_REL_ARM_ADDR32NB: + if r.Type == IMAGE_REL_ARM_ADDR32NB { + rType = objabi.R_PEIMAGEOFF + } else { + rType = objabi.R_ADDR + } + + rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:]))) + + case IMAGE_REL_ARM_BRANCH24: + rType = objabi.R_CALLARM + + rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:]))) + } + + case sys.ARM64: + switch r.Type { + default: + return nil, fmt.Errorf("%s: %v: unknown ARM64 relocation type %v", pn, state.sectsyms[rsect], r.Type) + + case IMAGE_REL_ARM64_ADDR32, IMAGE_REL_ARM64_ADDR32NB: + if r.Type == IMAGE_REL_ARM64_ADDR32NB { + rType = objabi.R_PEIMAGEOFF + } else { + rType = objabi.R_ADDR + } + + rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:]))) + } + } + + // ld -r could generate multiple section symbols for the + // same section but with 
different values, we have to take + // that into account, or in the case of split resources, + // the section and its symbols are split into two sections. + if issect(pesym) || splitResources { + rAdd += int64(pesym.Value) + } + if issehsect { + // .pdata and .xdata sections can contain records + // associated to functions that won't be used in + // the final binary, in which case the relocation + // target symbol won't be reachable. + rType |= objabi.R_WEAK + } + + rel, _ := sb.AddRel(rType) + rel.SetOff(rOff) + rel.SetSiz(rSize) + rel.SetSym(rSym) + rel.SetAdd(rAdd) + + } + + sb.SortRelocs() + } + + // enter sub-symbols into symbol table. + for i, numaux := 0, 0; i < len(f.COFFSymbols); i += numaux + 1 { + pesym := &f.COFFSymbols[i] + + numaux = int(pesym.NumberOfAuxSymbols) + + name, err := pesym.FullName(f.StringTable) + if err != nil { + return nil, err + } + if name == "" { + continue + } + if issect(pesym) { + continue + } + if int(pesym.SectionNumber) > len(f.Sections) { + continue + } + if pesym.SectionNumber == IMAGE_SYM_DEBUG { + continue + } + if pesym.SectionNumber == IMAGE_SYM_ABSOLUTE && bytes.Equal(pesym.Name[:], []byte("@feat.00")) { + // The PE documentation says that, on x86 platforms, the absolute symbol named @feat.00 + // is used to indicate that the COFF object supports SEH. + // Go doesn't support SEH on windows/386, so we can ignore this symbol. 
+ // See https://learn.microsoft.com/en-us/windows/win32/debug/pe-format#the-sxdata-section + continue + } + var sect *pe.Section + if pesym.SectionNumber > 0 { + sect = f.Sections[pesym.SectionNumber-1] + if _, found := state.sectsyms[sect]; !found { + continue + } + } + + bld, s, err := state.readpesym(pesym) + if err != nil { + return nil, err + } + + if pesym.SectionNumber == 0 { // extern + if l.SymType(s) == sym.SXREF && pesym.Value > 0 { // global data + bld = makeUpdater(l, bld, s) + bld.SetType(sym.SNOPTRDATA) + bld.SetSize(int64(pesym.Value)) + } + + continue + } else if pesym.SectionNumber > 0 && int(pesym.SectionNumber) <= len(f.Sections) { + sect = f.Sections[pesym.SectionNumber-1] + if _, found := state.sectsyms[sect]; !found { + return nil, fmt.Errorf("%s: %v: missing sect.sym", pn, s) + } + } else { + return nil, fmt.Errorf("%s: %v: sectnum < 0!", pn, s) + } + + if sect == nil { + return nil, nil + } + + // Check for COMDAT symbol. + if sz, ok1 := state.comdats[uint16(pesym.SectionNumber-1)]; ok1 { + if psz, ok2 := comdatDefinitions[l.SymName(s)]; ok2 { + if sz == psz { + // OK to discard, we've seen an instance + // already. + continue + } + } + } + if l.OuterSym(s) != 0 { + if l.AttrDuplicateOK(s) { + continue + } + outerName := l.SymName(l.OuterSym(s)) + sectName := l.SymName(state.sectsyms[sect]) + return nil, fmt.Errorf("%s: duplicate symbol reference: %s in both %s and %s", pn, l.SymName(s), outerName, sectName) + } + + bld = makeUpdater(l, bld, s) + sectsym := state.sectsyms[sect] + bld.SetType(l.SymType(sectsym)) + l.AddInteriorSym(sectsym, s) + bld.SetValue(int64(pesym.Value)) + bld.SetSize(4) + if l.SymType(sectsym) == sym.STEXT { + if bld.External() && !bld.DuplicateOK() { + return nil, fmt.Errorf("%s: duplicate symbol definition", l.SymName(s)) + } + bld.SetExternal(true) + } + if sz, ok := state.comdats[uint16(pesym.SectionNumber-1)]; ok { + // This is a COMDAT definition. 
Record that we're picking + // this instance so that we can ignore future defs. + if _, ok := comdatDefinitions[l.SymName(s)]; ok { + return nil, fmt.Errorf("internal error: preexisting COMDAT definition for %q", name) + } + comdatDefinitions[l.SymName(s)] = sz + } + } + + // Sort outer lists by address, adding to textp. + // This keeps textp in increasing address order. + for _, sect := range f.Sections { + s := state.sectsyms[sect] + if s == 0 { + continue + } + l.SortSub(s) + importSymsState.secSyms = append(importSymsState.secSyms, s) + if l.SymType(s) == sym.STEXT { + for ; s != 0; s = l.SubSym(s) { + if l.AttrOnList(s) { + return nil, fmt.Errorf("symbol %s listed multiple times", l.SymName(s)) + } + l.SetAttrOnList(s, true) + ls.Textp = append(ls.Textp, s) + } + } + } + + if ls.PData != 0 { + processSEH(l, arch, ls.PData, ls.XData) + } + + return &ls, nil +} + +// PostProcessImports works to resolve inconsistencies with DLL import +// symbols; it is needed when building with more "modern" C compilers +// with internal linkage. +// +// Background: DLL import symbols are data (SNOPTRDATA) symbols whose +// name is of the form "__imp_XXX", which contain a pointer/reference +// to symbol XXX. It's possible to have import symbols for both data +// symbols ("__imp__fmode") and text symbols ("__imp_CreateEventA"). +// In some case import symbols are just references to some external +// thing, and in other cases we see actual definitions of import +// symbols when reading host objects. +// +// Previous versions of the linker would in most cases immediately +// "forward" import symbol references, e.g. treat a references to +// "__imp_XXX" a references to "XXX", however this doesn't work well +// with more modern compilers, where you can sometimes see import +// symbols that are defs (as opposed to external refs). 
+// +// The main actions taken below are to search for references to +// SDYNIMPORT symbols in host object text/data sections and flag the +// symbols for later fixup. When we see a reference to an import +// symbol __imp_XYZ where XYZ corresponds to some SDYNIMPORT symbol, +// we flag the symbol (via GOT setting) so that it can be redirected +// to XYZ later in windynrelocsym. When we see a direct reference to +// an SDYNIMPORT symbol XYZ, we also flag the symbol (via PLT setting) +// to indicated that the reference will need to be redirected to a +// stub. +func PostProcessImports() error { + ldr := importSymsState.l + arch := importSymsState.arch + keeprelocneeded := make(map[loader.Sym]loader.Sym) + for _, s := range importSymsState.secSyms { + isText := ldr.SymType(s) == sym.STEXT + relocs := ldr.Relocs(s) + for i := 0; i < relocs.Count(); i++ { + r := relocs.At(i) + rs := r.Sym() + if ldr.SymType(rs) == sym.SDYNIMPORT { + // Tag the symbol for later stub generation. + ldr.SetPlt(rs, CreateImportStubPltToken) + continue + } + isym, err := LookupBaseFromImport(rs, ldr, arch) + if err != nil { + return err + } + if isym == 0 { + continue + } + if ldr.SymType(isym) != sym.SDYNIMPORT { + continue + } + // For non-text symbols, forward the reference from __imp_X to + // X immediately. + if !isText { + r.SetSym(isym) + continue + } + // Flag this imp symbol to be processed later in windynrelocsym. + ldr.SetGot(rs, RedirectToDynImportGotToken) + // Consistency check: should be no PLT token here. + splt := ldr.SymPlt(rs) + if splt != -1 { + return fmt.Errorf("internal error: import symbol %q has invalid PLT setting %d", ldr.SymName(rs), splt) + } + // Flag for dummy relocation. 
+ keeprelocneeded[rs] = isym + } + } + for k, v := range keeprelocneeded { + sb := ldr.MakeSymbolUpdater(k) + r, _ := sb.AddRel(objabi.R_KEEP) + r.SetSym(v) + } + importSymsState = nil + return nil +} + +func issehsect(arch *sys.Arch, s *pe.Section) bool { + return arch.Family == sys.AMD64 && (s.Name == ".pdata" || s.Name == ".xdata") +} + +func issect(s *pe.COFFSymbol) bool { + return s.StorageClass == IMAGE_SYM_CLASS_STATIC && s.Type == 0 && s.Name[0] == '.' +} + +func (state *peLoaderState) readpesym(pesym *pe.COFFSymbol) (*loader.SymbolBuilder, loader.Sym, error) { + symname, err := pesym.FullName(state.f.StringTable) + if err != nil { + return nil, 0, err + } + var name string + if issect(pesym) { + name = state.l.SymName(state.sectsyms[state.f.Sections[pesym.SectionNumber-1]]) + } else { + name = symname + // A note on the "_main" exclusion below: the main routine + // defined by the Go runtime is named "_main", not "main", so + // when reading references to _main from a host object we want + // to avoid rewriting "_main" to "main" in this specific + // instance. See #issuecomment-1143698749 on #35006 for more + // details on this problem. + if state.arch.Family == sys.I386 && name[0] == '_' && name != "_main" && !strings.HasPrefix(name, "__imp_") { + name = name[1:] // _Name => Name + } + } + + // remove last @XXX + if i := strings.LastIndex(name, "@"); i >= 0 { + name = name[:i] + } + + var s loader.Sym + var bld *loader.SymbolBuilder + // Microsoft's PE documentation is contradictory. It says that the symbol's complex type + // is stored in the pesym.Type most significant byte, but MSVC, LLVM, and mingw store it + // in the 4 high bits of the less significant byte. 
+ switch uint8(pesym.Type&0xf0) >> 4 { + default: + return nil, 0, fmt.Errorf("%s: invalid symbol type %d", symname, pesym.Type) + + case IMAGE_SYM_DTYPE_FUNCTION, IMAGE_SYM_DTYPE_NULL: + switch pesym.StorageClass { + case IMAGE_SYM_CLASS_EXTERNAL: //global + s = state.l.LookupOrCreateCgoExport(name, 0) + + case IMAGE_SYM_CLASS_NULL, IMAGE_SYM_CLASS_STATIC, IMAGE_SYM_CLASS_LABEL: + s = state.l.LookupOrCreateCgoExport(name, state.localSymVersion) + bld = makeUpdater(state.l, bld, s) + bld.SetDuplicateOK(true) + + default: + return nil, 0, fmt.Errorf("%s: invalid symbol binding %d", symname, pesym.StorageClass) + } + } + + if s != 0 && state.l.SymType(s) == 0 && (pesym.StorageClass != IMAGE_SYM_CLASS_STATIC || pesym.Value != 0) { + bld = makeUpdater(state.l, bld, s) + bld.SetType(sym.SXREF) + } + + return bld, s, nil +} + +// preprocessSymbols walks the COFF symbols for the PE file we're +// reading and looks for cases where we have both a symbol definition +// for "XXX" and an "__imp_XXX" symbol, recording these cases in a map +// in the state struct. This information will be used in readpesym() +// above to give such symbols special treatment. This function also +// gathers information about COMDAT sections/symbols for later use +// in readpesym(). +func (state *peLoaderState) preprocessSymbols() error { + + // Locate comdat sections. + state.comdats = make(map[uint16]int64) + for i, s := range state.f.Sections { + if s.Characteristics&uint32(pe.IMAGE_SCN_LNK_COMDAT) != 0 { + state.comdats[uint16(i)] = int64(s.Size) + } + } + + // Examine symbol defs. 
+ for i, numaux := 0, 0; i < len(state.f.COFFSymbols); i += numaux + 1 { + pesym := &state.f.COFFSymbols[i] + numaux = int(pesym.NumberOfAuxSymbols) + if pesym.SectionNumber == 0 { // extern + continue + } + symname, err := pesym.FullName(state.f.StringTable) + if err != nil { + return err + } + if _, isc := state.comdats[uint16(pesym.SectionNumber-1)]; !isc { + continue + } + if pesym.StorageClass != uint8(IMAGE_SYM_CLASS_STATIC) { + continue + } + // This symbol corresponds to a COMDAT section. Read the + // aux data for it. + auxsymp, err := state.f.COFFSymbolReadSectionDefAux(i) + if err != nil { + return fmt.Errorf("unable to read aux info for section def symbol %d %s: pe.COFFSymbolReadComdatInfo returns %v", i, symname, err) + } + if auxsymp.Selection == pe.IMAGE_COMDAT_SELECT_SAME_SIZE { + // This is supported. + } else if auxsymp.Selection == pe.IMAGE_COMDAT_SELECT_ANY { + // Also supported. + state.comdats[uint16(pesym.SectionNumber-1)] = int64(-1) + } else { + // We don't support any of the other strategies at the + // moment. I suspect that we may need to also support + // "associative", we'll see. + return fmt.Errorf("internal error: unsupported COMDAT selection strategy found in path=%s sec=%d strategy=%d idx=%d, please file a bug", state.pn, auxsymp.SecNum, auxsymp.Selection, i) + } + } + return nil +} + +// LookupBaseFromImport examines the symbol "s" to see if it +// corresponds to an import symbol (name of the form "__imp_XYZ") and +// if so, it looks up the underlying target of the import symbol and +// returns it. An error is returned if the symbol is of the form +// "__imp_XYZ" but no XYZ can be found. 
+func LookupBaseFromImport(s loader.Sym, ldr *loader.Loader, arch *sys.Arch) (loader.Sym, error) { + sname := ldr.SymName(s) + if !strings.HasPrefix(sname, "__imp_") { + return 0, nil + } + basename := sname[len("__imp_"):] + if arch.Family == sys.I386 && basename[0] == '_' { + basename = basename[1:] // _Name => Name + } + isym := ldr.Lookup(basename, 0) + if isym == 0 { + return 0, fmt.Errorf("internal error: import symbol %q with no underlying sym", sname) + } + return isym, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/loadpe/seh.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/loadpe/seh.go new file mode 100644 index 0000000000000000000000000000000000000000..545958f1d60beedb2492a36e92879d9e72536656 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/loadpe/seh.go @@ -0,0 +1,111 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package loadpe + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "fmt" + "sort" +) + +const ( + UNW_FLAG_EHANDLER = 1 << 3 + UNW_FLAG_UHANDLER = 2 << 3 + UNW_FLAG_CHAININFO = 4 << 3 + unwStaticDataSize = 4 // Bytes of unwind data before the variable length part. + unwCodeSize = 2 // Bytes per unwind code. +) + +// processSEH walks all pdata relocations looking for exception handler function symbols. +// We want to mark these as reachable if the function that they protect is reachable +// in the final binary. +func processSEH(ldr *loader.Loader, arch *sys.Arch, pdata sym.LoaderSym, xdata sym.LoaderSym) error { + switch arch.Family { + case sys.AMD64: + ldr.SetAttrReachable(pdata, true) + if xdata != 0 { + ldr.SetAttrReachable(xdata, true) + } + return processSEHAMD64(ldr, pdata) + default: + // TODO: support SEH on other architectures. 
+ return fmt.Errorf("unsupported architecture for SEH: %v", arch.Family) + } +} + +func processSEHAMD64(ldr *loader.Loader, pdata sym.LoaderSym) error { + // The following loop traverses a list of pdata entries, + // each entry being 3 relocations long. The first relocation + // is a pointer to the function symbol to which the pdata entry + // corresponds. The third relocation is a pointer to the + // corresponding .xdata entry. + // Reference: + // https://learn.microsoft.com/en-us/cpp/build/exception-handling-x64#struct-runtime_function + rels := ldr.Relocs(pdata) + if rels.Count()%3 != 0 { + return fmt.Errorf(".pdata symbol %q has invalid relocation count", ldr.SymName(pdata)) + } + for i := 0; i < rels.Count(); i += 3 { + xrel := rels.At(i + 2) + handler := findHandlerInXDataAMD64(ldr, xrel.Sym(), xrel.Add()) + if handler != 0 { + sb := ldr.MakeSymbolUpdater(rels.At(i).Sym()) + r, _ := sb.AddRel(objabi.R_KEEP) + r.SetSym(handler) + } + } + return nil +} + +// findHandlerInXDataAMD64 finds the symbol in the .xdata section that +// corresponds to the exception handler. +// Reference: +// https://learn.microsoft.com/en-us/cpp/build/exception-handling-x64#struct-unwind_info +func findHandlerInXDataAMD64(ldr *loader.Loader, xsym sym.LoaderSym, add int64) loader.Sym { + data := ldr.Data(xsym) + if add < 0 || add+unwStaticDataSize > int64(len(data)) { + return 0 + } + data = data[add:] + var isChained bool + switch flag := data[0]; { + case flag&UNW_FLAG_EHANDLER != 0 || flag&UNW_FLAG_UHANDLER != 0: + // Exception handler. + case flag&UNW_FLAG_CHAININFO != 0: + isChained = true + default: + // Nothing to do. + return 0 + } + codes := data[2] + if codes%2 != 0 { + // There are always an even number of unwind codes, even if the last one is unused. + codes += 1 + } + // The exception handler relocation is the first relocation after the unwind codes, + // unless it is chained, but we will handle this case later. 
+ targetOff := add + unwStaticDataSize + unwCodeSize*int64(codes) + xrels := ldr.Relocs(xsym) + xrelsCount := xrels.Count() + idx := sort.Search(xrelsCount, func(i int) bool { + return int64(xrels.At(i).Off()) >= targetOff + }) + if idx == xrelsCount { + return 0 + } + if isChained { + // The third relocations references the next .xdata entry in the chain, recurse. + idx += 2 + if idx >= xrelsCount { + return 0 + } + r := xrels.At(idx) + return findHandlerInXDataAMD64(ldr, r.Sym(), r.Add()) + } + return xrels.At(idx).Sym() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/loadxcoff/ldxcoff.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/loadxcoff/ldxcoff.go new file mode 100644 index 0000000000000000000000000000000000000000..29d162596a3657d3f2d68c8ce4f1b8c91a45c8c7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/loadxcoff/ldxcoff.go @@ -0,0 +1,227 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loadxcoff implements a XCOFF file reader. +package loadxcoff + +import ( + "cmd/internal/bio" + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "errors" + "fmt" + "internal/xcoff" +) + +// ldSection is an XCOFF section with its symbols. +type ldSection struct { + xcoff.Section + sym loader.Sym +} + +// TODO(brainman): maybe just add ReadAt method to bio.Reader instead of creating xcoffBiobuf + +// xcoffBiobuf makes bio.Reader look like io.ReaderAt. +type xcoffBiobuf bio.Reader + +func (f *xcoffBiobuf) ReadAt(p []byte, off int64) (int, error) { + ret := ((*bio.Reader)(f)).MustSeek(off, 0) + if ret < 0 { + return 0, errors.New("fail to seek") + } + n, err := f.Read(p) + if err != nil { + return 0, err + } + return n, nil +} + +// loads the Xcoff file pn from f. +// Symbols are written into loader, and a slice of the text symbols is returned. 
+func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Reader, pkg string, length int64, pn string) (textp []loader.Sym, err error) { + errorf := func(str string, args ...interface{}) ([]loader.Sym, error) { + return nil, fmt.Errorf("loadxcoff: %v: %v", pn, fmt.Sprintf(str, args...)) + } + + var ldSections []*ldSection + + f, err := xcoff.NewFile((*xcoffBiobuf)(input)) + if err != nil { + return nil, err + } + defer f.Close() + + for _, sect := range f.Sections { + //only text, data and bss section + if sect.Type < xcoff.STYP_TEXT || sect.Type > xcoff.STYP_BSS { + continue + } + lds := new(ldSection) + lds.Section = *sect + name := fmt.Sprintf("%s(%s)", pkg, lds.Name) + symbol := l.LookupOrCreateSym(name, localSymVersion) + s := l.MakeSymbolUpdater(symbol) + + switch lds.Type { + default: + return errorf("unrecognized section type 0x%x", lds.Type) + case xcoff.STYP_TEXT: + s.SetType(sym.STEXT) + case xcoff.STYP_DATA: + s.SetType(sym.SNOPTRDATA) + case xcoff.STYP_BSS: + s.SetType(sym.SNOPTRBSS) + } + + s.SetSize(int64(lds.Size)) + if s.Type() != sym.SNOPTRBSS { + data, err := lds.Section.Data() + if err != nil { + return nil, err + } + s.SetData(data) + } + + lds.sym = symbol + ldSections = append(ldSections, lds) + } + + // sx = symbol from file + // s = symbol for loader + for _, sx := range f.Symbols { + // get symbol type + stype, errmsg := getSymbolType(f, sx) + if errmsg != "" { + return errorf("error reading symbol %s: %s", sx.Name, errmsg) + } + if stype == sym.Sxxx { + continue + } + + s := l.LookupOrCreateSym(sx.Name, 0) + + // Text symbol + if l.SymType(s) == sym.STEXT { + if l.AttrOnList(s) { + return errorf("symbol %s listed multiple times", l.SymName(s)) + } + l.SetAttrOnList(s, true) + textp = append(textp, s) + } + } + + // Read relocations + for _, sect := range ldSections { + // TODO(aix): Dwarf section relocation if needed + if sect.Type != xcoff.STYP_TEXT && sect.Type != xcoff.STYP_DATA { + continue + } + sb := 
l.MakeSymbolUpdater(sect.sym) + for _, rx := range sect.Relocs { + rSym := l.LookupOrCreateCgoExport(rx.Symbol.Name, 0) + if uint64(int32(rx.VirtualAddress)) != rx.VirtualAddress { + return errorf("virtual address of a relocation is too big: 0x%x", rx.VirtualAddress) + } + rOff := int32(rx.VirtualAddress) + var rSize uint8 + var rType objabi.RelocType + var rAdd int64 + switch rx.Type { + default: + return errorf("section %s: unknown relocation of type 0x%x", sect.Name, rx.Type) + case xcoff.R_POS: + // Reloc the address of r.Sym + // Length should be 64 + if rx.Length != 64 { + return errorf("section %s: relocation R_POS has length different from 64: %d", sect.Name, rx.Length) + } + rSize = 8 + rType = objabi.R_CONST + rAdd = int64(rx.Symbol.Value) + + case xcoff.R_RBR: + rSize = 4 + rType = objabi.R_CALLPOWER + rAdd = 0 + } + r, _ := sb.AddRel(rType) + r.SetOff(rOff) + r.SetSiz(rSize) + r.SetSym(rSym) + r.SetAdd(rAdd) + } + } + return textp, nil +} + +// Convert symbol xcoff type to sym.SymKind +// Returns nil if this shouldn't be added into loader (like .file or .dw symbols ) +func getSymbolType(f *xcoff.File, s *xcoff.Symbol) (stype sym.SymKind, err string) { + // .file symbol + if s.SectionNumber == -2 { + if s.StorageClass == xcoff.C_FILE { + return sym.Sxxx, "" + } + return sym.Sxxx, "unrecognised StorageClass for sectionNumber = -2" + } + + // extern symbols + // TODO(aix) + if s.SectionNumber == 0 { + return sym.Sxxx, "" + } + + sectType := f.Sections[s.SectionNumber-1].SectionHeader.Type + switch sectType { + default: + return sym.Sxxx, fmt.Sprintf("getSymbolType for Section type 0x%x not implemented", sectType) + case xcoff.STYP_DWARF, xcoff.STYP_DEBUG: + return sym.Sxxx, "" + case xcoff.STYP_DATA, xcoff.STYP_BSS, xcoff.STYP_TEXT: + } + + switch s.StorageClass { + default: + return sym.Sxxx, fmt.Sprintf("getSymbolType for Storage class 0x%x not implemented", s.StorageClass) + case xcoff.C_HIDEXT, xcoff.C_EXT, xcoff.C_WEAKEXT: + switch 
s.AuxCSect.StorageMappingClass { + default: + return sym.Sxxx, fmt.Sprintf("getSymbolType for Storage class 0x%x and Storage Map 0x%x not implemented", s.StorageClass, s.AuxCSect.StorageMappingClass) + + // Program Code + case xcoff.XMC_PR: + if sectType == xcoff.STYP_TEXT { + return sym.STEXT, "" + } + return sym.Sxxx, fmt.Sprintf("unrecognised Section Type 0x%x for Storage Class 0x%x with Storage Map XMC_PR", sectType, s.StorageClass) + + // Read/Write Data + case xcoff.XMC_RW: + if sectType == xcoff.STYP_DATA { + return sym.SDATA, "" + } + if sectType == xcoff.STYP_BSS { + return sym.SBSS, "" + } + return sym.Sxxx, fmt.Sprintf("unrecognised Section Type 0x%x for Storage Class 0x%x with Storage Map XMC_RW", sectType, s.StorageClass) + + // Function descriptor + case xcoff.XMC_DS: + if sectType == xcoff.STYP_DATA { + return sym.SDATA, "" + } + return sym.Sxxx, fmt.Sprintf("unrecognised Section Type 0x%x for Storage Class 0x%x with Storage Map XMC_DS", sectType, s.StorageClass) + + // TOC anchor and TOC entry + case xcoff.XMC_TC0, xcoff.XMC_TE: + if sectType == xcoff.STYP_DATA { + return sym.SXCOFFTOC, "" + } + return sym.Sxxx, fmt.Sprintf("unrecognised Section Type 0x%x for Storage Class 0x%x with Storage Map XMC_DS", sectType, s.StorageClass) + + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/loong64/asm.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/loong64/asm.go new file mode 100644 index 0000000000000000000000000000000000000000..6607e5dc6474f14b437525bd63f31e8588b1e0d5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/loong64/asm.go @@ -0,0 +1,258 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package loong64 + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" + "log" +) + +func gentext(ctxt *ld.Link, ldr *loader.Loader) { + initfunc, addmoduledata := ld.PrepareAddmoduledata(ctxt) + if initfunc == nil { + return + } + + o := func(op uint32) { + initfunc.AddUint32(ctxt.Arch, op) + } + + // Emit the following function: + // + // local.dso_init: + // la.pcrel $a0, local.moduledata + // b runtime.addmoduledata + + // 0000000000000000 : + // 0: 1a000004 pcalau12i $a0, 0 + // 0: R_LARCH_PCALA_HI20 local.moduledata + o(0x1a000004) + rel, _ := initfunc.AddRel(objabi.R_ADDRLOONG64U) + rel.SetOff(0) + rel.SetSiz(4) + rel.SetSym(ctxt.Moduledata) + + // 4: 02c00084 addi.d $a0, $a0, 0 + // 4: R_LARCH_PCALA_LO12 local.moduledata + o(0x02c00084) + rel2, _ := initfunc.AddRel(objabi.R_ADDRLOONG64) + rel2.SetOff(4) + rel2.SetSiz(4) + rel2.SetSym(ctxt.Moduledata) + + // 8: 50000000 b 0 + // 8: R_LARCH_B26 runtime.addmoduledata + o(0x50000000) + rel3, _ := initfunc.AddRel(objabi.R_CALLLOONG64) + rel3.SetOff(8) + rel3.SetSiz(4) + rel3.SetSym(addmoduledata) +} + +func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { + log.Fatalf("adddynrel not implemented") + return false +} + +func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool { + // loong64 ELF relocation (endian neutral) + // offset uint64 + // symreloc uint64 // The high 32-bit is the symbol, the low 32-bit is the relocation type. 
+ // addend int64 + + elfsym := ld.ElfSymForReloc(ctxt, r.Xsym) + switch r.Type { + default: + return false + case objabi.R_ADDR, objabi.R_DWARFSECREF: + switch r.Size { + case 4: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_LARCH_32) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + case 8: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_LARCH_64) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + default: + return false + } + case objabi.R_ADDRLOONG64TLS: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_LARCH_TLS_LE_LO12) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + + case objabi.R_ADDRLOONG64TLSU: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_LARCH_TLS_LE_HI20) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + + case objabi.R_CALLLOONG64: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_LARCH_B26) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + + case objabi.R_LOONG64_TLS_IE_PCREL_HI: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_LARCH_TLS_IE_PC_HI20) | uint64(elfsym)<<32) + out.Write64(uint64(0x0)) + + case objabi.R_LOONG64_TLS_IE_LO: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_LARCH_TLS_IE_PC_LO12) | uint64(elfsym)<<32) + out.Write64(uint64(0x0)) + + case objabi.R_ADDRLOONG64: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_LARCH_PCALA_LO12) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + + case objabi.R_ADDRLOONG64U: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_LARCH_PCALA_HI20) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + + case objabi.R_LOONG64_GOT_HI: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_LARCH_GOT_PC_HI20) | uint64(elfsym)<<32) + out.Write64(uint64(0x0)) + + case objabi.R_LOONG64_GOT_LO: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_LARCH_GOT_PC_LO12) | uint64(elfsym)<<32) + out.Write64(uint64(0x0)) + } + + return true +} + +func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, 
gotplt *loader.SymbolBuilder, dynamic loader.Sym) { + return +} + +func machoreloc1(*sys.Arch, *ld.OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool { + return false +} + +func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) (o int64, nExtReloc int, ok bool) { + rs := r.Sym() + if target.IsExternal() { + switch r.Type() { + default: + return val, 0, false + case objabi.R_ADDRLOONG64, + objabi.R_ADDRLOONG64U: + // set up addend for eventual relocation via outer symbol. + rs, _ := ld.FoldSubSymbolOffset(ldr, rs) + rst := ldr.SymType(rs) + if rst != sym.SHOSTOBJ && rst != sym.SDYNIMPORT && ldr.SymSect(rs) == nil { + ldr.Errorf(s, "missing section for %s", ldr.SymName(rs)) + } + return val, 1, true + case objabi.R_ADDRLOONG64TLS, + objabi.R_ADDRLOONG64TLSU, + objabi.R_CALLLOONG64, + objabi.R_JMPLOONG64, + objabi.R_LOONG64_TLS_IE_PCREL_HI, + objabi.R_LOONG64_TLS_IE_LO, + objabi.R_LOONG64_GOT_HI, + objabi.R_LOONG64_GOT_LO: + return val, 1, true + } + } + + const isOk = true + const noExtReloc = 0 + + switch r.Type() { + case objabi.R_CONST: + return r.Add(), noExtReloc, isOk + case objabi.R_GOTOFF: + return ldr.SymValue(r.Sym()) + r.Add() - ldr.SymValue(syms.GOT), noExtReloc, isOk + case objabi.R_ADDRLOONG64, + objabi.R_ADDRLOONG64U: + pc := ldr.SymValue(s) + int64(r.Off()) + t := calculatePCAlignedReloc(r.Type(), ldr.SymAddr(rs)+r.Add(), pc) + if r.Type() == objabi.R_ADDRLOONG64 { + return int64(val&0xffc003ff | (t << 10)), noExtReloc, isOk + } + return int64(val&0xfe00001f | (t << 5)), noExtReloc, isOk + case objabi.R_ADDRLOONG64TLS, + objabi.R_ADDRLOONG64TLSU: + t := ldr.SymAddr(rs) + r.Add() + if r.Type() == objabi.R_ADDRLOONG64TLS { + return int64(val&0xffc003ff | ((t & 0xfff) << 10)), noExtReloc, isOk + } + return int64(val&0xfe00001f | (((t) >> 12 << 5) & 0x1ffffe0)), noExtReloc, isOk + case objabi.R_CALLLOONG64, + objabi.R_JMPLOONG64: + pc := ldr.SymValue(s) + int64(r.Off()) + t := 
ldr.SymAddr(rs) + r.Add() - pc + return int64(val&0xfc000000 | (((t >> 2) & 0xffff) << 10) | (((t >> 2) & 0x3ff0000) >> 16)), noExtReloc, isOk + } + + return val, 0, false +} + +func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { + return -1 +} + +func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (loader.ExtReloc, bool) { + switch r.Type() { + case objabi.R_ADDRLOONG64, + objabi.R_ADDRLOONG64U, + objabi.R_LOONG64_GOT_HI, + objabi.R_LOONG64_GOT_LO: + + return ld.ExtrelocViaOuterSym(ldr, r, s), true + + case objabi.R_ADDRLOONG64TLS, + objabi.R_ADDRLOONG64TLSU, + objabi.R_CONST, + objabi.R_GOTOFF, + objabi.R_CALLLOONG64, + objabi.R_JMPLOONG64, + objabi.R_LOONG64_TLS_IE_PCREL_HI, + objabi.R_LOONG64_TLS_IE_LO: + return ld.ExtrelocSimple(ldr, r), true + } + return loader.ExtReloc{}, false +} + +func isRequestingLowPageBits(t objabi.RelocType) bool { + switch t { + case objabi.R_ADDRLOONG64: + return true + } + return false +} + +// Calculates the value to put into the immediate slot, according to the +// desired relocation type, target and PC. +// The value to use varies based on the reloc type. Namely, the absolute low +// bits of the target are to be used for the low part, while the page-aligned +// offset is to be used for the higher part. A "page" here is not related to +// the system's actual page size, but rather a fixed 12-bit range (designed to +// cooperate with ADDI/LD/ST's 12-bit immediates). 
+func calculatePCAlignedReloc(t objabi.RelocType, tgt int64, pc int64) int64 { + if isRequestingLowPageBits(t) { + // corresponding immediate field is 12 bits wide + return tgt & 0xfff + } + + pageDelta := (tgt >> 12) - (pc >> 12) + if tgt&0xfff >= 0x800 { + // adjust for sign-extended addition of the low bits + pageDelta += 1 + } + // corresponding immediate field is 20 bits wide + return pageDelta & 0xfffff +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/loong64/l.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/loong64/l.go new file mode 100644 index 0000000000000000000000000000000000000000..a6309f1a3a296c817b3ea23114e1e35c6ffc6897 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/loong64/l.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package loong64 + +const ( + maxAlign = 32 // max data alignment + minAlign = 1 // min data alignment + funcAlign = 16 +) + +/* Used by ../../internal/ld/dwarf.go */ +const ( + dwarfRegSP = 3 + dwarfRegLR = 1 +) diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/loong64/obj.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/loong64/obj.go new file mode 100644 index 0000000000000000000000000000000000000000..b68ed494f6960ebe671c33211b8d13a194f73afb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/loong64/obj.go @@ -0,0 +1,63 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package loong64 + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" +) + +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchLoong64 + + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, + CodePad: []byte{0x00, 0x00, 0x2a, 0x00}, // BREAK 0 + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Extreloc: extreloc, + Machoreloc1: machoreloc1, + Gentext: gentext, + + ELF: ld.ELFArch{ + Linuxdynld: "/lib64/ld-linux-loongarch-lp64d.so.1", + LinuxdynldMusl: "/lib64/ld-musl-loongarch.so.1", + Freebsddynld: "XXX", + Openbsddynld: "XXX", + Netbsddynld: "XXX", + Dragonflydynld: "XXX", + Solarisdynld: "XXX", + + Reloc1: elfreloc1, + RelocSize: 24, + SetupPLT: elfsetupplt, + }, + } + + return arch, theArch +} + +func archinit(ctxt *ld.Link) { + switch ctxt.HeadType { + default: + ld.Exitf("unknown -H option: %v", ctxt.HeadType) + case objabi.Hlinux: /* loong64 elf */ + ld.Elfinit(ctxt) + ld.HEADR = ld.ELFRESERVE + if *ld.FlagRound == -1 { + *ld.FlagRound = 0x10000 + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/mips/asm.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/mips/asm.go new file mode 100644 index 0000000000000000000000000000000000000000..5d7e5c7fe5ed5515d4a9d3fc0b8a4fb1adb429fe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/mips/asm.go @@ -0,0 +1,155 @@ +// Inferno utils/5l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2016 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package mips + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" +) + +func gentext(ctxt *ld.Link, ldr *loader.Loader) { + return +} + +func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool { + out.Write32(uint32(sectoff)) + + elfsym := ld.ElfSymForReloc(ctxt, r.Xsym) + switch r.Type { + default: + return false + case objabi.R_ADDR, objabi.R_DWARFSECREF: + if r.Size != 4 { + return false + } + out.Write32(uint32(elf.R_MIPS_32) | uint32(elfsym)<<8) + case objabi.R_ADDRMIPS: + out.Write32(uint32(elf.R_MIPS_LO16) | uint32(elfsym)<<8) + case objabi.R_ADDRMIPSU: + out.Write32(uint32(elf.R_MIPS_HI16) | uint32(elfsym)<<8) + case objabi.R_ADDRMIPSTLS: + out.Write32(uint32(elf.R_MIPS_TLS_TPREL_LO16) | uint32(elfsym)<<8) + case objabi.R_CALLMIPS, objabi.R_JMPMIPS: + out.Write32(uint32(elf.R_MIPS_26) | uint32(elfsym)<<8) + } + + return true +} + +func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) { + return +} + +func machoreloc1(*sys.Arch, *ld.OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool { + return false +} + +func applyrel(arch *sys.Arch, ldr *loader.Loader, rt objabi.RelocType, off int32, s loader.Sym, val int64, t int64) int64 { + o := uint32(val) + switch rt { + case objabi.R_ADDRMIPS, objabi.R_ADDRMIPSTLS: + return int64(o&0xffff0000 | uint32(t)&0xffff) + case objabi.R_ADDRMIPSU: + return int64(o&0xffff0000 | uint32((t+(1<<15))>>16)&0xffff) + case objabi.R_CALLMIPS, objabi.R_JMPMIPS: + return int64(o&0xfc000000 | uint32(t>>2)&^0xfc000000) + default: + return val + } +} + +func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) (o int64, nExtReloc int, ok bool) { + rs := r.Sym() + if target.IsExternal() { + switch r.Type() { + default: + return val, 0, false + + case 
objabi.R_ADDRMIPS, objabi.R_ADDRMIPSU: + // set up addend for eventual relocation via outer symbol. + _, off := ld.FoldSubSymbolOffset(ldr, rs) + xadd := r.Add() + off + return applyrel(target.Arch, ldr, r.Type(), r.Off(), s, val, xadd), 1, true + + case objabi.R_ADDRMIPSTLS, objabi.R_CALLMIPS, objabi.R_JMPMIPS: + return applyrel(target.Arch, ldr, r.Type(), r.Off(), s, val, r.Add()), 1, true + } + } + + const isOk = true + const noExtReloc = 0 + switch rt := r.Type(); rt { + case objabi.R_ADDRMIPS, objabi.R_ADDRMIPSU: + t := ldr.SymValue(rs) + r.Add() + return applyrel(target.Arch, ldr, rt, r.Off(), s, val, t), noExtReloc, isOk + case objabi.R_CALLMIPS, objabi.R_JMPMIPS: + t := ldr.SymValue(rs) + r.Add() + + if t&3 != 0 { + ldr.Errorf(s, "direct call is not aligned: %s %x", ldr.SymName(rs), t) + } + + // check if target address is in the same 256 MB region as the next instruction + if (ldr.SymValue(s)+int64(r.Off())+4)&0xf0000000 != (t & 0xf0000000) { + ldr.Errorf(s, "direct call too far: %s %x", ldr.SymName(rs), t) + } + + return applyrel(target.Arch, ldr, rt, r.Off(), s, val, t), noExtReloc, isOk + case objabi.R_ADDRMIPSTLS: + // thread pointer is at 0x7000 offset from the start of TLS data area + t := ldr.SymValue(rs) + r.Add() - 0x7000 + if t < -32768 || t >= 32678 { + ldr.Errorf(s, "TLS offset out of range %d", t) + } + return applyrel(target.Arch, ldr, rt, r.Off(), s, val, t), noExtReloc, isOk + } + + return val, 0, false +} + +func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { + return -1 +} + +func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (loader.ExtReloc, bool) { + switch r.Type() { + case objabi.R_ADDRMIPS, objabi.R_ADDRMIPSU: + return ld.ExtrelocViaOuterSym(ldr, r, s), true + + case objabi.R_ADDRMIPSTLS, objabi.R_CALLMIPS, objabi.R_JMPMIPS: + return ld.ExtrelocSimple(ldr, r), true + } + return loader.ExtReloc{}, false +} diff --git 
a/platform/dbops/binaries/go/go/src/cmd/link/internal/mips/l.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/mips/l.go new file mode 100644 index 0000000000000000000000000000000000000000..affc48cc06956c40bb989846b7176942b1dbea4b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/mips/l.go @@ -0,0 +1,74 @@ +// Inferno utils/5l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2016 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package mips + +// Writing object files. + +// cmd/9l/l.h from Vita Nuova. +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others +// Portions Copyright © 2016 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +const ( + MaxAlign = 32 // max data alignment + MinAlign = 1 // min data alignment + FuncAlign = 4 +) + +/* Used by ../internal/ld/dwarf.go */ +const ( + DWARFREGSP = 29 + DWARFREGLR = 31 +) diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/mips/obj.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/mips/obj.go new file mode 100644 index 0000000000000000000000000000000000000000..c76e267cc232843ef623075fbb54c9d0735f0c47 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/mips/obj.go @@ -0,0 +1,106 @@ +// Inferno utils/5l/obj.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/obj.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2016 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package mips + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" + "cmd/link/internal/loader" + "internal/buildcfg" +) + +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchMIPS + musl := "/lib/ld-musl-mips.so.1" + if buildcfg.GOARCH == "mipsle" { + arch = sys.ArchMIPSLE + musl = "/lib/ld-musl-mipsel.so.1" + } + + theArch := ld.Arch{ + Funcalign: FuncAlign, + Maxalign: MaxAlign, + Minalign: MinAlign, + Dwarfregsp: DWARFREGSP, + Dwarfreglr: DWARFREGLR, + + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Extreloc: extreloc, + Gentext: gentext, + Machoreloc1: machoreloc1, + + ELF: ld.ELFArch{ + Linuxdynld: "/lib/ld.so.1", + LinuxdynldMusl: musl, + + Freebsddynld: "XXX", + Openbsddynld: "XXX", + Netbsddynld: "XXX", + Dragonflydynld: "XXX", + Solarisdynld: "XXX", + + Reloc1: elfreloc1, + RelocSize: 8, + SetupPLT: elfsetupplt, + + // Historically GNU ld creates a read-only + // .dynamic section. 
+ DynamicReadOnly: true, + }, + } + + return arch, theArch +} + +func archinit(ctxt *ld.Link) { + switch ctxt.HeadType { + default: + ld.Exitf("unknown -H option: %v", ctxt.HeadType) + case objabi.Hlinux: /* mips elf */ + ld.Elfinit(ctxt) + ld.HEADR = ld.ELFRESERVE + if *ld.FlagRound == -1 { + *ld.FlagRound = 0x10000 + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR) + } + } +} + +func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { + ld.Exitf("adddynrel currently unimplemented for MIPS") + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/mips64/asm.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/mips64/asm.go new file mode 100644 index 0000000000000000000000000000000000000000..e82d986184110955d0015691d88177a8cf7258a0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/mips64/asm.go @@ -0,0 +1,342 @@ +// Inferno utils/5l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package mips64 + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" +) + +var ( + // dtOffsets contains offsets for entries within the .dynamic section. + // These are used to fix up symbol values once they are known. + dtOffsets map[elf.DynTag]int64 + + // dynSymCount contains the number of entries in the .dynsym section. + // This is used to populate the DT_MIPS_SYMTABNO entry in the .dynamic + // section. + dynSymCount uint64 + + // gotLocalCount contains the number of local global offset table + // entries. This is used to populate the DT_MIPS_LOCAL_GOTNO entry in + // the .dynamic section. + gotLocalCount uint64 + + // gotSymIndex contains the index of the first dynamic symbol table + // entry that corresponds to an entry in the global offset table. 
+ // This is used to populate the DT_MIPS_GOTSYM entry in the .dynamic + // section. + gotSymIndex uint64 +) + +func gentext(ctxt *ld.Link, ldr *loader.Loader) { + if *ld.FlagD || ctxt.Target.IsExternal() { + return + } + + dynamic := ldr.MakeSymbolUpdater(ctxt.ArchSyms.Dynamic) + + ld.Elfwritedynent(ctxt.Arch, dynamic, elf.DT_MIPS_RLD_VERSION, 1) + ld.Elfwritedynent(ctxt.Arch, dynamic, elf.DT_MIPS_BASE_ADDRESS, 0) + + // elfsetupplt should have been called and gotLocalCount should now + // have its correct value. + if gotLocalCount == 0 { + ctxt.Errorf(0, "internal error: elfsetupplt has not been called") + } + ld.Elfwritedynent(ctxt.Arch, dynamic, elf.DT_MIPS_LOCAL_GOTNO, gotLocalCount) + + // DT_* entries have to exist prior to elfdynhash(), which finalises the + // table by adding DT_NULL. However, the values for the following entries + // are not know until after dynreloc() has completed. Add the symbols now, + // then update their values prior to code generation. + dts := []elf.DynTag{ + elf.DT_MIPS_SYMTABNO, + elf.DT_MIPS_GOTSYM, + } + dtOffsets = make(map[elf.DynTag]int64) + for _, dt := range dts { + ld.Elfwritedynent(ctxt.Arch, dynamic, dt, 0) + dtOffsets[dt] = dynamic.Size() - 8 + } +} + +func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { + targ := r.Sym() + var targType sym.SymKind + if targ != 0 { + targType = ldr.SymType(targ) + } + + if r.Type() >= objabi.ElfRelocOffset { + ldr.Errorf(s, "unexpected relocation type %d (%s)", r.Type(), sym.RelocName(target.Arch, r.Type())) + return false + } + + switch r.Type() { + case objabi.R_CALLMIPS, objabi.R_JMPMIPS: + if targType != sym.SDYNIMPORT { + // Nothing to do, the relocation will be laid out in reloc + return true + } + if target.IsExternal() { + // External linker will do this relocation. + return true + } + + // Internal linking, build a PLT entry and change the relocation + // target to that entry. 
+ if r.Add() != 0 { + ldr.Errorf(s, "PLT call with non-zero addend (%v)", r.Add()) + } + addpltsym(target, ldr, syms, targ) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, int64(ldr.SymPlt(targ))) + return true + } + + return false +} + +func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool { + + // mips64 ELF relocation (endian neutral) + // offset uint64 + // sym uint32 + // ssym uint8 + // type3 uint8 + // type2 uint8 + // type uint8 + // addend int64 + + addend := r.Xadd + + out.Write64(uint64(sectoff)) + + elfsym := ld.ElfSymForReloc(ctxt, r.Xsym) + out.Write32(uint32(elfsym)) + out.Write8(0) + out.Write8(0) + out.Write8(0) + switch r.Type { + default: + return false + case objabi.R_ADDR, objabi.R_DWARFSECREF: + switch r.Size { + case 4: + out.Write8(uint8(elf.R_MIPS_32)) + case 8: + out.Write8(uint8(elf.R_MIPS_64)) + default: + return false + } + case objabi.R_ADDRMIPS: + out.Write8(uint8(elf.R_MIPS_LO16)) + case objabi.R_ADDRMIPSU: + out.Write8(uint8(elf.R_MIPS_HI16)) + case objabi.R_ADDRMIPSTLS: + out.Write8(uint8(elf.R_MIPS_TLS_TPREL_LO16)) + if ctxt.Target.IsOpenbsd() { + // OpenBSD mips64 does not currently offset TLS by 0x7000, + // as such we need to add this back to get the correct offset + // via the external linker. + addend += 0x7000 + } + case objabi.R_CALLMIPS, + objabi.R_JMPMIPS: + out.Write8(uint8(elf.R_MIPS_26)) + } + out.Write64(uint64(addend)) + + return true +} + +func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) { + if plt.Size() != 0 { + return + } + + // Load resolver address from got[0] into r25. 
+ plt.AddSymRef(ctxt.Arch, gotplt.Sym(), 0, objabi.R_ADDRMIPSU, 4) + plt.SetUint32(ctxt.Arch, plt.Size()-4, 0x3c0e0000) // lui $14, %hi(&GOTPLT[0]) + plt.AddSymRef(ctxt.Arch, gotplt.Sym(), 0, objabi.R_ADDRMIPS, 4) + plt.SetUint32(ctxt.Arch, plt.Size()-4, 0xddd90000) // ld $25, %lo(&GOTPLT[0])($14) + + // Load return address into r15, the index of the got.plt entry into r24, then + // JALR to the resolver. The address of the got.plt entry is currently in r24, + // which we have to turn into an index. + plt.AddSymRef(ctxt.Arch, gotplt.Sym(), 0, objabi.R_ADDRMIPS, 4) + plt.SetUint32(ctxt.Arch, plt.Size()-4, 0x25ce0000) // addiu $14, $14, %lo(&GOTPLT[0]) + plt.AddUint32(ctxt.Arch, 0x030ec023) // subu $24, $24, $14 + plt.AddUint32(ctxt.Arch, 0x03e07825) // move $15, $31 + plt.AddUint32(ctxt.Arch, 0x0018c0c2) // srl $24, $24, 3 + plt.AddUint32(ctxt.Arch, 0x0320f809) // jalr $25 + plt.AddUint32(ctxt.Arch, 0x2718fffe) // subu $24, $24, 2 + + if gotplt.Size() != 0 { + ctxt.Errorf(gotplt.Sym(), "got.plt is not empty") + } + + // Reserve got[0] for resolver address (populated by dynamic loader). + gotplt.AddUint32(ctxt.Arch, 0) + gotplt.AddUint32(ctxt.Arch, 0) + gotLocalCount++ + + // Reserve got[1] for ELF object pointer (populated by dynamic loader). + gotplt.AddUint32(ctxt.Arch, 0) + gotplt.AddUint32(ctxt.Arch, 0) + gotLocalCount++ +} + +func addpltsym(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym) { + if ldr.SymPlt(s) >= 0 { + return + } + + dynamic := ldr.MakeSymbolUpdater(syms.Dynamic) + + const dynSymEntrySize = 20 + if gotSymIndex == 0 { + // Compute and update GOT symbol index. 
+ gotSymIndex = uint64(ldr.SymSize(syms.DynSym) / dynSymEntrySize) + dynamic.SetUint(target.Arch, dtOffsets[elf.DT_MIPS_GOTSYM], gotSymIndex) + } + if dynSymCount == 0 { + dynSymCount = uint64(ldr.SymSize(syms.DynSym) / dynSymEntrySize) + } + + ld.Adddynsym(ldr, target, syms, s) + dynSymCount++ + + if !target.IsElf() { + ldr.Errorf(s, "addpltsym: unsupported binary format") + } + + plt := ldr.MakeSymbolUpdater(syms.PLT) + gotplt := ldr.MakeSymbolUpdater(syms.GOTPLT) + if plt.Size() == 0 { + panic("plt is not set up") + } + + // Load got.plt entry into r25. + plt.AddSymRef(target.Arch, gotplt.Sym(), gotplt.Size(), objabi.R_ADDRMIPSU, 4) + plt.SetUint32(target.Arch, plt.Size()-4, 0x3c0f0000) // lui $15, %hi(.got.plt entry) + plt.AddSymRef(target.Arch, gotplt.Sym(), gotplt.Size(), objabi.R_ADDRMIPS, 4) + plt.SetUint32(target.Arch, plt.Size()-4, 0xddf90000) // ld $25, %lo(.got.plt entry)($15) + + // Load address of got.plt entry into r24 and JALR to address in r25. + plt.AddUint32(target.Arch, 0x03200008) // jr $25 + plt.AddSymRef(target.Arch, gotplt.Sym(), gotplt.Size(), objabi.R_ADDRMIPS, 4) + plt.SetUint32(target.Arch, plt.Size()-4, 0x65f80000) // daddiu $24, $15, %lo(.got.plt entry) + + // Add pointer to plt[0] to got.plt + gotplt.AddAddrPlus(target.Arch, plt.Sym(), 0) + + ldr.SetPlt(s, int32(plt.Size()-16)) + + // Update dynamic symbol count. 
+ dynamic.SetUint(target.Arch, dtOffsets[elf.DT_MIPS_SYMTABNO], dynSymCount) +} + +func machoreloc1(*sys.Arch, *ld.OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool { + return false +} + +func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) (o int64, nExtReloc int, ok bool) { + if target.IsExternal() { + switch r.Type() { + default: + return val, 0, false + + case objabi.R_ADDRMIPS, + objabi.R_ADDRMIPSU, + objabi.R_ADDRMIPSTLS, + objabi.R_CALLMIPS, + objabi.R_JMPMIPS: + return val, 1, true + } + } + + const isOk = true + const noExtReloc = 0 + rs := r.Sym() + switch r.Type() { + case objabi.R_ADDRMIPS, + objabi.R_ADDRMIPSU: + t := ldr.SymValue(rs) + r.Add() + if r.Type() == objabi.R_ADDRMIPS { + return int64(val&0xffff0000 | t&0xffff), noExtReloc, isOk + } + return int64(val&0xffff0000 | ((t+1<<15)>>16)&0xffff), noExtReloc, isOk + case objabi.R_ADDRMIPSTLS: + // thread pointer is at 0x7000 offset from the start of TLS data area + t := ldr.SymValue(rs) + r.Add() - 0x7000 + if target.IsOpenbsd() { + // OpenBSD mips64 does not currently offset TLS by 0x7000, + // as such we need to add this back to get the correct offset. 
+		t += 0x7000
+	}
+	if t < -32768 || t >= 32768 { // signed 16-bit range; was mistyped as 32678
+		ldr.Errorf(s, "TLS offset out of range %d", t)
+	}
+	return int64(val&0xffff0000 | t&0xffff), noExtReloc, isOk
+	case objabi.R_CALLMIPS,
+		objabi.R_JMPMIPS:
+		// Low 26 bits = (S + A) >> 2
+		t := ldr.SymValue(rs) + r.Add()
+		return int64(val&0xfc000000 | (t>>2)&^0xfc000000), noExtReloc, isOk
+	}
+
+	return val, 0, false
+}
+
+func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 {
+	return -1
+}
+
+func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (loader.ExtReloc, bool) {
+	switch r.Type() {
+	case objabi.R_ADDRMIPS,
+		objabi.R_ADDRMIPSU:
+		return ld.ExtrelocViaOuterSym(ldr, r, s), true
+
+	case objabi.R_ADDRMIPSTLS,
+		objabi.R_CALLMIPS,
+		objabi.R_JMPMIPS:
+		return ld.ExtrelocSimple(ldr, r), true
+	}
+	return loader.ExtReloc{}, false
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/mips64/l.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/mips64/l.go
new file mode 100644
index 0000000000000000000000000000000000000000..837af0eb05b5a407a2e6f86a47efba4f93c0c379
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/mips64/l.go
@@ -0,0 +1,74 @@
+// Inferno utils/5l/asm.c
+// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/asm.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package mips64 + +// Writing object files. + +// cmd/9l/l.h from Vita Nuova. +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +const ( + maxAlign = 32 // max data alignment + minAlign = 1 // min data alignment + funcAlign = 8 +) + +/* Used by ../internal/ld/dwarf.go */ +const ( + dwarfRegSP = 29 + dwarfRegLR = 31 +) diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/mips64/obj.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/mips64/obj.go new file mode 100644 index 0000000000000000000000000000000000000000..193ad1f27110b8d01034016b6d4ba06e64859f5f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/mips64/obj.go @@ -0,0 +1,113 @@ +// Inferno utils/5l/obj.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/obj.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package mips64 + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" + "internal/buildcfg" +) + +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchMIPS64 + musl := "/lib/ld-musl-mips64.so.1" + if buildcfg.GOARCH == "mips64le" { + arch = sys.ArchMIPS64LE + musl = "/lib/ld-musl-mips64el.so.1" + } + + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Extreloc: extreloc, + Gentext: gentext, + Machoreloc1: machoreloc1, + + ELF: ld.ELFArch{ + Linuxdynld: "/lib64/ld64.so.1", + LinuxdynldMusl: musl, + Freebsddynld: "XXX", + Openbsddynld: "/usr/libexec/ld.so", + Netbsddynld: "XXX", + Dragonflydynld: "XXX", + Solarisdynld: "XXX", + + Reloc1: elfreloc1, + RelocSize: 24, + SetupPLT: elfsetupplt, + + // Historically GNU ld creates a read-only + // .dynamic section. + DynamicReadOnly: true, + }, + } + + return arch, theArch +} + +func archinit(ctxt *ld.Link) { + switch ctxt.HeadType { + default: + ld.Exitf("unknown -H option: %v", ctxt.HeadType) + + case objabi.Hplan9: /* plan 9 */ + ld.HEADR = 32 + if *ld.FlagRound == -1 { + *ld.FlagRound = 16 * 1024 + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = ld.Rnd(16*1024, *ld.FlagRound) + int64(ld.HEADR) + } + + case objabi.Hlinux, /* mips64 elf */ + objabi.Hopenbsd: + ld.Elfinit(ctxt) + ld.HEADR = ld.ELFRESERVE + if *ld.FlagRound == -1 { + *ld.FlagRound = 0x10000 + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR) + } + } + + dynSymCount = 0 + gotLocalCount = 0 + gotSymIndex = 0 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ppc64/asm.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ppc64/asm.go new file mode 100644 index 0000000000000000000000000000000000000000..de5614e92af918c73d10032acb85603f59fac712 --- /dev/null 
+++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ppc64/asm.go @@ -0,0 +1,1762 @@ +// Inferno utils/5l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package ppc64 + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" + "encoding/binary" + "fmt" + "internal/buildcfg" + "log" + "strconv" + "strings" +) + +// The build configuration supports PC-relative instructions and relocations (limited to tested targets). +var hasPCrel = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux" + +const ( + // For genstub, the type of stub required by the caller. + STUB_TOC = iota + STUB_PCREL +) + +var stubStrs = []string{ + STUB_TOC: "_callstub_toc", + STUB_PCREL: "_callstub_pcrel", +} + +const ( + OP_TOCRESTORE = 0xe8410018 // ld r2,24(r1) + OP_TOCSAVE = 0xf8410018 // std r2,24(r1) + OP_NOP = 0x60000000 // nop + OP_BL = 0x48000001 // bl 0 + OP_BCTR = 0x4e800420 // bctr + OP_BCTRL = 0x4e800421 // bctrl + OP_BCL = 0x40000001 // bcl + OP_ADDI = 0x38000000 // addi + OP_ADDIS = 0x3c000000 // addis + OP_LD = 0xe8000000 // ld + OP_PLA_PFX = 0x06100000 // pla (prefix instruction word) + OP_PLA_SFX = 0x38000000 // pla (suffix instruction word) + OP_PLD_PFX_PCREL = 0x04100000 // pld (prefix instruction word, R=1) + OP_PLD_SFX = 0xe4000000 // pld (suffix instruction word) + OP_MFLR = 0x7c0802a6 // mflr + OP_MTLR = 0x7c0803a6 // mtlr + OP_MFCTR = 0x7c0902a6 // mfctr + OP_MTCTR = 0x7c0903a6 // mtctr + + OP_ADDIS_R12_R2 = OP_ADDIS | 12<<21 | 2<<16 // addis r12,r2,0 + OP_ADDIS_R12_R12 = OP_ADDIS | 12<<21 | 12<<16 // addis r12,r12,0 + OP_ADDI_R12_R12 = OP_ADDI | 12<<21 | 12<<16 // addi r12,r12,0 + OP_PLD_SFX_R12 = OP_PLD_SFX | 12<<21 // pld r12,0 (suffix instruction word) + OP_PLA_SFX_R12 = OP_PLA_SFX | 12<<21 // pla r12,0 (suffix instruction word) + OP_LIS_R12 = OP_ADDIS | 12<<21 // lis r12,0 + OP_LD_R12_R12 = OP_LD | 12<<21 | 12<<16 // ld r12,0(r12) + OP_MTCTR_R12 = OP_MTCTR | 12<<21 // mtctr r12 + OP_MFLR_R12 = OP_MFLR | 12<<21 // mflr r12 + OP_MFLR_R0 = OP_MFLR | 0<<21 // mflr r0 + OP_MTLR_R0 = OP_MTLR | 0<<21 // mtlr r0 + + // This is a 
special, preferred form of bcl to obtain the next + // instruction address (NIA, aka PC+4) in LR. + OP_BCL_NIA = OP_BCL | 20<<21 | 31<<16 | 1<<2 // bcl 20,31,$+4 + + // Masks to match opcodes + MASK_PLD_PFX = 0xfff70000 + MASK_PLD_SFX = 0xfc1f0000 // Also checks RA = 0 if check value is OP_PLD_SFX. + MASK_PLD_RT = 0x03e00000 // Extract RT from the pld suffix. + MASK_OP_LD = 0xfc000003 + MASK_OP_ADDIS = 0xfc000000 +) + +// Generate a stub to call between TOC and NOTOC functions. See genpltstub for more details about calling stubs. +// This is almost identical to genpltstub, except the location of the target symbol is known at link time. +func genstub(ctxt *ld.Link, ldr *loader.Loader, r loader.Reloc, ri int, s loader.Sym, stubType int) (ssym loader.Sym, firstUse bool) { + addendStr := "" + if r.Add() != 0 { + addendStr = fmt.Sprintf("%+d", r.Add()) + } + + stubName := fmt.Sprintf("%s%s.%s", stubStrs[stubType], addendStr, ldr.SymName(r.Sym())) + stub := ldr.CreateSymForUpdate(stubName, 0) + firstUse = stub.Size() == 0 + if firstUse { + switch stubType { + // A call from a function using a TOC pointer. + case STUB_TOC: + stub.AddUint32(ctxt.Arch, OP_TOCSAVE) // std r2,24(r1) + stub.AddSymRef(ctxt.Arch, r.Sym(), r.Add(), objabi.R_ADDRPOWER_TOCREL_DS, 8) + stub.SetUint32(ctxt.Arch, stub.Size()-8, OP_ADDIS_R12_R2) // addis r12,r2,targ@toc@ha + stub.SetUint32(ctxt.Arch, stub.Size()-4, OP_ADDI_R12_R12) // addi r12,targ@toc@l(r12) + + // A call from PC relative function. + case STUB_PCREL: + if buildcfg.GOPPC64 >= 10 { + // Set up address of targ in r12, PCrel + stub.AddSymRef(ctxt.Arch, r.Sym(), r.Add(), objabi.R_ADDRPOWER_PCREL34, 8) + stub.SetUint32(ctxt.Arch, stub.Size()-8, OP_PLA_PFX) + stub.SetUint32(ctxt.Arch, stub.Size()-4, OP_PLA_SFX_R12) // pla r12, r + } else { + // The target may not be a P10. Generate a P8 compatible stub. 
+ stub.AddUint32(ctxt.Arch, OP_MFLR_R0) // mflr r0 + stub.AddUint32(ctxt.Arch, OP_BCL_NIA) // bcl 20,31,1f + stub.AddUint32(ctxt.Arch, OP_MFLR_R12) // 1: mflr r12 (r12 is the address of this instruction) + stub.AddUint32(ctxt.Arch, OP_MTLR_R0) // mtlr r0 + stub.AddSymRef(ctxt.Arch, r.Sym(), r.Add()+8, objabi.R_ADDRPOWER_PCREL, 8) + stub.SetUint32(ctxt.Arch, stub.Size()-8, OP_ADDIS_R12_R12) // addis r12,(r - 1b) + 8 + stub.SetUint32(ctxt.Arch, stub.Size()-4, OP_ADDI_R12_R12) // addi r12,(r - 1b) + 12 + } + } + // Jump to the loaded pointer + stub.AddUint32(ctxt.Arch, OP_MTCTR_R12) // mtctr r12 + stub.AddUint32(ctxt.Arch, OP_BCTR) // bctr + stub.SetType(sym.STEXT) + } + + // Update the relocation to use the call stub + su := ldr.MakeSymbolUpdater(s) + su.SetRelocSym(ri, stub.Sym()) + + // Rewrite the TOC restore slot (a nop) if the caller uses a TOC pointer. + switch stubType { + case STUB_TOC: + rewritetoinsn(&ctxt.Target, ldr, su, int64(r.Off()+4), 0xFFFFFFFF, OP_NOP, OP_TOCRESTORE) + } + + return stub.Sym(), firstUse +} + +func genpltstub(ctxt *ld.Link, ldr *loader.Loader, r loader.Reloc, ri int, s loader.Sym) (sym loader.Sym, firstUse bool) { + // The ppc64 ABI PLT has similar concepts to other + // architectures, but is laid out quite differently. When we + // see a relocation to a dynamic symbol (indicating that the + // call needs to go through the PLT), we generate up to three + // stubs and reserve a PLT slot. + // + // 1) The call site is a "bl x" where genpltstub rewrites it to + // "bl x_stub". Depending on the properties of the caller + // (see ELFv2 1.5 4.2.5.3), a nop may be expected immediately + // after the bl. This nop is rewritten to ld r2,24(r1) to + // restore the toc pointer saved by x_stub. + // + // 2) We reserve space for a pointer in the .plt section (once + // per referenced dynamic function). .plt is a data + // section filled solely by the dynamic linker (more like + // .plt.got on other architectures). 
Initially, the + // dynamic linker will fill each slot with a pointer to the + // corresponding x@plt entry point. + // + // 3) We generate a "call stub" x_stub based on the properties + // of the caller. + // + // 4) We generate the "symbol resolver stub" x@plt (once per + // dynamic function). This is solely a branch to the glink + // resolver stub. + // + // 5) We generate the glink resolver stub (only once). This + // computes which symbol resolver stub we came through and + // invokes the dynamic resolver via a pointer provided by + // the dynamic linker. This will patch up the .plt slot to + // point directly at the function so future calls go + // straight from the call stub to the real function, and + // then call the function. + + // NOTE: It's possible we could make ppc64 closer to other + // architectures: ppc64's .plt is like .plt.got on other + // platforms and ppc64's .glink is like .plt on other + // platforms. + + // Find all relocations that reference dynamic imports. + // Reserve PLT entries for these symbols and generate call + // stubs. The call stubs need to live in .text, which is why we + // need to do this pass this early. + + // Reserve PLT entry and generate symbol resolver + addpltsym(ctxt, ldr, r.Sym()) + + // The stub types are described in gencallstub. + stubType := 0 + stubTypeStr := "" + + // For now, the choice of call stub type is determined by whether + // the caller maintains a TOC pointer in R2. A TOC pointer implies + // we can always generate a position independent stub. + // + // For dynamic calls made from an external object, a caller maintains + // a TOC pointer only when an R_PPC64_REL24 relocation is used. + // An R_PPC64_REL24_NOTOC relocation does not use or maintain + // a TOC pointer, and almost always implies a Power10 target. 
+ // + // For dynamic calls made from a Go caller, a TOC relative stub is + // always needed when a TOC pointer is maintained (specifically, if + // the Go caller is PIC, and cannot use PCrel instructions). + if (r.Type() == objabi.ElfRelocOffset+objabi.RelocType(elf.R_PPC64_REL24)) || (!ldr.AttrExternal(s) && ldr.AttrShared(s) && !hasPCrel) { + stubTypeStr = "_tocrel" + stubType = 1 + } else { + stubTypeStr = "_notoc" + stubType = 3 + } + n := fmt.Sprintf("_pltstub%s.%s", stubTypeStr, ldr.SymName(r.Sym())) + + // When internal linking, all text symbols share the same toc pointer. + stub := ldr.CreateSymForUpdate(n, 0) + firstUse = stub.Size() == 0 + if firstUse { + gencallstub(ctxt, ldr, stubType, stub, r.Sym()) + } + + // Update the relocation to use the call stub + su := ldr.MakeSymbolUpdater(s) + su.SetRelocSym(ri, stub.Sym()) + + // A type 1 call must restore the toc pointer after the call. + if stubType == 1 { + su.MakeWritable() + p := su.Data() + + // Check for a toc pointer restore slot (a nop), and rewrite to restore the toc pointer. + var nop uint32 + if len(p) >= int(r.Off()+8) { + nop = ctxt.Arch.ByteOrder.Uint32(p[r.Off()+4:]) + } + if nop != OP_NOP { + ldr.Errorf(s, "Symbol %s is missing toc restoration slot at offset %d", ldr.SymName(s), r.Off()+4) + } + ctxt.Arch.ByteOrder.PutUint32(p[r.Off()+4:], OP_TOCRESTORE) + } + + return stub.Sym(), firstUse +} + +// Scan relocs and generate PLT stubs and generate/fixup ABI defined functions created by the linker. +func genstubs(ctxt *ld.Link, ldr *loader.Loader) { + var stubs []loader.Sym + var abifuncs []loader.Sym + for _, s := range ctxt.Textp { + relocs := ldr.Relocs(s) + for i := 0; i < relocs.Count(); i++ { + switch r := relocs.At(i); r.Type() { + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL24), objabi.R_CALLPOWER: + switch ldr.SymType(r.Sym()) { + case sym.SDYNIMPORT: + // This call goes through the PLT, generate and call through a PLT stub. 
+ if sym, firstUse := genpltstub(ctxt, ldr, r, i, s); firstUse { + stubs = append(stubs, sym) + } + + case sym.SXREF: + // Is this an ELF ABI defined function which is (in practice) + // generated by the linker to save/restore callee save registers? + // These are defined similarly for both PPC64 ELF and ELFv2. + targName := ldr.SymName(r.Sym()) + if strings.HasPrefix(targName, "_save") || strings.HasPrefix(targName, "_rest") { + if sym, firstUse := rewriteABIFuncReloc(ctxt, ldr, targName, r); firstUse { + abifuncs = append(abifuncs, sym) + } + } + case sym.STEXT: + targ := r.Sym() + if (ldr.AttrExternal(targ) && ldr.SymLocalentry(targ) != 1) || !ldr.AttrExternal(targ) { + // All local symbols share the same TOC pointer. This caller has a valid TOC + // pointer in R2. Calls into a Go symbol preserve R2. No call stub is needed. + } else { + // This caller has a TOC pointer. The callee might clobber it. R2 needs to be saved + // and restored. + if sym, firstUse := genstub(ctxt, ldr, r, i, s, STUB_TOC); firstUse { + stubs = append(stubs, sym) + } + } + } + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL24_P9NOTOC): + // This can be treated identically to R_PPC64_REL24_NOTOC, as stubs are determined by + // GOPPC64 and -buildmode. + fallthrough + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL24_NOTOC): + switch ldr.SymType(r.Sym()) { + case sym.SDYNIMPORT: + // This call goes through the PLT, generate and call through a PLT stub. + if sym, firstUse := genpltstub(ctxt, ldr, r, i, s); firstUse { + stubs = append(stubs, sym) + } + + case sym.SXREF: + // TODO: This is not supported yet. + ldr.Errorf(s, "Unsupported NOTOC external reference call into %s", ldr.SymName(r.Sym())) + + case sym.STEXT: + targ := r.Sym() + if (ldr.AttrExternal(targ) && ldr.SymLocalentry(targ) <= 1) || (!ldr.AttrExternal(targ) && (!ldr.AttrShared(targ) || hasPCrel)) { + // This is NOTOC to NOTOC call (st_other is 0 or 1). No call stub is needed. 
+ } else {
+ // This is a NOTOC to TOC function. Generate a calling stub.
+ if sym, firstUse := genstub(ctxt, ldr, r, i, s, STUB_PCREL); firstUse {
+ stubs = append(stubs, sym)
+ }
+ }
+ }
+
+ // Handle objects compiled with -fno-plt. Rewrite local calls to avoid indirect calling.
+ // These are 0 sized relocs. They mark the mtctr r12, or bctrl + ld r2,24(r1).
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_PLTSEQ):
+ if ldr.SymType(r.Sym()) == sym.STEXT {
+ // This should be an mtctr instruction. Turn it into a nop.
+ su := ldr.MakeSymbolUpdater(s)
+ const MASK_OP_MTCTR = 63<<26 | 0x3FF<<11 | 0x1FF<<1
+ rewritetonop(&ctxt.Target, ldr, su, int64(r.Off()), MASK_OP_MTCTR, OP_MTCTR)
+ }
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_PLTCALL):
+ if ldr.SymType(r.Sym()) == sym.STEXT {
+ // This relocation should point to a bctrl followed by a ld r2, 24(r1)
+ // Convert the bctrl into a bl.
+ su := ldr.MakeSymbolUpdater(s)
+ rewritetoinsn(&ctxt.Target, ldr, su, int64(r.Off()), 0xFFFFFFFF, OP_BCTRL, OP_BL)
+
+ // Turn this reloc into an R_CALLPOWER, and convert the TOC restore into a nop.
+ su.SetRelocType(i, objabi.R_CALLPOWER)
+ localEoffset := int64(ldr.SymLocalentry(r.Sym()))
+ if localEoffset == 1 {
+ ldr.Errorf(s, "Unsupported NOTOC call to %s", ldr.SymName(r.Sym()))
+ }
+ su.SetRelocAdd(i, r.Add()+localEoffset)
+ r.SetSiz(4)
+ rewritetonop(&ctxt.Target, ldr, su, int64(r.Off()+4), 0xFFFFFFFF, OP_TOCRESTORE)
+ }
+ }
+ }
+ }
+
+ // Append any usage of the go versions of ELF save/restore
+ // functions to the end of the callstub list to minimize
+ // chances a trampoline might be needed.
+ stubs = append(stubs, abifuncs...)
+
+ // Put stubs at the beginning (instead of the end).
+ // So when resolving the relocations to calls to the stubs,
+ // the addresses are known and trampolines can be inserted
+ // when necessary.
+ ctxt.Textp = append(stubs, ctxt.Textp...)
+}
+
+func genaddmoduledata(ctxt *ld.Link, ldr *loader.Loader) {
+ initfunc, addmoduledata := ld.PrepareAddmoduledata(ctxt)
+ if initfunc == nil {
+ return
+ }
+
+ // o appends one 4-byte instruction word to the generated init function.
+ o := func(op uint32) {
+ initfunc.AddUint32(ctxt.Arch, op)
+ }
+
+ // Write a function to load this module's local.moduledata. This is shared code.
+ //
+ // package link
+ // void addmoduledata() {
+ // runtime.addmoduledata(local.moduledata)
+ // }
+
+ if !hasPCrel {
+ // Regenerate TOC from R12 (the address of this function).
+ sz := initfunc.AddSymRef(ctxt.Arch, ctxt.DotTOC[0], 0, objabi.R_ADDRPOWER_PCREL, 8)
+ initfunc.SetUint32(ctxt.Arch, sz-8, 0x3c4c0000) // addis r2, r12, .TOC.-func@ha
+ initfunc.SetUint32(ctxt.Arch, sz-4, 0x38420000) // addi r2, r2, .TOC.-func@l
+ }
+
+ // This is Go ABI. Stack a frame and save LR.
+ o(OP_MFLR_R0) // mflr r0
+ o(0xf801ffe1) // stdu r0, -32(r1)
+
+ // Get the moduledata pointer from GOT and put into R3.
+ var tgt loader.Sym
+ if s := ldr.Lookup("local.moduledata", 0); s != 0 {
+ tgt = s
+ } else if s := ldr.Lookup("local.pluginmoduledata", 0); s != 0 {
+ tgt = s
+ } else {
+ tgt = ldr.LookupOrCreateSym("runtime.firstmoduledata", 0)
+ }
+
+ if !hasPCrel {
+ sz := initfunc.AddSymRef(ctxt.Arch, tgt, 0, objabi.R_ADDRPOWER_GOT, 8)
+ initfunc.SetUint32(ctxt.Arch, sz-8, 0x3c620000) // addis r3, r2, local.moduledata@got@ha
+ initfunc.SetUint32(ctxt.Arch, sz-4, 0xe8630000) // ld r3, local.moduledata@got@l(r3)
+ } else {
+ sz := initfunc.AddSymRef(ctxt.Arch, tgt, 0, objabi.R_ADDRPOWER_GOT_PCREL34, 8)
+ // Note, this is a prefixed instruction. It must not cross a 64B boundary.
+ // It is doubleword aligned here, so it will never cross (this function is 16B aligned, minimum).
+ initfunc.SetUint32(ctxt.Arch, sz-8, OP_PLD_PFX_PCREL) + initfunc.SetUint32(ctxt.Arch, sz-4, OP_PLD_SFX|(3<<21)) // pld r3, local.moduledata@got@pcrel + } + + // Call runtime.addmoduledata + sz := initfunc.AddSymRef(ctxt.Arch, addmoduledata, 0, objabi.R_CALLPOWER, 4) + initfunc.SetUint32(ctxt.Arch, sz-4, OP_BL) // bl runtime.addmoduledata + o(OP_NOP) // nop (for TOC restore) + + // Pop stack frame and return. + o(0xe8010000) // ld r0, 0(r1) + o(OP_MTLR_R0) // mtlr r0 + o(0x38210020) // addi r1,r1,32 + o(0x4e800020) // blr +} + +// Rewrite ELF (v1 or v2) calls to _savegpr0_n, _savegpr1_n, _savefpr_n, _restfpr_n, _savevr_m, or +// _restvr_m (14<=n<=31, 20<=m<=31). Redirect them to runtime.elf_restgpr0+(n-14)*4, +// runtime.elf_restvr+(m-20)*8, and similar. +// +// These functions are defined in the ELFv2 ABI (generated when using gcc -Os option) to save and +// restore callee-saved registers (as defined in the PPC64 ELF ABIs) from registers n or m to 31 of +// the named type. R12 and R0 are sometimes used in exceptional ways described in the ABI. +// +// Final note, this is only needed when linking internally. The external linker will generate these +// functions if they are used. +func rewriteABIFuncReloc(ctxt *ld.Link, ldr *loader.Loader, tname string, r loader.Reloc) (sym loader.Sym, firstUse bool) { + s := strings.Split(tname, "_") + // A valid call will split like {"", "savegpr0", "20"} + if len(s) != 3 { + return 0, false // Not an abi func. + } + minReg := 14 // _savegpr0_{n}, _savegpr1_{n}, _savefpr_{n}, 14 <= n <= 31 + offMul := 4 // 1 instruction per register op. + switch s[1] { + case "savegpr0", "savegpr1", "savefpr": + case "restgpr0", "restgpr1", "restfpr": + case "savevr", "restvr": + minReg = 20 // _savevr_{n} or _restvr_{n}, 20 <= n <= 31 + offMul = 8 // 2 instructions per register op. 
+ default: + return 0, false // Not an abi func + } + n, e := strconv.Atoi(s[2]) + if e != nil || n < minReg || n > 31 || r.Add() != 0 { + return 0, false // Invalid register number, or non-zero addend. Not an abi func. + } + + // tname is a valid relocation to an ABI defined register save/restore function. Re-relocate + // them to a go version of these functions in runtime/asm_ppc64x.s + ts := ldr.LookupOrCreateSym("runtime.elf_"+s[1], 0) + r.SetSym(ts) + r.SetAdd(int64((n - minReg) * offMul)) + firstUse = !ldr.AttrReachable(ts) + if firstUse { + // This function only becomes reachable now. It has been dropped from + // the text section (it was unreachable until now), it needs included. + ldr.SetAttrReachable(ts, true) + } + return ts, firstUse +} + +func gentext(ctxt *ld.Link, ldr *loader.Loader) { + if ctxt.DynlinkingGo() { + genaddmoduledata(ctxt, ldr) + } + + if ctxt.LinkMode == ld.LinkInternal { + genstubs(ctxt, ldr) + } +} + +// Create a calling stub. The stubType maps directly to the properties listed in the ELFv2 1.5 +// section 4.2.5.3. +// +// There are 3 cases today (as paraphrased from the ELFv2 document): +// +// 1. R2 holds the TOC pointer on entry. The call stub must save R2 into the ELFv2 TOC stack save slot. +// +// 2. R2 holds the TOC pointer on entry. The caller has already saved R2 to the TOC stack save slot. +// +// 3. R2 does not hold the TOC pointer on entry. The caller has no expectations of R2. +// +// Go only needs case 1 and 3 today. Go symbols which have AttrShare set could use case 2, but case 1 always +// works in those cases too. +func gencallstub(ctxt *ld.Link, ldr *loader.Loader, stubType int, stub *loader.SymbolBuilder, targ loader.Sym) { + plt := ctxt.PLT + stub.SetType(sym.STEXT) + + switch stubType { + case 1: + // Save TOC, then load targ address from PLT using TOC. 
+ stub.AddUint32(ctxt.Arch, OP_TOCSAVE) // std r2,24(r1) + stub.AddSymRef(ctxt.Arch, plt, int64(ldr.SymPlt(targ)), objabi.R_ADDRPOWER_TOCREL_DS, 8) + stub.SetUint32(ctxt.Arch, stub.Size()-8, OP_ADDIS_R12_R2) // addis r12,r2,targ@plt@toc@ha + stub.SetUint32(ctxt.Arch, stub.Size()-4, OP_LD_R12_R12) // ld r12,targ@plt@toc@l(r12) + case 3: + // No TOC needs to be saved, but the stub may need to position-independent. + if buildcfg.GOPPC64 >= 10 { + // Power10 is supported, load targ address into r12 using PCrel load. + stub.AddSymRef(ctxt.Arch, plt, int64(ldr.SymPlt(targ)), objabi.R_ADDRPOWER_PCREL34, 8) + stub.SetUint32(ctxt.Arch, stub.Size()-8, OP_PLD_PFX_PCREL) + stub.SetUint32(ctxt.Arch, stub.Size()-4, OP_PLD_SFX_R12) // pld r12, targ@plt + } else if !isLinkingPIC(ctxt) { + // This stub doesn't need to be PIC. Load targ address from the PLT via its absolute address. + stub.AddSymRef(ctxt.Arch, plt, int64(ldr.SymPlt(targ)), objabi.R_ADDRPOWER_DS, 8) + stub.SetUint32(ctxt.Arch, stub.Size()-8, OP_LIS_R12) // lis r12,targ@plt@ha + stub.SetUint32(ctxt.Arch, stub.Size()-4, OP_LD_R12_R12) // ld r12,targ@plt@l(r12) + } else { + // Generate a PIC stub. This is ugly as the stub must determine its location using + // POWER8 or older instruction. These stubs are likely the combination of using + // GOPPC64 < 8 and linking external objects built with CFLAGS="... -mcpu=power10 ..." 
+ stub.AddUint32(ctxt.Arch, OP_MFLR_R0) // mflr r0 + stub.AddUint32(ctxt.Arch, OP_BCL_NIA) // bcl 20,31,1f + stub.AddUint32(ctxt.Arch, OP_MFLR_R12) // 1: mflr r12 (r12 is the address of this instruction) + stub.AddUint32(ctxt.Arch, OP_MTLR_R0) // mtlr r0 + stub.AddSymRef(ctxt.Arch, plt, int64(ldr.SymPlt(targ))+8, objabi.R_ADDRPOWER_PCREL, 8) + stub.SetUint32(ctxt.Arch, stub.Size()-8, OP_ADDIS_R12_R12) // addis r12,(targ@plt - 1b) + 8 + stub.SetUint32(ctxt.Arch, stub.Size()-4, OP_ADDI_R12_R12) // addi r12,(targ@plt - 1b) + 12 + stub.AddUint32(ctxt.Arch, OP_LD_R12_R12) // ld r12, 0(r12) + } + default: + log.Fatalf("gencallstub does not support ELFv2 ABI property %d", stubType) + } + + // Jump to the loaded pointer + stub.AddUint32(ctxt.Arch, OP_MTCTR_R12) // mtctr r12 + stub.AddUint32(ctxt.Arch, OP_BCTR) // bctr +} + +// Rewrite the instruction at offset into newinsn. Also, verify the +// existing instruction under mask matches the check value. +func rewritetoinsn(target *ld.Target, ldr *loader.Loader, su *loader.SymbolBuilder, offset int64, mask, check, newinsn uint32) { + su.MakeWritable() + op := target.Arch.ByteOrder.Uint32(su.Data()[offset:]) + if op&mask != check { + ldr.Errorf(su.Sym(), "Rewrite offset 0x%x to 0x%08X failed check (0x%08X&0x%08X != 0x%08X)", offset, newinsn, op, mask, check) + } + su.SetUint32(target.Arch, offset, newinsn) +} + +// Rewrite the instruction at offset into a hardware nop instruction. Also, verify the +// existing instruction under mask matches the check value. 
+func rewritetonop(target *ld.Target, ldr *loader.Loader, su *loader.SymbolBuilder, offset int64, mask, check uint32) { + rewritetoinsn(target, ldr, su, offset, mask, check, OP_NOP) +} + +func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { + if target.IsElf() { + return addelfdynrel(target, ldr, syms, s, r, rIdx) + } else if target.IsAIX() { + return ld.Xcoffadddynrel(target, ldr, syms, s, r, rIdx) + } + return false +} + +func addelfdynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { + targ := r.Sym() + var targType sym.SymKind + if targ != 0 { + targType = ldr.SymType(targ) + } + + switch r.Type() { + default: + if r.Type() >= objabi.ElfRelocOffset { + ldr.Errorf(s, "unexpected relocation type %d (%s)", r.Type(), sym.RelocName(target.Arch, r.Type())) + return false + } + + // Handle relocations found in ELF object files. + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL24_NOTOC), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL24_P9NOTOC): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_CALLPOWER) + + if targType == sym.SDYNIMPORT { + // Should have been handled in elfsetupplt + ldr.Errorf(s, "unexpected R_PPC64_REL24_NOTOC/R_PPC64_REL24_P9NOTOC for dyn import") + } + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL24): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_CALLPOWER) + + // This is a local call, so the caller isn't setting + // up r12 and r2 is the same for the caller and + // callee. Hence, we need to go to the local entry + // point. (If we don't do this, the callee will try + // to use r12 to compute r2.) 
+ // A local entry value of 1 is reserved and cannot be used here; report it using the
+ // symbol's name (targ itself is an integer loader.Sym index, not printable with %s —
+ // the same message in genstubs formats it via ldr.SymName).
+ localEoffset := int64(ldr.SymLocalentry(targ))
+ if localEoffset == 1 {
+ ldr.Errorf(s, "Unsupported NOTOC call to %s", ldr.SymName(targ))
+ }
+ su.SetRelocAdd(rIdx, r.Add()+localEoffset)
+
+ if targType == sym.SDYNIMPORT {
+ // Should have been handled in genstubs
+ ldr.Errorf(s, "unexpected R_PPC64_REL24 for dyn import")
+ }
+
+ return true
+
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_PCREL34):
+ su := ldr.MakeSymbolUpdater(s)
+ su.SetRelocType(rIdx, objabi.R_ADDRPOWER_PCREL34)
+ return true
+
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_GOT_PCREL34):
+ su := ldr.MakeSymbolUpdater(s)
+ su.SetRelocType(rIdx, objabi.R_ADDRPOWER_PCREL34)
+ if targType != sym.STEXT {
+ ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_PPC64_GLOB_DAT))
+ su.SetRelocSym(rIdx, syms.GOT)
+ su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ)))
+ } else {
+ // The address of targ is known at link time. Rewrite to "pla rt,targ" from "pld rt,targ@got"
+ rewritetoinsn(target, ldr, su, int64(r.Off()), MASK_PLD_PFX, OP_PLD_PFX_PCREL, OP_PLA_PFX)
+ pla_sfx := target.Arch.ByteOrder.Uint32(su.Data()[r.Off()+4:])&MASK_PLD_RT | OP_PLA_SFX
+ rewritetoinsn(target, ldr, su, int64(r.Off()+4), MASK_PLD_SFX, OP_PLD_SFX, pla_sfx)
+ }
+ return true
+
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC_REL32):
+ su := ldr.MakeSymbolUpdater(s)
+ su.SetRelocType(rIdx, objabi.R_PCREL)
+ su.SetRelocAdd(rIdx, r.Add()+4)
+
+ if targType == sym.SDYNIMPORT {
+ ldr.Errorf(s, "unexpected R_PPC_REL32 for dyn import")
+ }
+
+ return true
+
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_ADDR64):
+ su := ldr.MakeSymbolUpdater(s)
+ su.SetRelocType(rIdx, objabi.R_ADDR)
+ if targType == sym.SDYNIMPORT {
+ // These happen in .toc sections
+ ld.Adddynsym(ldr, target, syms, targ)
+
+ rela := ldr.MakeSymbolUpdater(syms.Rela)
+ rela.AddAddrPlus(target.Arch, s, int64(r.Off()))
+ rela.AddUint64(target.Arch, elf.R_INFO(uint32(ldr.SymDynid(targ)), uint32(elf.R_PPC64_ADDR64)))
+ rela.AddUint64(target.Arch, uint64(r.Add()))
+ su.SetRelocType(rIdx, objabi.ElfRelocOffset) // ignore during relocsym
+ } else if target.IsPIE() && target.IsInternal() {
+ // For internal linking PIE, this R_ADDR relocation cannot
+ // be resolved statically. We need to generate a dynamic
+ // relocation. Let the code below handle it.
+ break
+ }
+ return true
+
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_TOC16):
+ su := ldr.MakeSymbolUpdater(s)
+ su.SetRelocType(rIdx, objabi.R_POWER_TOC)
+ ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_LO|sym.RV_CHECK_OVERFLOW)
+ return true
+
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_TOC16_LO):
+ su := ldr.MakeSymbolUpdater(s)
+ su.SetRelocType(rIdx, objabi.R_POWER_TOC)
+ ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_LO)
+ return true
+
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_TOC16_HA):
+ su := ldr.MakeSymbolUpdater(s)
+ su.SetRelocType(rIdx, objabi.R_POWER_TOC)
+ ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_HA|sym.RV_CHECK_OVERFLOW)
+ return true
+
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_TOC16_HI):
+ su := ldr.MakeSymbolUpdater(s)
+ su.SetRelocType(rIdx, objabi.R_POWER_TOC)
+ ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_HI|sym.RV_CHECK_OVERFLOW)
+ return true
+
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_TOC16_DS):
+ su := ldr.MakeSymbolUpdater(s)
+ su.SetRelocType(rIdx, objabi.R_POWER_TOC)
+ ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_DS|sym.RV_CHECK_OVERFLOW)
+ return true
+
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_TOC16_LO_DS):
+ su := ldr.MakeSymbolUpdater(s)
+ su.SetRelocType(rIdx, objabi.R_POWER_TOC)
+ ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_DS)
+ return true
+
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL16_LO):
+ su := ldr.MakeSymbolUpdater(s)
+ su.SetRelocType(rIdx, objabi.R_PCREL)
+ ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_LO)
+ su.SetRelocAdd(rIdx, r.Add()+2) // Compensate for relocation size of 2
+ return true
+
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL16_HI):
+ su := ldr.MakeSymbolUpdater(s)
+ su.SetRelocType(rIdx, objabi.R_PCREL)
+ ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_HI|sym.RV_CHECK_OVERFLOW)
+ su.SetRelocAdd(rIdx, r.Add()+2)
+ return true
+
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL16_HA):
+ su := ldr.MakeSymbolUpdater(s)
+ su.SetRelocType(rIdx, objabi.R_PCREL)
+ ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_HA|sym.RV_CHECK_OVERFLOW)
+ su.SetRelocAdd(rIdx, r.Add()+2)
+ return true
+
+ // When compiling with gcc's -fno-plt option (no PLT), the following code and relocation
+ // sequences may be present to call an external function:
+ //
+ // 1. addis Rx,foo@R_PPC64_PLT16_HA
+ // 2. ld 12,foo@R_PPC64_PLT16_LO_DS(Rx)
+ // 3. mtctr 12 ; foo@R_PPC64_PLTSEQ
+ // 4. bctrl ; foo@R_PPC64_PLTCALL
+ // 5. ld r2,24(r1)
+ //
+ // Note, 5 is required to follow the R_PPC64_PLTCALL. Similarly, relocations targeting
+ // instructions 3 and 4 are zero sized informational relocations.
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_PLT16_HA),
+ objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_PLT16_LO_DS):
+ su := ldr.MakeSymbolUpdater(s)
+ isPLT16_LO_DS := r.Type() == objabi.ElfRelocOffset+objabi.RelocType(elf.R_PPC64_PLT16_LO_DS)
+ if isPLT16_LO_DS {
+ ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_DS)
+ } else {
+ ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_HA|sym.RV_CHECK_OVERFLOW)
+ }
+ su.SetRelocType(rIdx, objabi.R_POWER_TOC)
+ if targType == sym.SDYNIMPORT {
+ // This is an external symbol, make space in the GOT and retarget the reloc.
+ ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_PPC64_GLOB_DAT)) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ))) + } else if targType == sym.STEXT { + if isPLT16_LO_DS { + // Expect an ld opcode to nop + rewritetonop(target, ldr, su, int64(r.Off()), MASK_OP_LD, OP_LD) + } else { + // Expect an addis opcode to nop + rewritetonop(target, ldr, su, int64(r.Off()), MASK_OP_ADDIS, OP_ADDIS) + } + // And we can ignore this reloc now. + su.SetRelocType(rIdx, objabi.ElfRelocOffset) + } else { + ldr.Errorf(s, "unexpected PLT relocation target symbol type %s", targType.String()) + } + return true + } + + // Handle references to ELF symbols from our own object files. + relocs := ldr.Relocs(s) + r = relocs.At(rIdx) + + switch r.Type() { + case objabi.R_ADDR: + if ldr.SymType(s) == sym.STEXT { + log.Fatalf("R_ADDR relocation in text symbol %s is unsupported\n", ldr.SymName(s)) + } + if target.IsPIE() && target.IsInternal() { + // When internally linking, generate dynamic relocations + // for all typical R_ADDR relocations. The exception + // are those R_ADDR that are created as part of generating + // the dynamic relocations and must be resolved statically. + // + // There are three phases relevant to understanding this: + // + // dodata() // we are here + // address() // symbol address assignment + // reloc() // resolution of static R_ADDR relocs + // + // At this point symbol addresses have not been + // assigned yet (as the final size of the .rela section + // will affect the addresses), and so we cannot write + // the Elf64_Rela.r_offset now. Instead we delay it + // until after the 'address' phase of the linker is + // complete. We do this via Addaddrplus, which creates + // a new R_ADDR relocation which will be resolved in + // the 'reloc' phase. 
+ // + // These synthetic static R_ADDR relocs must be skipped + // now, or else we will be caught in an infinite loop + // of generating synthetic relocs for our synthetic + // relocs. + // + // Furthermore, the rela sections contain dynamic + // relocations with R_ADDR relocations on + // Elf64_Rela.r_offset. This field should contain the + // symbol offset as determined by reloc(), not the + // final dynamically linked address as a dynamic + // relocation would provide. + switch ldr.SymName(s) { + case ".dynsym", ".rela", ".rela.plt", ".got.plt", ".dynamic": + return false + } + } else { + // Either internally linking a static executable, + // in which case we can resolve these relocations + // statically in the 'reloc' phase, or externally + // linking, in which case the relocation will be + // prepared in the 'reloc' phase and passed to the + // external linker in the 'asmb' phase. + if ldr.SymType(s) != sym.SDATA && ldr.SymType(s) != sym.SRODATA { + break + } + } + // Generate R_PPC64_RELATIVE relocations for best + // efficiency in the dynamic linker. + // + // As noted above, symbol addresses have not been + // assigned yet, so we can't generate the final reloc + // entry yet. We ultimately want: + // + // r_offset = s + r.Off + // r_info = R_PPC64_RELATIVE + // r_addend = targ + r.Add + // + // The dynamic linker will set *offset = base address + + // addend. + // + // AddAddrPlus is used for r_offset and r_addend to + // generate new R_ADDR relocations that will update + // these fields in the 'reloc' phase. + rela := ldr.MakeSymbolUpdater(syms.Rela) + rela.AddAddrPlus(target.Arch, s, int64(r.Off())) + if r.Siz() == 8 { + rela.AddUint64(target.Arch, elf.R_INFO(0, uint32(elf.R_PPC64_RELATIVE))) + } else { + ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ)) + } + rela.AddAddrPlus(target.Arch, targ, int64(r.Add())) + + // Not mark r done here. 
So we still apply it statically, + // so in the file content we'll also have the right offset + // to the relocation target. So it can be examined statically + // (e.g. go version). + return true + } + + return false +} + +func xcoffreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, sectoff int64) bool { + rs := r.Xsym + + emitReloc := func(v uint16, off uint64) { + out.Write64(uint64(sectoff) + off) + out.Write32(uint32(ldr.SymDynid(rs))) + out.Write16(v) + } + + var v uint16 + switch r.Type { + default: + return false + case objabi.R_ADDR, objabi.R_DWARFSECREF: + v = ld.XCOFF_R_POS + if r.Size == 4 { + v |= 0x1F << 8 + } else { + v |= 0x3F << 8 + } + emitReloc(v, 0) + case objabi.R_ADDRPOWER_TOCREL: + case objabi.R_ADDRPOWER_TOCREL_DS: + emitReloc(ld.XCOFF_R_TOCU|(0x0F<<8), 2) + emitReloc(ld.XCOFF_R_TOCL|(0x0F<<8), 6) + case objabi.R_POWER_TLS_LE: + // This only supports 16b relocations. It is fixed up in archreloc. + emitReloc(ld.XCOFF_R_TLS_LE|0x0F<<8, 2) + case objabi.R_CALLPOWER: + if r.Size != 4 { + return false + } + emitReloc(ld.XCOFF_R_RBR|0x19<<8, 0) + case objabi.R_XCOFFREF: + emitReloc(ld.XCOFF_R_REF|0x3F<<8, 0) + } + return true +} + +func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool { + // Beware that bit0~bit15 start from the third byte of an instruction in Big-Endian machines. 
+ rt := r.Type + if rt == objabi.R_ADDR || rt == objabi.R_POWER_TLS || rt == objabi.R_CALLPOWER || rt == objabi.R_DWARFSECREF { + } else { + if ctxt.Arch.ByteOrder == binary.BigEndian { + sectoff += 2 + } + } + out.Write64(uint64(sectoff)) + + elfsym := ld.ElfSymForReloc(ctxt, r.Xsym) + switch rt { + default: + return false + case objabi.R_ADDR, objabi.R_DWARFSECREF: + switch r.Size { + case 4: + out.Write64(uint64(elf.R_PPC64_ADDR32) | uint64(elfsym)<<32) + case 8: + out.Write64(uint64(elf.R_PPC64_ADDR64) | uint64(elfsym)<<32) + default: + return false + } + case objabi.R_ADDRPOWER_D34: + out.Write64(uint64(elf.R_PPC64_D34) | uint64(elfsym)<<32) + case objabi.R_ADDRPOWER_PCREL34: + out.Write64(uint64(elf.R_PPC64_PCREL34) | uint64(elfsym)<<32) + case objabi.R_POWER_TLS: + out.Write64(uint64(elf.R_PPC64_TLS) | uint64(elfsym)<<32) + case objabi.R_POWER_TLS_LE: + out.Write64(uint64(elf.R_PPC64_TPREL16_HA) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + out.Write64(uint64(elf.R_PPC64_TPREL16_LO) | uint64(elfsym)<<32) + case objabi.R_POWER_TLS_LE_TPREL34: + out.Write64(uint64(elf.R_PPC64_TPREL34) | uint64(elfsym)<<32) + case objabi.R_POWER_TLS_IE_PCREL34: + out.Write64(uint64(elf.R_PPC64_GOT_TPREL_PCREL34) | uint64(elfsym)<<32) + case objabi.R_POWER_TLS_IE: + out.Write64(uint64(elf.R_PPC64_GOT_TPREL16_HA) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + out.Write64(uint64(elf.R_PPC64_GOT_TPREL16_LO_DS) | uint64(elfsym)<<32) + case objabi.R_ADDRPOWER: + out.Write64(uint64(elf.R_PPC64_ADDR16_HA) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + out.Write64(uint64(elf.R_PPC64_ADDR16_LO) | uint64(elfsym)<<32) + case objabi.R_ADDRPOWER_DS: + out.Write64(uint64(elf.R_PPC64_ADDR16_HA) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + out.Write64(uint64(elf.R_PPC64_ADDR16_LO_DS) | uint64(elfsym)<<32) + case 
objabi.R_ADDRPOWER_GOT: + out.Write64(uint64(elf.R_PPC64_GOT16_HA) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + out.Write64(uint64(elf.R_PPC64_GOT16_LO_DS) | uint64(elfsym)<<32) + case objabi.R_ADDRPOWER_GOT_PCREL34: + out.Write64(uint64(elf.R_PPC64_GOT_PCREL34) | uint64(elfsym)<<32) + case objabi.R_ADDRPOWER_PCREL: + out.Write64(uint64(elf.R_PPC64_REL16_HA) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + out.Write64(uint64(elf.R_PPC64_REL16_LO) | uint64(elfsym)<<32) + r.Xadd += 4 + case objabi.R_ADDRPOWER_TOCREL: + out.Write64(uint64(elf.R_PPC64_TOC16_HA) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + out.Write64(uint64(elf.R_PPC64_TOC16_LO) | uint64(elfsym)<<32) + case objabi.R_ADDRPOWER_TOCREL_DS: + out.Write64(uint64(elf.R_PPC64_TOC16_HA) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + out.Write64(uint64(elf.R_PPC64_TOC16_LO_DS) | uint64(elfsym)<<32) + case objabi.R_CALLPOWER: + if r.Size != 4 { + return false + } + if !hasPCrel { + out.Write64(uint64(elf.R_PPC64_REL24) | uint64(elfsym)<<32) + } else { + // TOC is not used in PCrel compiled Go code. + out.Write64(uint64(elf.R_PPC64_REL24_NOTOC) | uint64(elfsym)<<32) + } + + } + out.Write64(uint64(r.Xadd)) + + return true +} + +func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, got *loader.SymbolBuilder, dynamic loader.Sym) { + if plt.Size() == 0 { + // The dynamic linker stores the address of the + // dynamic resolver and the DSO identifier in the two + // doublewords at the beginning of the .plt section + // before the PLT array. Reserve space for these. + plt.SetSize(16) + } +} + +func machoreloc1(*sys.Arch, *ld.OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool { + return false +} + +// Return the value of .TOC. 
for symbol s
+func symtoc(ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym) int64 {
+ v := ldr.SymVersion(s)
+ if out := ldr.OuterSym(s); out != 0 {
+ v = ldr.SymVersion(out)
+ }
+
+ toc := syms.DotTOC[v]
+ if toc == 0 {
+ ldr.Errorf(s, "TOC-relative relocation in object without .TOC.")
+ return 0
+ }
+
+ return ldr.SymValue(toc)
+}
+
+// archreloctoc relocates a TOC relative symbol.
+func archreloctoc(ldr *loader.Loader, target *ld.Target, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) int64 {
+ rs := r.Sym()
+ var o1, o2 uint32
+ var t int64
+ useAddi := false
+
+ if target.IsBigEndian() {
+ o1 = uint32(val >> 32)
+ o2 = uint32(val)
+ } else {
+ o1 = uint32(val)
+ o2 = uint32(val >> 32)
+ }
+
+ // On AIX, TOC data accesses are always made indirectly against R2 (a sequence of addis+ld+load/store). If the
+ // target of the load is known, the sequence can be written into addis+addi+load/store. On Linux,
+ // TOC data accesses are always made directly against R2 (e.g addis+load/store).
+ if target.IsAIX() { + if !strings.HasPrefix(ldr.SymName(rs), "TOC.") { + ldr.Errorf(s, "archreloctoc called for a symbol without TOC anchor") + } + relocs := ldr.Relocs(rs) + tarSym := relocs.At(0).Sym() + + if target.IsInternal() && tarSym != 0 && ldr.AttrReachable(tarSym) && ldr.SymSect(tarSym).Seg == &ld.Segdata { + t = ldr.SymValue(tarSym) + r.Add() - ldr.SymValue(syms.TOC) + // change ld to addi in the second instruction + o2 = (o2 & 0x03FF0000) | 0xE<<26 + useAddi = true + } else { + t = ldr.SymValue(rs) + r.Add() - ldr.SymValue(syms.TOC) + } + } else { + t = ldr.SymValue(rs) + r.Add() - symtoc(ldr, syms, s) + } + + if t != int64(int32(t)) { + ldr.Errorf(s, "TOC relocation for %s is too big to relocate %s: 0x%x", ldr.SymName(s), rs, t) + } + + if t&0x8000 != 0 { + t += 0x10000 + } + + o1 |= uint32((t >> 16) & 0xFFFF) + + switch r.Type() { + case objabi.R_ADDRPOWER_TOCREL_DS: + if useAddi { + o2 |= uint32(t) & 0xFFFF + } else { + if t&3 != 0 { + ldr.Errorf(s, "bad DS reloc for %s: %d", ldr.SymName(s), ldr.SymValue(rs)) + } + o2 |= uint32(t) & 0xFFFC + } + case objabi.R_ADDRPOWER_TOCREL: + o2 |= uint32(t) & 0xffff + default: + return -1 + } + + if target.IsBigEndian() { + return int64(o1)<<32 | int64(o2) + } + return int64(o2)<<32 | int64(o1) +} + +// archrelocaddr relocates a symbol address. +// This code is for linux only. +func archrelocaddr(ldr *loader.Loader, target *ld.Target, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) int64 { + rs := r.Sym() + if target.IsAIX() { + ldr.Errorf(s, "archrelocaddr called for %s relocation\n", ldr.SymName(rs)) + } + o1, o2 := unpackInstPair(target, val) + + // Verify resulting address fits within a 31 bit (2GB) address space. + // This is a restriction arising from the usage of lis (HA) + d-form + // (LO) instruction sequences used to implement absolute relocations + // on PPC64 prior to ISA 3.1 (P10). For consistency, maintain this + // restriction for ISA 3.1 unless it becomes problematic. 
+	t := ldr.SymAddr(rs) + r.Add()
+	if t < 0 || t >= 1<<31 {
+		ldr.Errorf(s, "relocation for %s is too big (>=2G): 0x%x", ldr.SymName(s), ldr.SymValue(rs))
+	}
+
+	// Note, relocations imported from external objects may not have cleared bits
+	// within a relocatable field. They need cleared before applying the relocation.
+	switch r.Type() {
+	case objabi.R_ADDRPOWER_PCREL34:
+		// S + A - P
+		t -= (ldr.SymValue(s) + int64(r.Off()))
+		o1 &^= 0x3ffff
+		o2 &^= 0x0ffff
+		o1 |= computePrefix34HI(t)
+		o2 |= computeLO(int32(t))
+	case objabi.R_ADDRPOWER_D34:
+		o1 &^= 0x3ffff
+		o2 &^= 0x0ffff
+		o1 |= computePrefix34HI(t)
+		o2 |= computeLO(int32(t))
+	case objabi.R_ADDRPOWER:
+		o1 &^= 0xffff
+		o2 &^= 0xffff
+		o1 |= computeHA(int32(t))
+		o2 |= computeLO(int32(t))
+	case objabi.R_ADDRPOWER_DS:
+		o1 &^= 0xffff
+		o2 &^= 0xfffc
+		o1 |= computeHA(int32(t))
+		o2 |= computeLO(int32(t))
+		if t&3 != 0 {
+			ldr.Errorf(s, "bad DS reloc for %s: %d", ldr.SymName(s), ldr.SymValue(rs))
+		}
+	default:
+		return -1
+	}
+
+	return packInstPair(target, o1, o2)
+}
+
+// Determine if the code was compiled so that the TOC register R2 is initialized and maintained.
+func r2Valid(ctxt *ld.Link) bool {
+	return isLinkingPIC(ctxt)
+}
+
+// Determine if this is linking a position-independent binary.
+func isLinkingPIC(ctxt *ld.Link) bool {
+	switch ctxt.BuildMode {
+	case ld.BuildModeCArchive, ld.BuildModeCShared, ld.BuildModePIE, ld.BuildModeShared, ld.BuildModePlugin:
+		return true
+	}
+	// -linkshared option
+	return ctxt.IsSharedGoLink()
+}
+
+// resolve direct jump relocation r in s, and add trampoline if necessary.
+func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) {
+
+	// Trampolines are created if the branch offset is too large and the linker cannot insert a call stub to handle it.
+	// For internal linking, trampolines are always created for long calls.
+	// For external linking, the linker can insert a call stub to handle a long call, but depends on having the TOC address in
+	// r2. For those build modes with external linking where the TOC address is not maintained in r2, trampolines must be created.
+	if ctxt.IsExternal() && r2Valid(ctxt) {
+		// The TOC pointer is valid. The external linker will insert trampolines.
+		return
+	}
+
+	relocs := ldr.Relocs(s)
+	r := relocs.At(ri)
+	var t int64
+	// ldr.SymValue(rs) == 0 indicates a cross-package jump to a function that is not yet
+	// laid out. Conservatively use a trampoline. This should be rare, as we lay out packages
+	// in dependency order.
+	if ldr.SymValue(rs) != 0 {
+		t = ldr.SymValue(rs) + r.Add() - (ldr.SymValue(s) + int64(r.Off()))
+	}
+	switch r.Type() {
+	case objabi.R_CALLPOWER:
+
+		// If branch offset is too far then create a trampoline.
+
+		if (ctxt.IsExternal() && ldr.SymSect(s) != ldr.SymSect(rs)) || (ctxt.IsInternal() && int64(int32(t<<6)>>6) != t) || ldr.SymValue(rs) == 0 || (*ld.FlagDebugTramp > 1 && ldr.SymPkg(s) != ldr.SymPkg(rs)) {
+			var tramp loader.Sym
+			for i := 0; ; i++ {
+
+				// Using r.Add as part of the name is significant in functions like duffzero where the call
+				// target is at some offset within the function. Calls to duff+8 and duff+256 must appear as
+				// distinct trampolines.
+
+				oName := ldr.SymName(rs)
+				name := oName
+				if r.Add() == 0 {
+					name += fmt.Sprintf("-tramp%d", i)
+				} else {
+					name += fmt.Sprintf("%+x-tramp%d", r.Add(), i)
+				}
+
+				// Look up the trampoline in case it already exists
+
+				tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs)))
+				if oName == "runtime.deferreturn" {
+					ldr.SetIsDeferReturnTramp(tramp, true)
+				}
+				if ldr.SymValue(tramp) == 0 {
+					break
+				}
+				// Note, the trampoline is always called directly. The addend of the original relocation is accounted for in the
+				// trampoline itself.
+				t = ldr.SymValue(tramp) - (ldr.SymValue(s) + int64(r.Off()))
+
+				// With internal linking, the trampoline can be used if it is not too far.
+				// With external linking, the trampoline must be in this section for it to be reused.
+				if (ctxt.IsInternal() && int64(int32(t<<6)>>6) == t) || (ctxt.IsExternal() && ldr.SymSect(s) == ldr.SymSect(tramp)) {
+					break
+				}
+			}
+			if ldr.SymType(tramp) == 0 {
+				trampb := ldr.MakeSymbolUpdater(tramp)
+				ctxt.AddTramp(trampb)
+				gentramp(ctxt, ldr, trampb, rs, r.Add())
+			}
+			sb := ldr.MakeSymbolUpdater(s)
+			relocs := sb.Relocs()
+			r := relocs.At(ri)
+			r.SetSym(tramp)
+			r.SetAdd(0) // This was folded into the trampoline target address
+		}
+	default:
+		ctxt.Errorf(s, "trampoline called with non-jump reloc: %d (%s)", r.Type(), sym.RelocName(ctxt.Arch, r.Type()))
+	}
+}
+// gentramp generates a 16 byte (4 instruction) trampoline in tramp which jumps to target+offset via R12.
+func gentramp(ctxt *ld.Link, ldr *loader.Loader, tramp *loader.SymbolBuilder, target loader.Sym, offset int64) {
+	tramp.SetSize(16) // 4 instructions
+	P := make([]byte, tramp.Size())
+	var o1, o2 uint32
+
+	// ELFv2 save/restore functions use R0/R12 in special ways, therefore trampolines
+	// as generated here will not always work correctly.
+	if strings.HasPrefix(ldr.SymName(target), "runtime.elf_") {
+		log.Fatalf("Internal linker does not support trampolines to ELFv2 ABI"+
+			" register save/restore function %s", ldr.SymName(target))
+	}
+
+	if ctxt.IsAIX() {
+		// On AIX, the address is retrieved with a TOC symbol.
+		// For internal linking, the "Linux" way might still be used.
+		// However, all text symbols are accessed with a TOC symbol as
+		// text relocations aren't supposed to be possible.
+		// So, keep using the external linking way to be more AIX friendly.
+		o1 = uint32(OP_ADDIS_R12_R2) // addis r12, r2, toctargetaddr hi
+		o2 = uint32(OP_LD_R12_R12)   // ld r12, r12, toctargetaddr lo
+
+		toctramp := ldr.CreateSymForUpdate("TOC."+ldr.SymName(tramp.Sym()), 0)
+		toctramp.SetType(sym.SXCOFFTOC)
+		toctramp.AddAddrPlus(ctxt.Arch, target, offset)
+
+		r, _ := tramp.AddRel(objabi.R_ADDRPOWER_TOCREL_DS)
+		r.SetOff(0)
+		r.SetSiz(8) // generates 2 relocations: HA + LO
+		r.SetSym(toctramp.Sym())
+	} else if hasPCrel {
+		// pla r12, addr (PCrel). This works for static or PIC, with or without a valid TOC pointer.
+		o1 = uint32(OP_PLA_PFX)
+		o2 = uint32(OP_PLA_SFX_R12) // pla r12, addr
+
+		// The trampoline's position is not known yet, insert a relocation.
+		r, _ := tramp.AddRel(objabi.R_ADDRPOWER_PCREL34)
+		r.SetOff(0)
+		r.SetSiz(8) // This spans 2 words.
+		r.SetSym(target)
+		r.SetAdd(offset)
+	} else {
+		// Used for default build mode for an executable
+		// Address of the call target is generated using
+		// relocation and doesn't depend on r2 (TOC).
+		o1 = uint32(OP_LIS_R12)      // lis r12,targetaddr hi
+		o2 = uint32(OP_ADDI_R12_R12) // addi r12,r12,targetaddr lo
+
+		t := ldr.SymValue(target)
+		if t == 0 || r2Valid(ctxt) || ctxt.IsExternal() {
+			// Target address is unknown, generate relocations
+			r, _ := tramp.AddRel(objabi.R_ADDRPOWER)
+			if r2Valid(ctxt) {
+				// Use a TOC relative address if R2 holds the TOC pointer
+				o1 |= uint32(2 << 16) // Transform lis r31,ha into addis r31,r2,ha
+				r.SetType(objabi.R_ADDRPOWER_TOCREL)
+			}
+			r.SetOff(0)
+			r.SetSiz(8) // generates 2 relocations: HA + LO
+			r.SetSym(target)
+			r.SetAdd(offset)
+		} else {
+			// The target address is known, resolve it
+			t += offset
+			o1 |= (uint32(t) + 0x8000) >> 16 // HA
+			o2 |= uint32(t) & 0xFFFF         // LO
+		}
+	}
+
+	o3 := uint32(OP_MTCTR_R12) // mtctr r12
+	o4 := uint32(OP_BCTR)      // bctr
+	ctxt.Arch.ByteOrder.PutUint32(P, o1)
+	ctxt.Arch.ByteOrder.PutUint32(P[4:], o2)
+	ctxt.Arch.ByteOrder.PutUint32(P[8:], o3)
+	ctxt.Arch.ByteOrder.PutUint32(P[12:], o4)
+	tramp.SetData(P)
+}
+
+// Unpack a pair of 32 bit instruction words from
+// a 64 bit relocation into instN and instN+1 in endian order.
+func unpackInstPair(target *ld.Target, r int64) (uint32, uint32) {
+	if target.IsBigEndian() {
+		return uint32(r >> 32), uint32(r)
+	}
+	return uint32(r), uint32(r >> 32)
+}
+
+// Pack a pair of 32 bit instruction words o1, o2 into 64 bit relocation
+// in endian order.
+func packInstPair(target *ld.Target, o1, o2 uint32) int64 {
+	if target.IsBigEndian() {
+		return (int64(o1) << 32) | int64(o2)
+	}
+	return int64(o1) | (int64(o2) << 32)
+}
+
+// Compute the high-adjusted value (always a signed 32b value) per the ELF ABI.
+// The returned value is always 0 <= x <= 0xFFFF.
+func computeHA(val int32) uint32 {
+	return uint32(uint16((val + 0x8000) >> 16))
+}
+
+// Compute the low value (the lower 16 bits of any 32b value) per the ELF ABI.
+// The returned value is always 0 <= x <= 0xFFFF.
+func computeLO(val int32) uint32 {
+	return uint32(uint16(val))
+}
+
+// Compute the high 18 bits of a signed 34b constant. Used to pack the high 18 bits
+// of a prefix34 relocation field. This assumes the input is already restricted to
+// 34 bits.
+func computePrefix34HI(val int64) uint32 {
+	return uint32((val >> 16) & 0x3FFFF)
+}
+// computeTLSLEReloc returns the TLS LE offset of symbol rs relative to the thread pointer.
+func computeTLSLEReloc(target *ld.Target, ldr *loader.Loader, rs, s loader.Sym) int64 {
+	// The thread pointer points 0x7000 bytes after the start of the
+	// thread local storage area as documented in section "3.7.2 TLS
+	// Runtime Handling" of "Power Architecture 64-Bit ELF V2 ABI
+	// Specification".
+	v := ldr.SymValue(rs) - 0x7000
+	if target.IsAIX() {
+		// On AIX, the thread pointer points 0x7800 bytes after
+		// the TLS.
+		v -= 0x800
+	}
+
+	if int64(int32(v)) != v {
+		ldr.Errorf(s, "TLS offset out of range %d", v)
+	}
+	return v
+}
+// archreloc applies relocation r within symbol s, returning the relocated value and the number of external relocations required.
+func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) (relocatedOffset int64, nExtReloc int, ok bool) {
+	rs := r.Sym()
+	if target.IsExternal() {
+		// On AIX, relocations (except TLS ones) must be also done to the
+		// value with the current addresses.
+		switch rt := r.Type(); rt {
+		default:
+			if !target.IsAIX() {
+				return val, nExtReloc, false
+			}
+		case objabi.R_POWER_TLS, objabi.R_POWER_TLS_IE_PCREL34, objabi.R_POWER_TLS_LE_TPREL34, objabi.R_ADDRPOWER_GOT_PCREL34:
+			nExtReloc = 1
+			return val, nExtReloc, true
+		case objabi.R_POWER_TLS_LE, objabi.R_POWER_TLS_IE:
+			if target.IsAIX() && rt == objabi.R_POWER_TLS_LE {
+				// Fixup val, an addis/addi pair of instructions, which generate a 32b displacement
+				// from the threadpointer (R13), into a 16b relocation. XCOFF only supports 16b
+				// TLS LE relocations. Likewise, verify this is an addis/addi sequence.
+				const expectedOpcodes = 0x3C00000038000000
+				const expectedOpmasks = 0xFC000000FC000000
+				if uint64(val)&expectedOpmasks != expectedOpcodes {
+					ldr.Errorf(s, "relocation for %s+%d is not an addis/addi pair: %16x", ldr.SymName(rs), r.Off(), uint64(val))
+				}
+				nval := (int64(uint32(0x380d0000)) | val&0x03e00000) << 32 // addi rX, r13, $0
+				nval |= int64(OP_NOP)                                      // nop
+				val = nval
+				nExtReloc = 1
+			} else {
+				nExtReloc = 2
+			}
+			return val, nExtReloc, true
+		case objabi.R_ADDRPOWER,
+			objabi.R_ADDRPOWER_DS,
+			objabi.R_ADDRPOWER_TOCREL,
+			objabi.R_ADDRPOWER_TOCREL_DS,
+			objabi.R_ADDRPOWER_GOT,
+			objabi.R_ADDRPOWER_PCREL:
+			nExtReloc = 2 // need two ELF relocations, see elfreloc1
+			if !target.IsAIX() {
+				return val, nExtReloc, true
+			}
+		case objabi.R_CALLPOWER, objabi.R_ADDRPOWER_D34, objabi.R_ADDRPOWER_PCREL34:
+			nExtReloc = 1
+			if !target.IsAIX() {
+				return val, nExtReloc, true
+			}
+		}
+	}
+
+	switch r.Type() {
+	case objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS:
+		return archreloctoc(ldr, target, syms, r, s, val), nExtReloc, true
+	case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS, objabi.R_ADDRPOWER_D34, objabi.R_ADDRPOWER_PCREL34:
+		return archrelocaddr(ldr, target, syms, r, s, val), nExtReloc, true
+	case objabi.R_CALLPOWER:
+		// Bits 6 through 29 = (S + A - P) >> 2
+
+		t := ldr.SymValue(rs) + r.Add() - (ldr.SymValue(s) + int64(r.Off()))
+
+		tgtName := ldr.SymName(rs)
+
+		// If we are linking PIE or shared code, non-PCrel golang generated object files have an extra 2 instruction prologue
+		// to regenerate the TOC pointer from R12. The exception are two special case functions tested below. Note,
+		// local call offsets for externally generated objects are accounted for when converting into golang relocs.
+		if !hasPCrel && !ldr.AttrExternal(rs) && ldr.AttrShared(rs) && tgtName != "runtime.duffzero" && tgtName != "runtime.duffcopy" {
+			// Furthermore, only apply the offset if the target looks like the start of a function call.
+			if r.Add() == 0 && ldr.SymType(rs) == sym.STEXT {
+				t += 8
+			}
+		}
+
+		if t&3 != 0 {
+			ldr.Errorf(s, "relocation for %s+%d is not aligned: %d", ldr.SymName(rs), r.Off(), t)
+		}
+		// If branch offset is too far then create a trampoline.
+
+		if int64(int32(t<<6)>>6) != t {
+			ldr.Errorf(s, "direct call too far: %s %x", ldr.SymName(rs), t)
+		}
+		return val | int64(uint32(t)&^0xfc000003), nExtReloc, true
+	case objabi.R_POWER_TOC: // S + A - .TOC.
+		return ldr.SymValue(rs) + r.Add() - symtoc(ldr, syms, s), nExtReloc, true
+
+	case objabi.R_ADDRPOWER_PCREL: // S + A - P
+		t := ldr.SymValue(rs) + r.Add() - (ldr.SymValue(s) + int64(r.Off()))
+		ha, l := unpackInstPair(target, val)
+		l |= computeLO(int32(t))
+		ha |= computeHA(int32(t))
+		return packInstPair(target, ha, l), nExtReloc, true
+
+	case objabi.R_POWER_TLS:
+		const OP_ADD = 31<<26 | 266<<1
+		const MASK_OP_ADD = 0x3F<<26 | 0x1FF<<1
+		if val&MASK_OP_ADD != OP_ADD {
+			ldr.Errorf(s, "R_POWER_TLS reloc only supports XO form ADD, not %08X", val)
+		}
+		// Verify RB is R13 in ADD RA,RB,RT.
+		if (val>>11)&0x1F != 13 {
+			// If external linking is made to support this, it may expect the linker to rewrite RB.
+			ldr.Errorf(s, "R_POWER_TLS reloc requires R13 in RB (%08X).", uint32(val))
+		}
+		return val, nExtReloc, true
+
+	case objabi.R_POWER_TLS_IE:
+		// Convert TLS_IE relocation to TLS_LE if supported.
+		if !(target.IsPIE() && target.IsElf()) {
+			log.Fatalf("cannot handle R_POWER_TLS_IE (sym %s) when linking non-PIE, non-ELF binaries internally", ldr.SymName(s))
+		}
+
+		// We are an ELF binary, we can safely convert to TLS_LE from:
+		// addis to, r2, x@got@tprel@ha
+		// ld to, to, x@got@tprel@l(to)
+		//
+		// to TLS_LE by converting to:
+		// addis to, r0, x@tprel@ha
+		// addi to, to, x@tprel@l(to)
+
+		const OP_MASK = 0x3F << 26
+		const OP_RA_MASK = 0x1F << 16
+		// convert r2 to r0, and ld to addi
+		mask := packInstPair(target, OP_RA_MASK, OP_MASK)
+		addi_op := packInstPair(target, 0, OP_ADDI)
+		val &^= mask
+		val |= addi_op
+		fallthrough
+
+	case objabi.R_POWER_TLS_LE:
+		v := computeTLSLEReloc(target, ldr, rs, s)
+		o1, o2 := unpackInstPair(target, val)
+		o1 |= computeHA(int32(v))
+		o2 |= computeLO(int32(v))
+		return packInstPair(target, o1, o2), nExtReloc, true
+
+	case objabi.R_POWER_TLS_IE_PCREL34:
+		// Convert TLS_IE relocation to TLS_LE if supported.
+		if !(target.IsPIE() && target.IsElf()) {
+			log.Fatalf("cannot handle R_POWER_TLS_IE (sym %s) when linking non-PIE, non-ELF binaries internally", ldr.SymName(s))
+		}
+
+		// We are an ELF binary, we can safely convert to TLS_LE_TPREL34 from:
+		// pld rX, x@got@tprel@pcrel
+		//
+		// to TLS_LE_TPREL32 by converting to:
+		// pla rX, x@tprel
+
+		const OP_MASK_PFX = 0xFFFFFFFF        // Discard prefix word
+		const OP_MASK = (0x3F << 26) | 0xFFFF // Preserve RT, RA
+		const OP_PFX = 1<<26 | 2<<24
+		const OP_PLA = 14 << 26
+		mask := packInstPair(target, OP_MASK_PFX, OP_MASK)
+		pla_op := packInstPair(target, OP_PFX, OP_PLA)
+		val &^= mask
+		val |= pla_op
+		fallthrough
+
+	case objabi.R_POWER_TLS_LE_TPREL34:
+		v := computeTLSLEReloc(target, ldr, rs, s)
+		o1, o2 := unpackInstPair(target, val)
+		o1 |= computePrefix34HI(v)
+		o2 |= computeLO(int32(v))
+		return packInstPair(target, o1, o2), nExtReloc, true
+	}
+
+	return val, nExtReloc, false
+}
+// archrelocvariant applies relocation variant rv (LO/HA/HI/DS) to value t, checking overflow when requested.
+func archrelocvariant(target *ld.Target, ldr *loader.Loader, r loader.Reloc, rv sym.RelocVariant, s loader.Sym, t int64, p []byte) (relocatedOffset int64) {
+	rs := r.Sym()
+	switch rv & sym.RV_TYPE_MASK {
+	default:
+		ldr.Errorf(s, "unexpected relocation variant %d", rv)
+		fallthrough
+
+	case sym.RV_NONE:
+		return t
+
+	case sym.RV_POWER_LO:
+		if rv&sym.RV_CHECK_OVERFLOW != 0 {
+			// Whether to check for signed or unsigned
+			// overflow depends on the instruction
+			var o1 uint32
+			if target.IsBigEndian() {
+				o1 = binary.BigEndian.Uint32(p[r.Off()-2:])
+			} else {
+				o1 = binary.LittleEndian.Uint32(p[r.Off():])
+			}
+			switch o1 >> 26 {
+			case 24, // ori
+				26, // xori
+				28: // andi
+				if t>>16 != 0 {
+					goto overflow
+				}
+
+			default:
+				if int64(int16(t)) != t {
+					goto overflow
+				}
+			}
+		}
+
+		return int64(int16(t))
+
+	case sym.RV_POWER_HA:
+		t += 0x8000
+		fallthrough
+
+	// Fallthrough
+	case sym.RV_POWER_HI:
+		t >>= 16
+
+		if rv&sym.RV_CHECK_OVERFLOW != 0 {
+			// Whether to check for signed or unsigned
+			// overflow depends on the instruction
+			var o1 uint32
+			if target.IsBigEndian() {
+				o1 = binary.BigEndian.Uint32(p[r.Off()-2:])
+			} else {
+				o1 = binary.LittleEndian.Uint32(p[r.Off():])
+			}
+			switch o1 >> 26 {
+			case 25, // oris
+				27, // xoris
+				29: // andis
+				if t>>16 != 0 {
+					goto overflow
+				}
+
+			default:
+				if int64(int16(t)) != t {
+					goto overflow
+				}
+			}
+		}
+
+		return int64(int16(t))
+
+	case sym.RV_POWER_DS:
+		var o1 uint32
+		if target.IsBigEndian() {
+			o1 = uint32(binary.BigEndian.Uint16(p[r.Off():]))
+		} else {
+			o1 = uint32(binary.LittleEndian.Uint16(p[r.Off():]))
+		}
+		if t&3 != 0 {
+			ldr.Errorf(s, "relocation for %s+%d is not aligned: %d", ldr.SymName(rs), r.Off(), t)
+		}
+		if (rv&sym.RV_CHECK_OVERFLOW != 0) && int64(int16(t)) != t {
+			goto overflow
+		}
+		return int64(o1)&0x3 | int64(int16(t))
+	}
+
+overflow:
+	ldr.Errorf(s, "relocation for %s+%d is too big: %d", ldr.SymName(rs), r.Off(), t)
+	return t
+}
+// extreloc converts relocation r in s into an external relocation, if its type supports external linking.
+func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (loader.ExtReloc, bool) {
+	switch r.Type() {
+	case objabi.R_POWER_TLS, objabi.R_POWER_TLS_LE, objabi.R_POWER_TLS_IE, objabi.R_POWER_TLS_IE_PCREL34, objabi.R_POWER_TLS_LE_TPREL34, objabi.R_CALLPOWER:
+		return ld.ExtrelocSimple(ldr, r), true
+	case objabi.R_ADDRPOWER,
+		objabi.R_ADDRPOWER_DS,
+		objabi.R_ADDRPOWER_TOCREL,
+		objabi.R_ADDRPOWER_TOCREL_DS,
+		objabi.R_ADDRPOWER_GOT,
+		objabi.R_ADDRPOWER_GOT_PCREL34,
+		objabi.R_ADDRPOWER_PCREL,
+		objabi.R_ADDRPOWER_D34,
+		objabi.R_ADDRPOWER_PCREL34:
+		return ld.ExtrelocViaOuterSym(ldr, r, s), true
+	}
+	return loader.ExtReloc{}, false
+}
+// addpltsym reserves a PLT slot for s, emits its glink resolver stub and JMP_SLOT dynamic relocation (ELF only).
+func addpltsym(ctxt *ld.Link, ldr *loader.Loader, s loader.Sym) {
+	if ldr.SymPlt(s) >= 0 {
+		return
+	}
+
+	ld.Adddynsym(ldr, &ctxt.Target, &ctxt.ArchSyms, s)
+
+	if ctxt.IsELF {
+		plt := ldr.MakeSymbolUpdater(ctxt.PLT)
+		rela := ldr.MakeSymbolUpdater(ctxt.RelaPLT)
+		if plt.Size() == 0 {
+			panic("plt is not set up")
+		}
+
+		// Create the glink resolver if necessary
+		glink := ensureglinkresolver(ctxt, ldr)
+
+		// Write symbol resolver stub (just a branch to the
+		// glink resolver stub)
+		rel, _ := glink.AddRel(objabi.R_CALLPOWER)
+		rel.SetOff(int32(glink.Size()))
+		rel.SetSiz(4)
+		rel.SetSym(glink.Sym())
+		glink.AddUint32(ctxt.Arch, 0x48000000) // b .glink
+
+		// In the ppc64 ABI, the dynamic linker is responsible
+		// for writing the entire PLT. We just need to
+		// reserve 8 bytes for each PLT entry and generate a
+		// JMP_SLOT dynamic relocation for it.
+		//
+		// TODO(austin): ABI v1 is different
+		ldr.SetPlt(s, int32(plt.Size()))
+
+		plt.Grow(plt.Size() + 8)
+		plt.SetSize(plt.Size() + 8)
+
+		rela.AddAddrPlus(ctxt.Arch, plt.Sym(), int64(ldr.SymPlt(s)))
+		rela.AddUint64(ctxt.Arch, elf.R_INFO(uint32(ldr.SymDynid(s)), uint32(elf.R_PPC64_JMP_SLOT)))
+		rela.AddUint64(ctxt.Arch, 0)
+	} else {
+		ctxt.Errorf(s, "addpltsym: unsupported binary format")
+	}
+}
+
+// Generate the glink resolver stub if necessary and return the .glink section.
+func ensureglinkresolver(ctxt *ld.Link, ldr *loader.Loader) *loader.SymbolBuilder {
+	glink := ldr.CreateSymForUpdate(".glink", 0)
+	if glink.Size() != 0 {
+		return glink
+	}
+
+	// This is essentially the resolver from the ppc64 ELFv2 ABI.
+	// At entry, r12 holds the address of the symbol resolver stub
+	// for the target routine and the argument registers hold the
+	// arguments for the target routine.
+	//
+	// PC-rel offsets are computed once the final codesize of the
+	// resolver is known.
+	//
+	// This stub is PIC, so first get the PC of label 1 into r11.
+	glink.AddUint32(ctxt.Arch, OP_MFLR_R0) // mflr r0
+	glink.AddUint32(ctxt.Arch, OP_BCL_NIA) // bcl 20,31,1f
+	glink.AddUint32(ctxt.Arch, 0x7d6802a6) // 1: mflr r11
+	glink.AddUint32(ctxt.Arch, OP_MTLR_R0) // mtlr r0
+
+	// Compute the .plt array index from the entry point address
+	// into r0. This is computed relative to label 1 above.
+	glink.AddUint32(ctxt.Arch, 0x38000000) // li r0,-(res_0-1b)
+	glink.AddUint32(ctxt.Arch, 0x7c006214) // add r0,r0,r12
+	glink.AddUint32(ctxt.Arch, 0x7c0b0050) // sub r0,r0,r11
+	glink.AddUint32(ctxt.Arch, 0x7800f082) // srdi r0,r0,2
+
+	// Load the PC-rel offset of ".plt - 1b", and add it to 1b.
+	// This is stored after this stub and before the resolvers.
+	glink.AddUint32(ctxt.Arch, 0xe98b0000) // ld r12,res_0-1b-8(r11)
+	glink.AddUint32(ctxt.Arch, 0x7d6b6214) // add r11,r11,r12
+
+	// Load r12 = dynamic resolver address and r11 = DSO
+	// identifier from the first two doublewords of the PLT.
+	glink.AddUint32(ctxt.Arch, 0xe98b0000) // ld r12,0(r11)
+	glink.AddUint32(ctxt.Arch, 0xe96b0008) // ld r11,8(r11)
+
+	// Jump to the dynamic resolver
+	glink.AddUint32(ctxt.Arch, OP_MTCTR_R12) // mtctr r12
+	glink.AddUint32(ctxt.Arch, OP_BCTR)      // bctr
+
+	// Store the PC-rel offset to the PLT
+	r, _ := glink.AddRel(objabi.R_PCREL)
+	r.SetSym(ctxt.PLT)
+	r.SetSiz(8) // the 8-byte offset doubleword added below
+	r.SetOff(int32(glink.Size()))
+	r.SetAdd(glink.Size()) // Adjust the offset to be relative to label 1 above.
+	glink.AddUint64(ctxt.Arch, 0) // The offset to the PLT.
+
+	// Resolve PC-rel offsets above now the final size of the stub is known.
+	res0m1b := glink.Size() - 8 // res_0 - 1b
+	glink.SetUint32(ctxt.Arch, 16, 0x38000000|uint32(uint16(-res0m1b)))   // patch the "li r0" word above
+	glink.SetUint32(ctxt.Arch, 32, 0xe98b0000|uint32(uint16(res0m1b-8))) // patch the "ld r12,res_0-1b-8(r11)" word above
+
+	// The symbol resolvers must immediately follow.
+	// res_0:
+
+	// Add DT_PPC64_GLINK .dynamic entry, which points to 32 bytes
+	// before the first symbol resolver stub.
+	du := ldr.MakeSymbolUpdater(ctxt.Dynamic)
+	ld.Elfwritedynentsymplus(ctxt, du, elf.DT_PPC64_GLINK, glink.Sym(), glink.Size()-32)
+
+	return glink
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ppc64/l.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ppc64/l.go
new file mode 100644
index 0000000000000000000000000000000000000000..e8d3b686a4d702fd456d337cb451f9bd01f83c80
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ppc64/l.go
@@ -0,0 +1,74 @@
+// Inferno utils/5l/asm.c
+// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/asm.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package ppc64 + +// Writing object files. + +// cmd/9l/l.h from Vita Nuova. +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+const (
+	maxAlign  = 32 // max data alignment
+	minAlign  = 1  // min data alignment
+	funcAlign = 16 // function entry point alignment
+)
+
+/* Used by ../internal/ld/dwarf.go */
+const (
+	dwarfRegSP = 1  // DWARF register number for the stack pointer (R1)
+	dwarfRegLR = 65 // DWARF register number for the link register
+)
diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/ppc64/obj.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/ppc64/obj.go
new file mode 100644
index 0000000000000000000000000000000000000000..703c8ec2e85c0cd29e9f874893eafaa0c108f11a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/ppc64/obj.go
@@ -0,0 +1,116 @@
+// Inferno utils/5l/obj.c
+// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/obj.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+
+package ppc64
+
+import (
+	"cmd/internal/objabi"
+	"cmd/internal/sys"
+	"cmd/link/internal/ld"
+	"internal/buildcfg"
+)
+// Init returns the architecture descriptor and linker hooks for ppc64/ppc64le, selected via buildcfg.GOARCH.
+func Init() (*sys.Arch, ld.Arch) {
+	arch := sys.ArchPPC64LE
+	dynld := "/lib64/ld64.so.2"
+	musl := "/lib/ld-musl-powerpc64le.so.1"
+
+	if buildcfg.GOARCH == "ppc64" {
+		arch = sys.ArchPPC64
+		dynld = "/lib64/ld64.so.1"
+		musl = "/lib/ld-musl-powerpc64.so.1"
+	}
+
+	theArch := ld.Arch{
+		Funcalign:  funcAlign,
+		Maxalign:   maxAlign,
+		Minalign:   minAlign,
+		Dwarfregsp: dwarfRegSP,
+		Dwarfreglr: dwarfRegLR,
+		TrampLimit: 0x1c00000,
+
+		Adddynrel:        adddynrel,
+		Archinit:         archinit,
+		Archreloc:        archreloc,
+		Archrelocvariant: archrelocvariant,
+		Extreloc:         extreloc,
+		Gentext:          gentext,
+		Trampoline:       trampoline,
+		Machoreloc1:      machoreloc1,
+		Xcoffreloc1:      xcoffreloc1,
+
+		ELF: ld.ELFArch{
+			Linuxdynld:     dynld,
+			LinuxdynldMusl: musl,
+
+			Freebsddynld:   "XXX",
+			Openbsddynld:   "/usr/libexec/ld.so",
+			Netbsddynld:    "XXX",
+			Dragonflydynld: "XXX",
+			Solarisdynld:   "XXX",
+
+			Reloc1:    elfreloc1,
+			RelocSize: 24,
+			SetupPLT:  elfsetupplt,
+		},
+	}
+
+	return arch, theArch
+}
+// archinit sets header size, rounding and text address defaults for the selected output format (-H option).
+func archinit(ctxt *ld.Link) {
+	switch ctxt.HeadType {
+	default:
+		ld.Exitf("unknown -H option: %v", ctxt.HeadType)
+
+	case objabi.Hplan9: /* plan 9 */
+		ld.HEADR = 32
+		if *ld.FlagRound == -1 {
+			*ld.FlagRound = 4096
+		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(4096, *ld.FlagRound) + int64(ld.HEADR)
+		}
+
+	case objabi.Hlinux, /* ppc64 elf */
+		objabi.Hopenbsd:
+		ld.Elfinit(ctxt)
+		ld.HEADR = ld.ELFRESERVE
+		if *ld.FlagRound == -1 {
+			*ld.FlagRound = 0x10000
+		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR)
+		}
+
+	case objabi.Haix:
+		ld.Xcoffinit(ctxt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/riscv64/asm.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/riscv64/asm.go
new file mode 100644
index 0000000000000000000000000000000000000000..6a4dd012404f2398daf0c1c14152fd4721ac61f6
--- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/riscv64/asm.go @@ -0,0 +1,717 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package riscv64 + +import ( + "cmd/internal/obj/riscv" + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" + "fmt" + "log" + "sort" +) + +// fakeLabelName matches the RISCV_FAKE_LABEL_NAME from binutils. +const fakeLabelName = ".L0 " + +func gentext(ctxt *ld.Link, ldr *loader.Loader) {} + +func findHI20Reloc(ldr *loader.Loader, s loader.Sym, val int64) *loader.Reloc { + outer := ldr.OuterSym(s) + if outer == 0 { + return nil + } + relocs := ldr.Relocs(outer) + start := sort.Search(relocs.Count(), func(i int) bool { return ldr.SymValue(outer)+int64(relocs.At(i).Off()) >= val }) + for idx := start; idx < relocs.Count(); idx++ { + r := relocs.At(idx) + if ldr.SymValue(outer)+int64(r.Off()) != val { + break + } + if r.Type() == objabi.R_RISCV_GOT_HI20 || r.Type() == objabi.R_RISCV_PCREL_HI20 { + return &r + } + } + return nil +} + +func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { + targ := r.Sym() + + var targType sym.SymKind + if targ != 0 { + targType = ldr.SymType(targ) + } + + switch r.Type() { + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_CALL), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_CALL_PLT): + + if targType == sym.SDYNIMPORT { + addpltsym(target, ldr, syms, targ) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymPlt(targ))) + } + if targType == 0 || targType == sym.SXREF { + ldr.Errorf(s, "unknown symbol %s in RISCV call", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_RISCV_CALL) + return true + + case 
objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_GOT_HI20): + if targType != sym.SDYNIMPORT { + // TODO(jsing): Could convert to non-GOT reference. + } + + ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_RISCV_64)) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_RISCV_GOT_HI20) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ))) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_PCREL_HI20): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_RISCV_PCREL_HI20) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_PCREL_LO12_I): + if r.Add() != 0 { + ldr.Errorf(s, "R_RISCV_PCREL_LO12_I with non-zero addend") + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_RISCV_PCREL_LO12_I) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_PCREL_LO12_S): + if r.Add() != 0 { + ldr.Errorf(s, "R_RISCV_PCREL_LO12_S with non-zero addend") + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_RISCV_PCREL_LO12_S) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_RVC_BRANCH): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_RISCV_RVC_BRANCH) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_RVC_JUMP): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_RISCV_RVC_JUMP) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_BRANCH): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_RISCV_BRANCH) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_RELAX): + // Ignore relaxations, at least for now. + return true + + default: + if r.Type() >= objabi.ElfRelocOffset { + ldr.Errorf(s, "unexpected relocation type %d (%s)", r.Type(), sym.RelocName(target.Arch, r.Type())) + return false + } + } + + // Reread the reloc to incorporate any changes in type above. 
+ relocs := ldr.Relocs(s) + r = relocs.At(rIdx) + + switch r.Type() { + case objabi.R_RISCV_CALL: + if targType != sym.SDYNIMPORT { + // nothing to do, the relocation will be laid out in reloc + return true + } + if target.IsExternal() { + // External linker will do this relocation. + return true + } + // Internal linking. + if r.Add() != 0 { + ldr.Errorf(s, "PLT reference with non-zero addend (%v)", r.Add()) + } + // Build a PLT entry and change the relocation target to that entry. + addpltsym(target, ldr, syms, targ) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, int64(ldr.SymPlt(targ))) + + return true + } + + return false +} + +func genSymsLate(ctxt *ld.Link, ldr *loader.Loader) { + if ctxt.LinkMode != ld.LinkExternal { + return + } + + // Generate a local text symbol for each relocation target, as the + // R_RISCV_PCREL_LO12_* relocations generated by elfreloc1 need it. + if ctxt.Textp == nil { + log.Fatal("genSymsLate called before Textp has been assigned") + } + var hi20Syms []loader.Sym + for _, s := range ctxt.Textp { + relocs := ldr.Relocs(s) + for ri := 0; ri < relocs.Count(); ri++ { + r := relocs.At(ri) + if r.Type() != objabi.R_RISCV_CALL && r.Type() != objabi.R_RISCV_PCREL_ITYPE && + r.Type() != objabi.R_RISCV_PCREL_STYPE && r.Type() != objabi.R_RISCV_TLS_IE { + continue + } + if r.Off() == 0 && ldr.SymType(s) == sym.STEXT { + // Use the symbol for the function instead of creating + // an overlapping symbol. + continue + } + + // TODO(jsing): Consider generating ELF symbols without needing + // loader symbols, in order to reduce memory consumption. This + // would require changes to genelfsym so that it called + // putelfsym and putelfsyment as appropriate. 
+ sb := ldr.MakeSymbolBuilder(fakeLabelName) + sb.SetType(sym.STEXT) + sb.SetValue(ldr.SymValue(s) + int64(r.Off())) + sb.SetLocal(true) + sb.SetReachable(true) + sb.SetVisibilityHidden(true) + sb.SetSect(ldr.SymSect(s)) + if outer := ldr.OuterSym(s); outer != 0 { + ldr.AddInteriorSym(outer, sb.Sym()) + } + hi20Syms = append(hi20Syms, sb.Sym()) + } + } + ctxt.Textp = append(ctxt.Textp, hi20Syms...) + ldr.SortSyms(ctxt.Textp) +} + +func findHI20Symbol(ctxt *ld.Link, ldr *loader.Loader, val int64) loader.Sym { + idx := sort.Search(len(ctxt.Textp), func(i int) bool { return ldr.SymValue(ctxt.Textp[i]) >= val }) + if idx >= len(ctxt.Textp) { + return 0 + } + if s := ctxt.Textp[idx]; ldr.SymValue(s) == val && ldr.SymType(s) == sym.STEXT { + return s + } + return 0 +} + +func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool { + elfsym := ld.ElfSymForReloc(ctxt, r.Xsym) + switch r.Type { + case objabi.R_ADDR, objabi.R_DWARFSECREF: + out.Write64(uint64(sectoff)) + switch r.Size { + case 4: + out.Write64(uint64(elf.R_RISCV_32) | uint64(elfsym)<<32) + case 8: + out.Write64(uint64(elf.R_RISCV_64) | uint64(elfsym)<<32) + default: + ld.Errorf(nil, "unknown size %d for %v relocation", r.Size, r.Type) + return false + } + out.Write64(uint64(r.Xadd)) + + case objabi.R_RISCV_JAL, objabi.R_RISCV_JAL_TRAMP: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_RISCV_JAL) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + + case objabi.R_RISCV_CALL, objabi.R_RISCV_PCREL_ITYPE, objabi.R_RISCV_PCREL_STYPE, objabi.R_RISCV_TLS_IE: + // Find the text symbol for the AUIPC instruction targeted + // by this relocation. 
+ relocs := ldr.Relocs(s) + offset := int64(relocs.At(ri).Off()) + hi20Sym := findHI20Symbol(ctxt, ldr, ldr.SymValue(s)+offset) + if hi20Sym == 0 { + ld.Errorf(nil, "failed to find text symbol for HI20 relocation at %d (%x)", sectoff, ldr.SymValue(s)+offset) + return false + } + hi20ElfSym := ld.ElfSymForReloc(ctxt, hi20Sym) + + // Emit two relocations - a R_RISCV_PCREL_HI20 relocation and a + // corresponding R_RISCV_PCREL_LO12_I or R_RISCV_PCREL_LO12_S relocation. + // Note that the LO12 relocation must point to a target that has a valid + // HI20 PC-relative relocation text symbol, which in turn points to the + // given symbol. For further details see section 8.4.9 of the RISC-V ABIs + // Specification: + // + // https://github.com/riscv-non-isa/riscv-elf-psabi-doc/releases/download/v1.0/riscv-abi.pdf + // + var hiRel, loRel elf.R_RISCV + switch r.Type { + case objabi.R_RISCV_CALL, objabi.R_RISCV_PCREL_ITYPE: + hiRel, loRel = elf.R_RISCV_PCREL_HI20, elf.R_RISCV_PCREL_LO12_I + case objabi.R_RISCV_PCREL_STYPE: + hiRel, loRel = elf.R_RISCV_PCREL_HI20, elf.R_RISCV_PCREL_LO12_S + case objabi.R_RISCV_TLS_IE: + hiRel, loRel = elf.R_RISCV_TLS_GOT_HI20, elf.R_RISCV_PCREL_LO12_I + } + out.Write64(uint64(sectoff)) + out.Write64(uint64(hiRel) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + out.Write64(uint64(loRel) | uint64(hi20ElfSym)<<32) + out.Write64(uint64(0)) + + case objabi.R_RISCV_TLS_LE: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_RISCV_TPREL_HI20) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + out.Write64(uint64(elf.R_RISCV_TPREL_LO12_I) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + + default: + return false + } + + return true +} + +func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) { + if plt.Size() != 0 { + return + } + if gotplt.Size() != 0 { + ctxt.Errorf(gotplt.Sym(), "got.plt is not empty") + } + + 
// See section 8.4.6 of the RISC-V ABIs Specification: + // + // https://github.com/riscv-non-isa/riscv-elf-psabi-doc/releases/download/v1.0/riscv-abi.pdf + // + // 1: auipc t2, %pcrel_hi(.got.plt) + // sub t1, t1, t3 # shifted .got.plt offset + hdr size + 12 + // l[w|d] t3, %pcrel_lo(1b)(t2) # _dl_runtime_resolve + // addi t1, t1, -(hdr size + 12) # shifted .got.plt offset + // addi t0, t2, %pcrel_lo(1b) # &.got.plt + // srli t1, t1, log2(16/PTRSIZE) # .got.plt offset + // l[w|d] t0, PTRSIZE(t0) # link map + // jr t3 + + plt.AddSymRef(ctxt.Arch, gotplt.Sym(), 0, objabi.R_RISCV_PCREL_HI20, 4) + plt.SetUint32(ctxt.Arch, plt.Size()-4, 0x00000397) // auipc t2,0x0 + + sb := ldr.MakeSymbolBuilder(fakeLabelName) + sb.SetType(sym.STEXT) + sb.SetValue(ldr.SymValue(plt.Sym()) + plt.Size() - 4) + sb.SetLocal(true) + sb.SetReachable(true) + sb.SetVisibilityHidden(true) + plt.AddInteriorSym(sb.Sym()) + + plt.AddUint32(ctxt.Arch, 0x41c30333) // sub t1,t1,t3 + + plt.AddSymRef(ctxt.Arch, sb.Sym(), 0, objabi.R_RISCV_PCREL_LO12_I, 4) + plt.SetUint32(ctxt.Arch, plt.Size()-4, 0x0003be03) // ld t3,0(t2) + + plt.AddUint32(ctxt.Arch, 0xfd430313) // addi t1,t1,-44 + + plt.AddSymRef(ctxt.Arch, sb.Sym(), 0, objabi.R_RISCV_PCREL_LO12_I, 4) + plt.SetUint32(ctxt.Arch, plt.Size()-4, 0x00038293) // addi t0,t2,0 + + plt.AddUint32(ctxt.Arch, 0x00135313) // srli t1,t1,0x1 + plt.AddUint32(ctxt.Arch, 0x0082b283) // ld t0,8(t0) + plt.AddUint32(ctxt.Arch, 0x000e0067) // jr t3 + + gotplt.AddAddrPlus(ctxt.Arch, dynamic, 0) // got.plt[0] = _dl_runtime_resolve + gotplt.AddUint64(ctxt.Arch, 0) // got.plt[1] = link map +} + +func addpltsym(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym) { + if ldr.SymPlt(s) >= 0 { + return + } + + ld.Adddynsym(ldr, target, syms, s) + + plt := ldr.MakeSymbolUpdater(syms.PLT) + gotplt := ldr.MakeSymbolUpdater(syms.GOTPLT) + rela := ldr.MakeSymbolUpdater(syms.RelaPLT) + if plt.Size() == 0 { + panic("plt is not set up") + } + + // See section 8.4.6 of 
the RISC-V ABIs Specification: + // + // https://github.com/riscv-non-isa/riscv-elf-psabi-doc/releases/download/v1.0/riscv-abi.pdf + // + // 1: auipc t3, %pcrel_hi(function@.got.plt) + // l[w|d] t3, %pcrel_lo(1b)(t3) + // jalr t1, t3 + // nop + + plt.AddSymRef(target.Arch, gotplt.Sym(), gotplt.Size(), objabi.R_RISCV_PCREL_HI20, 4) + plt.SetUint32(target.Arch, plt.Size()-4, 0x00000e17) // auipc t3,0x0 + + sb := ldr.MakeSymbolBuilder(fakeLabelName) + sb.SetType(sym.STEXT) + sb.SetValue(ldr.SymValue(plt.Sym()) + plt.Size() - 4) + sb.SetLocal(true) + sb.SetReachable(true) + sb.SetVisibilityHidden(true) + plt.AddInteriorSym(sb.Sym()) + + plt.AddSymRef(target.Arch, sb.Sym(), 0, objabi.R_RISCV_PCREL_LO12_I, 4) + plt.SetUint32(target.Arch, plt.Size()-4, 0x000e3e03) // ld t3,0(t3) + plt.AddUint32(target.Arch, 0x000e0367) // jalr t1,t3 + plt.AddUint32(target.Arch, 0x00000013) // nop + + ldr.SetPlt(s, int32(plt.Size()-16)) + + // add to got.plt: pointer to plt[0] + gotplt.AddAddrPlus(target.Arch, plt.Sym(), 0) + + // rela + rela.AddAddrPlus(target.Arch, gotplt.Sym(), gotplt.Size()-8) + sDynid := ldr.SymDynid(s) + + rela.AddUint64(target.Arch, elf.R_INFO(uint32(sDynid), uint32(elf.R_RISCV_JUMP_SLOT))) + rela.AddUint64(target.Arch, 0) +} + +func machoreloc1(*sys.Arch, *ld.OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool { + log.Fatalf("machoreloc1 not implemented") + return false +} + +func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) (o int64, nExtReloc int, ok bool) { + rs := r.Sym() + pc := ldr.SymValue(s) + int64(r.Off()) + + // If the call points to a trampoline, see if we can reach the symbol + // directly. This situation can occur when the relocation symbol is + // not assigned an address until after the trampolines are generated. 
+ if r.Type() == objabi.R_RISCV_JAL_TRAMP { + relocs := ldr.Relocs(rs) + if relocs.Count() != 1 { + ldr.Errorf(s, "trampoline %v has %d relocations", ldr.SymName(rs), relocs.Count()) + } + tr := relocs.At(0) + if tr.Type() != objabi.R_RISCV_CALL { + ldr.Errorf(s, "trampoline %v has unexpected relocation %v", ldr.SymName(rs), tr.Type()) + } + trs := tr.Sym() + if ldr.SymValue(trs) != 0 && ldr.SymType(trs) != sym.SDYNIMPORT && ldr.SymType(trs) != sym.SUNDEFEXT { + trsOff := ldr.SymValue(trs) + tr.Add() - pc + if trsOff >= -(1<<20) && trsOff < (1<<20) { + r.SetType(objabi.R_RISCV_JAL) + r.SetSym(trs) + r.SetAdd(tr.Add()) + rs = trs + } + } + + } + + if target.IsExternal() { + switch r.Type() { + case objabi.R_RISCV_JAL, objabi.R_RISCV_JAL_TRAMP: + return val, 1, true + + case objabi.R_RISCV_CALL, objabi.R_RISCV_PCREL_ITYPE, objabi.R_RISCV_PCREL_STYPE, objabi.R_RISCV_TLS_IE, objabi.R_RISCV_TLS_LE: + return val, 2, true + } + + return val, 0, false + } + + off := ldr.SymValue(rs) + r.Add() - pc + + switch r.Type() { + case objabi.R_RISCV_JAL, objabi.R_RISCV_JAL_TRAMP: + // Generate instruction immediates. + imm, err := riscv.EncodeJImmediate(off) + if err != nil { + ldr.Errorf(s, "cannot encode J-type instruction relocation offset for %s: %v", ldr.SymName(rs), err) + } + immMask := int64(riscv.JTypeImmMask) + + val = (val &^ immMask) | int64(imm) + + return val, 0, true + + case objabi.R_RISCV_TLS_IE: + log.Fatalf("cannot handle R_RISCV_TLS_IE (sym %s) when linking internally", ldr.SymName(s)) + return val, 0, false + + case objabi.R_RISCV_TLS_LE: + // Generate LUI and ADDIW instruction immediates. 
+ off := r.Add() + + low, high, err := riscv.Split32BitImmediate(off) + if err != nil { + ldr.Errorf(s, "relocation does not fit in 32-bits: %d", off) + } + + luiImm, err := riscv.EncodeUImmediate(high) + if err != nil { + ldr.Errorf(s, "cannot encode R_RISCV_TLS_LE LUI relocation offset for %s: %v", ldr.SymName(rs), err) + } + + addiwImm, err := riscv.EncodeIImmediate(low) + if err != nil { + ldr.Errorf(s, "cannot encode R_RISCV_TLS_LE I-type instruction relocation offset for %s: %v", ldr.SymName(rs), err) + } + + lui := int64(uint32(val)) + addiw := int64(uint32(val >> 32)) + + lui = (lui &^ riscv.UTypeImmMask) | int64(uint32(luiImm)) + addiw = (addiw &^ riscv.ITypeImmMask) | int64(uint32(addiwImm)) + + return addiw<<32 | lui, 0, true + + case objabi.R_RISCV_BRANCH: + pc := ldr.SymValue(s) + int64(r.Off()) + off := ldr.SymValue(rs) + r.Add() - pc + + imm, err := riscv.EncodeBImmediate(off) + if err != nil { + ldr.Errorf(s, "cannot encode B-type instruction relocation offset for %s: %v", ldr.SymName(rs), err) + } + ins := (int64(uint32(val)) &^ riscv.BTypeImmMask) | int64(uint32(imm)) + + return ins, 0, true + + case objabi.R_RISCV_RVC_BRANCH, objabi.R_RISCV_RVC_JUMP: + pc := ldr.SymValue(s) + int64(r.Off()) + off := ldr.SymValue(rs) + r.Add() - pc + + var err error + var imm, immMask int64 + switch r.Type() { + case objabi.R_RISCV_RVC_BRANCH: + immMask = riscv.CBTypeImmMask + imm, err = riscv.EncodeCBImmediate(off) + if err != nil { + ldr.Errorf(s, "cannot encode CB-type instruction relocation offset for %s: %v", ldr.SymName(rs), err) + } + case objabi.R_RISCV_RVC_JUMP: + immMask = riscv.CJTypeImmMask + imm, err = riscv.EncodeCJImmediate(off) + if err != nil { + ldr.Errorf(s, "cannot encode CJ-type instruction relocation offset for %s: %v", ldr.SymName(rs), err) + } + default: + panic(fmt.Sprintf("unknown relocation type: %v", r.Type())) + } + + ins := (int64(uint16(val)) &^ immMask) | int64(uint16(imm)) + + return ins, 0, true + + case objabi.R_RISCV_GOT_HI20, 
objabi.R_RISCV_PCREL_HI20: + pc := ldr.SymValue(s) + int64(r.Off()) + off := ldr.SymValue(rs) + r.Add() - pc + + // Generate AUIPC immediates. + _, high, err := riscv.Split32BitImmediate(off) + if err != nil { + ldr.Errorf(s, "relocation does not fit in 32-bits: %d", off) + } + + auipcImm, err := riscv.EncodeUImmediate(high) + if err != nil { + ldr.Errorf(s, "cannot encode R_RISCV_PCREL_ AUIPC relocation offset for %s: %v", ldr.SymName(rs), err) + } + + auipc := int64(uint32(val)) + auipc = (auipc &^ riscv.UTypeImmMask) | int64(uint32(auipcImm)) + + return auipc, 0, true + + case objabi.R_RISCV_PCREL_LO12_I, objabi.R_RISCV_PCREL_LO12_S: + hi20Reloc := findHI20Reloc(ldr, rs, ldr.SymValue(rs)) + if hi20Reloc == nil { + ldr.Errorf(s, "missing HI20 relocation for LO12 relocation with %s (%d)", ldr.SymName(rs), rs) + } + + pc := ldr.SymValue(s) + int64(hi20Reloc.Off()) + off := ldr.SymValue(hi20Reloc.Sym()) + hi20Reloc.Add() - pc + + low, _, err := riscv.Split32BitImmediate(off) + if err != nil { + ldr.Errorf(s, "relocation does not fit in 32-bits: %d", off) + } + + var imm, immMask int64 + switch r.Type() { + case objabi.R_RISCV_PCREL_LO12_I: + immMask = riscv.ITypeImmMask + imm, err = riscv.EncodeIImmediate(low) + if err != nil { + ldr.Errorf(s, "cannot encode objabi.R_RISCV_PCREL_LO12_I I-type instruction relocation offset for %s: %v", ldr.SymName(rs), err) + } + case objabi.R_RISCV_PCREL_LO12_S: + immMask = riscv.STypeImmMask + imm, err = riscv.EncodeSImmediate(low) + if err != nil { + ldr.Errorf(s, "cannot encode R_RISCV_PCREL_LO12_S S-type instruction relocation offset for %s: %v", ldr.SymName(rs), err) + } + default: + panic(fmt.Sprintf("unknown relocation type: %v", r.Type())) + } + + ins := int64(uint32(val)) + ins = (ins &^ immMask) | int64(uint32(imm)) + return ins, 0, true + + case objabi.R_RISCV_CALL, objabi.R_RISCV_PCREL_ITYPE, objabi.R_RISCV_PCREL_STYPE: + // Generate AUIPC and second instruction immediates. 
+ low, high, err := riscv.Split32BitImmediate(off) + if err != nil { + ldr.Errorf(s, "pc-relative relocation does not fit in 32 bits: %d", off) + } + + auipcImm, err := riscv.EncodeUImmediate(high) + if err != nil { + ldr.Errorf(s, "cannot encode AUIPC relocation offset for %s: %v", ldr.SymName(rs), err) + } + + var secondImm, secondImmMask int64 + switch r.Type() { + case objabi.R_RISCV_CALL, objabi.R_RISCV_PCREL_ITYPE: + secondImmMask = riscv.ITypeImmMask + secondImm, err = riscv.EncodeIImmediate(low) + if err != nil { + ldr.Errorf(s, "cannot encode I-type instruction relocation offset for %s: %v", ldr.SymName(rs), err) + } + case objabi.R_RISCV_PCREL_STYPE: + secondImmMask = riscv.STypeImmMask + secondImm, err = riscv.EncodeSImmediate(low) + if err != nil { + ldr.Errorf(s, "cannot encode S-type instruction relocation offset for %s: %v", ldr.SymName(rs), err) + } + default: + panic(fmt.Sprintf("unknown relocation type: %v", r.Type())) + } + + auipc := int64(uint32(val)) + second := int64(uint32(val >> 32)) + + auipc = (auipc &^ riscv.UTypeImmMask) | int64(uint32(auipcImm)) + second = (second &^ secondImmMask) | int64(uint32(secondImm)) + + return second<<32 | auipc, 0, true + } + + return val, 0, false +} + +func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { + log.Fatalf("archrelocvariant") + return -1 +} + +func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (loader.ExtReloc, bool) { + switch r.Type() { + case objabi.R_RISCV_JAL, objabi.R_RISCV_JAL_TRAMP: + return ld.ExtrelocSimple(ldr, r), true + + case objabi.R_RISCV_CALL, objabi.R_RISCV_PCREL_ITYPE, objabi.R_RISCV_PCREL_STYPE, objabi.R_RISCV_TLS_IE, objabi.R_RISCV_TLS_LE: + return ld.ExtrelocViaOuterSym(ldr, r, s), true + } + return loader.ExtReloc{}, false +} + +func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) { + relocs := ldr.Relocs(s) + r := relocs.At(ri) + + switch r.Type() { + 
case objabi.R_RISCV_JAL: + pc := ldr.SymValue(s) + int64(r.Off()) + off := ldr.SymValue(rs) + r.Add() - pc + + // Relocation symbol has an address and is directly reachable, + // therefore there is no need for a trampoline. + if ldr.SymValue(rs) != 0 && off >= -(1<<20) && off < (1<<20) && (*ld.FlagDebugTramp <= 1 || ldr.SymPkg(s) == ldr.SymPkg(rs)) { + break + } + + // Relocation symbol is too far for a direct call or has not + // yet been given an address. See if an existing trampoline is + // reachable and if so, reuse it. Otherwise we need to create + // a new trampoline. + var tramp loader.Sym + for i := 0; ; i++ { + oName := ldr.SymName(rs) + name := fmt.Sprintf("%s-tramp%d", oName, i) + if r.Add() != 0 { + name = fmt.Sprintf("%s%+x-tramp%d", oName, r.Add(), i) + } + tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs))) + ldr.SetAttrReachable(tramp, true) + if ldr.SymType(tramp) == sym.SDYNIMPORT { + // Do not reuse trampoline defined in other module. + continue + } + if oName == "runtime.deferreturn" { + ldr.SetIsDeferReturnTramp(tramp, true) + } + if ldr.SymValue(tramp) == 0 { + // Either trampoline does not exist or we found one + // that does not have an address assigned and will be + // laid down immediately after the current function. + break + } + + trampOff := ldr.SymValue(tramp) - (ldr.SymValue(s) + int64(r.Off())) + if trampOff >= -(1<<20) && trampOff < (1<<20) { + // An existing trampoline that is reachable. + break + } + } + if ldr.SymType(tramp) == 0 { + trampb := ldr.MakeSymbolUpdater(tramp) + ctxt.AddTramp(trampb) + genCallTramp(ctxt.Arch, ctxt.LinkMode, ldr, trampb, rs, int64(r.Add())) + } + sb := ldr.MakeSymbolUpdater(s) + if ldr.SymValue(rs) == 0 { + // In this case the target symbol has not yet been assigned an + // address, so we have to assume a trampoline is required. Mark + // this as a call via a trampoline so that we can potentially + // switch to a direct call during relocation. 
+ sb.SetRelocType(ri, objabi.R_RISCV_JAL_TRAMP) + } + relocs := sb.Relocs() + r := relocs.At(ri) + r.SetSym(tramp) + r.SetAdd(0) + + case objabi.R_RISCV_CALL: + // Nothing to do, already using AUIPC+JALR. + + default: + ctxt.Errorf(s, "trampoline called with non-jump reloc: %d (%s)", r.Type(), sym.RelocName(ctxt.Arch, r.Type())) + } +} + +func genCallTramp(arch *sys.Arch, linkmode ld.LinkMode, ldr *loader.Loader, tramp *loader.SymbolBuilder, target loader.Sym, offset int64) { + tramp.AddUint32(arch, 0x00000f97) // AUIPC $0, X31 + tramp.AddUint32(arch, 0x000f8067) // JALR X0, (X31) + + r, _ := tramp.AddRel(objabi.R_RISCV_CALL) + r.SetSiz(8) + r.SetSym(target) + r.SetAdd(offset) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/riscv64/l.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/riscv64/l.go new file mode 100644 index 0000000000000000000000000000000000000000..a30265772630d2a554b76cb21d4621f0d84fdb07 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/riscv64/l.go @@ -0,0 +1,14 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package riscv64 + +const ( + maxAlign = 32 // max data alignment + minAlign = 1 + funcAlign = 8 + + dwarfRegLR = 1 + dwarfRegSP = 2 +) diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/riscv64/obj.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/riscv64/obj.go new file mode 100644 index 0000000000000000000000000000000000000000..940a8d611c8c17bb51c9ba00cbfcb2d4bb9d707e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/riscv64/obj.go @@ -0,0 +1,72 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package riscv64 + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" +) + +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchRISCV64 + + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, + + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Extreloc: extreloc, + + // TrampLimit is set such that we always run the trampoline + // generation code. This is necessary since calls to external + // symbols require the use of trampolines, regardless of the + // text size. + TrampLimit: 1, + Trampoline: trampoline, + + Gentext: gentext, + GenSymsLate: genSymsLate, + Machoreloc1: machoreloc1, + + ELF: ld.ELFArch{ + Linuxdynld: "/lib/ld.so.1", + + Freebsddynld: "/usr/libexec/ld-elf.so.1", + Netbsddynld: "XXX", + Openbsddynld: "/usr/libexec/ld.so", + Dragonflydynld: "XXX", + Solarisdynld: "XXX", + + Reloc1: elfreloc1, + RelocSize: 24, + SetupPLT: elfsetupplt, + }, + } + + return arch, theArch +} + +func archinit(ctxt *ld.Link) { + switch ctxt.HeadType { + case objabi.Hlinux, objabi.Hfreebsd, objabi.Hopenbsd: + ld.Elfinit(ctxt) + ld.HEADR = ld.ELFRESERVE + if *ld.FlagRound == -1 { + *ld.FlagRound = 0x10000 + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR) + } + default: + ld.Exitf("unknown -H option: %v", ctxt.HeadType) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/s390x/asm.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/s390x/asm.go new file mode 100644 index 0000000000000000000000000000000000000000..dee03484102bb120ca1e381228c7a32ad5f5f348 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/s390x/asm.go @@ -0,0 +1,449 @@ +// Inferno utils/5l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. 
All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package s390x + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" +) + +// gentext generates assembly to append the local moduledata to the global +// moduledata linked list at initialization time. This is only done if the runtime +// is in a different module. 
+// +// : +// larl %r2, +// jg +// undef +// +// The job of appending the moduledata is delegated to runtime.addmoduledata. +func gentext(ctxt *ld.Link, ldr *loader.Loader) { + initfunc, addmoduledata := ld.PrepareAddmoduledata(ctxt) + if initfunc == nil { + return + } + + // larl %r2, + initfunc.AddUint8(0xc0) + initfunc.AddUint8(0x20) + initfunc.AddSymRef(ctxt.Arch, ctxt.Moduledata, 6, objabi.R_PCREL, 4) + r1 := initfunc.Relocs() + ldr.SetRelocVariant(initfunc.Sym(), r1.Count()-1, sym.RV_390_DBL) + + // jg + initfunc.AddUint8(0xc0) + initfunc.AddUint8(0xf4) + initfunc.AddSymRef(ctxt.Arch, addmoduledata, 6, objabi.R_CALL, 4) + r2 := initfunc.Relocs() + ldr.SetRelocVariant(initfunc.Sym(), r2.Count()-1, sym.RV_390_DBL) + + // undef (for debugging) + initfunc.AddUint32(ctxt.Arch, 0) +} + +func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { + targ := r.Sym() + var targType sym.SymKind + if targ != 0 { + targType = ldr.SymType(targ) + } + + switch r.Type() { + default: + if r.Type() >= objabi.ElfRelocOffset { + ldr.Errorf(s, "unexpected relocation type %d", r.Type()) + return false + } + + // Handle relocations found in ELF object files. 
+ case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_12), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_GOT12): + ldr.Errorf(s, "s390x 12-bit relocations have not been implemented (relocation type %d)", r.Type()-objabi.ElfRelocOffset) + return false + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_8), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_16), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_32), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_64): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected R_390_nn relocation for dynamic symbol %s", ldr.SymName(targ)) + } + + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ADDR) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_PC16), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_PC32), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_PC64): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected R_390_PCnn relocation for dynamic symbol %s", ldr.SymName(targ)) + } + if targType == 0 || targType == sym.SXREF { + ldr.Errorf(s, "unknown symbol %s in pcrel", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocAdd(rIdx, r.Add()+int64(r.Siz())) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_GOT16), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_GOT32), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_GOT64): + ldr.Errorf(s, "unimplemented S390x relocation: %v", r.Type()-objabi.ElfRelocOffset) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_PLT16DBL), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_PLT32DBL): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + ldr.SetRelocVariant(s, rIdx, sym.RV_390_DBL) + su.SetRelocAdd(rIdx, r.Add()+int64(r.Siz())) + if targType == sym.SDYNIMPORT { + addpltsym(target, ldr, syms, targ) + r.SetSym(syms.PLT) + su.SetRelocAdd(rIdx, 
r.Add()+int64(ldr.SymPlt(targ))) + } + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_PLT32), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_PLT64): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocAdd(rIdx, r.Add()+int64(r.Siz())) + if targType == sym.SDYNIMPORT { + addpltsym(target, ldr, syms, targ) + r.SetSym(syms.PLT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymPlt(targ))) + } + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_COPY): + ldr.Errorf(s, "unimplemented S390x relocation: %v", r.Type()-objabi.ElfRelocOffset) + return false + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_GLOB_DAT): + ldr.Errorf(s, "unimplemented S390x relocation: %v", r.Type()-objabi.ElfRelocOffset) + return false + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_JMP_SLOT): + ldr.Errorf(s, "unimplemented S390x relocation: %v", r.Type()-objabi.ElfRelocOffset) + return false + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_RELATIVE): + ldr.Errorf(s, "unimplemented S390x relocation: %v", r.Type()-objabi.ElfRelocOffset) + return false + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_GOTOFF): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected R_390_GOTOFF relocation for dynamic symbol %s", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_GOTOFF) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_GOTPC): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + r.SetSym(syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+int64(r.Siz())) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_PC16DBL), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_PC32DBL): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + ldr.SetRelocVariant(s, rIdx, sym.RV_390_DBL) + su.SetRelocAdd(rIdx, r.Add()+int64(r.Siz())) + if targType == sym.SDYNIMPORT { + 
ldr.Errorf(s, "unexpected R_390_PCnnDBL relocation for dynamic symbol %s", ldr.SymName(targ)) + } + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_GOTPCDBL): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + ldr.SetRelocVariant(s, rIdx, sym.RV_390_DBL) + r.SetSym(syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+int64(r.Siz())) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_390_GOTENT): + ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_390_GLOB_DAT)) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + ldr.SetRelocVariant(s, rIdx, sym.RV_390_DBL) + r.SetSym(syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ))+int64(r.Siz())) + return true + } + // Handle references to ELF symbols from our own object files. + return targType != sym.SDYNIMPORT +} + +func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool { + out.Write64(uint64(sectoff)) + + elfsym := ld.ElfSymForReloc(ctxt, r.Xsym) + siz := r.Size + switch r.Type { + default: + return false + case objabi.R_TLS_LE: + switch siz { + default: + return false + case 4: + // WARNING - silently ignored by linker in ELF64 + out.Write64(uint64(elf.R_390_TLS_LE32) | uint64(elfsym)<<32) + case 8: + // WARNING - silently ignored by linker in ELF32 + out.Write64(uint64(elf.R_390_TLS_LE64) | uint64(elfsym)<<32) + } + case objabi.R_TLS_IE: + switch siz { + default: + return false + case 4: + out.Write64(uint64(elf.R_390_TLS_IEENT) | uint64(elfsym)<<32) + } + case objabi.R_ADDR, objabi.R_DWARFSECREF: + switch siz { + default: + return false + case 4: + out.Write64(uint64(elf.R_390_32) | uint64(elfsym)<<32) + case 8: + out.Write64(uint64(elf.R_390_64) | uint64(elfsym)<<32) + } + case objabi.R_GOTPCREL: + if siz == 4 { + out.Write64(uint64(elf.R_390_GOTENT) | uint64(elfsym)<<32) + } else { + return false + } + case objabi.R_PCREL, objabi.R_PCRELDBL, objabi.R_CALL: + 
elfrel := elf.R_390_NONE + rVariant := ldr.RelocVariant(s, ri) + isdbl := rVariant&sym.RV_TYPE_MASK == sym.RV_390_DBL + // TODO(mundaym): all DBL style relocations should be + // signalled using the variant - see issue 14218. + switch r.Type { + case objabi.R_PCRELDBL, objabi.R_CALL: + isdbl = true + } + if ldr.SymType(r.Xsym) == sym.SDYNIMPORT && (ldr.SymElfType(r.Xsym) == elf.STT_FUNC || r.Type == objabi.R_CALL) { + if isdbl { + switch siz { + case 2: + elfrel = elf.R_390_PLT16DBL + case 4: + elfrel = elf.R_390_PLT32DBL + } + } else { + switch siz { + case 4: + elfrel = elf.R_390_PLT32 + case 8: + elfrel = elf.R_390_PLT64 + } + } + } else { + if isdbl { + switch siz { + case 2: + elfrel = elf.R_390_PC16DBL + case 4: + elfrel = elf.R_390_PC32DBL + } + } else { + switch siz { + case 2: + elfrel = elf.R_390_PC16 + case 4: + elfrel = elf.R_390_PC32 + case 8: + elfrel = elf.R_390_PC64 + } + } + } + if elfrel == elf.R_390_NONE { + return false // unsupported size/dbl combination + } + out.Write64(uint64(elfrel) | uint64(elfsym)<<32) + } + + out.Write64(uint64(r.Xadd)) + return true +} + +func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, got *loader.SymbolBuilder, dynamic loader.Sym) { + if plt.Size() == 0 { + // stg %r1,56(%r15) + plt.AddUint8(0xe3) + plt.AddUint8(0x10) + plt.AddUint8(0xf0) + plt.AddUint8(0x38) + plt.AddUint8(0x00) + plt.AddUint8(0x24) + // larl %r1,_GLOBAL_OFFSET_TABLE_ + plt.AddUint8(0xc0) + plt.AddUint8(0x10) + plt.AddSymRef(ctxt.Arch, got.Sym(), 6, objabi.R_PCRELDBL, 4) + // mvc 48(8,%r15),8(%r1) + plt.AddUint8(0xd2) + plt.AddUint8(0x07) + plt.AddUint8(0xf0) + plt.AddUint8(0x30) + plt.AddUint8(0x10) + plt.AddUint8(0x08) + // lg %r1,16(%r1) + plt.AddUint8(0xe3) + plt.AddUint8(0x10) + plt.AddUint8(0x10) + plt.AddUint8(0x10) + plt.AddUint8(0x00) + plt.AddUint8(0x04) + // br %r1 + plt.AddUint8(0x07) + plt.AddUint8(0xf1) + // nopr %r0 + plt.AddUint8(0x07) + plt.AddUint8(0x00) + // nopr %r0 + plt.AddUint8(0x07) + plt.AddUint8(0x00) + // nopr %r0 + 
plt.AddUint8(0x07) + plt.AddUint8(0x00) + + // assume got->size == 0 too + got.AddAddrPlus(ctxt.Arch, dynamic, 0) + + got.AddUint64(ctxt.Arch, 0) + got.AddUint64(ctxt.Arch, 0) + } +} + +func machoreloc1(*sys.Arch, *ld.OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool { + return false +} + +func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) (o int64, nExtReloc int, ok bool) { + return val, 0, false +} + +func archrelocvariant(target *ld.Target, ldr *loader.Loader, r loader.Reloc, rv sym.RelocVariant, s loader.Sym, t int64, p []byte) int64 { + switch rv & sym.RV_TYPE_MASK { + default: + ldr.Errorf(s, "unexpected relocation variant %d", rv) + return t + + case sym.RV_NONE: + return t + + case sym.RV_390_DBL: + if t&1 != 0 { + ldr.Errorf(s, "%s+%v is not 2-byte aligned", ldr.SymName(r.Sym()), ldr.SymValue(r.Sym())) + } + return t >> 1 + } +} + +func addpltsym(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym) { + if ldr.SymPlt(s) >= 0 { + return + } + + ld.Adddynsym(ldr, target, syms, s) + + if target.IsElf() { + plt := ldr.MakeSymbolUpdater(syms.PLT) + got := ldr.MakeSymbolUpdater(syms.GOT) + rela := ldr.MakeSymbolUpdater(syms.RelaPLT) + if plt.Size() == 0 { + panic("plt is not set up") + } + // larl %r1,_GLOBAL_OFFSET_TABLE_+index + + plt.AddUint8(0xc0) + plt.AddUint8(0x10) + plt.AddPCRelPlus(target.Arch, got.Sym(), got.Size()+6) + pltrelocs := plt.Relocs() + ldr.SetRelocVariant(plt.Sym(), pltrelocs.Count()-1, sym.RV_390_DBL) + + // add to got: pointer to current pos in plt + got.AddAddrPlus(target.Arch, plt.Sym(), plt.Size()+8) // weird but correct + // lg %r1,0(%r1) + plt.AddUint8(0xe3) + plt.AddUint8(0x10) + plt.AddUint8(0x10) + plt.AddUint8(0x00) + plt.AddUint8(0x00) + plt.AddUint8(0x04) + // br %r1 + plt.AddUint8(0x07) + plt.AddUint8(0xf1) + // basr %r1,%r0 + plt.AddUint8(0x0d) + plt.AddUint8(0x10) + // lgf %r1,12(%r1) + plt.AddUint8(0xe3) + plt.AddUint8(0x10) + 
plt.AddUint8(0x10) + plt.AddUint8(0x0c) + plt.AddUint8(0x00) + plt.AddUint8(0x14) + // jg .plt + plt.AddUint8(0xc0) + plt.AddUint8(0xf4) + + plt.AddUint32(target.Arch, uint32(-((plt.Size() - 2) >> 1))) // roll-your-own relocation + //.plt index + plt.AddUint32(target.Arch, uint32(rela.Size())) // rela size before current entry + + // rela + rela.AddAddrPlus(target.Arch, got.Sym(), got.Size()-8) + + sDynid := ldr.SymDynid(s) + rela.AddUint64(target.Arch, elf.R_INFO(uint32(sDynid), uint32(elf.R_390_JMP_SLOT))) + rela.AddUint64(target.Arch, 0) + + ldr.SetPlt(s, int32(plt.Size()-32)) + + } else { + ldr.Errorf(s, "addpltsym: unsupported binary format") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/s390x/l.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/s390x/l.go new file mode 100644 index 0000000000000000000000000000000000000000..f040587547873e8ff1d34a536ee56c406d098cc3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/s390x/l.go @@ -0,0 +1,74 @@ +// Inferno utils/5l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package s390x + +// Writing object files. + +// cmd/9l/l.h from Vita Nuova. +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +const ( + maxAlign = 32 // max data alignment + minAlign = 2 // min data alignment + funcAlign = 16 +) + +/* Used by ../internal/ld/dwarf.go */ +const ( + dwarfRegSP = 15 + dwarfRegLR = 14 +) diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/s390x/obj.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/s390x/obj.go new file mode 100644 index 0000000000000000000000000000000000000000..76aa962a82b954a5e5a7cb751524ba96eeae82da --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/s390x/obj.go @@ -0,0 +1,91 @@ +// Inferno utils/5l/obj.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/obj.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package s390x + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" +) + +// Init returns the s390x architecture descriptor together with the table +// of arch-specific hooks (relocation handling, PLT/GOT setup, dynamic +// linker paths) that cmd/link calls back into. +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchS390X + + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, + + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Gentext: gentext, + Machoreloc1: machoreloc1, + + ELF: ld.ELFArch{ + Linuxdynld: "/lib64/ld64.so.1", + LinuxdynldMusl: "/lib/ld-musl-s390x.so.1", + + // not relevant for s390x + Freebsddynld: "XXX", + Openbsddynld: "XXX", + Netbsddynld: "XXX", + Dragonflydynld: "XXX", + Solarisdynld: "XXX", + + Reloc1: elfreloc1, + RelocSize: 24, // size of one Elf64_Rela record + SetupPLT: elfsetupplt, + }, + } + + return arch, theArch +} + +// archinit applies output-format defaults for the selected head type; +// s390x only supports Linux/ELF output. +func archinit(ctxt *ld.Link) { + switch ctxt.HeadType { + default: + ld.Exitf("unknown -H option: %v", ctxt.HeadType) + + case objabi.Hlinux: // s390x ELF + ld.Elfinit(ctxt) + ld.HEADR = ld.ELFRESERVE // reserve fixed room for the ELF file headers + if *ld.FlagRound == -1 { + *ld.FlagRound = 0x10000 // default segment rounding: 64KB + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/compilation_unit.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/compilation_unit.go new file mode 100644 index 0000000000000000000000000000000000000000..3d6cc3cf93418dd76b2fc1ef2f73cc07fd083920 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/compilation_unit.go @@ -0,0 +1,35 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sym + +import "cmd/internal/dwarf" + +// LoaderSym holds a loader.Sym value. We can't refer to this +// type from the sym package since loader imports sym.
+type LoaderSym uint32 + +// A CompilationUnit represents a set of source files that are compiled +// together. Since all Go sources in a Go package are compiled together, +// there's one CompilationUnit per package that represents all Go sources in +// that package, plus one for each assembly file. +// +// Equivalently, there's one CompilationUnit per object file in each Library +// loaded by the linker. +// +// These are used for both DWARF and pclntab generation. +type CompilationUnit struct { + Lib *Library // Our library + PclnIndex int // Index of this CU in pclntab + PCs []dwarf.Range // PC ranges, relative to Textp[0] + DWInfo *dwarf.DWDie // CU root DIE + FileTable []string // The file table used in this compilation unit. + + // Symbols holding the DWARF data generated for this unit: + Consts LoaderSym // Package constants DIEs + FuncDIEs []LoaderSym // Function DIE subtrees + VarDIEs []LoaderSym // Global variable DIEs + AbsFnDIEs []LoaderSym // Abstract function DIE subtrees + RangeSyms []LoaderSym // Symbols for debug_range + Textp []LoaderSym // Text symbols in this CU +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/library.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/library.go new file mode 100644 index 0000000000000000000000000000000000000000..876b5ff153e1ca67110cb6e773509cdb60c2ab6c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/library.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package sym + +import "cmd/internal/goobj" + +// A Library holds the linker's per-package state: where the package came +// from (Objref, Srcref, File), its import graph, and the compilation units +// and text symbols it contributes. +type Library struct { + Objref string + Srcref string + File string + Pkg string + Shlib string + Fingerprint goobj.FingerprintType + Autolib []goobj.ImportedPkg + Imports []*Library + Main bool + Units []*CompilationUnit + + Textp []LoaderSym // text syms defined in this library + DupTextSyms []LoaderSym // dupok text syms defined in this library +} + +// String returns the package path, so a Library prints as its package. +func (l Library) String() string { + return l.Pkg +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/reloc.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/reloc.go new file mode 100644 index 0000000000000000000000000000000000000000..e614caa5d89c7d7b72d4679d7b2d203fab007df8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/reloc.go @@ -0,0 +1,72 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sym + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "debug/elf" + "debug/macho" +) + +// RelocVariant is a linker-internal variation on a relocation. +type RelocVariant uint8 + +const ( + RV_NONE RelocVariant = iota + RV_POWER_LO + RV_POWER_HI + RV_POWER_HA + RV_POWER_DS + + // RV_390_DBL is a s390x-specific relocation variant that indicates that + // the value to be placed into the relocatable field should first be + // divided by 2.
+ RV_390_DBL + + // RV_CHECK_OVERFLOW is a flag bit OR'd into a variant; mask with + // RV_TYPE_MASK to recover the base variant. + RV_CHECK_OVERFLOW RelocVariant = 1 << 7 + RV_TYPE_MASK RelocVariant = RV_CHECK_OVERFLOW - 1 +) + +// RelocName returns a human-readable name for relocation type r, undoing +// the offsets objabi uses to pack host (Mach-O / ELF) relocation numbers +// into its RelocType space. +func RelocName(arch *sys.Arch, r objabi.RelocType) string { + switch { + case r >= objabi.MachoRelocOffset: // Mach-O + nr := (r - objabi.MachoRelocOffset) >> 1 // Mach-O types are packed shifted left one bit — see objabi's encoding + switch arch.Family { + case sys.AMD64: + return macho.RelocTypeX86_64(nr).String() + case sys.ARM64: + return macho.RelocTypeARM64(nr).String() + default: + panic("unreachable") + } + case r >= objabi.ElfRelocOffset: // ELF + nr := r - objabi.ElfRelocOffset + switch arch.Family { + case sys.AMD64: + return elf.R_X86_64(nr).String() + case sys.ARM: + return elf.R_ARM(nr).String() + case sys.ARM64: + return elf.R_AARCH64(nr).String() + case sys.I386: + return elf.R_386(nr).String() + case sys.Loong64: + return elf.R_LARCH(nr).String() + case sys.MIPS, sys.MIPS64: + return elf.R_MIPS(nr).String() + case sys.PPC64: + return elf.R_PPC64(nr).String() + case sys.S390X: + return elf.R_390(nr).String() + case sys.RISCV64: + return elf.R_RISCV(nr).String() + default: + panic("unreachable") + } + } + + return r.String() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/segment.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/segment.go new file mode 100644 index 0000000000000000000000000000000000000000..c889e71ad64b8baf7cb6a581a7a98ac3f79ca1cd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/segment.go @@ -0,0 +1,68 @@ +// Inferno utils/8l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/8l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package sym + +// Terrible but standard terminology. +// A segment describes a block of file to load into memory. +// A section further describes the pieces of that block for +// use in debuggers and such.
+ +// A Segment is one loadable block of the output file, subdivided into +// Sections (see the terminology note above). +type Segment struct { + Rwx uint8 // permission as usual unix bits (5 = r-x etc) + Vaddr uint64 // virtual address + Length uint64 // length in memory + Fileoff uint64 // file offset + Filelen uint64 // length on disk + Sections []*Section +} + +type Section struct { + Rwx uint8 // permission bits, as in Segment.Rwx + Extnum int16 // section number in the output symbol table — TODO confirm against output-format writers + Align int32 // required address alignment + Name string + Vaddr uint64 // virtual address + Length uint64 // length in memory + Seg *Segment // containing segment + Elfsect interface{} // an *ld.ElfShdr + Reloff uint64 // file offset of host relocations — see Relcount below + Rellen uint64 // length of host relocation data — see Relcount below + // Relcount is the number of *host* relocations applied to this section + // (when external linking). + // Incremented atomically on multiple goroutines. + // Note: this may differ from number of Go relocations, as one Go relocation + // may turn into multiple host relocations. + Relcount uint32 + Sym LoaderSym // symbol for the section, if any + Index uint16 // each section has a unique index, used internally + + Compressed bool // whether the section data is stored compressed — TODO confirm producer +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/symbol.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/symbol.go new file mode 100644 index 0000000000000000000000000000000000000000..2f2c839006f11d4fbd7b328af63f2d2a8d8def82 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/symbol.go @@ -0,0 +1,42 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sym + +import ( + "cmd/internal/obj" + "internal/buildcfg" +) + +const ( + SymVerABI0 = 0 + SymVerABIInternal = 1 + SymVerABICount = 2 // Number of internal ABIs + SymVerStatic = 10 // Minimum version used by static (file-local) syms +) + +// ABIToVersion maps a compiler ABI to the linker's internal symbol +// version, or -1 for an unknown ABI. +func ABIToVersion(abi obj.ABI) int { + switch abi { + case obj.ABI0: + return SymVerABI0 + case obj.ABIInternal: + if !buildcfg.Experiment.RegabiWrappers { + // If wrappers are not enabled, ABI0 and ABIInternal are actually same + // so we normalize everything to ABI0.
+ return SymVerABI0 + } + return SymVerABIInternal + } + return -1 +} + +// VersionToABI maps a symbol version back to its ABI; the boolean reports +// whether v corresponds to a known ABI. +func VersionToABI(v int) (obj.ABI, bool) { + switch v { + case SymVerABI0: + return obj.ABI0, true + case SymVerABIInternal: + return obj.ABIInternal, true + } + return ^obj.ABI(0), false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/symkind.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/symkind.go new file mode 100644 index 0000000000000000000000000000000000000000..08cafb206b93097dfa6b174423d9f989ef546aa7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/symkind.go @@ -0,0 +1,190 @@ +// Derived from Inferno utils/6l/l.h and related files. +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/l.h +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software.
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package sym + +import "cmd/internal/objabi" + +// A SymKind describes the kind of memory represented by a symbol. +type SymKind uint8 + +// Defined SymKind values. +// +// TODO(rsc): Give idiomatic Go names. +// +//go:generate stringer -type=SymKind +const ( + Sxxx SymKind = iota + STEXT + SELFRXSECT + SMACHOPLT + + // Read-only sections. + STYPE + SSTRING + SGOSTRING + SGOFUNC + SGCBITS + SRODATA + SFUNCTAB + + SELFROSECT + + // Read-only sections with relocations. + // + // Types STYPE-SFUNCTAB above are written to the .rodata section by default. + // When linking a shared object, some conceptually "read only" types need to + // be written to by relocations and putting them in a section called + // ".rodata" interacts poorly with the system linkers. The GNU linkers + // support this situation by arranging for sections of the name + // ".data.rel.ro.XXX" to be mprotected read only by the dynamic linker after + // relocations have applied, so when the Go linker is creating a shared + // object it checks all objects of the above types and bumps any object that + // has a relocation to it to the corresponding type below, which are then + // written to sections with appropriate magic names. + STYPERELRO + SSTRINGRELRO + SGOSTRINGRELRO + SGOFUNCRELRO + SGCBITSRELRO + SRODATARELRO + SFUNCTABRELRO + + // Part of .data.rel.ro if it exists, otherwise part of .rodata. + STYPELINK + SITABLINK + SSYMTAB + SPCLNTAB + + // Writable sections. 
+ SFirstWritable + SBUILDINFO + SELFSECT + SMACHO + SMACHOGOT + SWINDOWS + SELFGOT + SNOPTRDATA + SINITARR + SDATA + SXCOFFTOC + SBSS + SNOPTRBSS + SLIBFUZZER_8BIT_COUNTER + SCOVERAGE_COUNTER + SCOVERAGE_AUXVAR + STLSBSS + SXREF + SMACHOSYMSTR + SMACHOSYMTAB + SMACHOINDIRECTPLT + SMACHOINDIRECTGOT + SFILEPATH + SDYNIMPORT + SHOSTOBJ + SUNDEFEXT // Undefined symbol for resolution by external linker + + // Sections for debugging information + SDWARFSECT + // DWARF symbol types + SDWARFCUINFO + SDWARFCONST + SDWARFFCN + SDWARFABSFCN + SDWARFTYPE + SDWARFVAR + SDWARFRANGE + SDWARFLOC + SDWARFLINES + + // SEH symbol types + SSEHUNWINDINFO + SSEHSECT +) + +// AbiSymKindToSymKind maps values read from object files (which are +// of type cmd/internal/objabi.SymKind) to values of type SymKind. +var AbiSymKindToSymKind = [...]SymKind{ + objabi.Sxxx: Sxxx, + objabi.STEXT: STEXT, + objabi.SRODATA: SRODATA, + objabi.SNOPTRDATA: SNOPTRDATA, + objabi.SDATA: SDATA, + objabi.SBSS: SBSS, + objabi.SNOPTRBSS: SNOPTRBSS, + objabi.STLSBSS: STLSBSS, + objabi.SDWARFCUINFO: SDWARFCUINFO, + objabi.SDWARFCONST: SDWARFCONST, + objabi.SDWARFFCN: SDWARFFCN, + objabi.SDWARFABSFCN: SDWARFABSFCN, + objabi.SDWARFTYPE: SDWARFTYPE, + objabi.SDWARFVAR: SDWARFVAR, + objabi.SDWARFRANGE: SDWARFRANGE, + objabi.SDWARFLOC: SDWARFLOC, + objabi.SDWARFLINES: SDWARFLINES, + objabi.SLIBFUZZER_8BIT_COUNTER: SLIBFUZZER_8BIT_COUNTER, + objabi.SCOVERAGE_COUNTER: SCOVERAGE_COUNTER, + objabi.SCOVERAGE_AUXVAR: SCOVERAGE_AUXVAR, + objabi.SSEHUNWINDINFO: SSEHUNWINDINFO, +} + +// ReadOnly are the symbol kinds that form read-only sections. In some +// cases, if they will require relocations, they are transformed into +// rel-ro sections using relROMap. +var ReadOnly = []SymKind{ + STYPE, + SSTRING, + SGOSTRING, + SGOFUNC, + SGCBITS, + SRODATA, + SFUNCTAB, +} + +// RelROMap describes the transformation of read-only symbols to rel-ro +// symbols. 
+var RelROMap = map[SymKind]SymKind{ + STYPE: STYPERELRO, + SSTRING: SSTRINGRELRO, + SGOSTRING: SGOSTRINGRELRO, + SGOFUNC: SGOFUNCRELRO, + SGCBITS: SGCBITSRELRO, + SRODATA: SRODATARELRO, + SFUNCTAB: SFUNCTABRELRO, +} + +// IsData returns true if the type is a data type. +func (t SymKind) IsData() bool { + return t == SDATA || t == SNOPTRDATA || t == SBSS || t == SNOPTRBSS +} + +func (t SymKind) IsDWARF() bool { + return t >= SDWARFSECT && t <= SDWARFLINES +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/symkind_string.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/symkind_string.go new file mode 100644 index 0000000000000000000000000000000000000000..62b4fd92e505816c82e9e72297ca2a399e920b3a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/sym/symkind_string.go @@ -0,0 +1,83 @@ +// Code generated by "stringer -type=SymKind"; DO NOT EDIT. + +package sym + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Sxxx-0] + _ = x[STEXT-1] + _ = x[SELFRXSECT-2] + _ = x[SMACHOPLT-3] + _ = x[STYPE-4] + _ = x[SSTRING-5] + _ = x[SGOSTRING-6] + _ = x[SGOFUNC-7] + _ = x[SGCBITS-8] + _ = x[SRODATA-9] + _ = x[SFUNCTAB-10] + _ = x[SELFROSECT-11] + _ = x[STYPERELRO-12] + _ = x[SSTRINGRELRO-13] + _ = x[SGOSTRINGRELRO-14] + _ = x[SGOFUNCRELRO-15] + _ = x[SGCBITSRELRO-16] + _ = x[SRODATARELRO-17] + _ = x[SFUNCTABRELRO-18] + _ = x[STYPELINK-19] + _ = x[SITABLINK-20] + _ = x[SSYMTAB-21] + _ = x[SPCLNTAB-22] + _ = x[SFirstWritable-23] + _ = x[SBUILDINFO-24] + _ = x[SELFSECT-25] + _ = x[SMACHO-26] + _ = x[SMACHOGOT-27] + _ = x[SWINDOWS-28] + _ = x[SELFGOT-29] + _ = x[SNOPTRDATA-30] + _ = x[SINITARR-31] + _ = x[SDATA-32] + _ = x[SXCOFFTOC-33] + _ = x[SBSS-34] + _ = x[SNOPTRBSS-35] + _ = x[SLIBFUZZER_8BIT_COUNTER-36] + _ = x[SCOVERAGE_COUNTER-37] + _ = x[SCOVERAGE_AUXVAR-38] + _ = x[STLSBSS-39] + _ = x[SXREF-40] + _ = x[SMACHOSYMSTR-41] + _ = x[SMACHOSYMTAB-42] + _ = x[SMACHOINDIRECTPLT-43] + _ = x[SMACHOINDIRECTGOT-44] + _ = x[SFILEPATH-45] + _ = x[SDYNIMPORT-46] + _ = x[SHOSTOBJ-47] + _ = x[SUNDEFEXT-48] + _ = x[SDWARFSECT-49] + _ = x[SDWARFCUINFO-50] + _ = x[SDWARFCONST-51] + _ = x[SDWARFFCN-52] + _ = x[SDWARFABSFCN-53] + _ = x[SDWARFTYPE-54] + _ = x[SDWARFVAR-55] + _ = x[SDWARFRANGE-56] + _ = x[SDWARFLOC-57] + _ = x[SDWARFLINES-58] + _ = x[SSEHUNWINDINFO-59] + _ = x[SSEHSECT-60] +} + +const _SymKind_name = 
"SxxxSTEXTSELFRXSECTSMACHOPLTSTYPESSTRINGSGOSTRINGSGOFUNCSGCBITSSRODATASFUNCTABSELFROSECTSTYPERELROSSTRINGRELROSGOSTRINGRELROSGOFUNCRELROSGCBITSRELROSRODATARELROSFUNCTABRELROSTYPELINKSITABLINKSSYMTABSPCLNTABSFirstWritableSBUILDINFOSELFSECTSMACHOSMACHOGOTSWINDOWSSELFGOTSNOPTRDATASINITARRSDATASXCOFFTOCSBSSSNOPTRBSSSLIBFUZZER_8BIT_COUNTERSCOVERAGE_COUNTERSCOVERAGE_AUXVARSTLSBSSSXREFSMACHOSYMSTRSMACHOSYMTABSMACHOINDIRECTPLTSMACHOINDIRECTGOTSFILEPATHSDYNIMPORTSHOSTOBJSUNDEFEXTSDWARFSECTSDWARFCUINFOSDWARFCONSTSDWARFFCNSDWARFABSFCNSDWARFTYPESDWARFVARSDWARFRANGESDWARFLOCSDWARFLINESSSEHUNWINDINFOSSEHSECT" + +var _SymKind_index = [...]uint16{0, 4, 9, 19, 28, 33, 40, 49, 56, 63, 70, 78, 88, 98, 110, 124, 136, 148, 160, 173, 182, 191, 198, 206, 220, 230, 238, 244, 253, 261, 268, 278, 286, 291, 300, 304, 313, 336, 353, 369, 376, 381, 393, 405, 422, 439, 448, 458, 466, 475, 485, 497, 508, 517, 529, 539, 548, 559, 568, 579, 593, 601} + +func (i SymKind) String() string { + if i >= SymKind(len(_SymKind_index)-1) { + return "SymKind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _SymKind_name[_SymKind_index[i]:_SymKind_index[i+1]] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/wasm/asm.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/wasm/asm.go new file mode 100644 index 0000000000000000000000000000000000000000..413a809414037ae17d66a43f031448921d2d9cd1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/wasm/asm.go @@ -0,0 +1,715 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package wasm + +import ( + "bytes" + "cmd/internal/obj" + "cmd/internal/obj/wasm" + "cmd/internal/objabi" + "cmd/link/internal/ld" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "encoding/binary" + "fmt" + "internal/buildcfg" + "io" + "regexp" +) + +const ( + I32 = 0x7F + I64 = 0x7E + F32 = 0x7D + F64 = 0x7C +) + +const ( + sectionCustom = 0 + sectionType = 1 + sectionImport = 2 + sectionFunction = 3 + sectionTable = 4 + sectionMemory = 5 + sectionGlobal = 6 + sectionExport = 7 + sectionStart = 8 + sectionElement = 9 + sectionCode = 10 + sectionData = 11 +) + +// funcValueOffset is the offset between the PC_F value of a function and the index of the function in WebAssembly +const funcValueOffset = 0x1000 // TODO(neelance): make function addresses play nice with heap addresses + +func gentext(ctxt *ld.Link, ldr *loader.Loader) { +} + +type wasmFunc struct { + Module string + Name string + Type uint32 + Code []byte +} + +type wasmFuncType struct { + Params []byte + Results []byte +} + +func readWasmImport(ldr *loader.Loader, s loader.Sym) obj.WasmImport { + reportError := func(err error) { panic(fmt.Sprintf("failed to read WASM import in sym %v: %v", s, err)) } + + data := ldr.Data(s) + + readUint32 := func() (v uint32) { + v = binary.LittleEndian.Uint32(data) + data = data[4:] + return + } + + readUint64 := func() (v uint64) { + v = binary.LittleEndian.Uint64(data) + data = data[8:] + return + } + + readByte := func() byte { + if len(data) == 0 { + reportError(io.EOF) + } + + b := data[0] + data = data[1:] + return b + } + + readString := func() string { + n := readUint32() + + s := string(data[:n]) + + data = data[n:] + + return s + } + + var wi obj.WasmImport + wi.Module = readString() + wi.Name = readString() + wi.Params = make([]obj.WasmField, readUint32()) + for i := range wi.Params { + wi.Params[i].Type = obj.WasmFieldType(readByte()) + wi.Params[i].Offset = int64(readUint64()) + } + wi.Results = make([]obj.WasmField, readUint32()) + for i := 
range wi.Results { + wi.Results[i].Type = obj.WasmFieldType(readByte()) + wi.Results[i].Offset = int64(readUint64()) + } + return wi +} + +var wasmFuncTypes = map[string]*wasmFuncType{ + "_rt0_wasm_js": {Params: []byte{}}, // + "_rt0_wasm_wasip1": {Params: []byte{}}, // + "wasm_export__start": {}, // + "wasm_export_run": {Params: []byte{I32, I32}}, // argc, argv + "wasm_export_resume": {Params: []byte{}}, // + "wasm_export_getsp": {Results: []byte{I32}}, // sp + "wasm_pc_f_loop": {Params: []byte{}}, // + "runtime.wasmDiv": {Params: []byte{I64, I64}, Results: []byte{I64}}, // x, y -> x/y + "runtime.wasmTruncS": {Params: []byte{F64}, Results: []byte{I64}}, // x -> int(x) + "runtime.wasmTruncU": {Params: []byte{F64}, Results: []byte{I64}}, // x -> uint(x) + "gcWriteBarrier": {Params: []byte{I64}, Results: []byte{I64}}, // #bytes -> bufptr + "runtime.gcWriteBarrier1": {Results: []byte{I64}}, // -> bufptr + "runtime.gcWriteBarrier2": {Results: []byte{I64}}, // -> bufptr + "runtime.gcWriteBarrier3": {Results: []byte{I64}}, // -> bufptr + "runtime.gcWriteBarrier4": {Results: []byte{I64}}, // -> bufptr + "runtime.gcWriteBarrier5": {Results: []byte{I64}}, // -> bufptr + "runtime.gcWriteBarrier6": {Results: []byte{I64}}, // -> bufptr + "runtime.gcWriteBarrier7": {Results: []byte{I64}}, // -> bufptr + "runtime.gcWriteBarrier8": {Results: []byte{I64}}, // -> bufptr + "cmpbody": {Params: []byte{I64, I64, I64, I64}, Results: []byte{I64}}, // a, alen, b, blen -> -1/0/1 + "memeqbody": {Params: []byte{I64, I64, I64}, Results: []byte{I64}}, // a, b, len -> 0/1 + "memcmp": {Params: []byte{I32, I32, I32}, Results: []byte{I32}}, // a, b, len -> <0/0/>0 + "memchr": {Params: []byte{I32, I32, I32}, Results: []byte{I32}}, // s, c, len -> index +} + +func assignAddress(ldr *loader.Loader, sect *sym.Section, n int, s loader.Sym, va uint64, isTramp bool) (*sym.Section, int, uint64) { + // WebAssembly functions do not live in the same address space as the linear memory. 
+ // Instead, WebAssembly automatically assigns indices. Imported functions (section "import") + // have indices 0 to n. They are followed by native functions (sections "function" and "code") + // with indices n+1 and following. + // + // The following rules describe how wasm handles function indices and addresses: + // PC_F = funcValueOffset + WebAssembly function index (not including the imports) + // s.Value = PC = PC_F<<16 + PC_B + // + // The funcValueOffset is necessary to avoid conflicts with expectations + // that the Go runtime has about function addresses. + // The field "s.Value" corresponds to the concept of PC at runtime. + // However, there is no PC register, only PC_F and PC_B. PC_F denotes the function, + // PC_B the resume point inside of that function. The entry of the function has PC_B = 0. + ldr.SetSymSect(s, sect) + ldr.SetSymValue(s, int64(funcValueOffset+va/ld.MINFUNC)<<16) // va starts at zero + va += uint64(ld.MINFUNC) + return sect, n, va +} + +type wasmDataSect struct { + sect *sym.Section + data []byte +} + +var dataSects []wasmDataSect + +func asmb(ctxt *ld.Link, ldr *loader.Loader) { + sections := []*sym.Section{ + ldr.SymSect(ldr.Lookup("runtime.rodata", 0)), + ldr.SymSect(ldr.Lookup("runtime.typelink", 0)), + ldr.SymSect(ldr.Lookup("runtime.itablink", 0)), + ldr.SymSect(ldr.Lookup("runtime.symtab", 0)), + ldr.SymSect(ldr.Lookup("runtime.pclntab", 0)), + ldr.SymSect(ldr.Lookup("runtime.noptrdata", 0)), + ldr.SymSect(ldr.Lookup("runtime.data", 0)), + } + + dataSects = make([]wasmDataSect, len(sections)) + for i, sect := range sections { + data := ld.DatblkBytes(ctxt, int64(sect.Vaddr), int64(sect.Length)) + dataSects[i] = wasmDataSect{sect, data} + } +} + +// asmb writes the final WebAssembly module binary. 
+// Spec: https://webassembly.github.io/spec/core/binary/modules.html +func asmb2(ctxt *ld.Link, ldr *loader.Loader) { + types := []*wasmFuncType{ + // For normal Go functions, the single parameter is PC_B, + // the return value is + // 0 if the function returned normally or + // 1 if the stack needs to be unwound. + {Params: []byte{I32}, Results: []byte{I32}}, + } + + // collect host imports (functions that get imported from the WebAssembly host, usually JavaScript) + // we store the import index of each imported function, so the R_WASMIMPORT relocation + // can write the correct index after a "call" instruction + // these are added as import statements to the top of the WebAssembly binary + var hostImports []*wasmFunc + hostImportMap := make(map[loader.Sym]int64) + for _, fn := range ctxt.Textp { + relocs := ldr.Relocs(fn) + for ri := 0; ri < relocs.Count(); ri++ { + r := relocs.At(ri) + if r.Type() == objabi.R_WASMIMPORT { + if lsym, ok := ldr.WasmImportSym(fn); ok { + wi := readWasmImport(ldr, lsym) + hostImportMap[fn] = int64(len(hostImports)) + hostImports = append(hostImports, &wasmFunc{ + Module: wi.Module, + Name: wi.Name, + Type: lookupType(&wasmFuncType{ + Params: fieldsToTypes(wi.Params), + Results: fieldsToTypes(wi.Results), + }, &types), + }) + } else { + panic(fmt.Sprintf("missing wasm symbol for %s", ldr.SymName(r.Sym()))) + } + } + } + } + + // collect functions with WebAssembly body + var buildid []byte + fns := make([]*wasmFunc, len(ctxt.Textp)) + for i, fn := range ctxt.Textp { + wfn := new(bytes.Buffer) + if ldr.SymName(fn) == "go:buildid" { + writeUleb128(wfn, 0) // number of sets of locals + writeI32Const(wfn, 0) + wfn.WriteByte(0x0b) // end + buildid = ldr.Data(fn) + } else { + // Relocations have variable length, handle them here. 
+ relocs := ldr.Relocs(fn) + P := ldr.Data(fn) + off := int32(0) + for ri := 0; ri < relocs.Count(); ri++ { + r := relocs.At(ri) + if r.Siz() == 0 { + continue // skip marker relocations + } + wfn.Write(P[off:r.Off()]) + off = r.Off() + rs := r.Sym() + switch r.Type() { + case objabi.R_ADDR: + writeSleb128(wfn, ldr.SymValue(rs)+r.Add()) + case objabi.R_CALL: + writeSleb128(wfn, int64(len(hostImports))+ldr.SymValue(rs)>>16-funcValueOffset) + case objabi.R_WASMIMPORT: + writeSleb128(wfn, hostImportMap[rs]) + default: + ldr.Errorf(fn, "bad reloc type %d (%s)", r.Type(), sym.RelocName(ctxt.Arch, r.Type())) + continue + } + } + wfn.Write(P[off:]) + } + + typ := uint32(0) + if sig, ok := wasmFuncTypes[ldr.SymName(fn)]; ok { + typ = lookupType(sig, &types) + } + + name := nameRegexp.ReplaceAllString(ldr.SymName(fn), "_") + fns[i] = &wasmFunc{Name: name, Type: typ, Code: wfn.Bytes()} + } + + ctxt.Out.Write([]byte{0x00, 0x61, 0x73, 0x6d}) // magic + ctxt.Out.Write([]byte{0x01, 0x00, 0x00, 0x00}) // version + + // Add any buildid early in the binary: + if len(buildid) != 0 { + writeBuildID(ctxt, buildid) + } + + writeTypeSec(ctxt, types) + writeImportSec(ctxt, hostImports) + writeFunctionSec(ctxt, fns) + writeTableSec(ctxt, fns) + writeMemorySec(ctxt, ldr) + writeGlobalSec(ctxt) + writeExportSec(ctxt, ldr, len(hostImports)) + writeElementSec(ctxt, uint64(len(hostImports)), uint64(len(fns))) + writeCodeSec(ctxt, fns) + writeDataSec(ctxt) + writeProducerSec(ctxt) + if !*ld.FlagS { + writeNameSec(ctxt, len(hostImports), fns) + } +} + +func lookupType(sig *wasmFuncType, types *[]*wasmFuncType) uint32 { + for i, t := range *types { + if bytes.Equal(sig.Params, t.Params) && bytes.Equal(sig.Results, t.Results) { + return uint32(i) + } + } + *types = append(*types, sig) + return uint32(len(*types) - 1) +} + +func writeSecHeader(ctxt *ld.Link, id uint8) int64 { + ctxt.Out.WriteByte(id) + sizeOffset := ctxt.Out.Offset() + ctxt.Out.Write(make([]byte, 5)) // placeholder for length + 
return sizeOffset +} + +func writeSecSize(ctxt *ld.Link, sizeOffset int64) { + endOffset := ctxt.Out.Offset() + ctxt.Out.SeekSet(sizeOffset) + writeUleb128FixedLength(ctxt.Out, uint64(endOffset-sizeOffset-5), 5) + ctxt.Out.SeekSet(endOffset) +} + +func writeBuildID(ctxt *ld.Link, buildid []byte) { + sizeOffset := writeSecHeader(ctxt, sectionCustom) + writeName(ctxt.Out, "go:buildid") + ctxt.Out.Write(buildid) + writeSecSize(ctxt, sizeOffset) +} + +// writeTypeSec writes the section that declares all function types +// so they can be referenced by index. +func writeTypeSec(ctxt *ld.Link, types []*wasmFuncType) { + sizeOffset := writeSecHeader(ctxt, sectionType) + + writeUleb128(ctxt.Out, uint64(len(types))) + + for _, t := range types { + ctxt.Out.WriteByte(0x60) // functype + writeUleb128(ctxt.Out, uint64(len(t.Params))) + for _, v := range t.Params { + ctxt.Out.WriteByte(byte(v)) + } + writeUleb128(ctxt.Out, uint64(len(t.Results))) + for _, v := range t.Results { + ctxt.Out.WriteByte(byte(v)) + } + } + + writeSecSize(ctxt, sizeOffset) +} + +// writeImportSec writes the section that lists the functions that get +// imported from the WebAssembly host, usually JavaScript. +func writeImportSec(ctxt *ld.Link, hostImports []*wasmFunc) { + sizeOffset := writeSecHeader(ctxt, sectionImport) + + writeUleb128(ctxt.Out, uint64(len(hostImports))) // number of imports + for _, fn := range hostImports { + if fn.Module != "" { + writeName(ctxt.Out, fn.Module) + } else { + writeName(ctxt.Out, wasm.GojsModule) // provided by the import object in wasm_exec.js + } + writeName(ctxt.Out, fn.Name) + ctxt.Out.WriteByte(0x00) // func import + writeUleb128(ctxt.Out, uint64(fn.Type)) + } + + writeSecSize(ctxt, sizeOffset) +} + +// writeFunctionSec writes the section that declares the types of functions. +// The bodies of these functions will later be provided in the "code" section. 
+func writeFunctionSec(ctxt *ld.Link, fns []*wasmFunc) { + sizeOffset := writeSecHeader(ctxt, sectionFunction) + + writeUleb128(ctxt.Out, uint64(len(fns))) + for _, fn := range fns { + writeUleb128(ctxt.Out, uint64(fn.Type)) + } + + writeSecSize(ctxt, sizeOffset) +} + +// writeTableSec writes the section that declares tables. Currently there is only a single table +// that is used by the CallIndirect operation to dynamically call any function. +// The contents of the table get initialized by the "element" section. +func writeTableSec(ctxt *ld.Link, fns []*wasmFunc) { + sizeOffset := writeSecHeader(ctxt, sectionTable) + + numElements := uint64(funcValueOffset + len(fns)) + writeUleb128(ctxt.Out, 1) // number of tables + ctxt.Out.WriteByte(0x70) // type: anyfunc + ctxt.Out.WriteByte(0x00) // no max + writeUleb128(ctxt.Out, numElements) // min + + writeSecSize(ctxt, sizeOffset) +} + +// writeMemorySec writes the section that declares linear memories. Currently one linear memory is being used. +// Linear memory always starts at address zero. More memory can be requested with the GrowMemory instruction. +func writeMemorySec(ctxt *ld.Link, ldr *loader.Loader) { + sizeOffset := writeSecHeader(ctxt, sectionMemory) + + dataSection := ldr.SymSect(ldr.Lookup("runtime.data", 0)) + dataEnd := dataSection.Vaddr + dataSection.Length + var initialSize = dataEnd + 16<<20 // 16MB, enough for runtime init without growing + + const wasmPageSize = 64 << 10 // 64KB + + writeUleb128(ctxt.Out, 1) // number of memories + ctxt.Out.WriteByte(0x00) // no maximum memory size + writeUleb128(ctxt.Out, initialSize/wasmPageSize) // minimum (initial) memory size + + writeSecSize(ctxt, sizeOffset) +} + +// writeGlobalSec writes the section that declares global variables. 
+func writeGlobalSec(ctxt *ld.Link) { + sizeOffset := writeSecHeader(ctxt, sectionGlobal) + + globalRegs := []byte{ + I32, // 0: SP + I64, // 1: CTXT + I64, // 2: g + I64, // 3: RET0 + I64, // 4: RET1 + I64, // 5: RET2 + I64, // 6: RET3 + I32, // 7: PAUSE + } + + writeUleb128(ctxt.Out, uint64(len(globalRegs))) // number of globals + + for _, typ := range globalRegs { + ctxt.Out.WriteByte(typ) + ctxt.Out.WriteByte(0x01) // var + switch typ { + case I32: + writeI32Const(ctxt.Out, 0) + case I64: + writeI64Const(ctxt.Out, 0) + } + ctxt.Out.WriteByte(0x0b) // end + } + + writeSecSize(ctxt, sizeOffset) +} + +// writeExportSec writes the section that declares exports. +// Exports can be accessed by the WebAssembly host, usually JavaScript. +// The wasm_export_* functions and the linear memory get exported. +func writeExportSec(ctxt *ld.Link, ldr *loader.Loader, lenHostImports int) { + sizeOffset := writeSecHeader(ctxt, sectionExport) + + switch buildcfg.GOOS { + case "wasip1": + writeUleb128(ctxt.Out, 2) // number of exports + s := ldr.Lookup("_rt0_wasm_wasip1", 0) + idx := uint32(lenHostImports) + uint32(ldr.SymValue(s)>>16) - funcValueOffset + writeName(ctxt.Out, "_start") // the wasi entrypoint + ctxt.Out.WriteByte(0x00) // func export + writeUleb128(ctxt.Out, uint64(idx)) // funcidx + writeName(ctxt.Out, "memory") // memory in wasi + ctxt.Out.WriteByte(0x02) // mem export + writeUleb128(ctxt.Out, 0) // memidx + case "js": + writeUleb128(ctxt.Out, 4) // number of exports + for _, name := range []string{"run", "resume", "getsp"} { + s := ldr.Lookup("wasm_export_"+name, 0) + idx := uint32(lenHostImports) + uint32(ldr.SymValue(s)>>16) - funcValueOffset + writeName(ctxt.Out, name) // inst.exports.run/resume/getsp in wasm_exec.js + ctxt.Out.WriteByte(0x00) // func export + writeUleb128(ctxt.Out, uint64(idx)) // funcidx + } + writeName(ctxt.Out, "mem") // inst.exports.mem in wasm_exec.js + ctxt.Out.WriteByte(0x02) // mem export + writeUleb128(ctxt.Out, 0) // memidx + 
default: + ld.Exitf("internal error: writeExportSec: unrecognized GOOS %s", buildcfg.GOOS) + } + + writeSecSize(ctxt, sizeOffset) +} + +// writeElementSec writes the section that initializes the tables declared by the "table" section. +// The table for CallIndirect gets initialized in a very simple way so that each table index (PC_F value) +// maps linearly to the function index (numImports + PC_F). +func writeElementSec(ctxt *ld.Link, numImports, numFns uint64) { + sizeOffset := writeSecHeader(ctxt, sectionElement) + + writeUleb128(ctxt.Out, 1) // number of element segments + + writeUleb128(ctxt.Out, 0) // tableidx + writeI32Const(ctxt.Out, funcValueOffset) + ctxt.Out.WriteByte(0x0b) // end + + writeUleb128(ctxt.Out, numFns) // number of entries + for i := uint64(0); i < numFns; i++ { + writeUleb128(ctxt.Out, numImports+i) + } + + writeSecSize(ctxt, sizeOffset) +} + +// writeCodeSec writes the section that provides the function bodies for the functions +// declared by the "func" section. +func writeCodeSec(ctxt *ld.Link, fns []*wasmFunc) { + sizeOffset := writeSecHeader(ctxt, sectionCode) + + writeUleb128(ctxt.Out, uint64(len(fns))) // number of code entries + for _, fn := range fns { + writeUleb128(ctxt.Out, uint64(len(fn.Code))) + ctxt.Out.Write(fn.Code) + } + + writeSecSize(ctxt, sizeOffset) +} + +// writeDataSec writes the section that provides data that will be used to initialize the linear memory. +func writeDataSec(ctxt *ld.Link) { + sizeOffset := writeSecHeader(ctxt, sectionData) + + type dataSegment struct { + offset int32 + data []byte + } + + // Omit blocks of zeroes and instead emit data segments with offsets skipping the zeroes. + // This reduces the size of the WebAssembly binary. We use 8 bytes as an estimate for the + // overhead of adding a new segment (same as wasm-opt's memory-packing optimization uses). + const segmentOverhead = 8 + + // Generate at most this many segments. A higher number of segments gets rejected by some WebAssembly runtimes. 
+ const maxNumSegments = 100000 + + var segments []*dataSegment + for secIndex, ds := range dataSects { + data := ds.data + offset := int32(ds.sect.Vaddr) + + // skip leading zeroes + for len(data) > 0 && data[0] == 0 { + data = data[1:] + offset++ + } + + for len(data) > 0 { + dataLen := int32(len(data)) + var segmentEnd, zeroEnd int32 + if len(segments)+(len(dataSects)-secIndex) == maxNumSegments { + segmentEnd = dataLen + zeroEnd = dataLen + } else { + for { + // look for beginning of zeroes + for segmentEnd < dataLen && data[segmentEnd] != 0 { + segmentEnd++ + } + // look for end of zeroes + zeroEnd = segmentEnd + for zeroEnd < dataLen && data[zeroEnd] == 0 { + zeroEnd++ + } + // emit segment if omitting zeroes reduces the output size + if zeroEnd-segmentEnd >= segmentOverhead || zeroEnd == dataLen { + break + } + segmentEnd = zeroEnd + } + } + + segments = append(segments, &dataSegment{ + offset: offset, + data: data[:segmentEnd], + }) + data = data[zeroEnd:] + offset += zeroEnd + } + } + + writeUleb128(ctxt.Out, uint64(len(segments))) // number of data entries + for _, seg := range segments { + writeUleb128(ctxt.Out, 0) // memidx + writeI32Const(ctxt.Out, seg.offset) + ctxt.Out.WriteByte(0x0b) // end + writeUleb128(ctxt.Out, uint64(len(seg.data))) + ctxt.Out.Write(seg.data) + } + + writeSecSize(ctxt, sizeOffset) +} + +// writeProducerSec writes an optional section that reports the source language and compiler version. 
+func writeProducerSec(ctxt *ld.Link) { + sizeOffset := writeSecHeader(ctxt, sectionCustom) + writeName(ctxt.Out, "producers") + + writeUleb128(ctxt.Out, 2) // number of fields + + writeName(ctxt.Out, "language") // field name + writeUleb128(ctxt.Out, 1) // number of values + writeName(ctxt.Out, "Go") // value: name + writeName(ctxt.Out, buildcfg.Version) // value: version + + writeName(ctxt.Out, "processed-by") // field name + writeUleb128(ctxt.Out, 1) // number of values + writeName(ctxt.Out, "Go cmd/compile") // value: name + writeName(ctxt.Out, buildcfg.Version) // value: version + + writeSecSize(ctxt, sizeOffset) +} + +var nameRegexp = regexp.MustCompile(`[^\w.]`) + +// writeNameSec writes an optional section that assigns names to the functions declared by the "func" section. +// The names are only used by WebAssembly stack traces, debuggers and decompilers. +// TODO(neelance): add symbol table of DATA symbols +func writeNameSec(ctxt *ld.Link, firstFnIndex int, fns []*wasmFunc) { + sizeOffset := writeSecHeader(ctxt, sectionCustom) + writeName(ctxt.Out, "name") + + sizeOffset2 := writeSecHeader(ctxt, 0x01) // function names + writeUleb128(ctxt.Out, uint64(len(fns))) + for i, fn := range fns { + writeUleb128(ctxt.Out, uint64(firstFnIndex+i)) + writeName(ctxt.Out, fn.Name) + } + writeSecSize(ctxt, sizeOffset2) + + writeSecSize(ctxt, sizeOffset) +} + +type nameWriter interface { + io.ByteWriter + io.Writer +} + +func writeI32Const(w io.ByteWriter, v int32) { + w.WriteByte(0x41) // i32.const + writeSleb128(w, int64(v)) +} + +func writeI64Const(w io.ByteWriter, v int64) { + w.WriteByte(0x42) // i64.const + writeSleb128(w, v) +} + +func writeName(w nameWriter, name string) { + writeUleb128(w, uint64(len(name))) + w.Write([]byte(name)) +} + +func writeUleb128(w io.ByteWriter, v uint64) { + if v < 128 { + w.WriteByte(uint8(v)) + return + } + more := true + for more { + c := uint8(v & 0x7f) + v >>= 7 + more = v != 0 + if more { + c |= 0x80 + } + w.WriteByte(c) + } +} + 
+func writeUleb128FixedLength(w io.ByteWriter, v uint64, length int) { + for i := 0; i < length; i++ { + c := uint8(v & 0x7f) + v >>= 7 + if i < length-1 { + c |= 0x80 + } + w.WriteByte(c) + } + if v != 0 { + panic("writeUleb128FixedLength: length too small") + } +} + +func writeSleb128(w io.ByteWriter, v int64) { + more := true + for more { + c := uint8(v & 0x7f) + s := uint8(v & 0x40) + v >>= 7 + more = !((v == 0 && s == 0) || (v == -1 && s != 0)) + if more { + c |= 0x80 + } + w.WriteByte(c) + } +} + +func fieldsToTypes(fields []obj.WasmField) []byte { + b := make([]byte, len(fields)) + for i, f := range fields { + switch f.Type { + case obj.WasmI32, obj.WasmPtr: + b[i] = I32 + case obj.WasmI64: + b[i] = I64 + case obj.WasmF32: + b[i] = F32 + case obj.WasmF64: + b[i] = F64 + default: + panic(fmt.Sprintf("fieldsToTypes: unknown field type: %d", f.Type)) + } + } + return b +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/wasm/obj.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/wasm/obj.go new file mode 100644 index 0000000000000000000000000000000000000000..f8090a3551cdbcb6c4edf3fd5d74840ac6f448b9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/wasm/obj.go @@ -0,0 +1,35 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package wasm + +import ( + "cmd/internal/sys" + "cmd/link/internal/ld" +) + +func Init() (*sys.Arch, ld.Arch) { + theArch := ld.Arch{ + Funcalign: 16, + Maxalign: 32, + Minalign: 1, + + Archinit: archinit, + AssignAddress: assignAddress, + Asmb: asmb, + Asmb2: asmb2, + Gentext: gentext, + } + + return sys.ArchWasm, theArch +} + +func archinit(ctxt *ld.Link) { + if *ld.FlagRound == -1 { + *ld.FlagRound = 4096 + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = 0 + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/x86/asm.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/x86/asm.go new file mode 100644 index 0000000000000000000000000000000000000000..876dbd984fcacfb81568dd09add93413d0b22231 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/x86/asm.go @@ -0,0 +1,493 @@ +// Inferno utils/8l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/8l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package x86 + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" + "log" +) + +func gentext(ctxt *ld.Link, ldr *loader.Loader) { + if ctxt.DynlinkingGo() { + // We need get_pc_thunk. + } else { + switch ctxt.BuildMode { + case ld.BuildModeCArchive: + if !ctxt.IsELF { + return + } + case ld.BuildModePIE, ld.BuildModeCShared, ld.BuildModePlugin: + // We need get_pc_thunk. + default: + return + } + } + + // Generate little thunks that load the PC of the next instruction into a register. 
+ thunks := make([]loader.Sym, 0, 7+len(ctxt.Textp)) + for _, r := range [...]struct { + name string + num uint8 + }{ + {"ax", 0}, + {"cx", 1}, + {"dx", 2}, + {"bx", 3}, + // sp + {"bp", 5}, + {"si", 6}, + {"di", 7}, + } { + thunkfunc := ldr.CreateSymForUpdate("__x86.get_pc_thunk."+r.name, 0) + thunkfunc.SetType(sym.STEXT) + ldr.SetAttrLocal(thunkfunc.Sym(), true) + o := func(op ...uint8) { + for _, op1 := range op { + thunkfunc.AddUint8(op1) + } + } + // 8b 04 24 mov (%esp),%eax + // Destination register is in bits 3-5 of the middle byte, so add that in. + o(0x8b, 0x04+r.num<<3, 0x24) + // c3 ret + o(0xc3) + + thunks = append(thunks, thunkfunc.Sym()) + } + ctxt.Textp = append(thunks, ctxt.Textp...) // keep Textp in dependency order + + initfunc, addmoduledata := ld.PrepareAddmoduledata(ctxt) + if initfunc == nil { + return + } + + o := func(op ...uint8) { + for _, op1 := range op { + initfunc.AddUint8(op1) + } + } + + // go.link.addmoduledata: + // 53 push %ebx + // e8 00 00 00 00 call __x86.get_pc_thunk.cx + R_CALL __x86.get_pc_thunk.cx + // 8d 81 00 00 00 00 lea 0x0(%ecx), %eax + R_PCREL ctxt.Moduledata + // 8d 99 00 00 00 00 lea 0x0(%ecx), %ebx + R_GOTPC _GLOBAL_OFFSET_TABLE_ + // e8 00 00 00 00 call runtime.addmoduledata@plt + R_CALL runtime.addmoduledata + // 5b pop %ebx + // c3 ret + + o(0x53) + + o(0xe8) + initfunc.AddSymRef(ctxt.Arch, ldr.Lookup("__x86.get_pc_thunk.cx", 0), 0, objabi.R_CALL, 4) + + o(0x8d, 0x81) + initfunc.AddPCRelPlus(ctxt.Arch, ctxt.Moduledata, 6) + + o(0x8d, 0x99) + gotsym := ldr.LookupOrCreateSym("_GLOBAL_OFFSET_TABLE_", 0) + initfunc.AddSymRef(ctxt.Arch, gotsym, 12, objabi.R_PCREL, 4) + o(0xe8) + initfunc.AddSymRef(ctxt.Arch, addmoduledata, 0, objabi.R_CALL, 4) + + o(0x5b) + + o(0xc3) +} + +func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { + targ := r.Sym() + var targType sym.SymKind + if targ != 0 { + targType = ldr.SymType(targ) + } + + switch r.Type() { + 
default: + if r.Type() >= objabi.ElfRelocOffset { + ldr.Errorf(s, "unexpected relocation type %d (%s)", r.Type(), sym.RelocName(target.Arch, r.Type())) + return false + } + + // Handle relocations found in ELF object files. + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_386_PC32): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected R_386_PC32 relocation for dynamic symbol %s", ldr.SymName(targ)) + } + if targType == 0 || targType == sym.SXREF { + ldr.Errorf(s, "unknown symbol %s in pcrel", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocAdd(rIdx, r.Add()+4) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_386_PLT32): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocAdd(rIdx, r.Add()+4) + if targType == sym.SDYNIMPORT { + addpltsym(target, ldr, syms, targ) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymPlt(targ))) + } + + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_386_GOT32), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_386_GOT32X): + su := ldr.MakeSymbolUpdater(s) + if targType != sym.SDYNIMPORT { + // have symbol + sData := ldr.Data(s) + + if r.Off() >= 2 && sData[r.Off()-2] == 0x8b { + su.MakeWritable() + + // turn MOVL of GOT entry into LEAL of symbol address, relative to GOT. + writeableData := su.Data() + writeableData[r.Off()-2] = 0x8d + su.SetRelocType(rIdx, objabi.R_GOTOFF) + return true + } + + if r.Off() >= 2 && sData[r.Off()-2] == 0xff && sData[r.Off()-1] == 0xb3 { + su.MakeWritable() + // turn PUSHL of GOT entry into PUSHL of symbol itself. + // use unnecessary SS prefix to keep instruction same length. 
+ writeableData := su.Data() + writeableData[r.Off()-2] = 0x36 + writeableData[r.Off()-1] = 0x68 + su.SetRelocType(rIdx, objabi.R_ADDR) + return true + } + + ldr.Errorf(s, "unexpected GOT reloc for non-dynamic symbol %s", ldr.SymName(targ)) + return false + } + + ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_386_GLOB_DAT)) + su.SetRelocType(rIdx, objabi.R_CONST) // write r->add during relocsym + su.SetRelocSym(rIdx, 0) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ))) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_386_GOTOFF): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_GOTOFF) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_386_GOTPC): + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+4) + return true + + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_386_32): + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected R_386_32 relocation for dynamic symbol %s", ldr.SymName(targ)) + } + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ADDR) + return true + + case objabi.MachoRelocOffset + ld.MACHO_GENERIC_RELOC_VANILLA*2 + 0: + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_ADDR) + if targType == sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected reloc for dynamic symbol %s", ldr.SymName(targ)) + } + return true + + case objabi.MachoRelocOffset + ld.MACHO_GENERIC_RELOC_VANILLA*2 + 1: + su := ldr.MakeSymbolUpdater(s) + if targType == sym.SDYNIMPORT { + addpltsym(target, ldr, syms, targ) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, int64(ldr.SymPlt(targ))) + su.SetRelocType(rIdx, objabi.R_PCREL) + return true + } + + su.SetRelocType(rIdx, objabi.R_PCREL) + return true + + case objabi.MachoRelocOffset + ld.MACHO_FAKE_GOTPCREL: + su := ldr.MakeSymbolUpdater(s) + if targType != sym.SDYNIMPORT { + // have symbol + // turn MOVL of GOT entry into LEAL of symbol 
itself + sData := ldr.Data(s) + if r.Off() < 2 || sData[r.Off()-2] != 0x8b { + ldr.Errorf(s, "unexpected GOT reloc for non-dynamic symbol %s", ldr.SymName(targ)) + return false + } + + su.MakeWritable() + writeableData := su.Data() + writeableData[r.Off()-2] = 0x8d + su.SetRelocType(rIdx, objabi.R_PCREL) + return true + } + + ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_386_GLOB_DAT)) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ))) + su.SetRelocType(rIdx, objabi.R_PCREL) + return true + } + + // Handle references to ELF symbols from our own object files. + if targType != sym.SDYNIMPORT { + return true + } + + // Reread the reloc to incorporate any changes in type above. + relocs := ldr.Relocs(s) + r = relocs.At(rIdx) + + switch r.Type() { + case objabi.R_CALL, + objabi.R_PCREL: + if target.IsExternal() { + // External linker will do this relocation. + return true + } + addpltsym(target, ldr, syms, targ) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocSym(rIdx, syms.PLT) + su.SetRelocAdd(rIdx, int64(ldr.SymPlt(targ))) + return true + + case objabi.R_ADDR: + if ldr.SymType(s) != sym.SDATA { + break + } + if target.IsElf() { + ld.Adddynsym(ldr, target, syms, targ) + rel := ldr.MakeSymbolUpdater(syms.Rel) + rel.AddAddrPlus(target.Arch, s, int64(r.Off())) + rel.AddUint32(target.Arch, elf.R_INFO32(uint32(ldr.SymDynid(targ)), uint32(elf.R_386_32))) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_CONST) // write r->add during relocsym + su.SetRelocSym(rIdx, 0) + return true + } + } + + return false +} + +func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool { + out.Write32(uint32(sectoff)) + + elfsym := ld.ElfSymForReloc(ctxt, r.Xsym) + siz := r.Size + switch r.Type { + default: + return false + case objabi.R_ADDR, objabi.R_DWARFSECREF: + if siz == 4 { + out.Write32(uint32(elf.R_386_32) | uint32(elfsym)<<8) + } else { + return false + } + 
case objabi.R_GOTPCREL: + if siz == 4 { + out.Write32(uint32(elf.R_386_GOTPC)) + if ldr.SymName(r.Xsym) != "_GLOBAL_OFFSET_TABLE_" { + out.Write32(uint32(sectoff)) + out.Write32(uint32(elf.R_386_GOT32) | uint32(elfsym)<<8) + } + } else { + return false + } + case objabi.R_CALL: + if siz == 4 { + if ldr.SymType(r.Xsym) == sym.SDYNIMPORT { + out.Write32(uint32(elf.R_386_PLT32) | uint32(elfsym)<<8) + } else { + out.Write32(uint32(elf.R_386_PC32) | uint32(elfsym)<<8) + } + } else { + return false + } + case objabi.R_PCREL: + if siz == 4 { + out.Write32(uint32(elf.R_386_PC32) | uint32(elfsym)<<8) + } else { + return false + } + case objabi.R_TLS_LE: + if siz == 4 { + out.Write32(uint32(elf.R_386_TLS_LE) | uint32(elfsym)<<8) + } else { + return false + } + case objabi.R_TLS_IE: + if siz == 4 { + out.Write32(uint32(elf.R_386_GOTPC)) + out.Write32(uint32(sectoff)) + out.Write32(uint32(elf.R_386_TLS_GOTIE) | uint32(elfsym)<<8) + } else { + return false + } + } + + return true +} + +func machoreloc1(*sys.Arch, *ld.OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool { + return false +} + +func pereloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, sectoff int64) bool { + var v uint32 + + rs := r.Xsym + rt := r.Type + + if ldr.SymDynid(rs) < 0 { + ldr.Errorf(s, "reloc %d (%s) to non-coff symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs)) + return false + } + + out.Write32(uint32(sectoff)) + out.Write32(uint32(ldr.SymDynid(rs))) + + switch rt { + default: + return false + + case objabi.R_DWARFSECREF: + v = ld.IMAGE_REL_I386_SECREL + + case objabi.R_ADDR: + v = ld.IMAGE_REL_I386_DIR32 + + case objabi.R_PEIMAGEOFF: + v = ld.IMAGE_REL_I386_DIR32NB + + case objabi.R_CALL, + objabi.R_PCREL: + v = ld.IMAGE_REL_I386_REL32 + } + + out.Write16(uint16(v)) + + return true +} + +func archreloc(*ld.Target, *loader.Loader, *ld.ArchSyms, loader.Reloc, loader.Sym, int64) (int64, int, bool) 
{ + return -1, 0, false +} + +func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { + log.Fatalf("unexpected relocation variant") + return -1 +} + +func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, got *loader.SymbolBuilder, dynamic loader.Sym) { + if plt.Size() == 0 { + // pushl got+4 + plt.AddUint8(0xff) + + plt.AddUint8(0x35) + plt.AddAddrPlus(ctxt.Arch, got.Sym(), 4) + + // jmp *got+8 + plt.AddUint8(0xff) + + plt.AddUint8(0x25) + plt.AddAddrPlus(ctxt.Arch, got.Sym(), 8) + + // zero pad + plt.AddUint32(ctxt.Arch, 0) + + // assume got->size == 0 too + got.AddAddrPlus(ctxt.Arch, dynamic, 0) + + got.AddUint32(ctxt.Arch, 0) + got.AddUint32(ctxt.Arch, 0) + } +} + +func addpltsym(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym) { + if ldr.SymPlt(s) >= 0 { + return + } + + ld.Adddynsym(ldr, target, syms, s) + + if target.IsElf() { + plt := ldr.MakeSymbolUpdater(syms.PLT) + got := ldr.MakeSymbolUpdater(syms.GOTPLT) + rel := ldr.MakeSymbolUpdater(syms.RelPLT) + if plt.Size() == 0 { + panic("plt is not set up") + } + + // jmpq *got+size + plt.AddUint8(0xff) + + plt.AddUint8(0x25) + plt.AddAddrPlus(target.Arch, got.Sym(), got.Size()) + + // add to got: pointer to current pos in plt + got.AddAddrPlus(target.Arch, plt.Sym(), plt.Size()) + + // pushl $x + plt.AddUint8(0x68) + + plt.AddUint32(target.Arch, uint32(rel.Size())) + + // jmp .plt + plt.AddUint8(0xe9) + + plt.AddUint32(target.Arch, uint32(-(plt.Size() + 4))) + + // rel + rel.AddAddrPlus(target.Arch, got.Sym(), got.Size()-4) + + sDynid := ldr.SymDynid(s) + rel.AddUint32(target.Arch, elf.R_INFO32(uint32(sDynid), uint32(elf.R_386_JMP_SLOT))) + + ldr.SetPlt(s, int32(plt.Size()-16)) + } else { + ldr.Errorf(s, "addpltsym: unsupported binary format") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/x86/l.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/x86/l.go new file mode 100644 index 
0000000000000000000000000000000000000000..5875d45f8daf0ac60948b1f5bfbc1d97b62cc09c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/x86/l.go @@ -0,0 +1,43 @@ +// Inferno utils/8l/l.h +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/8l/l.h +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package x86 + +const ( + maxAlign = 32 // max data alignment + minAlign = 1 // min data alignment + funcAlign = 16 +) + +/* Used by ../internal/ld/dwarf.go */ +const ( + dwarfRegSP = 4 + dwarfRegLR = 8 +) diff --git a/platform/dbops/binaries/go/go/src/cmd/link/internal/x86/obj.go b/platform/dbops/binaries/go/go/src/cmd/link/internal/x86/obj.go new file mode 100644 index 0000000000000000000000000000000000000000..4336f01ea3d53677dff711a7627c090dc556f712 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/internal/x86/obj.go @@ -0,0 +1,110 @@ +// Inferno utils/8l/obj.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/8l/obj.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package x86 + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/ld" +) + +func Init() (*sys.Arch, ld.Arch) { + arch := sys.Arch386 + + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, + // 0xCC is INT $3 - breakpoint instruction + CodePad: []byte{0xCC}, + + Plan9Magic: uint32(4*11*11 + 7), + + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Gentext: gentext, + Machoreloc1: machoreloc1, + PEreloc1: pereloc1, + + ELF: ld.ELFArch{ + Linuxdynld: "/lib/ld-linux.so.2", + LinuxdynldMusl: "/lib/ld-musl-i386.so.1", + Freebsddynld: "/usr/libexec/ld-elf.so.1", + Openbsddynld: "/usr/libexec/ld.so", + Netbsddynld: "/usr/libexec/ld.elf_so", + Solarisdynld: "/lib/ld.so.1", + + Reloc1: elfreloc1, + RelocSize: 8, + SetupPLT: elfsetupplt, + }, + } + + return arch, theArch +} + +func archinit(ctxt *ld.Link) { + switch ctxt.HeadType { + default: + ld.Exitf("unknown -H option: %v", ctxt.HeadType) + + case objabi.Hplan9: /* plan 9 */ + ld.HEADR = 32 + if *ld.FlagRound == -1 { + *ld.FlagRound = 4096 + } + if *ld.FlagTextAddr == -1 { + *ld.FlagTextAddr = ld.Rnd(4096, *ld.FlagRound) + int64(ld.HEADR) + } + + case objabi.Hlinux, /* elf32 executable */ + objabi.Hfreebsd, + objabi.Hnetbsd, + objabi.Hopenbsd: + ld.Elfinit(ctxt) + + ld.HEADR = ld.ELFRESERVE + if *ld.FlagRound == -1 { + *ld.FlagRound = 4096 + } + if *ld.FlagTextAddr == 
-1 { + *ld.FlagTextAddr = ld.Rnd(0x08048000, *ld.FlagRound) + int64(ld.HEADR) + } + + case objabi.Hwindows: /* PE executable */ + // ld.HEADR, ld.FlagTextAddr, ld.FlagRound are set in ld.Peinit + return + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/dynimportvar/asm/a.go b/platform/dbops/binaries/go/go/src/cmd/link/testdata/dynimportvar/asm/a.go new file mode 100644 index 0000000000000000000000000000000000000000..8d9299972b1c141baf3d2172d3b952634133c8a7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/testdata/dynimportvar/asm/a.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is a separate package because we cannot have Go +// assembly code and cgo code in the same package. + +//go:build darwin + +package asm + +//go:cgo_import_dynamic libc_mach_task_self_ mach_task_self_ "/usr/lib/libSystem.B.dylib" + +// load mach_task_self_ from assembly code +func Mach_task_self() uint32 diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/dynimportvar/asm/a_amd64.s b/platform/dbops/binaries/go/go/src/cmd/link/testdata/dynimportvar/asm/a_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..93547e32f1b7afe9d7742172926dcbd46b3740d5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/testdata/dynimportvar/asm/a_amd64.s @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build darwin + +TEXT ·Mach_task_self(SB),0,$0-4 + MOVQ $libc_mach_task_self_(SB), AX + MOVQ (AX), AX + MOVL AX, ret+0(FP) + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/dynimportvar/asm/a_arm64.s b/platform/dbops/binaries/go/go/src/cmd/link/testdata/dynimportvar/asm/a_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..bd3c9d71f57970f782ae5110bcdb0c4ef2e4485f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/testdata/dynimportvar/asm/a_arm64.s @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin + +TEXT ·Mach_task_self(SB),0,$0-4 + MOVD $libc_mach_task_self_(SB), R0 + MOVD (R0), R0 + MOVW R0, ret+0(FP) + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/dynimportvar/main.go b/platform/dbops/binaries/go/go/src/cmd/link/testdata/dynimportvar/main.go new file mode 100644 index 0000000000000000000000000000000000000000..658d3405323a4c3ff69b86a3a44a260e512bbb54 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/testdata/dynimportvar/main.go @@ -0,0 +1,32 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that we can access dynamically imported variables. +// We use mach_task_self_ from darwin's system library. +// Check that loading the variable from C and Go gets the +// same result.
+ +//go:build darwin + +package main + +/* +#include <mach/mach.h> + +unsigned int Mach_task_self(void) { + return mach_task_self(); +} +*/ +import "C" + +import "cmd/link/testdata/dynimportvar/asm" + +func main() { + c := uint32(C.Mach_task_self()) + a := asm.Mach_task_self() + if a != c { + println("got", a, "want", c) + panic("FAIL") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-binutils/main.go b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-binutils/main.go new file mode 100644 index 0000000000000000000000000000000000000000..14ea6f9e0ff8376915972bb7a5bbd7e468fa7237 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-binutils/main.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that a PE rsrc section is handled correctly (issue 39658). +// +// rsrc.syso is created using binutils with: +// {x86_64,i686}-w64-mingw32-windres -i a.rc -o rsrc_$GOARCH.syso -O coff +// where a.rc is a text file with the following content: +// +// resname RCDATA { +// "Hello Gophers!\0", +// "This is a test.\0", +// } + +package main + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-binutils/rsrc_386.syso b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-binutils/rsrc_386.syso new file mode 100644 index 0000000000000000000000000000000000000000..b4abc58abee609976b890aa7dafe197431d7e011 Binary files /dev/null and b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-binutils/rsrc_386.syso differ diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-binutils/rsrc_amd64.syso b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-binutils/rsrc_amd64.syso new file mode 100644 index 0000000000000000000000000000000000000000..0d9699da0492098958291d37e15599236379a346 Binary files /dev/null and
b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-binutils/rsrc_amd64.syso differ diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/main.go b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/main.go new file mode 100644 index 0000000000000000000000000000000000000000..099a71a3fffaaab67aec72b6705f8ee1c9ab42f4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/main.go @@ -0,0 +1,43 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that a PE rsrc section is handled correctly, when the object files +// have been created by llvm-rc or msvc's rc.exe, which means there's the +// @feat.00 symbol as well as split .rsrc$00 and .rsrc$01 section to deal with. +// +// rsrc.syso is created using llvm with: +// {i686,x86_64,armv7,arm64}-w64-mingw32-windres -i a.rc -o rsrc_$GOARCH.syso -O coff +// where this windres calls into llvm-rc and llvm-cvtres. 
The source file, +// a.rc, simply contains a reference to its own bytes: +// +// resname RCDATA a.rc +// +// Object dumping the resultant rsrc.syso, we can see the split sections and +// the @feat.00 SEH symbol: +// +// rsrc.syso: file format coff-x86-64 +// +// architecture: x86_64 +// start address: 0x0000000000000000 +// +// Export Table: +// Sections: +// Idx Name Size VMA Type +// 0 .rsrc$01 00000068 0000000000000000 DATA +// 1 .rsrc$02 00000018 0000000000000000 DATA +// +// SYMBOL TABLE: +// [ 0](sec -1)(fl 0x00)(ty 0)(scl 3) (nx 0) 0x00000011 @feat.00 +// [ 1](sec 1)(fl 0x00)(ty 0)(scl 3) (nx 1) 0x00000000 .rsrc$01 +// AUX scnlen 0x68 nreloc 1 nlnno 0 checksum 0x0 assoc 0 comdat 0 +// [ 3](sec 2)(fl 0x00)(ty 0)(scl 3) (nx 1) 0x00000000 .rsrc$02 +// AUX scnlen 0x18 nreloc 0 nlnno 0 checksum 0x0 assoc 0 comdat 0 +// [ 5](sec 2)(fl 0x00)(ty 0)(scl 3) (nx 0) 0x00000000 $R000000 +// RELOCATION RECORDS FOR [.rsrc$01]: +// OFFSET TYPE VALUE +// 0000000000000048 IMAGE_REL_AMD64_ADDR32NB $R000000 + +package main + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/rsrc_386.syso b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/rsrc_386.syso new file mode 100644 index 0000000000000000000000000000000000000000..21126c9954fd595269f7e68ffaca4dc32ee3e25d Binary files /dev/null and b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/rsrc_386.syso differ diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/rsrc_amd64.syso b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/rsrc_amd64.syso new file mode 100644 index 0000000000000000000000000000000000000000..56f9260b0a3b76ad84ac48b2c5f0a41bb538dee5 Binary files /dev/null and b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/rsrc_amd64.syso differ diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/rsrc_arm.syso b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/rsrc_arm.syso new file mode 100644 
index 0000000000000000000000000000000000000000..c93a1e9ba0ffa390e122f211ca55ace3355ce27b Binary files /dev/null and b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/rsrc_arm.syso differ diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/rsrc_arm64.syso b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/rsrc_arm64.syso new file mode 100644 index 0000000000000000000000000000000000000000..7849638ddd42e7e24efcc18665e265ac01f8edcd Binary files /dev/null and b/platform/dbops/binaries/go/go/src/cmd/link/testdata/pe-llvm/rsrc_arm64.syso differ diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/testBuildFortvOS/lib.go b/platform/dbops/binaries/go/go/src/cmd/link/testdata/testBuildFortvOS/lib.go new file mode 100644 index 0000000000000000000000000000000000000000..bc6c69944013ce6208a36e9234161274b4719be2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/testdata/testBuildFortvOS/lib.go @@ -0,0 +1,8 @@ +package main + +import "C" + +//export GoFunc +func GoFunc() {} + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/testBuildFortvOS/main.m b/platform/dbops/binaries/go/go/src/cmd/link/testdata/testBuildFortvOS/main.m new file mode 100644 index 0000000000000000000000000000000000000000..1c8175f6cc17b8b8abdfeaa2552876a9f94c6356 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/testdata/testBuildFortvOS/main.m @@ -0,0 +1,5 @@ +extern void GoFunc(); + +int main(int argc, char **argv) { + GoFunc(); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/testHashedSyms/p.go b/platform/dbops/binaries/go/go/src/cmd/link/testdata/testHashedSyms/p.go new file mode 100644 index 0000000000000000000000000000000000000000..87dddcfcac5f35024e8a799e54a6bdf7e91f05b3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/testdata/testHashedSyms/p.go @@ -0,0 +1,33 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test case contains two static temps (the array literals) +// with same contents but different sizes. The linker should not +// report a hash collision. The linker can (and actually does) +// dedup the two symbols, by keeping the larger symbol. The dedup +// is not a requirement for correctness and not checked in this test. +// We do check the emitted symbol contents are correct, though. + +package main + +func main() { + F([10]int{1, 2, 3, 4, 5, 6}, [20]int{1, 2, 3, 4, 5, 6}) +} + +//go:noinline +func F(x, y interface{}) { + x1 := x.([10]int) + y1 := y.([20]int) + for i := range y1 { + if i < 6 { + if x1[i] != i+1 || y1[i] != i+1 { + panic("FAIL") + } + } else { + if (i < len(x1) && x1[i] != 0) || y1[i] != 0 { + panic("FAIL") + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/testIndexMismatch/a.go b/platform/dbops/binaries/go/go/src/cmd/link/testdata/testIndexMismatch/a.go new file mode 100644 index 0000000000000000000000000000000000000000..1f3b2c52d216c9420575045fef846373c0808671 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/testdata/testIndexMismatch/a.go @@ -0,0 +1,8 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +//go:noinline +func A() { println("A") } diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/testIndexMismatch/b.go b/platform/dbops/binaries/go/go/src/cmd/link/testdata/testIndexMismatch/b.go new file mode 100644 index 0000000000000000000000000000000000000000..9b55dbf77141a20ad3d358335adccf18b486c8a0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/testdata/testIndexMismatch/b.go @@ -0,0 +1,8 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +//go:noinline +func B() { println("B") } diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/testIndexMismatch/main.go b/platform/dbops/binaries/go/go/src/cmd/link/testdata/testIndexMismatch/main.go new file mode 100644 index 0000000000000000000000000000000000000000..bc15236f1e54ac280dae52fabd666b791a67cb31 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/testdata/testIndexMismatch/main.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "a" + +func main() { a.A() } diff --git a/platform/dbops/binaries/go/go/src/cmd/link/testdata/testRO/x.go b/platform/dbops/binaries/go/go/src/cmd/link/testdata/testRO/x.go new file mode 100644 index 0000000000000000000000000000000000000000..d77db6d563c590c7df4fb1a65ab20a8906c0ac1a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/testdata/testRO/x.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that read-only data is indeed read-only. This +// program attempts to modify read-only data, and it +// should fail. 
+ +package main + +import "unsafe" + +var s = "hello" + +func main() { + println(s) + *(*struct { + p *byte + l int + })(unsafe.Pointer(&s)).p = 'H' + println(s) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/fmthello.go b/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/fmthello.go new file mode 100644 index 0000000000000000000000000000000000000000..c8d82466dc9d75850006ee9fada06c0a45fbc2c6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/fmthello.go @@ -0,0 +1,20 @@ +package main + +import "fmt" + +func main() { + Println("hello, world") + if flag { +//line fmthello.go:999999 + Println("bad line") + for { + } + } +} + +//go:noinline +func Println(s string) { + fmt.Println(s) +} + +var flag bool diff --git a/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/fmthellocgo.go b/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/fmthellocgo.go new file mode 100644 index 0000000000000000000000000000000000000000..6555c3bacfa7990754ca2523db38c56d5d442f48 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/fmthellocgo.go @@ -0,0 +1,21 @@ +package main + +import "fmt" +import "C" + +func main() { + Println("hello, world") + if flag { +//line fmthello.go:999999 + Println("bad line") + for { + } + } +} + +//go:noinline +func Println(s string) { + fmt.Println(s) +} + +var flag bool diff --git a/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/go116.o b/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/go116.o new file mode 100644 index 0000000000000000000000000000000000000000..6434d5c8cffe54de18af70cf255f2da061e4b1cb Binary files /dev/null and b/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/go116.o differ diff --git a/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/testfilenum/a.go b/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/testfilenum/a.go new file mode 100644 index 0000000000000000000000000000000000000000..2729ae0abf0f3efb7220d05cf1e53ac8551b4cc5 
--- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/testfilenum/a.go @@ -0,0 +1,7 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func A() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/testfilenum/b.go b/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/testfilenum/b.go new file mode 100644 index 0000000000000000000000000000000000000000..a632aafe7b07a6e9bc0b95af50e86d3a1fa58f4c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/testfilenum/b.go @@ -0,0 +1,7 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func B() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/testfilenum/c.go b/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/testfilenum/c.go new file mode 100644 index 0000000000000000000000000000000000000000..d73efa73157183626cca73fdca81bb093dbbffdb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/testfilenum/c.go @@ -0,0 +1,7 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +func C() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/testfilenum/go.mod b/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/testfilenum/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..f0bd6b760310694449581dc1a63db19d8e444dd2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/objdump/testdata/testfilenum/go.mod @@ -0,0 +1,3 @@ +module cmd/objdump/testdata/testfilenum + +go 1.16 diff --git a/platform/dbops/binaries/go/go/src/cmd/pprof/testdata/cpu.go b/platform/dbops/binaries/go/go/src/cmd/pprof/testdata/cpu.go new file mode 100644 index 0000000000000000000000000000000000000000..5b682870db8f69ad5ee1ef539c8dcb505c596e58 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/pprof/testdata/cpu.go @@ -0,0 +1,41 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "flag" + "fmt" + "os" + "runtime/pprof" + "time" +) + +var output = flag.String("output", "", "pprof profile output file") + +func main() { + flag.Parse() + if *output == "" { + fmt.Fprintf(os.Stderr, "usage: %s -output file.pprof\n", os.Args[0]) + os.Exit(2) + } + + f, err := os.Create(*output) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + defer f.Close() + + if err := pprof.StartCPUProfile(f); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + defer pprof.StopCPUProfile() + + // Spin for long enough to collect some samples. + start := time.Now() + for time.Since(start) < time.Second { + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/gen.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/gen.go new file mode 100644 index 0000000000000000000000000000000000000000..f6a4bb643b2c1aa7a744c091e6b064a65579d1ee --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/gen.go @@ -0,0 +1,394 @@ +// Copyright 2023 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "fmt" + "internal/trace" + "internal/trace/traceviewer" + tracev2 "internal/trace/v2" + "strings" +) + +// generator is an interface for generating a JSON trace for the trace viewer +// from a trace. Each method in this interface is a handler for a kind of event +// that is interesting to render in the UI via the JSON trace. +type generator interface { + // Global parts. + Sync() // Notifies the generator of an EventSync event. + StackSample(ctx *traceContext, ev *tracev2.Event) + GlobalRange(ctx *traceContext, ev *tracev2.Event) + GlobalMetric(ctx *traceContext, ev *tracev2.Event) + + // Goroutine parts. + GoroutineLabel(ctx *traceContext, ev *tracev2.Event) + GoroutineRange(ctx *traceContext, ev *tracev2.Event) + GoroutineTransition(ctx *traceContext, ev *tracev2.Event) + + // Proc parts. + ProcRange(ctx *traceContext, ev *tracev2.Event) + ProcTransition(ctx *traceContext, ev *tracev2.Event) + + // User annotations. + Log(ctx *traceContext, ev *tracev2.Event) + + // Finish indicates the end of the trace and finalizes generation. + Finish(ctx *traceContext) +} + +// runGenerator produces a trace into ctx by running the generator over the parsed trace. 
+func runGenerator(ctx *traceContext, g generator, parsed *parsedTrace, opts *genOpts) { + for i := range parsed.events { + ev := &parsed.events[i] + + switch ev.Kind() { + case tracev2.EventSync: + g.Sync() + case tracev2.EventStackSample: + g.StackSample(ctx, ev) + case tracev2.EventRangeBegin, tracev2.EventRangeActive, tracev2.EventRangeEnd: + r := ev.Range() + switch r.Scope.Kind { + case tracev2.ResourceGoroutine: + g.GoroutineRange(ctx, ev) + case tracev2.ResourceProc: + g.ProcRange(ctx, ev) + case tracev2.ResourceNone: + g.GlobalRange(ctx, ev) + } + case tracev2.EventMetric: + g.GlobalMetric(ctx, ev) + case tracev2.EventLabel: + l := ev.Label() + if l.Resource.Kind == tracev2.ResourceGoroutine { + g.GoroutineLabel(ctx, ev) + } + case tracev2.EventStateTransition: + switch ev.StateTransition().Resource.Kind { + case tracev2.ResourceProc: + g.ProcTransition(ctx, ev) + case tracev2.ResourceGoroutine: + g.GoroutineTransition(ctx, ev) + } + case tracev2.EventLog: + g.Log(ctx, ev) + } + } + for i, task := range opts.tasks { + emitTask(ctx, task, i) + if opts.mode&traceviewer.ModeGoroutineOriented != 0 { + for _, region := range task.Regions { + emitRegion(ctx, region) + } + } + } + g.Finish(ctx) +} + +// emitTask emits information about a task into the trace viewer's event stream. +// +// sortIndex sets the order in which this task will appear related to other tasks, +// lowest first. +func emitTask(ctx *traceContext, task *trace.UserTaskSummary, sortIndex int) { + // Collect information about the task. 
+ var startStack, endStack tracev2.Stack + var startG, endG tracev2.GoID + startTime, endTime := ctx.startTime, ctx.endTime + if task.Start != nil { + startStack = task.Start.Stack() + startG = task.Start.Goroutine() + startTime = task.Start.Time() + } + if task.End != nil { + endStack = task.End.Stack() + endG = task.End.Goroutine() + endTime = task.End.Time() + } + arg := struct { + ID uint64 `json:"id"` + StartG uint64 `json:"start_g,omitempty"` + EndG uint64 `json:"end_g,omitempty"` + }{ + ID: uint64(task.ID), + StartG: uint64(startG), + EndG: uint64(endG), + } + + // Emit the task slice and notify the emitter of the task. + ctx.Task(uint64(task.ID), fmt.Sprintf("T%d %s", task.ID, task.Name), sortIndex) + ctx.TaskSlice(traceviewer.SliceEvent{ + Name: task.Name, + Ts: ctx.elapsed(startTime), + Dur: endTime.Sub(startTime), + Resource: uint64(task.ID), + Stack: ctx.Stack(viewerFrames(startStack)), + EndStack: ctx.Stack(viewerFrames(endStack)), + Arg: arg, + }) + // Emit an arrow from the parent to the child. + if task.Parent != nil && task.Start != nil && task.Start.Kind() == tracev2.EventTaskBegin { + ctx.TaskArrow(traceviewer.ArrowEvent{ + Name: "newTask", + Start: ctx.elapsed(task.Start.Time()), + End: ctx.elapsed(task.Start.Time()), + FromResource: uint64(task.Parent.ID), + ToResource: uint64(task.ID), + FromStack: ctx.Stack(viewerFrames(task.Start.Stack())), + }) + } +} + +// emitRegion emits goroutine-based slice events to the UI. The caller +// must be emitting for a goroutine-oriented trace. +// +// TODO(mknyszek): Make regions part of the regular generator loop and +// treat them like ranges so that we can emit regions in traces oriented +// by proc or thread. +func emitRegion(ctx *traceContext, region *trace.UserRegionSummary) { + if region.Name == "" { + return + } + // Collect information about the region. 
+ var startStack, endStack tracev2.Stack + goroutine := tracev2.NoGoroutine + startTime, endTime := ctx.startTime, ctx.endTime + if region.Start != nil { + startStack = region.Start.Stack() + startTime = region.Start.Time() + goroutine = region.Start.Goroutine() + } + if region.End != nil { + endStack = region.End.Stack() + endTime = region.End.Time() + goroutine = region.End.Goroutine() + } + if goroutine == tracev2.NoGoroutine { + return + } + arg := struct { + TaskID uint64 `json:"taskid"` + }{ + TaskID: uint64(region.TaskID), + } + ctx.AsyncSlice(traceviewer.AsyncSliceEvent{ + SliceEvent: traceviewer.SliceEvent{ + Name: region.Name, + Ts: ctx.elapsed(startTime), + Dur: endTime.Sub(startTime), + Resource: uint64(goroutine), + Stack: ctx.Stack(viewerFrames(startStack)), + EndStack: ctx.Stack(viewerFrames(endStack)), + Arg: arg, + }, + Category: "Region", + Scope: fmt.Sprintf("%x", region.TaskID), + TaskColorIndex: uint64(region.TaskID), + }) +} + +// Building blocks for generators. + +// stackSampleGenerator implements a generic handler for stack sample events. +// The provided resource is the resource the stack sample should count against. +type stackSampleGenerator[R resource] struct { + // getResource is a function to extract a resource ID from a stack sample event. + getResource func(*tracev2.Event) R +} + +// StackSample implements a stack sample event handler. It expects ev to be one such event. +func (g *stackSampleGenerator[R]) StackSample(ctx *traceContext, ev *tracev2.Event) { + id := g.getResource(ev) + if id == R(noResource) { + // We have nowhere to put this in the UI. + return + } + ctx.Instant(traceviewer.InstantEvent{ + Name: "CPU profile sample", + Ts: ctx.elapsed(ev.Time()), + Resource: uint64(id), + Stack: ctx.Stack(viewerFrames(ev.Stack())), + }) +} + +// globalRangeGenerator implements a generic handler for EventRange* events that pertain +// to tracev2.ResourceNone (the global scope). 
+type globalRangeGenerator struct { + ranges map[string]activeRange + seenSync bool +} + +// Sync notifies the generator of an EventSync event. +func (g *globalRangeGenerator) Sync() { + g.seenSync = true +} + +// GlobalRange implements a handler for EventRange* events whose Scope.Kind is ResourceNone. +// It expects ev to be one such event. +func (g *globalRangeGenerator) GlobalRange(ctx *traceContext, ev *tracev2.Event) { + if g.ranges == nil { + g.ranges = make(map[string]activeRange) + } + r := ev.Range() + switch ev.Kind() { + case tracev2.EventRangeBegin: + g.ranges[r.Name] = activeRange{ev.Time(), ev.Stack()} + case tracev2.EventRangeActive: + // If we've seen a Sync event, then Active events are always redundant. + if !g.seenSync { + // Otherwise, they extend back to the start of the trace. + g.ranges[r.Name] = activeRange{ctx.startTime, ev.Stack()} + } + case tracev2.EventRangeEnd: + // Only emit GC events, because we have nowhere to + // put other events. + ar := g.ranges[r.Name] + if strings.Contains(r.Name, "GC") { + ctx.Slice(traceviewer.SliceEvent{ + Name: r.Name, + Ts: ctx.elapsed(ar.time), + Dur: ev.Time().Sub(ar.time), + Resource: trace.GCP, + Stack: ctx.Stack(viewerFrames(ar.stack)), + EndStack: ctx.Stack(viewerFrames(ev.Stack())), + }) + } + delete(g.ranges, r.Name) + } +} + +// Finish flushes any outstanding ranges at the end of the trace. +func (g *globalRangeGenerator) Finish(ctx *traceContext) { + for name, ar := range g.ranges { + if !strings.Contains(name, "GC") { + continue + } + ctx.Slice(traceviewer.SliceEvent{ + Name: name, + Ts: ctx.elapsed(ar.time), + Dur: ctx.endTime.Sub(ar.time), + Resource: trace.GCP, + Stack: ctx.Stack(viewerFrames(ar.stack)), + }) + } +} + +// globalMetricGenerator implements a generic handler for Metric events. +type globalMetricGenerator struct { +} + +// GlobalMetric implements an event handler for EventMetric events. ev must be one such event. 
+func (g *globalMetricGenerator) GlobalMetric(ctx *traceContext, ev *tracev2.Event) { + m := ev.Metric() + switch m.Name { + case "/memory/classes/heap/objects:bytes": + ctx.HeapAlloc(ctx.elapsed(ev.Time()), m.Value.Uint64()) + case "/gc/heap/goal:bytes": + ctx.HeapGoal(ctx.elapsed(ev.Time()), m.Value.Uint64()) + case "/sched/gomaxprocs:threads": + ctx.Gomaxprocs(m.Value.Uint64()) + } +} + +// procRangeGenerator implements a generic handler for EventRange* events whose Scope.Kind is +// ResourceProc. +type procRangeGenerator struct { + ranges map[tracev2.Range]activeRange + seenSync bool +} + +// Sync notifies the generator of an EventSync event. +func (g *procRangeGenerator) Sync() { + g.seenSync = true +} + +// ProcRange implements a handler for EventRange* events whose Scope.Kind is ResourceProc. +// It expects ev to be one such event. +func (g *procRangeGenerator) ProcRange(ctx *traceContext, ev *tracev2.Event) { + if g.ranges == nil { + g.ranges = make(map[tracev2.Range]activeRange) + } + r := ev.Range() + switch ev.Kind() { + case tracev2.EventRangeBegin: + g.ranges[r] = activeRange{ev.Time(), ev.Stack()} + case tracev2.EventRangeActive: + // If we've seen a Sync event, then Active events are always redundant. + if !g.seenSync { + // Otherwise, they extend back to the start of the trace. + g.ranges[r] = activeRange{ctx.startTime, ev.Stack()} + } + case tracev2.EventRangeEnd: + // Emit proc-based ranges. + ar := g.ranges[r] + ctx.Slice(traceviewer.SliceEvent{ + Name: r.Name, + Ts: ctx.elapsed(ar.time), + Dur: ev.Time().Sub(ar.time), + Resource: uint64(r.Scope.Proc()), + Stack: ctx.Stack(viewerFrames(ar.stack)), + EndStack: ctx.Stack(viewerFrames(ev.Stack())), + }) + delete(g.ranges, r) + } +} + +// Finish flushes any outstanding ranges at the end of the trace. 
+func (g *procRangeGenerator) Finish(ctx *traceContext) { + for r, ar := range g.ranges { + ctx.Slice(traceviewer.SliceEvent{ + Name: r.Name, + Ts: ctx.elapsed(ar.time), + Dur: ctx.endTime.Sub(ar.time), + Resource: uint64(r.Scope.Proc()), + Stack: ctx.Stack(viewerFrames(ar.stack)), + }) + } +} + +// activeRange represents an active EventRange* range. +type activeRange struct { + time tracev2.Time + stack tracev2.Stack +} + +// completedRange represents a completed EventRange* range. +type completedRange struct { + name string + startTime tracev2.Time + endTime tracev2.Time + startStack tracev2.Stack + endStack tracev2.Stack + arg any +} + +type logEventGenerator[R resource] struct { + // getResource is a function to extract a resource ID from a Log event. + getResource func(*tracev2.Event) R +} + +// Log implements a log event handler. It expects ev to be one such event. +func (g *logEventGenerator[R]) Log(ctx *traceContext, ev *tracev2.Event) { + id := g.getResource(ev) + if id == R(noResource) { + // We have nowhere to put this in the UI. + return + } + + // Construct the name to present. + log := ev.Log() + name := log.Message + if log.Category != "" { + name = "[" + log.Category + "] " + name + } + + // Emit an instant event. + ctx.Instant(traceviewer.InstantEvent{ + Name: name, + Ts: ctx.elapsed(ev.Time()), + Category: "user event", + Resource: uint64(id), + Stack: ctx.Stack(viewerFrames(ev.Stack())), + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/goroutinegen.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/goroutinegen.go new file mode 100644 index 0000000000000000000000000000000000000000..c76bd8487ae64f3fbde0e87446fcf93ca2cfee59 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/goroutinegen.go @@ -0,0 +1,167 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package trace + +import ( + tracev2 "internal/trace/v2" +) + +var _ generator = &goroutineGenerator{} + +type goroutineGenerator struct { + globalRangeGenerator + globalMetricGenerator + stackSampleGenerator[tracev2.GoID] + logEventGenerator[tracev2.GoID] + + gStates map[tracev2.GoID]*gState[tracev2.GoID] + focus tracev2.GoID + filter map[tracev2.GoID]struct{} +} + +func newGoroutineGenerator(ctx *traceContext, focus tracev2.GoID, filter map[tracev2.GoID]struct{}) *goroutineGenerator { + gg := new(goroutineGenerator) + rg := func(ev *tracev2.Event) tracev2.GoID { + return ev.Goroutine() + } + gg.stackSampleGenerator.getResource = rg + gg.logEventGenerator.getResource = rg + gg.gStates = make(map[tracev2.GoID]*gState[tracev2.GoID]) + gg.focus = focus + gg.filter = filter + + // Enable a filter on the emitter. + if filter != nil { + ctx.SetResourceFilter(func(resource uint64) bool { + _, ok := filter[tracev2.GoID(resource)] + return ok + }) + } + return gg +} + +func (g *goroutineGenerator) Sync() { + g.globalRangeGenerator.Sync() +} + +func (g *goroutineGenerator) GoroutineLabel(ctx *traceContext, ev *tracev2.Event) { + l := ev.Label() + g.gStates[l.Resource.Goroutine()].setLabel(l.Label) +} + +func (g *goroutineGenerator) GoroutineRange(ctx *traceContext, ev *tracev2.Event) { + r := ev.Range() + switch ev.Kind() { + case tracev2.EventRangeBegin: + g.gStates[r.Scope.Goroutine()].rangeBegin(ev.Time(), r.Name, ev.Stack()) + case tracev2.EventRangeActive: + g.gStates[r.Scope.Goroutine()].rangeActive(r.Name) + case tracev2.EventRangeEnd: + gs := g.gStates[r.Scope.Goroutine()] + gs.rangeEnd(ev.Time(), r.Name, ev.Stack(), ctx) + } +} + +func (g *goroutineGenerator) GoroutineTransition(ctx *traceContext, ev *tracev2.Event) { + st := ev.StateTransition() + goID := st.Resource.Goroutine() + + // If we haven't seen this goroutine before, create a new + // gState for it. 
+ gs, ok := g.gStates[goID] + if !ok { + gs = newGState[tracev2.GoID](goID) + g.gStates[goID] = gs + } + + // Try to augment the name of the goroutine. + gs.augmentName(st.Stack) + + // Handle the goroutine state transition. + from, to := st.Goroutine() + if from == to { + // Filter out no-op events. + return + } + if from.Executing() && !to.Executing() { + if to == tracev2.GoWaiting { + // Goroutine started blocking. + gs.block(ev.Time(), ev.Stack(), st.Reason, ctx) + } else { + gs.stop(ev.Time(), ev.Stack(), ctx) + } + } + if !from.Executing() && to.Executing() { + start := ev.Time() + if from == tracev2.GoUndetermined { + // Back-date the event to the start of the trace. + start = ctx.startTime + } + gs.start(start, goID, ctx) + } + + if from == tracev2.GoWaiting { + // Goroutine unblocked. + gs.unblock(ev.Time(), ev.Stack(), ev.Goroutine(), ctx) + } + if from == tracev2.GoNotExist && to == tracev2.GoRunnable { + // Goroutine was created. + gs.created(ev.Time(), ev.Goroutine(), ev.Stack()) + } + if from == tracev2.GoSyscall && to != tracev2.GoRunning { + // Exiting blocked syscall. + gs.syscallEnd(ev.Time(), true, ctx) + gs.blockedSyscallEnd(ev.Time(), ev.Stack(), ctx) + } else if from == tracev2.GoSyscall { + // Check if we're exiting a syscall in a non-blocking way. + gs.syscallEnd(ev.Time(), false, ctx) + } + + // Handle syscalls. + if to == tracev2.GoSyscall { + start := ev.Time() + if from == tracev2.GoUndetermined { + // Back-date the event to the start of the trace. + start = ctx.startTime + } + // Write down that we've entered a syscall. Note: we might have no G or P here + // if we're in a cgo callback or this is a transition from GoUndetermined + // (i.e. the G has been blocked in a syscall). + gs.syscallBegin(start, goID, ev.Stack()) + } + + // Note down the goroutine transition. 
+ _, inMarkAssist := gs.activeRanges["GC mark assist"] + ctx.GoroutineTransition(ctx.elapsed(ev.Time()), viewerGState(from, inMarkAssist), viewerGState(to, inMarkAssist)) +} + +func (g *goroutineGenerator) ProcRange(ctx *traceContext, ev *tracev2.Event) { + // TODO(mknyszek): Extend procRangeGenerator to support rendering proc ranges + // that overlap with a goroutine's execution. +} + +func (g *goroutineGenerator) ProcTransition(ctx *traceContext, ev *tracev2.Event) { + // Not needed. All relevant information for goroutines can be derived from goroutine transitions. +} + +func (g *goroutineGenerator) Finish(ctx *traceContext) { + ctx.SetResourceType("G") + + // Finish off global ranges. + g.globalRangeGenerator.Finish(ctx) + + // Finish off all the goroutine slices. + for id, gs := range g.gStates { + gs.finish(ctx) + + // Tell the emitter about the goroutines we want to render. + ctx.Resource(uint64(id), gs.name()) + } + + // Set the goroutine to focus on. + if g.focus != tracev2.NoGoroutine { + ctx.Focus(uint64(g.focus)) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/goroutines.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/goroutines.go new file mode 100644 index 0000000000000000000000000000000000000000..3cf366635af178f367bb63430f4626f459db423e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/goroutines.go @@ -0,0 +1,420 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Goroutine-related profiles. + +package trace + +import ( + "cmp" + "fmt" + "html/template" + "internal/trace" + "internal/trace/traceviewer" + tracev2 "internal/trace/v2" + "log" + "net/http" + "slices" + "sort" + "strings" + "time" +) + +// GoroutinesHandlerFunc returns a HandlerFunc that serves list of goroutine groups. 
+func GoroutinesHandlerFunc(summaries map[tracev2.GoID]*trace.GoroutineSummary) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // goroutineGroup describes a group of goroutines grouped by name. + type goroutineGroup struct { + Name string // Start function. + N int // Total number of goroutines in this group. + ExecTime time.Duration // Total execution time of all goroutines in this group. + } + // Accumulate groups by Name. + groupsByName := make(map[string]goroutineGroup) + for _, summary := range summaries { + group := groupsByName[summary.Name] + group.Name = summary.Name + group.N++ + group.ExecTime += summary.ExecTime + groupsByName[summary.Name] = group + } + var groups []goroutineGroup + for _, group := range groupsByName { + groups = append(groups, group) + } + slices.SortFunc(groups, func(a, b goroutineGroup) int { + return cmp.Compare(b.ExecTime, a.ExecTime) + }) + w.Header().Set("Content-Type", "text/html;charset=utf-8") + if err := templGoroutines.Execute(w, groups); err != nil { + log.Printf("failed to execute template: %v", err) + return + } + } +} + +var templGoroutines = template.Must(template.New("").Parse(` + + + +

Goroutines

+Below is a table of all goroutines in the trace grouped by start location and sorted by the total execution time of the group.
+
+Click a start location to view more details about that group.
+
+ + + + + + +{{range $}} + + + + + +{{end}} +
Start locationCountTotal execution time
{{or .Name "(Inactive, no stack trace sampled)"}}{{.N}}{{.ExecTime}}
+ + +`)) + +// GoroutineHandler creates a handler that serves information about +// goroutines in a particular group. +func GoroutineHandler(summaries map[tracev2.GoID]*trace.GoroutineSummary) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + goroutineName := r.FormValue("name") + + type goroutine struct { + *trace.GoroutineSummary + NonOverlappingStats map[string]time.Duration + HasRangeTime bool + } + + // Collect all the goroutines in the group. + var ( + goroutines []goroutine + name string + totalExecTime, execTime time.Duration + maxTotalTime time.Duration + ) + validNonOverlappingStats := make(map[string]struct{}) + validRangeStats := make(map[string]struct{}) + for _, summary := range summaries { + totalExecTime += summary.ExecTime + + if summary.Name != goroutineName { + continue + } + nonOverlappingStats := summary.NonOverlappingStats() + for name := range nonOverlappingStats { + validNonOverlappingStats[name] = struct{}{} + } + var totalRangeTime time.Duration + for name, dt := range summary.RangeTime { + validRangeStats[name] = struct{}{} + totalRangeTime += dt + } + goroutines = append(goroutines, goroutine{ + GoroutineSummary: summary, + NonOverlappingStats: nonOverlappingStats, + HasRangeTime: totalRangeTime != 0, + }) + name = summary.Name + execTime += summary.ExecTime + if maxTotalTime < summary.TotalTime { + maxTotalTime = summary.TotalTime + } + } + + // Compute the percent of total execution time these goroutines represent. + execTimePercent := "" + if totalExecTime > 0 { + execTimePercent = fmt.Sprintf("%.2f%%", float64(execTime)/float64(totalExecTime)*100) + } + + // Sort. + sortBy := r.FormValue("sortby") + if _, ok := validNonOverlappingStats[sortBy]; ok { + slices.SortFunc(goroutines, func(a, b goroutine) int { + return cmp.Compare(b.NonOverlappingStats[sortBy], a.NonOverlappingStats[sortBy]) + }) + } else { + // Sort by total time by default. 
+ slices.SortFunc(goroutines, func(a, b goroutine) int { + return cmp.Compare(b.TotalTime, a.TotalTime) + }) + } + + // Write down all the non-overlapping stats and sort them. + allNonOverlappingStats := make([]string, 0, len(validNonOverlappingStats)) + for name := range validNonOverlappingStats { + allNonOverlappingStats = append(allNonOverlappingStats, name) + } + slices.SortFunc(allNonOverlappingStats, func(a, b string) int { + if a == b { + return 0 + } + if a == "Execution time" { + return -1 + } + if b == "Execution time" { + return 1 + } + return cmp.Compare(a, b) + }) + + // Write down all the range stats and sort them. + allRangeStats := make([]string, 0, len(validRangeStats)) + for name := range validRangeStats { + allRangeStats = append(allRangeStats, name) + } + sort.Strings(allRangeStats) + + err := templGoroutine.Execute(w, struct { + Name string + N int + ExecTimePercent string + MaxTotal time.Duration + Goroutines []goroutine + NonOverlappingStats []string + RangeStats []string + }{ + Name: name, + N: len(goroutines), + ExecTimePercent: execTimePercent, + MaxTotal: maxTotalTime, + Goroutines: goroutines, + NonOverlappingStats: allNonOverlappingStats, + RangeStats: allRangeStats, + }) + if err != nil { + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } + } +} + +func stat2Color(statName string) string { + color := "#636363" + if strings.HasPrefix(statName, "Block time") { + color = "#d01c8b" + } + switch statName { + case "Sched wait time": + color = "#2c7bb6" + case "Syscall execution time": + color = "#7b3294" + case "Execution time": + color = "#d7191c" + } + return color +} + +var templGoroutine = template.Must(template.New("").Funcs(template.FuncMap{ + "percent": func(dividend, divisor time.Duration) template.HTML { + if divisor == 0 { + return "" + } + return template.HTML(fmt.Sprintf("(%.1f%%)", float64(dividend)/float64(divisor)*100)) + }, + "headerStyle": func(statName string) 
template.HTMLAttr { + return template.HTMLAttr(fmt.Sprintf("style=\"background-color: %s;\"", stat2Color(statName))) + }, + "barStyle": func(statName string, dividend, divisor time.Duration) template.HTMLAttr { + width := "0" + if divisor != 0 { + width = fmt.Sprintf("%.2f%%", float64(dividend)/float64(divisor)*100) + } + return template.HTMLAttr(fmt.Sprintf("style=\"width: %s; background-color: %s;\"", width, stat2Color(statName))) + }, +}).Parse(` + +Goroutines: {{.Name}} + + + + +

Goroutines

+ +Table of contents + + +

Summary

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Goroutine start location:{{.Name}}
Count:{{.N}}
Execution Time:{{.ExecTimePercent}} of total program execution time
Network wait profile: graph (download)
Sync block profile: graph (download)
Syscall profile: graph (download)
Scheduler wait profile: graph (download)
+ +

Breakdown

+ +The table below breaks down where each goroutine is spent its time during the +traced period. +All of the columns except total time are non-overlapping. +
+
+ + + + + + +{{range $.NonOverlappingStats}} + +{{end}} + +{{range .Goroutines}} + + + + + {{$Goroutine := .}} + {{range $.NonOverlappingStats}} + {{$Time := index $Goroutine.NonOverlappingStats .}} + + {{end}} + +{{end}} +
Goroutine Total {{.}}
{{.ID}} {{ .TotalTime.String }} +
+ {{$Goroutine := .}} + {{range $.NonOverlappingStats}} + {{$Time := index $Goroutine.NonOverlappingStats .}} + {{if $Time}} +   + {{end}} + {{end}} +
+
{{$Time.String}}
+ +

Special ranges

+ +The table below describes how much of the traced period each goroutine spent in +certain special time ranges. +If a goroutine has spent no time in any special time ranges, it is excluded from +the table. +For example, how much time it spent helping the GC. Note that these times do +overlap with the times from the first table. +In general the goroutine may not be executing in these special time ranges. +For example, it may have blocked while trying to help the GC. +This must be taken into account when interpreting the data. +
+
+ + + + + +{{range $.RangeStats}} + +{{end}} + +{{range .Goroutines}} + {{if .HasRangeTime}} + + + + {{$Goroutine := .}} + {{range $.RangeStats}} + {{$Time := index $Goroutine.RangeTime .}} + + {{end}} + + {{end}} +{{end}} +
Goroutine Total {{.}}
{{.ID}} {{ .TotalTime.String }} {{$Time.String}}
+`)) diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/gstate.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/gstate.go new file mode 100644 index 0000000000000000000000000000000000000000..aeba7ecbc17a7ffc4ba68711a1a91ab2b5c35ef7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/gstate.go @@ -0,0 +1,373 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "fmt" + "internal/trace" + "internal/trace/traceviewer" + "internal/trace/traceviewer/format" + tracev2 "internal/trace/v2" + "strings" +) + +// resource is a generic constraint interface for resource IDs. +type resource interface { + tracev2.GoID | tracev2.ProcID | tracev2.ThreadID +} + +// noResource indicates the lack of a resource. +const noResource = -1 + +// gState represents the trace viewer state of a goroutine in a trace. +// +// The type parameter on this type is the resource which is used to construct +// a timeline of events. e.g. R=ProcID for a proc-oriented view, R=GoID for +// a goroutine-oriented view, etc. +type gState[R resource] struct { + baseName string + named bool // Whether baseName has been set. + label string // EventLabel extension. + isSystemG bool + + executing R // The resource this goroutine is executing on. (Could be itself.) + + // lastStopStack is the stack trace at the point of the last + // call to the stop method. This tends to be a more reliable way + // of picking up stack traces, since the parser doesn't provide + // a stack for every state transition event. + lastStopStack tracev2.Stack + + // activeRanges is the set of all active ranges on the goroutine. + activeRanges map[string]activeRange + + // completedRanges is a list of ranges that completed since before the + // goroutine stopped executing. These are flushed on every stop or block. 
+ completedRanges []completedRange + + // startRunning is the most recent event that caused a goroutine to + // transition to GoRunning. + startRunningTime tracev2.Time + + // startSyscall is the most recent event that caused a goroutine to + // transition to GoSyscall. + syscall struct { + time tracev2.Time + stack tracev2.Stack + active bool + } + + // startBlockReason is the StateTransition.Reason of the most recent + // event that caused a gorotuine to transition to GoWaiting. + startBlockReason string + + // startCause is the event that allowed this goroutine to start running. + // It's used to generate flow events. This is typically something like + // an unblock event or a goroutine creation event. + // + // startCause.resource is the resource on which startCause happened, but is + // listed separately because the cause may have happened on a resource that + // isn't R (or perhaps on some abstract nebulous resource, like trace.NetpollP). + startCause struct { + time tracev2.Time + name string + resource uint64 + stack tracev2.Stack + } +} + +// newGState constructs a new goroutine state for the goroutine +// identified by the provided ID. +func newGState[R resource](goID tracev2.GoID) *gState[R] { + return &gState[R]{ + baseName: fmt.Sprintf("G%d", goID), + executing: R(noResource), + activeRanges: make(map[string]activeRange), + } +} + +// augmentName attempts to use stk to augment the name of the goroutine +// with stack information. This stack must be related to the goroutine +// in some way, but it doesn't really matter which stack. +func (gs *gState[R]) augmentName(stk tracev2.Stack) { + if gs.named { + return + } + if stk == tracev2.NoStack { + return + } + name := lastFunc(stk) + gs.baseName += fmt.Sprintf(" %s", name) + gs.named = true + gs.isSystemG = trace.IsSystemGoroutine(name) +} + +// setLabel adds an additional label to the goroutine's name. 
+func (gs *gState[R]) setLabel(label string) { + gs.label = label +} + +// name returns a name for the goroutine. +func (gs *gState[R]) name() string { + name := gs.baseName + if gs.label != "" { + name += " (" + gs.label + ")" + } + return name +} + +// setStartCause sets the reason a goroutine will be allowed to start soon. +// For example, via unblocking or exiting a blocked syscall. +func (gs *gState[R]) setStartCause(ts tracev2.Time, name string, resource uint64, stack tracev2.Stack) { + gs.startCause.time = ts + gs.startCause.name = name + gs.startCause.resource = resource + gs.startCause.stack = stack +} + +// created indicates that this goroutine was just created by the provided creator. +func (gs *gState[R]) created(ts tracev2.Time, creator R, stack tracev2.Stack) { + if creator == R(noResource) { + return + } + gs.setStartCause(ts, "go", uint64(creator), stack) +} + +// start indicates that a goroutine has started running on a proc. +func (gs *gState[R]) start(ts tracev2.Time, resource R, ctx *traceContext) { + // Set the time for all the active ranges. + for name := range gs.activeRanges { + gs.activeRanges[name] = activeRange{ts, tracev2.NoStack} + } + + if gs.startCause.name != "" { + // It has a start cause. Emit a flow event. + ctx.Arrow(traceviewer.ArrowEvent{ + Name: gs.startCause.name, + Start: ctx.elapsed(gs.startCause.time), + End: ctx.elapsed(ts), + FromResource: uint64(gs.startCause.resource), + ToResource: uint64(resource), + FromStack: ctx.Stack(viewerFrames(gs.startCause.stack)), + }) + gs.startCause.time = 0 + gs.startCause.name = "" + gs.startCause.resource = 0 + gs.startCause.stack = tracev2.NoStack + } + gs.executing = resource + gs.startRunningTime = ts +} + +// syscallBegin indicates that the goroutine entered a syscall on a proc. 
+func (gs *gState[R]) syscallBegin(ts tracev2.Time, resource R, stack tracev2.Stack) { + gs.syscall.time = ts + gs.syscall.stack = stack + gs.syscall.active = true + if gs.executing == R(noResource) { + gs.executing = resource + gs.startRunningTime = ts + } +} + +// syscallEnd ends the syscall slice, wherever the syscall is at. This is orthogonal +// to blockedSyscallEnd -- both must be called when a syscall ends and that syscall +// blocked. They're kept separate because syscallEnd indicates the point at which the +// goroutine is no longer executing on the resource (e.g. a proc) whereas blockedSyscallEnd +// is the point at which the goroutine actually exited the syscall regardless of which +// resource that happened on. +func (gs *gState[R]) syscallEnd(ts tracev2.Time, blocked bool, ctx *traceContext) { + if !gs.syscall.active { + return + } + blockString := "no" + if blocked { + blockString = "yes" + } + gs.completedRanges = append(gs.completedRanges, completedRange{ + name: "syscall", + startTime: gs.syscall.time, + endTime: ts, + startStack: gs.syscall.stack, + arg: format.BlockedArg{Blocked: blockString}, + }) + gs.syscall.active = false + gs.syscall.time = 0 + gs.syscall.stack = tracev2.NoStack +} + +// blockedSyscallEnd indicates the point at which the blocked syscall ended. This is distinct +// and orthogonal to syscallEnd; both must be called if the syscall blocked. This sets up an instant +// to emit a flow event from, indicating explicitly that this goroutine was unblocked by the system. +func (gs *gState[R]) blockedSyscallEnd(ts tracev2.Time, stack tracev2.Stack, ctx *traceContext) { + name := "exit blocked syscall" + gs.setStartCause(ts, name, trace.SyscallP, stack) + + // Emit an syscall exit instant event for the "Syscall" lane. 
+ ctx.Instant(traceviewer.InstantEvent{ + Name: name, + Ts: ctx.elapsed(ts), + Resource: trace.SyscallP, + Stack: ctx.Stack(viewerFrames(stack)), + }) +} + +// unblock indicates that the goroutine gs represents has been unblocked. +func (gs *gState[R]) unblock(ts tracev2.Time, stack tracev2.Stack, resource R, ctx *traceContext) { + name := "unblock" + viewerResource := uint64(resource) + if gs.startBlockReason != "" { + name = fmt.Sprintf("%s (%s)", name, gs.startBlockReason) + } + if strings.Contains(gs.startBlockReason, "network") { + // Attribute the network instant to the nebulous "NetpollP" if + // resource isn't a thread, because there's a good chance that + // resource isn't going to be valid in this case. + // + // TODO(mknyszek): Handle this invalidness in a more general way. + if _, ok := any(resource).(tracev2.ThreadID); !ok { + // Emit an unblock instant event for the "Network" lane. + viewerResource = trace.NetpollP + } + ctx.Instant(traceviewer.InstantEvent{ + Name: name, + Ts: ctx.elapsed(ts), + Resource: viewerResource, + Stack: ctx.Stack(viewerFrames(stack)), + }) + } + gs.startBlockReason = "" + if viewerResource != 0 { + gs.setStartCause(ts, name, viewerResource, stack) + } +} + +// block indicates that the goroutine has stopped executing on a proc -- specifically, +// it blocked for some reason. +func (gs *gState[R]) block(ts tracev2.Time, stack tracev2.Stack, reason string, ctx *traceContext) { + gs.startBlockReason = reason + gs.stop(ts, stack, ctx) +} + +// stop indicates that the goroutine has stopped executing on a proc. +func (gs *gState[R]) stop(ts tracev2.Time, stack tracev2.Stack, ctx *traceContext) { + // Emit the execution time slice. + var stk int + if gs.lastStopStack != tracev2.NoStack { + stk = ctx.Stack(viewerFrames(gs.lastStopStack)) + } + // Check invariants. 
+ if gs.startRunningTime == 0 { + panic("silently broken trace or generator invariant (startRunningTime != 0) not held") + } + if gs.executing == R(noResource) { + panic("non-executing goroutine stopped") + } + ctx.Slice(traceviewer.SliceEvent{ + Name: gs.name(), + Ts: ctx.elapsed(gs.startRunningTime), + Dur: ts.Sub(gs.startRunningTime), + Resource: uint64(gs.executing), + Stack: stk, + }) + + // Flush completed ranges. + for _, cr := range gs.completedRanges { + ctx.Slice(traceviewer.SliceEvent{ + Name: cr.name, + Ts: ctx.elapsed(cr.startTime), + Dur: cr.endTime.Sub(cr.startTime), + Resource: uint64(gs.executing), + Stack: ctx.Stack(viewerFrames(cr.startStack)), + EndStack: ctx.Stack(viewerFrames(cr.endStack)), + Arg: cr.arg, + }) + } + gs.completedRanges = gs.completedRanges[:0] + + // Continue in-progress ranges. + for name, r := range gs.activeRanges { + // Check invariant. + if r.time == 0 { + panic("silently broken trace or generator invariant (activeRanges time != 0) not held") + } + ctx.Slice(traceviewer.SliceEvent{ + Name: name, + Ts: ctx.elapsed(r.time), + Dur: ts.Sub(r.time), + Resource: uint64(gs.executing), + Stack: ctx.Stack(viewerFrames(r.stack)), + }) + } + + // Clear the range info. + for name := range gs.activeRanges { + gs.activeRanges[name] = activeRange{0, tracev2.NoStack} + } + + gs.startRunningTime = 0 + gs.lastStopStack = stack + gs.executing = R(noResource) +} + +// finalize writes out any in-progress slices as if the goroutine stopped. +// This must only be used once the trace has been fully processed and no +// further events will be processed. This method may leave the gState in +// an inconsistent state. +func (gs *gState[R]) finish(ctx *traceContext) { + if gs.executing != R(noResource) { + gs.syscallEnd(ctx.endTime, false, ctx) + gs.stop(ctx.endTime, tracev2.NoStack, ctx) + } +} + +// rangeBegin indicates the start of a special range of time. 
+func (gs *gState[R]) rangeBegin(ts tracev2.Time, name string, stack tracev2.Stack) { + if gs.executing != R(noResource) { + // If we're executing, start the slice from here. + gs.activeRanges[name] = activeRange{ts, stack} + } else { + // If the goroutine isn't executing, there's no place for + // us to create a slice from. Wait until it starts executing. + gs.activeRanges[name] = activeRange{0, stack} + } +} + +// rangeActive indicates that a special range of time has been in progress. +func (gs *gState[R]) rangeActive(name string) { + if gs.executing != R(noResource) { + // If we're executing, and the range is active, then start + // from wherever the goroutine started running from. + gs.activeRanges[name] = activeRange{gs.startRunningTime, tracev2.NoStack} + } else { + // If the goroutine isn't executing, there's no place for + // us to create a slice from. Wait until it starts executing. + gs.activeRanges[name] = activeRange{0, tracev2.NoStack} + } +} + +// rangeEnd indicates the end of a special range of time. +func (gs *gState[R]) rangeEnd(ts tracev2.Time, name string, stack tracev2.Stack, ctx *traceContext) { + if gs.executing != R(noResource) { + r := gs.activeRanges[name] + gs.completedRanges = append(gs.completedRanges, completedRange{ + name: name, + startTime: r.time, + endTime: ts, + startStack: r.stack, + endStack: stack, + }) + } + delete(gs.activeRanges, name) +} + +func lastFunc(s tracev2.Stack) string { + var last tracev2.StackFrame + s.Frames(func(f tracev2.StackFrame) bool { + last = f + return true + }) + return last.Func +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/jsontrace.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/jsontrace.go new file mode 100644 index 0000000000000000000000000000000000000000..e4ca613678adff809e8eddffcfc7908e74db1c78 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/jsontrace.go @@ -0,0 +1,229 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "cmp" + "log" + "math" + "net/http" + "slices" + "strconv" + "time" + + "internal/trace" + "internal/trace/traceviewer" + tracev2 "internal/trace/v2" +) + +func JSONTraceHandler(parsed *parsedTrace) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + opts := defaultGenOpts() + + switch r.FormValue("view") { + case "thread": + opts.mode = traceviewer.ModeThreadOriented + } + if goids := r.FormValue("goid"); goids != "" { + // Render trace focused on a particular goroutine. + + id, err := strconv.ParseUint(goids, 10, 64) + if err != nil { + log.Printf("failed to parse goid parameter %q: %v", goids, err) + return + } + goid := tracev2.GoID(id) + g, ok := parsed.summary.Goroutines[goid] + if !ok { + log.Printf("failed to find goroutine %d", goid) + return + } + opts.mode = traceviewer.ModeGoroutineOriented + if g.StartTime != 0 { + opts.startTime = g.StartTime.Sub(parsed.startTime()) + } else { + opts.startTime = 0 + } + if g.EndTime != 0 { + opts.endTime = g.EndTime.Sub(parsed.startTime()) + } else { // The goroutine didn't end. 
+ opts.endTime = parsed.endTime().Sub(parsed.startTime()) + } + opts.focusGoroutine = goid + opts.goroutines = trace.RelatedGoroutinesV2(parsed.events, goid) + } else if taskids := r.FormValue("focustask"); taskids != "" { + taskid, err := strconv.ParseUint(taskids, 10, 64) + if err != nil { + log.Printf("failed to parse focustask parameter %q: %v", taskids, err) + return + } + task, ok := parsed.summary.Tasks[tracev2.TaskID(taskid)] + if !ok || (task.Start == nil && task.End == nil) { + log.Printf("failed to find task with id %d", taskid) + return + } + opts.setTask(parsed, task) + } else if taskids := r.FormValue("taskid"); taskids != "" { + taskid, err := strconv.ParseUint(taskids, 10, 64) + if err != nil { + log.Printf("failed to parse taskid parameter %q: %v", taskids, err) + return + } + task, ok := parsed.summary.Tasks[tracev2.TaskID(taskid)] + if !ok { + log.Printf("failed to find task with id %d", taskid) + return + } + // This mode is goroutine-oriented. + opts.mode = traceviewer.ModeGoroutineOriented + opts.setTask(parsed, task) + + // Pick the goroutine to orient ourselves around by just + // trying to pick the earliest event in the task that makes + // any sense. Though, we always want the start if that's there. + var firstEv *tracev2.Event + if task.Start != nil { + firstEv = task.Start + } else { + for _, logEv := range task.Logs { + if firstEv == nil || logEv.Time() < firstEv.Time() { + firstEv = logEv + } + } + if task.End != nil && (firstEv == nil || task.End.Time() < firstEv.Time()) { + firstEv = task.End + } + } + if firstEv == nil || firstEv.Goroutine() == tracev2.NoGoroutine { + log.Printf("failed to find task with id %d", taskid) + return + } + + // Set the goroutine filtering options. + goid := firstEv.Goroutine() + opts.focusGoroutine = goid + goroutines := make(map[tracev2.GoID]struct{}) + for _, task := range opts.tasks { + // Find only directly involved goroutines. 
+ for id := range task.Goroutines { + goroutines[id] = struct{}{} + } + } + opts.goroutines = goroutines + } + + // Parse start and end options. Both or none must be present. + start := int64(0) + end := int64(math.MaxInt64) + if startStr, endStr := r.FormValue("start"), r.FormValue("end"); startStr != "" && endStr != "" { + var err error + start, err = strconv.ParseInt(startStr, 10, 64) + if err != nil { + log.Printf("failed to parse start parameter %q: %v", startStr, err) + return + } + + end, err = strconv.ParseInt(endStr, 10, 64) + if err != nil { + log.Printf("failed to parse end parameter %q: %v", endStr, err) + return + } + } + + c := traceviewer.ViewerDataTraceConsumer(w, start, end) + if err := generateTrace(parsed, opts, c); err != nil { + log.Printf("failed to generate trace: %v", err) + } + }) +} + +// traceContext is a wrapper around a traceviewer.Emitter with some additional +// information that's useful to most parts of trace viewer JSON emission. +type traceContext struct { + *traceviewer.Emitter + startTime tracev2.Time + endTime tracev2.Time +} + +// elapsed returns the elapsed time between the trace time and the start time +// of the trace. +func (ctx *traceContext) elapsed(now tracev2.Time) time.Duration { + return now.Sub(ctx.startTime) +} + +type genOpts struct { + mode traceviewer.Mode + startTime time.Duration + endTime time.Duration + + // Used if mode != 0. + focusGoroutine tracev2.GoID + goroutines map[tracev2.GoID]struct{} // Goroutines to be displayed for goroutine-oriented or task-oriented view. goroutines[0] is the main goroutine. + tasks []*trace.UserTaskSummary +} + +// setTask sets a task to focus on. +func (opts *genOpts) setTask(parsed *parsedTrace, task *trace.UserTaskSummary) { + opts.mode |= traceviewer.ModeTaskOriented + if task.Start != nil { + opts.startTime = task.Start.Time().Sub(parsed.startTime()) + } else { // The task started before the trace did. 
+ opts.startTime = 0 + } + if task.End != nil { + opts.endTime = task.End.Time().Sub(parsed.startTime()) + } else { // The task didn't end. + opts.endTime = parsed.endTime().Sub(parsed.startTime()) + } + opts.tasks = task.Descendents() + slices.SortStableFunc(opts.tasks, func(a, b *trace.UserTaskSummary) int { + aStart, bStart := parsed.startTime(), parsed.startTime() + if a.Start != nil { + aStart = a.Start.Time() + } + if b.Start != nil { + bStart = b.Start.Time() + } + if a.Start != b.Start { + return cmp.Compare(aStart, bStart) + } + // Break ties with the end time. + aEnd, bEnd := parsed.endTime(), parsed.endTime() + if a.End != nil { + aEnd = a.End.Time() + } + if b.End != nil { + bEnd = b.End.Time() + } + return cmp.Compare(aEnd, bEnd) + }) +} + +func defaultGenOpts() *genOpts { + return &genOpts{ + startTime: time.Duration(0), + endTime: time.Duration(math.MaxInt64), + } +} + +func generateTrace(parsed *parsedTrace, opts *genOpts, c traceviewer.TraceConsumer) error { + ctx := &traceContext{ + Emitter: traceviewer.NewEmitter(c, opts.startTime, opts.endTime), + startTime: parsed.events[0].Time(), + endTime: parsed.events[len(parsed.events)-1].Time(), + } + defer ctx.Flush() + + var g generator + if opts.mode&traceviewer.ModeGoroutineOriented != 0 { + g = newGoroutineGenerator(ctx, opts.focusGoroutine, opts.goroutines) + } else if opts.mode&traceviewer.ModeThreadOriented != 0 { + g = newThreadGenerator() + } else { + g = newProcGenerator() + } + runGenerator(ctx, g, parsed, opts) + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/jsontrace_test.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/jsontrace_test.go new file mode 100644 index 0000000000000000000000000000000000000000..65ce041c4faabeb0817ca97daf8063389bd1f371 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/jsontrace_test.go @@ -0,0 +1,291 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "encoding/json" + tracev1 "internal/trace" + "io" + "net/http/httptest" + "os" + "path/filepath" + "slices" + "strconv" + "strings" + "testing" + "time" + + "internal/trace/traceviewer/format" + "internal/trace/v2/raw" +) + +func TestJSONTraceHandler(t *testing.T) { + testPaths, err := filepath.Glob("./testdata/*.test") + if err != nil { + t.Fatalf("discovering tests: %v", err) + } + for _, testPath := range testPaths { + t.Run(filepath.Base(testPath), func(t *testing.T) { + parsed := getTestTrace(t, testPath) + data := recordJSONTraceHandlerResponse(t, parsed) + // TODO(mknyszek): Check that there's one at most goroutine per proc at any given time. + checkExecutionTimes(t, data) + checkPlausibleHeapMetrics(t, data) + // TODO(mknyszek): Check for plausible thread and goroutine metrics. + checkMetaNamesEmitted(t, data, "process_name", []string{"STATS", "PROCS"}) + checkMetaNamesEmitted(t, data, "thread_name", []string{"GC", "Network", "Timers", "Syscalls", "Proc 0"}) + checkProcStartStop(t, data) + checkSyscalls(t, data) + checkNetworkUnblock(t, data) + // TODO(mknyszek): Check for flow events. + }) + } +} + +func checkSyscalls(t *testing.T, data format.Data) { + data = filterViewerTrace(data, + filterEventName("syscall"), + filterStackRootFunc("main.blockingSyscall")) + if len(data.Events) <= 1 { + t.Errorf("got %d events, want > 1", len(data.Events)) + } + data = filterViewerTrace(data, filterBlocked("yes")) + if len(data.Events) != 1 { + t.Errorf("got %d events, want 1", len(data.Events)) + } +} + +type eventFilterFn func(*format.Event, *format.Data) bool + +func filterEventName(name string) eventFilterFn { + return func(e *format.Event, _ *format.Data) bool { + return e.Name == name + } +} + +// filterGoRoutineName returns an event filter that returns true if the event's +// goroutine name is equal to name. 
+func filterGoRoutineName(name string) eventFilterFn { + return func(e *format.Event, _ *format.Data) bool { + return parseGoroutineName(e) == name + } +} + +// parseGoroutineName returns the goroutine name from the event's name field. +// E.g. if e.Name is "G42 main.cpu10", this returns "main.cpu10". +func parseGoroutineName(e *format.Event) string { + parts := strings.SplitN(e.Name, " ", 2) + if len(parts) != 2 || !strings.HasPrefix(parts[0], "G") { + return "" + } + return parts[1] +} + +// filterBlocked returns an event filter that returns true if the event's +// "blocked" argument is equal to blocked. +func filterBlocked(blocked string) eventFilterFn { + return func(e *format.Event, _ *format.Data) bool { + m, ok := e.Arg.(map[string]any) + if !ok { + return false + } + return m["blocked"] == blocked + } +} + +// filterStackRootFunc returns an event filter that returns true if the function +// at the root of the stack trace is named name. +func filterStackRootFunc(name string) eventFilterFn { + return func(e *format.Event, data *format.Data) bool { + frames := stackFrames(data, e.Stack) + rootFrame := frames[len(frames)-1] + return strings.HasPrefix(rootFrame, name+":") + } +} + +// filterViewerTrace returns a copy of data with only the events that pass all +// of the given filters. 
+func filterViewerTrace(data format.Data, fns ...eventFilterFn) (filtered format.Data) { + filtered = data + filtered.Events = nil + for _, e := range data.Events { + keep := true + for _, fn := range fns { + keep = keep && fn(e, &filtered) + } + if keep { + filtered.Events = append(filtered.Events, e) + } + } + return +} + +func stackFrames(data *format.Data, stackID int) (frames []string) { + for { + frame, ok := data.Frames[strconv.Itoa(stackID)] + if !ok { + return + } + frames = append(frames, frame.Name) + stackID = frame.Parent + } +} + +func checkProcStartStop(t *testing.T, data format.Data) { + procStarted := map[uint64]bool{} + for _, e := range data.Events { + if e.Name == "proc start" { + if procStarted[e.TID] == true { + t.Errorf("proc started twice: %d", e.TID) + } + procStarted[e.TID] = true + } + if e.Name == "proc stop" { + if procStarted[e.TID] == false { + t.Errorf("proc stopped twice: %d", e.TID) + } + procStarted[e.TID] = false + } + } + if got, want := len(procStarted), 8; got != want { + t.Errorf("wrong number of procs started/stopped got=%d want=%d", got, want) + } +} + +func checkNetworkUnblock(t *testing.T, data format.Data) { + count := 0 + var netBlockEv *format.Event + for _, e := range data.Events { + if e.TID == tracev1.NetpollP && e.Name == "unblock (network)" && e.Phase == "I" && e.Scope == "t" { + count++ + netBlockEv = e + } + } + if netBlockEv == nil { + t.Error("failed to find a network unblock") + } + if count == 0 { + t.Errorf("found zero network block events, want at least one") + } + // TODO(mknyszek): Check for the flow of this event to some slice event of a goroutine running. 
+} + +func checkExecutionTimes(t *testing.T, data format.Data) { + cpu10 := sumExecutionTime(filterViewerTrace(data, filterGoRoutineName("main.cpu10"))) + cpu20 := sumExecutionTime(filterViewerTrace(data, filterGoRoutineName("main.cpu20"))) + if cpu10 <= 0 || cpu20 <= 0 || cpu10 >= cpu20 { + t.Errorf("bad execution times: cpu10=%v, cpu20=%v", cpu10, cpu20) + } +} + +func checkMetaNamesEmitted(t *testing.T, data format.Data, category string, want []string) { + t.Helper() + names := metaEventNameArgs(category, data) + for _, wantName := range want { + if !slices.Contains(names, wantName) { + t.Errorf("%s: names=%v, want %q", category, names, wantName) + } + } +} + +func metaEventNameArgs(category string, data format.Data) (names []string) { + for _, e := range data.Events { + if e.Name == category && e.Phase == "M" { + names = append(names, e.Arg.(map[string]any)["name"].(string)) + } + } + return +} + +func checkPlausibleHeapMetrics(t *testing.T, data format.Data) { + hms := heapMetrics(data) + var nonZeroAllocated, nonZeroNextGC bool + for _, hm := range hms { + if hm.Allocated > 0 { + nonZeroAllocated = true + } + if hm.NextGC > 0 { + nonZeroNextGC = true + } + } + + if !nonZeroAllocated { + t.Errorf("nonZeroAllocated=%v, want true", nonZeroAllocated) + } + if !nonZeroNextGC { + t.Errorf("nonZeroNextGC=%v, want true", nonZeroNextGC) + } +} + +func heapMetrics(data format.Data) (metrics []format.HeapCountersArg) { + for _, e := range data.Events { + if e.Phase == "C" && e.Name == "Heap" { + j, _ := json.Marshal(e.Arg) + var metric format.HeapCountersArg + json.Unmarshal(j, &metric) + metrics = append(metrics, metric) + } + } + return +} + +func recordJSONTraceHandlerResponse(t *testing.T, parsed *parsedTrace) format.Data { + h := JSONTraceHandler(parsed) + recorder := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/jsontrace", nil) + h.ServeHTTP(recorder, r) + + var data format.Data + if err := json.Unmarshal(recorder.Body.Bytes(), &data); err != nil { + 
t.Fatal(err) + } + return data +} + +func sumExecutionTime(data format.Data) (sum time.Duration) { + for _, e := range data.Events { + sum += time.Duration(e.Dur) * time.Microsecond + } + return +} + +func getTestTrace(t *testing.T, testPath string) *parsedTrace { + t.Helper() + + // First read in the text trace and write it out as bytes. + f, err := os.Open(testPath) + if err != nil { + t.Fatalf("failed to open test %s: %v", testPath, err) + } + r, err := raw.NewTextReader(f) + if err != nil { + t.Fatalf("failed to read test %s: %v", testPath, err) + } + var trace bytes.Buffer + w, err := raw.NewWriter(&trace, r.Version()) + if err != nil { + t.Fatalf("failed to write out test %s: %v", testPath, err) + } + for { + ev, err := r.ReadEvent() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("failed to read test %s: %v", testPath, err) + } + if err := w.WriteEvent(ev); err != nil { + t.Fatalf("failed to write out test %s: %v", testPath, err) + } + } + + // Parse the test trace. + parsed, err := parseTrace(&trace) + if err != nil { + t.Fatalf("failed to parse trace: %v", err) + } + return parsed +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/main.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/main.go new file mode 100644 index 0000000000000000000000000000000000000000..0a60ef04db0bccedb136bb61b9d786236e19c1cb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/main.go @@ -0,0 +1,190 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "fmt" + "internal/trace" + "internal/trace/traceviewer" + tracev2 "internal/trace/v2" + "io" + "log" + "net" + "net/http" + "os" + + "internal/trace/v2/raw" + + "cmd/internal/browser" +) + +// Main is the main function for cmd/trace v2. 
+func Main(traceFile, httpAddr, pprof string, debug int) error { + tracef, err := os.Open(traceFile) + if err != nil { + return fmt.Errorf("failed to read trace file: %w", err) + } + defer tracef.Close() + + // Debug flags. + switch debug { + case 1: + return debugProcessedEvents(tracef) + case 2: + return debugRawEvents(tracef) + } + + ln, err := net.Listen("tcp", httpAddr) + if err != nil { + return fmt.Errorf("failed to create server socket: %w", err) + } + addr := "http://" + ln.Addr().String() + + log.Print("Preparing trace for viewer...") + parsed, err := parseTrace(tracef) + if err != nil { + return err + } + // N.B. tracef not needed after this point. + // We might double-close, but that's fine; we ignore the error. + tracef.Close() + + log.Print("Splitting trace for viewer...") + ranges, err := splitTrace(parsed) + if err != nil { + return err + } + + log.Printf("Opening browser. Trace viewer is listening on %s", addr) + browser.Open(addr) + + mutatorUtil := func(flags trace.UtilFlags) ([][]trace.MutatorUtil, error) { + return trace.MutatorUtilizationV2(parsed.events, flags), nil + } + + mux := http.NewServeMux() + + // Main endpoint. + mux.Handle("/", traceviewer.MainHandler([]traceviewer.View{ + {Type: traceviewer.ViewProc, Ranges: ranges}, + // N.B. Use the same ranges for threads. It takes a long time to compute + // the split a second time, but the makeup of the events are similar enough + // that this is still a good split. + {Type: traceviewer.ViewThread, Ranges: ranges}, + })) + + // Catapult handlers. + mux.Handle("/trace", traceviewer.TraceHandler()) + mux.Handle("/jsontrace", JSONTraceHandler(parsed)) + mux.Handle("/static/", traceviewer.StaticHandler()) + + // Goroutines handlers. + mux.HandleFunc("/goroutines", GoroutinesHandlerFunc(parsed.summary.Goroutines)) + mux.HandleFunc("/goroutine", GoroutineHandler(parsed.summary.Goroutines)) + + // MMU handler. 
+ mux.HandleFunc("/mmu", traceviewer.MMUHandlerFunc(ranges, mutatorUtil)) + + // Basic pprof endpoints. + mux.HandleFunc("/io", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofIO(), parsed))) + mux.HandleFunc("/block", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofBlock(), parsed))) + mux.HandleFunc("/syscall", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofSyscall(), parsed))) + mux.HandleFunc("/sched", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofSched(), parsed))) + + // Region-based pprof endpoints. + mux.HandleFunc("/regionio", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofIO(), parsed))) + mux.HandleFunc("/regionblock", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofBlock(), parsed))) + mux.HandleFunc("/regionsyscall", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofSyscall(), parsed))) + mux.HandleFunc("/regionsched", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofSched(), parsed))) + + // Region endpoints. + mux.HandleFunc("/userregions", UserRegionsHandlerFunc(parsed)) + mux.HandleFunc("/userregion", UserRegionHandlerFunc(parsed)) + + // Task endpoints. 
+ mux.HandleFunc("/usertasks", UserTasksHandlerFunc(parsed)) + mux.HandleFunc("/usertask", UserTaskHandlerFunc(parsed)) + + err = http.Serve(ln, mux) + return fmt.Errorf("failed to start http server: %w", err) +} + +type parsedTrace struct { + events []tracev2.Event + summary *trace.Summary +} + +func parseTrace(tr io.Reader) (*parsedTrace, error) { + r, err := tracev2.NewReader(tr) + if err != nil { + return nil, fmt.Errorf("failed to create trace reader: %w", err) + } + s := trace.NewSummarizer() + t := new(parsedTrace) + for { + ev, err := r.ReadEvent() + if err == io.EOF { + break + } else if err != nil { + return nil, fmt.Errorf("failed to read event: %w", err) + } + t.events = append(t.events, ev) + s.Event(&t.events[len(t.events)-1]) + } + t.summary = s.Finalize() + return t, nil +} + +func (t *parsedTrace) startTime() tracev2.Time { + return t.events[0].Time() +} + +func (t *parsedTrace) endTime() tracev2.Time { + return t.events[len(t.events)-1].Time() +} + +// splitTrace splits the trace into a number of ranges, each resulting in approx 100 MiB of +// json output (the trace viewer can hardly handle more). +func splitTrace(parsed *parsedTrace) ([]traceviewer.Range, error) { + // TODO(mknyszek): Split traces by generation by doing a quick first pass over the + // trace to identify all the generation boundaries. 
+ s, c := traceviewer.SplittingTraceConsumer(100 << 20) // 100 MiB + if err := generateTrace(parsed, defaultGenOpts(), c); err != nil { + return nil, err + } + return s.Ranges, nil +} + +func debugProcessedEvents(trace io.Reader) error { + tr, err := tracev2.NewReader(trace) + if err != nil { + return err + } + for { + ev, err := tr.ReadEvent() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + fmt.Println(ev.String()) + } +} + +func debugRawEvents(trace io.Reader) error { + rr, err := raw.NewReader(trace) + if err != nil { + return err + } + for { + ev, err := rr.ReadEvent() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + fmt.Println(ev.String()) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/pprof.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/pprof.go new file mode 100644 index 0000000000000000000000000000000000000000..05895eda3dee8f87f13bce35115999ebf3ef2d49 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/pprof.go @@ -0,0 +1,336 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Serving of pprof-like profiles. 
+ +package trace + +import ( + "cmp" + "fmt" + "internal/trace" + "internal/trace/traceviewer" + tracev2 "internal/trace/v2" + "net/http" + "slices" + "strings" + "time" +) + +func pprofByGoroutine(compute computePprofFunc, t *parsedTrace) traceviewer.ProfileFunc { + return func(r *http.Request) ([]traceviewer.ProfileRecord, error) { + name := r.FormValue("name") + gToIntervals, err := pprofMatchingGoroutines(name, t) + if err != nil { + return nil, err + } + return compute(gToIntervals, t.events) + } +} + +func pprofByRegion(compute computePprofFunc, t *parsedTrace) traceviewer.ProfileFunc { + return func(r *http.Request) ([]traceviewer.ProfileRecord, error) { + filter, err := newRegionFilter(r) + if err != nil { + return nil, err + } + gToIntervals, err := pprofMatchingRegions(filter, t) + if err != nil { + return nil, err + } + return compute(gToIntervals, t.events) + } +} + +// pprofMatchingGoroutines returns the ids of goroutines of the matching name and its interval. +// If the id string is empty, returns nil without an error. +func pprofMatchingGoroutines(name string, t *parsedTrace) (map[tracev2.GoID][]interval, error) { + res := make(map[tracev2.GoID][]interval) + for _, g := range t.summary.Goroutines { + if g.Name != name { + continue + } + endTime := g.EndTime + if g.EndTime == 0 { + endTime = t.endTime() // Use the trace end time, since the goroutine is still live then. + } + res[g.ID] = []interval{{start: g.StartTime, end: endTime}} + } + if len(res) == 0 { + return nil, fmt.Errorf("failed to find matching goroutines for name: %s", name) + } + return res, nil +} + +// pprofMatchingRegions returns the time intervals of matching regions +// grouped by the goroutine id. If the filter is nil, returns nil without an error. 
+func pprofMatchingRegions(filter *regionFilter, t *parsedTrace) (map[tracev2.GoID][]interval, error) { + if filter == nil { + return nil, nil + } + + gToIntervals := make(map[tracev2.GoID][]interval) + for _, g := range t.summary.Goroutines { + for _, r := range g.Regions { + if !filter.match(t, r) { + continue + } + gToIntervals[g.ID] = append(gToIntervals[g.ID], regionInterval(t, r)) + } + } + + for g, intervals := range gToIntervals { + // In order to remove nested regions and + // consider only the outermost regions, + // first, we sort based on the start time + // and then scan through to select only the outermost regions. + slices.SortFunc(intervals, func(a, b interval) int { + if c := cmp.Compare(a.start, b.start); c != 0 { + return c + } + return cmp.Compare(a.end, b.end) + }) + var lastTimestamp tracev2.Time + var n int + // Select only the outermost regions. + for _, i := range intervals { + if lastTimestamp <= i.start { + intervals[n] = i // new non-overlapping region starts. + lastTimestamp = i.end + n++ + } + // Otherwise, skip because this region overlaps with a previous region. + } + gToIntervals[g] = intervals[:n] + } + return gToIntervals, nil +} + +type computePprofFunc func(gToIntervals map[tracev2.GoID][]interval, events []tracev2.Event) ([]traceviewer.ProfileRecord, error) + +// computePprofIO returns a computePprofFunc that generates IO pprof-like profile (time spent in +// IO wait, currently only network blocking event). +func computePprofIO() computePprofFunc { + return makeComputePprofFunc(tracev2.GoWaiting, func(reason string) bool { + return reason == "network" + }) +} + +// computePprofBlock returns a computePprofFunc that generates blocking pprof-like profile +// (time spent blocked on synchronization primitives). 
+func computePprofBlock() computePprofFunc { + return makeComputePprofFunc(tracev2.GoWaiting, func(reason string) bool { + return strings.Contains(reason, "chan") || strings.Contains(reason, "sync") || strings.Contains(reason, "select") + }) +} + +// computePprofSyscall returns a computePprofFunc that generates a syscall pprof-like +// profile (time spent in syscalls). +func computePprofSyscall() computePprofFunc { + return makeComputePprofFunc(tracev2.GoSyscall, func(_ string) bool { + return true + }) +} + +// computePprofSched returns a computePprofFunc that generates a scheduler latency pprof-like profile +// (time between a goroutine become runnable and actually scheduled for execution). +func computePprofSched() computePprofFunc { + return makeComputePprofFunc(tracev2.GoRunnable, func(_ string) bool { + return true + }) +} + +// makeComputePprofFunc returns a computePprofFunc that generates a profile of time goroutines spend +// in a particular state for the specified reasons. +func makeComputePprofFunc(state tracev2.GoState, trackReason func(string) bool) computePprofFunc { + return func(gToIntervals map[tracev2.GoID][]interval, events []tracev2.Event) ([]traceviewer.ProfileRecord, error) { + stacks := newStackMap() + tracking := make(map[tracev2.GoID]*tracev2.Event) + for i := range events { + ev := &events[i] + + // Filter out any non-state-transitions and events without stacks. + if ev.Kind() != tracev2.EventStateTransition { + continue + } + stack := ev.Stack() + if stack == tracev2.NoStack { + continue + } + + // The state transition has to apply to a goroutine. + st := ev.StateTransition() + if st.Resource.Kind != tracev2.ResourceGoroutine { + continue + } + id := st.Resource.Goroutine() + _, new := st.Goroutine() + + // Check if we're tracking this goroutine. + startEv := tracking[id] + if startEv == nil { + // We're not. Start tracking if the new state + // matches what we want and the transition is + // for one of the reasons we care about. 
+				if new == state && trackReason(st.Reason) {
+					tracking[id] = ev
+				}
+				continue
+			}
+			// We're tracking this goroutine.
+			if new == state {
+				// We're tracking this goroutine, but it's just transitioning
+				// to the same state (this is a no-op).
+				continue
+			}
+			// The goroutine has transitioned out of the state we care about,
+			// so remove it from tracking and record the stack.
+			delete(tracking, id)
+
+			overlapping := pprofOverlappingDuration(gToIntervals, id, interval{startEv.Time(), ev.Time()})
+			if overlapping > 0 {
+				rec := stacks.getOrAdd(startEv.Stack())
+				rec.Count++
+				rec.Time += overlapping
+			}
+		}
+		return stacks.profile(), nil
+	}
+}
+
+// pprofOverlappingDuration returns the overlapping duration between
+// the time intervals in gToIntervals and the specified event.
+// If gToIntervals is nil, this simply returns the event's duration.
+func pprofOverlappingDuration(gToIntervals map[tracev2.GoID][]interval, id tracev2.GoID, sample interval) time.Duration {
+	if gToIntervals == nil { // No filtering.
+		return sample.duration()
+	}
+	intervals := gToIntervals[id]
+	if len(intervals) == 0 {
+		return 0
+	}
+
+	var overlapping time.Duration
+	for _, i := range intervals {
+		if o := i.overlap(sample); o > 0 {
+			overlapping += o
+		}
+	}
+	return overlapping
+}
+
+// interval represents a time interval in the trace.
+type interval struct {
+	start, end tracev2.Time
+}
+
+func (i interval) duration() time.Duration {
+	return i.end.Sub(i.start)
+}
+
+func (i1 interval) overlap(i2 interval) time.Duration {
+	// Assume start1 <= end1 and start2 <= end2
+	if i1.end < i2.start || i2.end < i1.start {
+		return 0
+	}
+	if i1.start < i2.start { // choose the later one
+		i1.start = i2.start
+	}
+	if i1.end > i2.end { // choose the earlier one
+		i1.end = i2.end
+	}
+	return i1.duration()
+}
+
+// +// Because slices aren't comparable and we want to leverage maps for deduplication, +// we have to choose a fixed constant upper bound on the amount of frames we want +// to support. In practice this is fine because there's a maximum depth to these +// stacks anyway. +const pprofMaxStack = 128 + +// stackMap is a map of tracev2.Stack to some value V. +type stackMap struct { + // stacks contains the full list of stacks in the set, however + // it is insufficient for deduplication because tracev2.Stack + // equality is only optimistic. If two tracev2.Stacks are equal, + // then they are guaranteed to be equal in content. If they are + // not equal, then they might still be equal in content. + stacks map[tracev2.Stack]*traceviewer.ProfileRecord + + // pcs is the source-of-truth for deduplication. It is a map of + // the actual PCs in the stack to a tracev2.Stack. + pcs map[[pprofMaxStack]uint64]tracev2.Stack +} + +func newStackMap() *stackMap { + return &stackMap{ + stacks: make(map[tracev2.Stack]*traceviewer.ProfileRecord), + pcs: make(map[[pprofMaxStack]uint64]tracev2.Stack), + } +} + +func (m *stackMap) getOrAdd(stack tracev2.Stack) *traceviewer.ProfileRecord { + // Fast path: check to see if this exact stack is already in the map. + if rec, ok := m.stacks[stack]; ok { + return rec + } + // Slow path: the stack may still be in the map. + + // Grab the stack's PCs as the source-of-truth. + var pcs [pprofMaxStack]uint64 + pcsForStack(stack, &pcs) + + // Check the source-of-truth. + var rec *traceviewer.ProfileRecord + if existing, ok := m.pcs[pcs]; ok { + // In the map. + rec = m.stacks[existing] + delete(m.stacks, existing) + } else { + // Not in the map. + rec = new(traceviewer.ProfileRecord) + } + // Insert regardless of whether we have a match in m.pcs. + // Even if we have a match, we want to keep the newest version + // of that stack, since we're much more likely tos see it again + // as we iterate through the trace linearly. 
Simultaneously, we + // are likely to never see the old stack again. + m.pcs[pcs] = stack + m.stacks[stack] = rec + return rec +} + +func (m *stackMap) profile() []traceviewer.ProfileRecord { + prof := make([]traceviewer.ProfileRecord, 0, len(m.stacks)) + for stack, record := range m.stacks { + rec := *record + i := 0 + stack.Frames(func(frame tracev2.StackFrame) bool { + rec.Stack = append(rec.Stack, &trace.Frame{ + PC: frame.PC, + Fn: frame.Func, + File: frame.File, + Line: int(frame.Line), + }) + i++ + // Cut this off at pprofMaxStack because that's as far + // as our deduplication goes. + return i < pprofMaxStack + }) + prof = append(prof, rec) + } + return prof +} + +// pcsForStack extracts the first pprofMaxStack PCs from stack into pcs. +func pcsForStack(stack tracev2.Stack, pcs *[pprofMaxStack]uint64) { + i := 0 + stack.Frames(func(frame tracev2.StackFrame) bool { + pcs[i] = frame.PC + i++ + return i < len(pcs) + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/procgen.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/procgen.go new file mode 100644 index 0000000000000000000000000000000000000000..41e379527f1eaa8cc688f9c6af9e8550797bd6aa --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/procgen.go @@ -0,0 +1,212 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package trace + +import ( + "fmt" + "internal/trace/traceviewer" + "internal/trace/traceviewer/format" + tracev2 "internal/trace/v2" +) + +var _ generator = &procGenerator{} + +type procGenerator struct { + globalRangeGenerator + globalMetricGenerator + procRangeGenerator + stackSampleGenerator[tracev2.ProcID] + logEventGenerator[tracev2.ProcID] + + gStates map[tracev2.GoID]*gState[tracev2.ProcID] + inSyscall map[tracev2.ProcID]*gState[tracev2.ProcID] + maxProc tracev2.ProcID +} + +func newProcGenerator() *procGenerator { + pg := new(procGenerator) + rg := func(ev *tracev2.Event) tracev2.ProcID { + return ev.Proc() + } + pg.stackSampleGenerator.getResource = rg + pg.logEventGenerator.getResource = rg + pg.gStates = make(map[tracev2.GoID]*gState[tracev2.ProcID]) + pg.inSyscall = make(map[tracev2.ProcID]*gState[tracev2.ProcID]) + return pg +} + +func (g *procGenerator) Sync() { + g.globalRangeGenerator.Sync() + g.procRangeGenerator.Sync() +} + +func (g *procGenerator) GoroutineLabel(ctx *traceContext, ev *tracev2.Event) { + l := ev.Label() + g.gStates[l.Resource.Goroutine()].setLabel(l.Label) +} + +func (g *procGenerator) GoroutineRange(ctx *traceContext, ev *tracev2.Event) { + r := ev.Range() + switch ev.Kind() { + case tracev2.EventRangeBegin: + g.gStates[r.Scope.Goroutine()].rangeBegin(ev.Time(), r.Name, ev.Stack()) + case tracev2.EventRangeActive: + g.gStates[r.Scope.Goroutine()].rangeActive(r.Name) + case tracev2.EventRangeEnd: + gs := g.gStates[r.Scope.Goroutine()] + gs.rangeEnd(ev.Time(), r.Name, ev.Stack(), ctx) + } +} + +func (g *procGenerator) GoroutineTransition(ctx *traceContext, ev *tracev2.Event) { + st := ev.StateTransition() + goID := st.Resource.Goroutine() + + // If we haven't seen this goroutine before, create a new + // gState for it. + gs, ok := g.gStates[goID] + if !ok { + gs = newGState[tracev2.ProcID](goID) + g.gStates[goID] = gs + } + // If we haven't already named this goroutine, try to name it. 
+ gs.augmentName(st.Stack) + + // Handle the goroutine state transition. + from, to := st.Goroutine() + if from == to { + // Filter out no-op events. + return + } + if from == tracev2.GoRunning && !to.Executing() { + if to == tracev2.GoWaiting { + // Goroutine started blocking. + gs.block(ev.Time(), ev.Stack(), st.Reason, ctx) + } else { + gs.stop(ev.Time(), ev.Stack(), ctx) + } + } + if !from.Executing() && to == tracev2.GoRunning { + start := ev.Time() + if from == tracev2.GoUndetermined { + // Back-date the event to the start of the trace. + start = ctx.startTime + } + gs.start(start, ev.Proc(), ctx) + } + + if from == tracev2.GoWaiting { + // Goroutine was unblocked. + gs.unblock(ev.Time(), ev.Stack(), ev.Proc(), ctx) + } + if from == tracev2.GoNotExist && to == tracev2.GoRunnable { + // Goroutine was created. + gs.created(ev.Time(), ev.Proc(), ev.Stack()) + } + if from == tracev2.GoSyscall && to != tracev2.GoRunning { + // Goroutine exited a blocked syscall. + gs.blockedSyscallEnd(ev.Time(), ev.Stack(), ctx) + } + + // Handle syscalls. + if to == tracev2.GoSyscall && ev.Proc() != tracev2.NoProc { + start := ev.Time() + if from == tracev2.GoUndetermined { + // Back-date the event to the start of the trace. + start = ctx.startTime + } + // Write down that we've entered a syscall. Note: we might have no P here + // if we're in a cgo callback or this is a transition from GoUndetermined + // (i.e. the G has been blocked in a syscall). + gs.syscallBegin(start, ev.Proc(), ev.Stack()) + g.inSyscall[ev.Proc()] = gs + } + // Check if we're exiting a non-blocking syscall. + _, didNotBlock := g.inSyscall[ev.Proc()] + if from == tracev2.GoSyscall && didNotBlock { + gs.syscallEnd(ev.Time(), false, ctx) + delete(g.inSyscall, ev.Proc()) + } + + // Note down the goroutine transition. 
+ _, inMarkAssist := gs.activeRanges["GC mark assist"] + ctx.GoroutineTransition(ctx.elapsed(ev.Time()), viewerGState(from, inMarkAssist), viewerGState(to, inMarkAssist)) +} + +func (g *procGenerator) ProcTransition(ctx *traceContext, ev *tracev2.Event) { + st := ev.StateTransition() + proc := st.Resource.Proc() + + g.maxProc = max(g.maxProc, proc) + viewerEv := traceviewer.InstantEvent{ + Resource: uint64(proc), + Stack: ctx.Stack(viewerFrames(ev.Stack())), + } + + from, to := st.Proc() + if from == to { + // Filter out no-op events. + return + } + if to.Executing() { + start := ev.Time() + if from == tracev2.ProcUndetermined { + start = ctx.startTime + } + viewerEv.Name = "proc start" + viewerEv.Arg = format.ThreadIDArg{ThreadID: uint64(ev.Thread())} + viewerEv.Ts = ctx.elapsed(start) + ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, 1) + } + if from.Executing() { + start := ev.Time() + viewerEv.Name = "proc stop" + viewerEv.Ts = ctx.elapsed(start) + ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, -1) + + // Check if this proc was in a syscall before it stopped. + // This means the syscall blocked. We need to emit it to the + // viewer at this point because we only display the time the + // syscall occupied a P when the viewer is in per-P mode. + // + // TODO(mknyszek): We could do better in a per-M mode because + // all events have to happen on *some* thread, and in v2 traces + // we know what that thread is. + gs, ok := g.inSyscall[proc] + if ok { + // Emit syscall slice for blocked syscall. + gs.syscallEnd(start, true, ctx) + gs.stop(start, ev.Stack(), ctx) + delete(g.inSyscall, proc) + } + } + // TODO(mknyszek): Consider modeling procs differently and have them be + // transition to and from NotExist when GOMAXPROCS changes. We can emit + // events for this to clearly delineate GOMAXPROCS changes. 
+
+	if viewerEv.Name != "" {
+		ctx.Instant(viewerEv)
+	}
+}
+
+func (g *procGenerator) Finish(ctx *traceContext) {
+	ctx.SetResourceType("PROCS")
+
+	// Finish off ranges first. It doesn't really matter for the global ranges,
+	// but the proc ranges need to either be a subset of a goroutine slice or
+	// their own slice entirely. If the former, it needs to end first.
+	g.procRangeGenerator.Finish(ctx)
+	g.globalRangeGenerator.Finish(ctx)
+
+	// Finish off all the goroutine slices.
+	for _, gs := range g.gStates {
+		gs.finish(ctx)
+	}
+
+	// Name all the procs to the emitter.
+	for i := uint64(0); i <= uint64(g.maxProc); i++ {
+		ctx.Resource(i, fmt.Sprintf("Proc %v", i))
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/regions.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/regions.go
new file mode 100644
index 0000000000000000000000000000000000000000..5d04fd2ae5151e758eb60645a6f8295dc7fd9660
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/regions.go
@@ -0,0 +1,529 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"cmp"
+	"fmt"
+	"html/template"
+	"internal/trace"
+	"internal/trace/traceviewer"
+	tracev2 "internal/trace/v2"
+	"net/http"
+	"net/url"
+	"slices"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// UserRegionsHandlerFunc returns a HandlerFunc that reports all regions found in the trace.
+func UserRegionsHandlerFunc(t *parsedTrace) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		// Summarize all the regions.
+		summary := make(map[regionFingerprint]regionStats)
+		for _, g := range t.summary.Goroutines {
+			for _, r := range g.Regions {
+				id := fingerprintRegion(r)
+				stats, ok := summary[id]
+				if !ok {
+					stats.regionFingerprint = id
+				}
+				stats.add(t, r)
+				summary[id] = stats
+			}
+		}
+		// Sort regions by PC and name.
+ userRegions := make([]regionStats, 0, len(summary)) + for _, stats := range summary { + userRegions = append(userRegions, stats) + } + slices.SortFunc(userRegions, func(a, b regionStats) int { + if c := cmp.Compare(a.Type, b.Type); c != 0 { + return c + } + return cmp.Compare(a.Frame.PC, b.Frame.PC) + }) + // Emit table. + err := templUserRegionTypes.Execute(w, userRegions) + if err != nil { + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } + } +} + +// regionFingerprint is a way to categorize regions that goes just one step beyond the region's Type +// by including the top stack frame. +type regionFingerprint struct { + Frame tracev2.StackFrame + Type string +} + +func fingerprintRegion(r *trace.UserRegionSummary) regionFingerprint { + return regionFingerprint{ + Frame: regionTopStackFrame(r), + Type: r.Name, + } +} + +func regionTopStackFrame(r *trace.UserRegionSummary) tracev2.StackFrame { + var frame tracev2.StackFrame + if r.Start != nil && r.Start.Stack() != tracev2.NoStack { + r.Start.Stack().Frames(func(f tracev2.StackFrame) bool { + frame = f + return false + }) + } + return frame +} + +type regionStats struct { + regionFingerprint + Histogram traceviewer.TimeHistogram +} + +func (s *regionStats) UserRegionURL() func(min, max time.Duration) string { + return func(min, max time.Duration) string { + return fmt.Sprintf("/userregion?type=%s&pc=%x&latmin=%v&latmax=%v", template.URLQueryEscaper(s.Type), s.Frame.PC, template.URLQueryEscaper(min), template.URLQueryEscaper(max)) + } +} + +func (s *regionStats) add(t *parsedTrace, region *trace.UserRegionSummary) { + s.Histogram.Add(regionInterval(t, region).duration()) +} + +var templUserRegionTypes = template.Must(template.New("").Parse(` + +Regions + + +

Regions

+ +Below is a table containing a summary of all the user-defined regions in the trace. +Regions are grouped by the region type and the point at which the region started. +The rightmost column of the table contains a latency histogram for each region group. +Note that this histogram only counts regions that began and ended within the traced +period. +However, the "Count" column includes all regions, including those that only started +or ended during the traced period. +Regions that were active through the trace period were not recorded, and so are not +accounted for at all. +Click on the links to explore a breakdown of time spent for each region by goroutine +and user-defined task. +
+
+ + + + + + + +{{range $}} + + + + + +{{end}} +
Region typeCountDuration distribution (complete tasks)
{{printf "%q" .Type}}
{{.Frame.Func}} @ {{printf "0x%x" .Frame.PC}}
{{.Frame.File}}:{{.Frame.Line}}
{{.Histogram.Count}}{{.Histogram.ToHTML (.UserRegionURL)}}
+ + +`)) + +// UserRegionHandlerFunc returns a HandlerFunc that presents the details of the selected regions. +func UserRegionHandlerFunc(t *parsedTrace) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Construct the filter from the request. + filter, err := newRegionFilter(r) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // Collect all the regions with their goroutines. + type region struct { + *trace.UserRegionSummary + Goroutine tracev2.GoID + NonOverlappingStats map[string]time.Duration + HasRangeTime bool + } + var regions []region + var maxTotal time.Duration + validNonOverlappingStats := make(map[string]struct{}) + validRangeStats := make(map[string]struct{}) + for _, g := range t.summary.Goroutines { + for _, r := range g.Regions { + if !filter.match(t, r) { + continue + } + nonOverlappingStats := r.NonOverlappingStats() + for name := range nonOverlappingStats { + validNonOverlappingStats[name] = struct{}{} + } + var totalRangeTime time.Duration + for name, dt := range r.RangeTime { + validRangeStats[name] = struct{}{} + totalRangeTime += dt + } + regions = append(regions, region{ + UserRegionSummary: r, + Goroutine: g.ID, + NonOverlappingStats: nonOverlappingStats, + HasRangeTime: totalRangeTime != 0, + }) + if maxTotal < r.TotalTime { + maxTotal = r.TotalTime + } + } + } + + // Sort. + sortBy := r.FormValue("sortby") + if _, ok := validNonOverlappingStats[sortBy]; ok { + slices.SortFunc(regions, func(a, b region) int { + return cmp.Compare(b.NonOverlappingStats[sortBy], a.NonOverlappingStats[sortBy]) + }) + } else { + // Sort by total time by default. + slices.SortFunc(regions, func(a, b region) int { + return cmp.Compare(b.TotalTime, a.TotalTime) + }) + } + + // Write down all the non-overlapping stats and sort them. 
+ allNonOverlappingStats := make([]string, 0, len(validNonOverlappingStats)) + for name := range validNonOverlappingStats { + allNonOverlappingStats = append(allNonOverlappingStats, name) + } + slices.SortFunc(allNonOverlappingStats, func(a, b string) int { + if a == b { + return 0 + } + if a == "Execution time" { + return -1 + } + if b == "Execution time" { + return 1 + } + return cmp.Compare(a, b) + }) + + // Write down all the range stats and sort them. + allRangeStats := make([]string, 0, len(validRangeStats)) + for name := range validRangeStats { + allRangeStats = append(allRangeStats, name) + } + sort.Strings(allRangeStats) + + err = templUserRegionType.Execute(w, struct { + MaxTotal time.Duration + Regions []region + Name string + Filter *regionFilter + NonOverlappingStats []string + RangeStats []string + }{ + MaxTotal: maxTotal, + Regions: regions, + Name: filter.name, + Filter: filter, + NonOverlappingStats: allNonOverlappingStats, + RangeStats: allRangeStats, + }) + if err != nil { + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } + } +} + +var templUserRegionType = template.Must(template.New("").Funcs(template.FuncMap{ + "headerStyle": func(statName string) template.HTMLAttr { + return template.HTMLAttr(fmt.Sprintf("style=\"background-color: %s;\"", stat2Color(statName))) + }, + "barStyle": func(statName string, dividend, divisor time.Duration) template.HTMLAttr { + width := "0" + if divisor != 0 { + width = fmt.Sprintf("%.2f%%", float64(dividend)/float64(divisor)*100) + } + return template.HTMLAttr(fmt.Sprintf("style=\"width: %s; background-color: %s;\"", width, stat2Color(statName))) + }, + "filterParams": func(f *regionFilter) template.URL { + return template.URL(f.params.Encode()) + }, +}).Parse(` + +Regions: {{.Name}} + + + + +

Regions: {{.Name}}

+ +Table of contents + + +

Summary

+ +{{ with $p := filterParams .Filter}} + + + + + + + + + + + + + + + + + +
Network wait profile: graph (download)
Sync block profile: graph (download)
Syscall profile: graph (download)
Scheduler wait profile: graph (download)
+{{ end }} + +

Breakdown

+ +The table below breaks down where each goroutine is spent its time during the +traced period. +All of the columns except total time are non-overlapping. +
+
+ + + + + + + +{{range $.NonOverlappingStats}} + +{{end}} + +{{range .Regions}} + + + + + + {{$Region := .}} + {{range $.NonOverlappingStats}} + {{$Time := index $Region.NonOverlappingStats .}} + + {{end}} + +{{end}} +
Goroutine Task Total {{.}}
{{.Goroutine}} {{if .TaskID}}{{.TaskID}}{{end}} {{ .TotalTime.String }} +
+ {{$Region := .}} + {{range $.NonOverlappingStats}} + {{$Time := index $Region.NonOverlappingStats .}} + {{if $Time}} +   + {{end}} + {{end}} +
+
{{$Time.String}}
+ +

Special ranges

+ +The table below describes how much of the traced period each goroutine spent in +certain special time ranges. +If a goroutine has spent no time in any special time ranges, it is excluded from +the table. +For example, how much time it spent helping the GC. Note that these times do +overlap with the times from the first table. +In general the goroutine may not be executing in these special time ranges. +For example, it may have blocked while trying to help the GC. +This must be taken into account when interpreting the data. +
+
+ + + + + + +{{range $.RangeStats}} + +{{end}} + +{{range .Regions}} + {{if .HasRangeTime}} + + + + + {{$Region := .}} + {{range $.RangeStats}} + {{$Time := index $Region.RangeTime .}} + + {{end}} + + {{end}} +{{end}} +
Goroutine Task Total {{.}}
{{.Goroutine}} {{if .TaskID}}{{.TaskID}}{{end}} {{ .TotalTime.String }} {{$Time.String}}
+`)) + +// regionFilter represents a region filter specified by a user of cmd/trace. +type regionFilter struct { + name string + params url.Values + cond []func(*parsedTrace, *trace.UserRegionSummary) bool +} + +// match returns true if a region, described by its ID and summary, matches +// the filter. +func (f *regionFilter) match(t *parsedTrace, s *trace.UserRegionSummary) bool { + for _, c := range f.cond { + if !c(t, s) { + return false + } + } + return true +} + +// newRegionFilter creates a new region filter from URL query variables. +func newRegionFilter(r *http.Request) (*regionFilter, error) { + if err := r.ParseForm(); err != nil { + return nil, err + } + + var name []string + var conditions []func(*parsedTrace, *trace.UserRegionSummary) bool + filterParams := make(url.Values) + + param := r.Form + if typ, ok := param["type"]; ok && len(typ) > 0 { + name = append(name, fmt.Sprintf("%q", typ[0])) + conditions = append(conditions, func(_ *parsedTrace, r *trace.UserRegionSummary) bool { + return r.Name == typ[0] + }) + filterParams.Add("type", typ[0]) + } + if pc, err := strconv.ParseUint(r.FormValue("pc"), 16, 64); err == nil { + encPC := fmt.Sprintf("0x%x", pc) + name = append(name, "@ "+encPC) + conditions = append(conditions, func(_ *parsedTrace, r *trace.UserRegionSummary) bool { + return regionTopStackFrame(r).PC == pc + }) + filterParams.Add("pc", encPC) + } + + if lat, err := time.ParseDuration(r.FormValue("latmin")); err == nil { + name = append(name, fmt.Sprintf("(latency >= %s)", lat)) + conditions = append(conditions, func(t *parsedTrace, r *trace.UserRegionSummary) bool { + return regionInterval(t, r).duration() >= lat + }) + filterParams.Add("latmin", lat.String()) + } + if lat, err := time.ParseDuration(r.FormValue("latmax")); err == nil { + name = append(name, fmt.Sprintf("(latency <= %s)", lat)) + conditions = append(conditions, func(t *parsedTrace, r *trace.UserRegionSummary) bool { + return regionInterval(t, r).duration() <= lat + }) + 
filterParams.Add("latmax", lat.String()) + } + + return ®ionFilter{ + name: strings.Join(name, " "), + cond: conditions, + params: filterParams, + }, nil +} + +func regionInterval(t *parsedTrace, s *trace.UserRegionSummary) interval { + var i interval + if s.Start != nil { + i.start = s.Start.Time() + } else { + i.start = t.startTime() + } + if s.End != nil { + i.end = s.End.Time() + } else { + i.end = t.endTime() + } + return i +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/tasks.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/tasks.go new file mode 100644 index 0000000000000000000000000000000000000000..fb40811565e83a7b667548c41eef046314f920cd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/tasks.go @@ -0,0 +1,477 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "cmp" + "fmt" + "html/template" + "internal/trace" + "internal/trace/traceviewer" + tracev2 "internal/trace/v2" + "log" + "net/http" + "slices" + "strings" + "time" +) + +// UserTasksHandlerFunc returns a HandlerFunc that reports all tasks found in the trace. +func UserTasksHandlerFunc(t *parsedTrace) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + tasks := t.summary.Tasks + + // Summarize groups of tasks with the same name. + summary := make(map[string]taskStats) + for _, task := range tasks { + stats, ok := summary[task.Name] + if !ok { + stats.Type = task.Name + } + stats.add(task) + summary[task.Name] = stats + } + + // Sort tasks by type. + userTasks := make([]taskStats, 0, len(summary)) + for _, stats := range summary { + userTasks = append(userTasks, stats) + } + slices.SortFunc(userTasks, func(a, b taskStats) int { + return cmp.Compare(a.Type, b.Type) + }) + + // Emit table. 
+ err := templUserTaskTypes.Execute(w, userTasks) + if err != nil { + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } + } +} + +type taskStats struct { + Type string + Count int // Complete + incomplete tasks + Histogram traceviewer.TimeHistogram // Complete tasks only +} + +func (s *taskStats) UserTaskURL(complete bool) func(min, max time.Duration) string { + return func(min, max time.Duration) string { + return fmt.Sprintf("/usertask?type=%s&complete=%v&latmin=%v&latmax=%v", template.URLQueryEscaper(s.Type), template.URLQueryEscaper(complete), template.URLQueryEscaper(min), template.URLQueryEscaper(max)) + } +} + +func (s *taskStats) add(task *trace.UserTaskSummary) { + s.Count++ + if task.Complete() { + s.Histogram.Add(task.End.Time().Sub(task.Start.Time())) + } +} + +var templUserTaskTypes = template.Must(template.New("").Parse(` + +Tasks + + +Search log text:

+ + + + + + +{{range $}} + + + + + +{{end}} +
Task typeCountDuration distribution (complete tasks)
{{.Type}}{{.Count}}{{.Histogram.ToHTML (.UserTaskURL true)}}
+ + +`)) + +// UserTaskHandlerFunc returns a HandlerFunc that presents the details of the selected tasks. +func UserTaskHandlerFunc(t *parsedTrace) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + filter, err := newTaskFilter(r) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + type event struct { + WhenString string + Elapsed time.Duration + Goroutine tracev2.GoID + What string + // TODO: include stack trace of creation time + } + type task struct { + WhenString string + ID tracev2.TaskID + Duration time.Duration + Complete bool + Events []event + Start, End time.Duration // Time since the beginning of the trace + GCTime time.Duration + } + var tasks []task + for _, summary := range t.summary.Tasks { + if !filter.match(t, summary) { + continue + } + + // Collect all the events for the task. + var rawEvents []*tracev2.Event + if summary.Start != nil { + rawEvents = append(rawEvents, summary.Start) + } + if summary.End != nil { + rawEvents = append(rawEvents, summary.End) + } + rawEvents = append(rawEvents, summary.Logs...) + for _, r := range summary.Regions { + if r.Start != nil { + rawEvents = append(rawEvents, r.Start) + } + if r.End != nil { + rawEvents = append(rawEvents, r.End) + } + } + + // Sort them. + slices.SortStableFunc(rawEvents, func(a, b *tracev2.Event) int { + return cmp.Compare(a.Time(), b.Time()) + }) + + // Summarize them. + var events []event + last := t.startTime() + for _, ev := range rawEvents { + what := describeEvent(ev) + if what == "" { + continue + } + sinceStart := ev.Time().Sub(t.startTime()) + events = append(events, event{ + WhenString: fmt.Sprintf("%2.9f", sinceStart.Seconds()), + Elapsed: ev.Time().Sub(last), + What: what, + Goroutine: primaryGoroutine(ev), + }) + last = ev.Time() + } + taskSpan := taskInterval(t, summary) + taskStart := taskSpan.start.Sub(t.startTime()) + + // Produce the task summary. 
+ tasks = append(tasks, task{ + WhenString: fmt.Sprintf("%2.9fs", taskStart.Seconds()), + Duration: taskSpan.duration(), + ID: summary.ID, + Complete: summary.Complete(), + Events: events, + Start: taskStart, + End: taskStart + taskSpan.duration(), + }) + } + // Sort the tasks by duration. + slices.SortFunc(tasks, func(a, b task) int { + return cmp.Compare(a.Duration, b.Duration) + }) + + // Emit table. + err = templUserTaskType.Execute(w, struct { + Name string + Tasks []task + }{ + Name: filter.name, + Tasks: tasks, + }) + if err != nil { + log.Printf("failed to execute template: %v", err) + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } + } +} + +var templUserTaskType = template.Must(template.New("userTask").Funcs(template.FuncMap{ + "elapsed": elapsed, + "asMillisecond": asMillisecond, + "trimSpace": strings.TrimSpace, +}).Parse(` + +Tasks: {{.Name}} + + + +

User Task: {{.Name}}

+ +Search log text:
+ +

+ + + + + + + + + {{range $el := $.Tasks}} + + + + + + + {{range $el.Events}} + + + + + + + {{end}} + {{end}} + + +`)) + +// taskFilter represents a task filter specified by a user of cmd/trace. +type taskFilter struct { + name string + cond []func(*parsedTrace, *trace.UserTaskSummary) bool +} + +// match returns true if a task, described by its ID and summary, matches +// the filter. +func (f *taskFilter) match(t *parsedTrace, task *trace.UserTaskSummary) bool { + if t == nil { + return false + } + for _, c := range f.cond { + if !c(t, task) { + return false + } + } + return true +} + +// newTaskFilter creates a new task filter from URL query variables. +func newTaskFilter(r *http.Request) (*taskFilter, error) { + if err := r.ParseForm(); err != nil { + return nil, err + } + + var name []string + var conditions []func(*parsedTrace, *trace.UserTaskSummary) bool + + param := r.Form + if typ, ok := param["type"]; ok && len(typ) > 0 { + name = append(name, fmt.Sprintf("%q", typ[0])) + conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool { + return task.Name == typ[0] + }) + } + if complete := r.FormValue("complete"); complete == "1" { + name = append(name, "complete") + conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool { + return task.Complete() + }) + } else if complete == "0" { + name = append(name, "incomplete") + conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool { + return !task.Complete() + }) + } + if lat, err := time.ParseDuration(r.FormValue("latmin")); err == nil { + name = append(name, fmt.Sprintf("latency >= %s", lat)) + conditions = append(conditions, func(t *parsedTrace, task *trace.UserTaskSummary) bool { + return task.Complete() && taskInterval(t, task).duration() >= lat + }) + } + if lat, err := time.ParseDuration(r.FormValue("latmax")); err == nil { + name = append(name, fmt.Sprintf("latency <= %s", lat)) + conditions = append(conditions, func(t 
*parsedTrace, task *trace.UserTaskSummary) bool { + return task.Complete() && taskInterval(t, task).duration() <= lat + }) + } + if text := r.FormValue("logtext"); text != "" { + name = append(name, fmt.Sprintf("log contains %q", text)) + conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool { + return taskMatches(task, text) + }) + } + + return &taskFilter{name: strings.Join(name, ","), cond: conditions}, nil +} + +func taskInterval(t *parsedTrace, s *trace.UserTaskSummary) interval { + var i interval + if s.Start != nil { + i.start = s.Start.Time() + } else { + i.start = t.startTime() + } + if s.End != nil { + i.end = s.End.Time() + } else { + i.end = t.endTime() + } + return i +} + +func taskMatches(t *trace.UserTaskSummary, text string) bool { + matches := func(s string) bool { + return strings.Contains(s, text) + } + if matches(t.Name) { + return true + } + for _, r := range t.Regions { + if matches(r.Name) { + return true + } + } + for _, ev := range t.Logs { + log := ev.Log() + if matches(log.Category) { + return true + } + if matches(log.Message) { + return true + } + } + return false +} + +func describeEvent(ev *tracev2.Event) string { + switch ev.Kind() { + case tracev2.EventStateTransition: + st := ev.StateTransition() + if st.Resource.Kind != tracev2.ResourceGoroutine { + return "" + } + old, new := st.Goroutine() + return fmt.Sprintf("%s -> %s", old, new) + case tracev2.EventRegionBegin: + return fmt.Sprintf("region %q begin", ev.Region().Type) + case tracev2.EventRegionEnd: + return fmt.Sprintf("region %q end", ev.Region().Type) + case tracev2.EventTaskBegin: + t := ev.Task() + return fmt.Sprintf("task %q (D %d, parent %d) begin", t.Type, t.ID, t.Parent) + case tracev2.EventTaskEnd: + return "task end" + case tracev2.EventLog: + log := ev.Log() + if log.Category != "" { + return fmt.Sprintf("log %q", log.Message) + } + return fmt.Sprintf("log (category: %s): %q", log.Category, log.Message) + } + return "" +} + +func 
primaryGoroutine(ev *tracev2.Event) tracev2.GoID { + if ev.Kind() != tracev2.EventStateTransition { + return ev.Goroutine() + } + st := ev.StateTransition() + if st.Resource.Kind != tracev2.ResourceGoroutine { + return tracev2.NoGoroutine + } + return st.Resource.Goroutine() +} + +func elapsed(d time.Duration) string { + b := fmt.Appendf(nil, "%.9f", d.Seconds()) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + return string(b) +} + +func asMillisecond(d time.Duration) float64 { + return float64(d.Nanoseconds()) / float64(time.Millisecond) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/testdata/generate.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/testdata/generate.go new file mode 100644 index 0000000000000000000000000000000000000000..c0658b2329667c3c341cad241403543cf2e6fa41 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/testdata/generate.go @@ -0,0 +1,6 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:generate go run mktests.go +package testdata diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/testdata/go122.test b/platform/dbops/binaries/go/go/src/cmd/trace/v2/testdata/go122.test new file mode 100644 index 0000000000000000000000000000000000000000..2ec9e88f4f68cd6d9ae3d19d767d237245bf8457 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/testdata/go122.test @@ -0,0 +1,4639 @@ +Trace Go1.22 +EventBatch gen=1 m=18446744073709551615 time=7689672466239 size=5 +Frequency freq=15625000 +EventBatch gen=1 m=1709048 time=7689670869319 size=423 +ProcStart dt=409 p=7 p_seq=1 +GoStart dt=31 g=34 g_seq=1 +GoStop dt=291990 reason_string=16 stack=50 +GoStart dt=21 g=34 g_seq=2 +GoStop dt=315853 reason_string=16 stack=50 +GoStart dt=30 g=34 g_seq=3 +GoUnblock dt=173432 g=1 g_seq=73 stack=52 +GoDestroy dt=96 +GoStart dt=22 g=1 g_seq=74 +HeapAlloc dt=79 heapalloc_value=26397576 +HeapAlloc dt=51 heapalloc_value=26405640 +GoCreate dt=62 new_g=50 new_stack=53 stack=54 +GoBlock dt=23 reason_string=12 stack=55 +GoStart dt=7 g=50 g_seq=1 +HeapAlloc dt=301 heapalloc_value=26413776 +HeapAlloc dt=30 heapalloc_value=26421680 +GoSyscallBegin dt=35 p_seq=2 stack=56 +GoSyscallEnd dt=39 +GoSyscallBegin dt=13 p_seq=3 stack=57 +GoSyscallEnd dt=16 +GoSyscallBegin dt=396 p_seq=4 stack=58 +GoSyscallEnd dt=16 +GoSyscallBegin dt=15 p_seq=5 stack=59 +GoSyscallEnd dt=14 +HeapAlloc dt=305 heapalloc_value=26429872 +HeapAlloc dt=34 heapalloc_value=26437248 +HeapAlloc dt=42 heapalloc_value=26445120 +GoSyscallBegin dt=42 p_seq=6 stack=60 +GoSyscallEnd dt=18 +GoSyscallBegin dt=10 p_seq=7 stack=61 +GoSyscallEnd dt=14 +GoSyscallBegin dt=23 p_seq=8 stack=62 +ProcStart dt=787251 p=7 p_seq=15 +GoSyscallEndBlocked dt=7 +GoStart dt=1 g=50 g_seq=2 +GoUnblock dt=48 g=1 g_seq=75 stack=65 +GoDestroy dt=143 +GoStart dt=30 g=1 g_seq=76 +HeapAlloc dt=621 heapalloc_value=26468232 +GoStop dt=656 reason_string=16 stack=66 +GoStart dt=103 g=1 g_seq=77 +HeapAlloc dt=42 
heapalloc_value=26476424 +HeapAlloc dt=87 heapalloc_value=26484360 +GoSyscallBegin dt=18 p_seq=16 stack=67 +GoSyscallEnd dt=456 +GoSyscallBegin dt=41 p_seq=17 stack=68 +GoSyscallEnd dt=25 +GoSyscallBegin dt=16 p_seq=18 stack=69 +GoSyscallEnd dt=18 +HeapAlloc dt=193 heapalloc_value=26549896 +GoSyscallBegin dt=69 p_seq=19 stack=70 +GoSyscallEnd dt=227 +GoSyscallBegin dt=12 p_seq=20 stack=70 +GoSyscallEnd dt=105 +GoSyscallBegin dt=87 p_seq=21 stack=71 +GoSyscallEnd dt=48 +GoSyscallBegin dt=37 p_seq=22 stack=72 +GoSyscallEnd dt=51 +GoSyscallBegin dt=49 p_seq=23 stack=73 +GoSyscallEnd dt=158 +GoSyscallBegin dt=12 p_seq=24 stack=74 +GoSyscallEnd dt=67 +HeapAlloc dt=126 heapalloc_value=26558088 +HeapAlloc dt=30 heapalloc_value=26566160 +GoCreate dt=34 new_g=52 new_stack=75 stack=76 +HeapAlloc dt=205 heapalloc_value=26573872 +GoSyscallBegin dt=890 p_seq=25 stack=77 +GoSyscallEnd dt=1128 +GoBlock dt=96 reason_string=7 stack=80 +ProcStop dt=29 +ProcStart dt=384 p=6 p_seq=7 +GoStart dt=14 g=52 g_seq=4 +GoSyscallBegin dt=16 p_seq=8 stack=78 +ProcStart dt=160 p=5 p_seq=13 +GoSyscallEndBlocked dt=3 +GoStart dt=1 g=52 g_seq=5 +HeapAlloc dt=297 heapalloc_value=26581840 +HeapAlloc dt=31 heapalloc_value=26590032 +HeapAlloc dt=164 heapalloc_value=26598224 +GoSyscallBegin dt=34 p_seq=14 stack=88 +GoSyscallEnd dt=33 +GoSyscallBegin dt=14 p_seq=15 stack=89 +GoSyscallEnd dt=36 +GoSyscallBegin dt=12 p_seq=16 stack=90 +GoSyscallEnd dt=22 +GoSyscallBegin dt=15 p_seq=17 stack=91 +GoSyscallEnd dt=28 +HeapAlloc dt=18 heapalloc_value=26606416 +HeapAlloc dt=20 heapalloc_value=26614608 +GoBlock dt=16 reason_string=19 stack=92 +ProcStop dt=136 +ProcStart dt=17788 p=6 p_seq=12 +GoUnblock dt=41 g=1 g_seq=80 stack=0 +GoStart dt=136 g=1 g_seq=81 +GoSyscallBegin dt=14 p_seq=13 stack=86 +GoSyscallEnd dt=65 +GoSyscallBegin dt=72 p_seq=14 stack=95 +GoSyscallEnd dt=534 +HeapAlloc dt=284 heapalloc_value=26630992 +HeapAlloc dt=38 heapalloc_value=26639120 +EventBatch gen=1 m=1709047 time=7689670866279 
size=202 +ProcStart dt=437 p=6 p_seq=2 +HeapAlloc dt=131 heapalloc_value=26373928 +HeapAlloc dt=368 heapalloc_value=26382120 +HeapAlloc dt=55 heapalloc_value=26390056 +GoStart dt=1030 g=36 g_seq=1 +GoStop dt=293329 reason_string=16 stack=50 +GoStart dt=25 g=36 g_seq=2 +GoStop dt=315834 reason_string=16 stack=50 +GoStart dt=24 g=36 g_seq=3 +GoDestroy dt=172079 +ProcStop dt=60 +ProcStart dt=1749 p=6 p_seq=3 +ProcStop dt=1621 +ProcStart dt=64901 p=5 p_seq=4 +ProcStop dt=24 +ProcStart dt=722061 p=5 p_seq=5 +ProcStop dt=31 +ProcStart dt=2847 p=5 p_seq=8 +ProcStop dt=20 +ProcStart dt=3166 p=7 p_seq=26 +GoUnblock dt=6 g=52 g_seq=3 stack=0 +GoUnblock dt=90 g=1 g_seq=78 stack=0 +GoStart dt=5 g=1 g_seq=79 +GoSyscallBegin dt=31 p_seq=27 stack=81 +GoSyscallEnd dt=35 +GoSyscallBegin dt=134 p_seq=28 stack=82 +GoSyscallEnd dt=29 +GoSyscallBegin dt=17 p_seq=29 stack=83 +GoSyscallEnd dt=30 +GoSyscallBegin dt=8 p_seq=30 stack=84 +GoSyscallEnd dt=19 +GoSyscallBegin dt=11 p_seq=31 stack=85 +GoSyscallEnd dt=24 +GoSyscallBegin dt=65 p_seq=32 stack=86 +GoSyscallEnd dt=57 +GoBlock dt=19 reason_string=7 stack=87 +ProcStop dt=38 +ProcStart dt=458 p=6 p_seq=11 +ProcStop dt=30 +ProcStart dt=377 p=5 p_seq=18 +ProcStop dt=23 +ProcStart dt=17141 p=5 p_seq=19 +GoUnblock dt=19 g=52 g_seq=6 stack=0 +GoStart dt=111 g=52 g_seq=7 +HeapAlloc dt=38 heapalloc_value=26622800 +GoSyscallBegin dt=36 p_seq=20 stack=93 +GoSyscallEnd dt=554 +GoSyscallBegin dt=83 p_seq=21 stack=94 +GoSyscallEnd dt=196 +GoDestroy dt=15 +ProcStop dt=37 +EventBatch gen=1 m=1709046 time=7689670697530 size=167 +ProcStart dt=236 p=5 p_seq=1 +ProcStop dt=281 +ProcStart dt=1683 p=2 p_seq=14 +ProcStop dt=33 +ProcStart dt=147800 p=2 p_seq=16 +ProcStop dt=29 +ProcStart dt=3880 p=1 p_seq=28 +ProcStop dt=30 +ProcStart dt=801175 p=5 p_seq=3 +ProcStop dt=19 +ProcStart dt=47961 p=6 p_seq=4 +ProcStop dt=15 +ProcStart dt=16716 p=6 p_seq=5 +GoUnblock dt=60 g=6 g_seq=2 stack=0 +GoStart dt=90 g=6 g_seq=3 +HeapAlloc dt=193 heapalloc_value=26453304 
+GoBlock dt=29 reason_string=12 stack=15 +ProcStop dt=12 +ProcStart dt=704555 p=7 p_seq=10 +ProcStop dt=25 +ProcStart dt=16755 p=7 p_seq=11 +HeapAlloc dt=61 heapalloc_value=26461496 +GoCreate dt=72 new_g=51 new_stack=63 stack=0 +GoStart dt=98 g=51 g_seq=1 +GoSyscallBegin dt=45 p_seq=12 stack=64 +ProcStart dt=206 p=7 p_seq=14 +GoSyscallEndBlocked dt=3 +GoStart dt=1 g=51 g_seq=2 +GoDestroy dt=12 +ProcStop dt=18 +ProcStart dt=849 p=5 p_seq=6 +ProcStop dt=16 +ProcStart dt=1359 p=5 p_seq=7 +ProcStop dt=12 +ProcStart dt=2079 p=5 p_seq=9 +GoStart dt=1134 g=52 g_seq=1 +GoSyscallBegin dt=39 p_seq=10 stack=78 +ProcStart dt=232 p=5 p_seq=12 +GoSyscallEndBlocked dt=2 +GoStart dt=1 g=52 g_seq=2 +GoBlock dt=27 reason_string=7 stack=79 +ProcStop dt=20 +EventBatch gen=1 m=1709045 time=7689670544102 size=3297 +ProcStart dt=84 p=4 p_seq=5 +GoUnblock dt=91 g=1 g_seq=34 stack=0 +GoStart dt=157 g=1 g_seq=35 +HeapAlloc dt=117 heapalloc_value=8105520 +HeapAlloc dt=67 heapalloc_value=8113712 +HeapAlloc dt=36 heapalloc_value=8121904 +HeapAlloc dt=25 heapalloc_value=8130096 +HeapAlloc dt=25 heapalloc_value=8138288 +HeapAlloc dt=25 heapalloc_value=8146480 +HeapAlloc dt=21 heapalloc_value=8154672 +HeapAlloc dt=26 heapalloc_value=8162864 +HeapAlloc dt=18 heapalloc_value=8171056 +HeapAlloc dt=24 heapalloc_value=8179248 +HeapAlloc dt=15 heapalloc_value=8187440 +HeapAlloc dt=133 heapalloc_value=8195632 +HeapAlloc dt=105 heapalloc_value=8203824 +HeapAlloc dt=20 heapalloc_value=8212016 +HeapAlloc dt=18 heapalloc_value=8220208 +HeapAlloc dt=8 heapalloc_value=8228400 +HeapAlloc dt=8 heapalloc_value=8236592 +HeapAlloc dt=9 heapalloc_value=8244784 +GCMarkAssistBegin dt=27 stack=31 +HeapAlloc dt=69 heapalloc_value=8252784 +GoBlock dt=31 reason_string=10 stack=36 +ProcStop dt=156 +ProcStart dt=993 p=0 p_seq=11 +GoStart dt=192 g=1 g_seq=37 +GCMarkAssistEnd dt=12 +HeapAlloc dt=35 heapalloc_value=8746312 +GCSweepBegin dt=26 stack=42 +GCSweepEnd dt=777 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=22 
heapalloc_value=8754504 +GCSweepBegin dt=47 stack=42 +GCSweepEnd dt=662 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=11 heapalloc_value=8762696 +GCSweepBegin dt=25 stack=42 +GCSweepEnd dt=712 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=39 heapalloc_value=8770888 +GCSweepBegin dt=27 stack=42 +GCSweepEnd dt=630 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=9 heapalloc_value=8779080 +GCSweepBegin dt=25 stack=42 +GCSweepEnd dt=1256 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=8 heapalloc_value=8787272 +GCSweepBegin dt=40 stack=42 +GCSweepEnd dt=529 swept_value=360448 reclaimed_value=0 +HeapAlloc dt=9 heapalloc_value=8795464 +HeapAlloc dt=24 heapalloc_value=8803656 +HeapAlloc dt=24 heapalloc_value=8811848 +HeapAlloc dt=25 heapalloc_value=8820040 +HeapAlloc dt=23 heapalloc_value=8828232 +HeapAlloc dt=18 heapalloc_value=8836424 +HeapAlloc dt=95 heapalloc_value=8844616 +HeapAlloc dt=25 heapalloc_value=8852808 +HeapAlloc dt=23 heapalloc_value=8861000 +HeapAlloc dt=19 heapalloc_value=8869192 +HeapAlloc dt=93 heapalloc_value=8877384 +HeapAlloc dt=23 heapalloc_value=8885576 +HeapAlloc dt=23 heapalloc_value=8893768 +HeapAlloc dt=23 heapalloc_value=8901960 +HeapAlloc dt=22 heapalloc_value=8910152 +HeapAlloc dt=18 heapalloc_value=8918344 +HeapAlloc dt=174 heapalloc_value=8926536 +HeapAlloc dt=31 heapalloc_value=8934728 +HeapAlloc dt=38 heapalloc_value=8942920 +HeapAlloc dt=31 heapalloc_value=8951112 +HeapAlloc dt=57 heapalloc_value=8959304 +HeapAlloc dt=58 heapalloc_value=8967496 +HeapAlloc dt=60 heapalloc_value=8975688 +HeapAlloc dt=44 heapalloc_value=8983880 +HeapAlloc dt=53 heapalloc_value=8992072 +HeapAlloc dt=57 heapalloc_value=9000264 +HeapAlloc dt=63 heapalloc_value=9008456 +HeapAlloc dt=55 heapalloc_value=9016648 +HeapAlloc dt=28 heapalloc_value=9024840 +HeapAlloc dt=12 heapalloc_value=9033032 +HeapAlloc dt=9 heapalloc_value=9041224 +HeapAlloc dt=8 heapalloc_value=9049416 +HeapAlloc dt=7 heapalloc_value=9057608 +HeapAlloc dt=8 
heapalloc_value=9065800 +HeapAlloc dt=14 heapalloc_value=9073992 +HeapAlloc dt=8 heapalloc_value=9082184 +HeapAlloc dt=45 heapalloc_value=9090376 +HeapAlloc dt=10 heapalloc_value=9098568 +HeapAlloc dt=14 heapalloc_value=9106760 +HeapAlloc dt=8 heapalloc_value=9114952 +HeapAlloc dt=10 heapalloc_value=9123144 +HeapAlloc dt=15 heapalloc_value=9131336 +HeapAlloc dt=53 heapalloc_value=9139528 +HeapAlloc dt=27 heapalloc_value=9147720 +HeapAlloc dt=38 heapalloc_value=9155912 +HeapAlloc dt=33 heapalloc_value=9164104 +HeapAlloc dt=33 heapalloc_value=9172296 +HeapAlloc dt=34 heapalloc_value=9180488 +HeapAlloc dt=36 heapalloc_value=9188680 +HeapAlloc dt=39 heapalloc_value=9196872 +HeapAlloc dt=40 heapalloc_value=9205064 +HeapAlloc dt=59 heapalloc_value=9213256 +HeapAlloc dt=28 heapalloc_value=9221448 +HeapAlloc dt=22 heapalloc_value=9229640 +HeapAlloc dt=20 heapalloc_value=9237832 +HeapAlloc dt=25 heapalloc_value=9246024 +HeapAlloc dt=20 heapalloc_value=9254216 +HeapAlloc dt=16 heapalloc_value=9262408 +HeapAlloc dt=14 heapalloc_value=9270600 +HeapAlloc dt=18 heapalloc_value=9278792 +HeapAlloc dt=32 heapalloc_value=9286984 +HeapAlloc dt=21 heapalloc_value=9295176 +HeapAlloc dt=49 heapalloc_value=9303368 +HeapAlloc dt=23 heapalloc_value=9311560 +HeapAlloc dt=16 heapalloc_value=9319752 +HeapAlloc dt=15 heapalloc_value=9327944 +HeapAlloc dt=13 heapalloc_value=9336136 +HeapAlloc dt=15 heapalloc_value=9344328 +HeapAlloc dt=14 heapalloc_value=9352520 +HeapAlloc dt=16 heapalloc_value=9360712 +HeapAlloc dt=14 heapalloc_value=9368904 +HeapAlloc dt=19 heapalloc_value=9377096 +HeapAlloc dt=16 heapalloc_value=9385288 +HeapAlloc dt=15 heapalloc_value=9393480 +HeapAlloc dt=14 heapalloc_value=9401672 +HeapAlloc dt=16 heapalloc_value=9409864 +HeapAlloc dt=15 heapalloc_value=9418056 +HeapAlloc dt=15 heapalloc_value=9426248 +HeapAlloc dt=15 heapalloc_value=9434440 +HeapAlloc dt=18 heapalloc_value=9442632 +HeapAlloc dt=94 heapalloc_value=9450824 +HeapAlloc dt=17 heapalloc_value=9459016 
+HeapAlloc dt=14 heapalloc_value=9467208 +HeapAlloc dt=16 heapalloc_value=9475400 +HeapAlloc dt=15 heapalloc_value=9483592 +HeapAlloc dt=15 heapalloc_value=9491784 +HeapAlloc dt=15 heapalloc_value=9499976 +HeapAlloc dt=49 heapalloc_value=9508168 +HeapAlloc dt=16 heapalloc_value=9516360 +HeapAlloc dt=14 heapalloc_value=9524552 +HeapAlloc dt=15 heapalloc_value=9532744 +HeapAlloc dt=15 heapalloc_value=9540936 +HeapAlloc dt=15 heapalloc_value=9549128 +HeapAlloc dt=17 heapalloc_value=9557320 +HeapAlloc dt=15 heapalloc_value=9565512 +HeapAlloc dt=21 heapalloc_value=9573704 +HeapAlloc dt=15 heapalloc_value=9581896 +HeapAlloc dt=16 heapalloc_value=9590088 +HeapAlloc dt=14 heapalloc_value=9598280 +HeapAlloc dt=16 heapalloc_value=9606472 +HeapAlloc dt=14 heapalloc_value=9614664 +HeapAlloc dt=16 heapalloc_value=9622856 +GoBlock dt=21 reason_string=19 stack=21 +ProcStop dt=157 +ProcStart dt=17320 p=2 p_seq=6 +ProcStop dt=15 +ProcStart dt=2411 p=0 p_seq=14 +ProcStop dt=8 +ProcStart dt=16766 p=0 p_seq=15 +GoUnblock dt=9 g=1 g_seq=40 stack=0 +GoStart dt=91 g=1 g_seq=41 +HeapAlloc dt=19 heapalloc_value=10859848 +HeapAlloc dt=9 heapalloc_value=10868040 +HeapAlloc dt=7 heapalloc_value=10876232 +HeapAlloc dt=6 heapalloc_value=10884424 +HeapAlloc dt=6 heapalloc_value=10892616 +HeapAlloc dt=6 heapalloc_value=10900808 +HeapAlloc dt=6 heapalloc_value=10909000 +HeapAlloc dt=6 heapalloc_value=10917192 +HeapAlloc dt=6 heapalloc_value=10925384 +HeapAlloc dt=6 heapalloc_value=10933576 +HeapAlloc dt=6 heapalloc_value=10941768 +HeapAlloc dt=6 heapalloc_value=10949960 +HeapAlloc dt=6 heapalloc_value=10958152 +HeapAlloc dt=5 heapalloc_value=10966344 +HeapAlloc dt=6 heapalloc_value=10974536 +HeapAlloc dt=6 heapalloc_value=10982728 +HeapAlloc dt=6 heapalloc_value=10990920 +HeapAlloc dt=6 heapalloc_value=10999112 +HeapAlloc dt=6 heapalloc_value=11007304 +HeapAlloc dt=5 heapalloc_value=11015496 +HeapAlloc dt=7 heapalloc_value=11023688 +HeapAlloc dt=6 heapalloc_value=11031880 +HeapAlloc dt=14 
heapalloc_value=11040072 +HeapAlloc dt=7 heapalloc_value=11048264 +HeapAlloc dt=6 heapalloc_value=11056456 +HeapAlloc dt=6 heapalloc_value=11064648 +HeapAlloc dt=5 heapalloc_value=11072840 +HeapAlloc dt=6 heapalloc_value=11081032 +HeapAlloc dt=6 heapalloc_value=11089224 +HeapAlloc dt=6 heapalloc_value=11097416 +HeapAlloc dt=6 heapalloc_value=11105608 +HeapAlloc dt=6 heapalloc_value=11113800 +HeapAlloc dt=59 heapalloc_value=11121992 +HeapAlloc dt=9 heapalloc_value=11130184 +HeapAlloc dt=7 heapalloc_value=11138376 +HeapAlloc dt=6 heapalloc_value=11146568 +HeapAlloc dt=6 heapalloc_value=11154760 +HeapAlloc dt=5 heapalloc_value=11162952 +HeapAlloc dt=6 heapalloc_value=11171144 +HeapAlloc dt=6 heapalloc_value=11179336 +HeapAlloc dt=6 heapalloc_value=11187528 +HeapAlloc dt=5 heapalloc_value=11195720 +HeapAlloc dt=6 heapalloc_value=11203912 +HeapAlloc dt=6 heapalloc_value=11212104 +HeapAlloc dt=84 heapalloc_value=11220296 +HeapAlloc dt=7 heapalloc_value=11228488 +HeapAlloc dt=6 heapalloc_value=11236680 +HeapAlloc dt=6 heapalloc_value=11244872 +HeapAlloc dt=5 heapalloc_value=11253064 +HeapAlloc dt=6 heapalloc_value=11261256 +HeapAlloc dt=6 heapalloc_value=11269448 +HeapAlloc dt=6 heapalloc_value=11277640 +HeapAlloc dt=5 heapalloc_value=11285832 +HeapAlloc dt=6 heapalloc_value=11294024 +HeapAlloc dt=6 heapalloc_value=11302216 +HeapAlloc dt=5 heapalloc_value=11310408 +HeapAlloc dt=6 heapalloc_value=11318600 +HeapAlloc dt=38 heapalloc_value=11326792 +HeapAlloc dt=7 heapalloc_value=11334984 +HeapAlloc dt=6 heapalloc_value=11343176 +HeapAlloc dt=6 heapalloc_value=11351368 +HeapAlloc dt=5 heapalloc_value=11359560 +HeapAlloc dt=6 heapalloc_value=11367752 +HeapAlloc dt=6 heapalloc_value=11375944 +HeapAlloc dt=6 heapalloc_value=11384136 +HeapAlloc dt=6 heapalloc_value=11392328 +HeapAlloc dt=5 heapalloc_value=11400520 +HeapAlloc dt=6 heapalloc_value=11408712 +HeapAlloc dt=6 heapalloc_value=11416904 +HeapAlloc dt=5 heapalloc_value=11425096 +HeapAlloc dt=6 heapalloc_value=11433288 
+HeapAlloc dt=6 heapalloc_value=11441480 +HeapAlloc dt=6 heapalloc_value=11449672 +HeapAlloc dt=5 heapalloc_value=11457864 +HeapAlloc dt=6 heapalloc_value=11466056 +HeapAlloc dt=79 heapalloc_value=11474248 +HeapAlloc dt=6 heapalloc_value=11482440 +HeapAlloc dt=5 heapalloc_value=11490632 +HeapAlloc dt=6 heapalloc_value=11498824 +HeapAlloc dt=6 heapalloc_value=11507016 +HeapAlloc dt=6 heapalloc_value=11515208 +HeapAlloc dt=5 heapalloc_value=11523400 +HeapAlloc dt=6 heapalloc_value=11531592 +HeapAlloc dt=5 heapalloc_value=11539784 +HeapAlloc dt=6 heapalloc_value=11547976 +HeapAlloc dt=6 heapalloc_value=11556168 +HeapAlloc dt=10 heapalloc_value=11564360 +HeapAlloc dt=6 heapalloc_value=11572552 +HeapAlloc dt=24 heapalloc_value=11580744 +HeapAlloc dt=7 heapalloc_value=11588936 +HeapAlloc dt=5 heapalloc_value=11597128 +HeapAlloc dt=6 heapalloc_value=11605320 +HeapAlloc dt=6 heapalloc_value=11613512 +HeapAlloc dt=6 heapalloc_value=11621704 +HeapAlloc dt=5 heapalloc_value=11629896 +HeapAlloc dt=6 heapalloc_value=11638088 +HeapAlloc dt=6 heapalloc_value=11646280 +HeapAlloc dt=5 heapalloc_value=11654472 +HeapAlloc dt=6 heapalloc_value=11662664 +HeapAlloc dt=6 heapalloc_value=11670856 +HeapAlloc dt=6 heapalloc_value=11679048 +HeapAlloc dt=5 heapalloc_value=11687240 +HeapAlloc dt=6 heapalloc_value=11695432 +HeapAlloc dt=6 heapalloc_value=11703624 +HeapAlloc dt=6 heapalloc_value=11711816 +HeapAlloc dt=5 heapalloc_value=11720008 +HeapAlloc dt=6 heapalloc_value=11728200 +HeapAlloc dt=6 heapalloc_value=11736392 +HeapAlloc dt=70 heapalloc_value=11744584 +HeapAlloc dt=8 heapalloc_value=11752776 +HeapAlloc dt=5 heapalloc_value=11760968 +HeapAlloc dt=6 heapalloc_value=11769160 +HeapAlloc dt=5 heapalloc_value=11777352 +HeapAlloc dt=6 heapalloc_value=11785544 +HeapAlloc dt=6 heapalloc_value=11793736 +HeapAlloc dt=6 heapalloc_value=11801928 +HeapAlloc dt=5 heapalloc_value=11810120 +HeapAlloc dt=6 heapalloc_value=11818312 +HeapAlloc dt=6 heapalloc_value=11826504 +HeapAlloc dt=6 
heapalloc_value=11834696 +HeapAlloc dt=6 heapalloc_value=11842888 +HeapAlloc dt=5 heapalloc_value=11851080 +HeapAlloc dt=6 heapalloc_value=11859272 +HeapAlloc dt=5 heapalloc_value=11867464 +HeapAlloc dt=6 heapalloc_value=11875656 +GoBlock dt=9 reason_string=19 stack=21 +ProcStop dt=105 +ProcStart dt=17283 p=2 p_seq=8 +ProcStop dt=12 +ProcStart dt=4008 p=0 p_seq=18 +ProcStop dt=9 +ProcStart dt=16692 p=0 p_seq=19 +GoUnblock dt=9 g=1 g_seq=44 stack=0 +GoStart dt=76 g=1 g_seq=45 +HeapAlloc dt=16 heapalloc_value=13169992 +HeapAlloc dt=9 heapalloc_value=13178184 +HeapAlloc dt=7 heapalloc_value=13186376 +HeapAlloc dt=5 heapalloc_value=13194568 +HeapAlloc dt=6 heapalloc_value=13202760 +HeapAlloc dt=6 heapalloc_value=13210952 +HeapAlloc dt=5 heapalloc_value=13219144 +HeapAlloc dt=6 heapalloc_value=13227336 +HeapAlloc dt=6 heapalloc_value=13235528 +HeapAlloc dt=6 heapalloc_value=13243720 +HeapAlloc dt=6 heapalloc_value=13251912 +HeapAlloc dt=59 heapalloc_value=13260104 +HeapAlloc dt=8 heapalloc_value=13268296 +HeapAlloc dt=6 heapalloc_value=13276488 +HeapAlloc dt=5 heapalloc_value=13284680 +HeapAlloc dt=6 heapalloc_value=13292872 +HeapAlloc dt=5 heapalloc_value=13301064 +HeapAlloc dt=6 heapalloc_value=13309256 +HeapAlloc dt=5 heapalloc_value=13317448 +HeapAlloc dt=6 heapalloc_value=13325640 +HeapAlloc dt=6 heapalloc_value=13333832 +HeapAlloc dt=6 heapalloc_value=13342024 +HeapAlloc dt=5 heapalloc_value=13350216 +HeapAlloc dt=6 heapalloc_value=13358408 +HeapAlloc dt=6 heapalloc_value=13366600 +HeapAlloc dt=5 heapalloc_value=13374792 +HeapAlloc dt=6 heapalloc_value=13382984 +HeapAlloc dt=6 heapalloc_value=13391176 +HeapAlloc dt=6 heapalloc_value=13399368 +HeapAlloc dt=5 heapalloc_value=13407560 +HeapAlloc dt=8 heapalloc_value=13415752 +HeapAlloc dt=6 heapalloc_value=13423944 +HeapAlloc dt=7 heapalloc_value=13432136 +HeapAlloc dt=5 heapalloc_value=13440328 +HeapAlloc dt=6 heapalloc_value=13448520 +HeapAlloc dt=5 heapalloc_value=13456712 +HeapAlloc dt=6 heapalloc_value=13464904 
+HeapAlloc dt=6 heapalloc_value=13473096 +HeapAlloc dt=6 heapalloc_value=13481288 +HeapAlloc dt=5 heapalloc_value=13489480 +HeapAlloc dt=5 heapalloc_value=13497672 +HeapAlloc dt=6 heapalloc_value=13505864 +HeapAlloc dt=5 heapalloc_value=13514056 +HeapAlloc dt=6 heapalloc_value=13522248 +HeapAlloc dt=5 heapalloc_value=13530440 +HeapAlloc dt=6 heapalloc_value=13538632 +HeapAlloc dt=5 heapalloc_value=13546824 +HeapAlloc dt=6 heapalloc_value=13555016 +HeapAlloc dt=6 heapalloc_value=13563208 +HeapAlloc dt=48 heapalloc_value=13571400 +HeapAlloc dt=7 heapalloc_value=13579592 +HeapAlloc dt=6 heapalloc_value=13587784 +HeapAlloc dt=5 heapalloc_value=13595976 +HeapAlloc dt=6 heapalloc_value=13604168 +HeapAlloc dt=5 heapalloc_value=13612360 +HeapAlloc dt=6 heapalloc_value=13620552 +HeapAlloc dt=5 heapalloc_value=13628744 +HeapAlloc dt=6 heapalloc_value=13636936 +HeapAlloc dt=5 heapalloc_value=13645128 +HeapAlloc dt=6 heapalloc_value=13653320 +HeapAlloc dt=14 heapalloc_value=13661512 +HeapAlloc dt=6 heapalloc_value=13669704 +HeapAlloc dt=6 heapalloc_value=13677896 +HeapAlloc dt=35 heapalloc_value=13686088 +HeapAlloc dt=7 heapalloc_value=13694280 +HeapAlloc dt=6 heapalloc_value=13702472 +HeapAlloc dt=6 heapalloc_value=13710664 +HeapAlloc dt=5 heapalloc_value=13718856 +HeapAlloc dt=6 heapalloc_value=13727048 +HeapAlloc dt=6 heapalloc_value=13735240 +HeapAlloc dt=5 heapalloc_value=13743432 +HeapAlloc dt=6 heapalloc_value=13751624 +HeapAlloc dt=5 heapalloc_value=13759816 +HeapAlloc dt=6 heapalloc_value=13768008 +HeapAlloc dt=5 heapalloc_value=13776200 +HeapAlloc dt=5 heapalloc_value=13784392 +HeapAlloc dt=6 heapalloc_value=13792584 +HeapAlloc dt=6 heapalloc_value=13800776 +HeapAlloc dt=5 heapalloc_value=13808968 +HeapAlloc dt=6 heapalloc_value=13817160 +HeapAlloc dt=5 heapalloc_value=13825352 +HeapAlloc dt=6 heapalloc_value=13833544 +HeapAlloc dt=5 heapalloc_value=13841736 +HeapAlloc dt=6 heapalloc_value=13849928 +HeapAlloc dt=5 heapalloc_value=13858120 +HeapAlloc dt=6 
heapalloc_value=13866312 +HeapAlloc dt=5 heapalloc_value=13874504 +HeapAlloc dt=5 heapalloc_value=13882696 +HeapAlloc dt=6 heapalloc_value=13890888 +HeapAlloc dt=5 heapalloc_value=13899080 +HeapAlloc dt=6 heapalloc_value=13907272 +HeapAlloc dt=5 heapalloc_value=13915464 +HeapAlloc dt=6 heapalloc_value=13923656 +HeapAlloc dt=21 heapalloc_value=13931848 +HeapAlloc dt=6 heapalloc_value=13940040 +HeapAlloc dt=6 heapalloc_value=13948232 +HeapAlloc dt=6 heapalloc_value=13956424 +HeapAlloc dt=6 heapalloc_value=13964616 +HeapAlloc dt=5 heapalloc_value=13972808 +HeapAlloc dt=5 heapalloc_value=13981000 +HeapAlloc dt=6 heapalloc_value=13989192 +HeapAlloc dt=6 heapalloc_value=13997384 +HeapAlloc dt=5 heapalloc_value=14005576 +HeapAlloc dt=6 heapalloc_value=14013768 +HeapAlloc dt=5 heapalloc_value=14021960 +HeapAlloc dt=6 heapalloc_value=14030152 +HeapAlloc dt=6 heapalloc_value=14038344 +HeapAlloc dt=5 heapalloc_value=14046536 +HeapAlloc dt=6 heapalloc_value=14054728 +HeapAlloc dt=5 heapalloc_value=14062920 +HeapAlloc dt=6 heapalloc_value=14071112 +HeapAlloc dt=5 heapalloc_value=14079304 +HeapAlloc dt=5 heapalloc_value=14087496 +HeapAlloc dt=76 heapalloc_value=14095688 +HeapAlloc dt=35 heapalloc_value=14103880 +HeapAlloc dt=7 heapalloc_value=14112072 +HeapAlloc dt=5 heapalloc_value=14120264 +HeapAlloc dt=6 heapalloc_value=14128456 +HeapAlloc dt=7 heapalloc_value=14136648 +HeapAlloc dt=5 heapalloc_value=14144840 +HeapAlloc dt=5 heapalloc_value=14153032 +HeapAlloc dt=6 heapalloc_value=14161224 +HeapAlloc dt=5 heapalloc_value=14169416 +HeapAlloc dt=6 heapalloc_value=14177608 +HeapAlloc dt=10 heapalloc_value=14185800 +GoBlock dt=9 reason_string=19 stack=21 +ProcStop dt=108 +ProcStart dt=17296 p=2 p_seq=10 +ProcStop dt=12 +ProcStart dt=3626 p=0 p_seq=22 +ProcStop dt=8 +ProcStart dt=16715 p=0 p_seq=23 +GoUnblock dt=6 g=1 g_seq=48 stack=0 +GoStart dt=79 g=1 g_seq=49 +HeapAlloc dt=15 heapalloc_value=15553864 +HeapAlloc dt=13 heapalloc_value=15562056 +HeapAlloc dt=15 
heapalloc_value=15570248 +HeapAlloc dt=7 heapalloc_value=15578440 +HeapAlloc dt=6 heapalloc_value=15586632 +HeapAlloc dt=6 heapalloc_value=15594824 +HeapAlloc dt=6 heapalloc_value=15603016 +HeapAlloc dt=6 heapalloc_value=15611208 +HeapAlloc dt=5 heapalloc_value=15619400 +HeapAlloc dt=6 heapalloc_value=15627592 +HeapAlloc dt=6 heapalloc_value=15635784 +HeapAlloc dt=5 heapalloc_value=15643976 +HeapAlloc dt=6 heapalloc_value=15652168 +HeapAlloc dt=5 heapalloc_value=15660360 +HeapAlloc dt=6 heapalloc_value=15668552 +HeapAlloc dt=6 heapalloc_value=15676744 +HeapAlloc dt=57 heapalloc_value=15684936 +HeapAlloc dt=7 heapalloc_value=15693128 +HeapAlloc dt=6 heapalloc_value=15701320 +HeapAlloc dt=6 heapalloc_value=15709512 +HeapAlloc dt=5 heapalloc_value=15717704 +HeapAlloc dt=6 heapalloc_value=15725896 +HeapAlloc dt=5 heapalloc_value=15734088 +HeapAlloc dt=6 heapalloc_value=15742280 +HeapAlloc dt=6 heapalloc_value=15750472 +HeapAlloc dt=10 heapalloc_value=15758664 +HeapAlloc dt=6 heapalloc_value=15766856 +HeapAlloc dt=6 heapalloc_value=15775048 +HeapAlloc dt=5 heapalloc_value=15783240 +HeapAlloc dt=6 heapalloc_value=15791432 +HeapAlloc dt=6 heapalloc_value=15799624 +HeapAlloc dt=6 heapalloc_value=15807816 +HeapAlloc dt=6 heapalloc_value=15816008 +HeapAlloc dt=7 heapalloc_value=15824200 +HeapAlloc dt=6 heapalloc_value=15832392 +HeapAlloc dt=6 heapalloc_value=15840584 +HeapAlloc dt=5 heapalloc_value=15848776 +HeapAlloc dt=6 heapalloc_value=15856968 +HeapAlloc dt=6 heapalloc_value=15865160 +HeapAlloc dt=6 heapalloc_value=15873352 +HeapAlloc dt=5 heapalloc_value=15881544 +HeapAlloc dt=6 heapalloc_value=15889736 +HeapAlloc dt=6 heapalloc_value=15897928 +HeapAlloc dt=5 heapalloc_value=15906120 +HeapAlloc dt=6 heapalloc_value=15914312 +HeapAlloc dt=5 heapalloc_value=15922504 +HeapAlloc dt=6 heapalloc_value=15930696 +HeapAlloc dt=5 heapalloc_value=15938888 +HeapAlloc dt=6 heapalloc_value=15947080 +HeapAlloc dt=5 heapalloc_value=15955272 +HeapAlloc dt=6 heapalloc_value=15963464 
+HeapAlloc dt=6 heapalloc_value=15971656 +HeapAlloc dt=5 heapalloc_value=15979848 +HeapAlloc dt=6 heapalloc_value=15988040 +HeapAlloc dt=44 heapalloc_value=15996232 +HeapAlloc dt=8 heapalloc_value=16004424 +HeapAlloc dt=5 heapalloc_value=16012616 +HeapAlloc dt=6 heapalloc_value=16020808 +HeapAlloc dt=5 heapalloc_value=16029000 +HeapAlloc dt=6 heapalloc_value=16037192 +HeapAlloc dt=5 heapalloc_value=16045384 +HeapAlloc dt=6 heapalloc_value=16053576 +HeapAlloc dt=5 heapalloc_value=16061768 +HeapAlloc dt=6 heapalloc_value=16069960 +HeapAlloc dt=5 heapalloc_value=16078152 +HeapAlloc dt=6 heapalloc_value=16086344 +HeapAlloc dt=5 heapalloc_value=16094536 +HeapAlloc dt=6 heapalloc_value=16102728 +HeapAlloc dt=36 heapalloc_value=16110920 +HeapAlloc dt=8 heapalloc_value=16119112 +HeapAlloc dt=6 heapalloc_value=16127304 +HeapAlloc dt=5 heapalloc_value=16135496 +HeapAlloc dt=6 heapalloc_value=16143688 +HeapAlloc dt=5 heapalloc_value=16151880 +HeapAlloc dt=5 heapalloc_value=16160072 +HeapAlloc dt=5 heapalloc_value=16168264 +HeapAlloc dt=5 heapalloc_value=16176456 +HeapAlloc dt=5 heapalloc_value=16184648 +HeapAlloc dt=6 heapalloc_value=16192840 +HeapAlloc dt=5 heapalloc_value=16201032 +HeapAlloc dt=5 heapalloc_value=16209224 +HeapAlloc dt=5 heapalloc_value=16217416 +HeapAlloc dt=5 heapalloc_value=16225608 +HeapAlloc dt=6 heapalloc_value=16233800 +HeapAlloc dt=5 heapalloc_value=16241992 +HeapAlloc dt=73 heapalloc_value=16250184 +HeapAlloc dt=6 heapalloc_value=16258376 +HeapAlloc dt=5 heapalloc_value=16266568 +HeapAlloc dt=6 heapalloc_value=16274760 +HeapAlloc dt=371 heapalloc_value=16282952 +HeapAlloc dt=13 heapalloc_value=16291144 +HeapAlloc dt=7 heapalloc_value=16299336 +HeapAlloc dt=6 heapalloc_value=16307528 +HeapAlloc dt=6 heapalloc_value=16315720 +HeapAlloc dt=5 heapalloc_value=16323912 +HeapAlloc dt=6 heapalloc_value=16332104 +HeapAlloc dt=5 heapalloc_value=16340296 +HeapAlloc dt=5 heapalloc_value=16348488 +HeapAlloc dt=22 heapalloc_value=16356680 +HeapAlloc dt=6 
heapalloc_value=16364872 +HeapAlloc dt=5 heapalloc_value=16373064 +HeapAlloc dt=6 heapalloc_value=16381256 +HeapAlloc dt=5 heapalloc_value=16389448 +HeapAlloc dt=5 heapalloc_value=16397640 +HeapAlloc dt=5 heapalloc_value=16405832 +HeapAlloc dt=5 heapalloc_value=16414024 +HeapAlloc dt=5 heapalloc_value=16422216 +HeapAlloc dt=6 heapalloc_value=16430408 +HeapAlloc dt=5 heapalloc_value=16438600 +HeapAlloc dt=6 heapalloc_value=16446792 +HeapAlloc dt=5 heapalloc_value=16454984 +HeapAlloc dt=5 heapalloc_value=16463176 +HeapAlloc dt=6 heapalloc_value=16471368 +HeapAlloc dt=5 heapalloc_value=16479560 +HeapAlloc dt=5 heapalloc_value=16487752 +HeapAlloc dt=5 heapalloc_value=16495944 +HeapAlloc dt=6 heapalloc_value=16504136 +HeapAlloc dt=5 heapalloc_value=16512328 +HeapAlloc dt=45 heapalloc_value=16520520 +HeapAlloc dt=38 heapalloc_value=16528712 +HeapAlloc dt=7 heapalloc_value=16536904 +HeapAlloc dt=5 heapalloc_value=16545096 +HeapAlloc dt=5 heapalloc_value=16553288 +HeapAlloc dt=6 heapalloc_value=16561480 +HeapAlloc dt=5 heapalloc_value=16569672 +GoBlock dt=11 reason_string=19 stack=21 +ProcStop dt=109 +ProcStart dt=18122 p=2 p_seq=12 +ProcStop dt=23 +ProcStart dt=803 p=1 p_seq=12 +GoUnblock dt=12 g=24 g_seq=10 stack=0 +GoStart dt=143 g=24 g_seq=11 +GoLabel dt=2 label_string=2 +GoBlock dt=3389 reason_string=15 stack=27 +ProcStop dt=2403 +ProcStart dt=161103 p=4 p_seq=8 +GoStart dt=172 g=38 g_seq=1 +GoStop dt=304901 reason_string=16 stack=50 +GoStart dt=21 g=38 g_seq=2 +GoStop dt=315468 reason_string=16 stack=50 +GoStart dt=20 g=38 g_seq=3 +GoDestroy dt=160861 +ProcStop dt=34 +EventBatch gen=1 m=1709044 time=7689670489757 size=2312 +ProcStart dt=310 p=3 p_seq=2 +ProcStop dt=39 +ProcStart dt=1386 p=3 p_seq=3 +ProcStop dt=138 +ProcStart dt=3920 p=0 p_seq=5 +GoStart dt=266 g=24 g_seq=7 +GoUnblock dt=50 g=1 g_seq=25 stack=41 +GoBlock dt=13 reason_string=15 stack=27 +GoStart dt=7 g=1 g_seq=26 +GCMarkAssistEnd dt=6 +HeapAlloc dt=29 heapalloc_value=3843824 +GCSweepBegin dt=57 
stack=42 +GCSweepEnd dt=816 swept_value=827392 reclaimed_value=0 +GCSweepBegin dt=310 stack=43 +GCSweepEnd dt=63 swept_value=67108864 reclaimed_value=0 +HeapAlloc dt=23 heapalloc_value=3852016 +HeapAlloc dt=46 heapalloc_value=3860208 +HeapAlloc dt=27 heapalloc_value=3868400 +HeapAlloc dt=16 heapalloc_value=3876592 +HeapAlloc dt=109 heapalloc_value=3884784 +HeapAlloc dt=32 heapalloc_value=3892976 +HeapAlloc dt=33 heapalloc_value=3901168 +HeapAlloc dt=26 heapalloc_value=3909360 +HeapAlloc dt=35 heapalloc_value=3917552 +HeapAlloc dt=16 heapalloc_value=3925744 +HeapAlloc dt=16 heapalloc_value=3933936 +HeapAlloc dt=16 heapalloc_value=3942128 +HeapAlloc dt=68 heapalloc_value=3950320 +HeapAlloc dt=21 heapalloc_value=3958512 +HeapAlloc dt=20 heapalloc_value=3966704 +HeapAlloc dt=15 heapalloc_value=3974896 +HeapAlloc dt=24 heapalloc_value=3983088 +HeapAlloc dt=15 heapalloc_value=3991280 +HeapAlloc dt=16 heapalloc_value=3999472 +HeapAlloc dt=15 heapalloc_value=4007664 +HeapAlloc dt=18 heapalloc_value=4015856 +HeapAlloc dt=15 heapalloc_value=4024048 +HeapAlloc dt=21 heapalloc_value=4032240 +HeapAlloc dt=26 heapalloc_value=4040432 +HeapAlloc dt=28 heapalloc_value=4048624 +HeapAlloc dt=16 heapalloc_value=4056816 +HeapAlloc dt=16 heapalloc_value=4065008 +HeapAlloc dt=16 heapalloc_value=4073200 +HeapAlloc dt=17 heapalloc_value=4081392 +HeapAlloc dt=15 heapalloc_value=4089584 +HeapAlloc dt=19 heapalloc_value=4097776 +HeapAlloc dt=15 heapalloc_value=4105968 +HeapAlloc dt=20 heapalloc_value=4114160 +HeapAlloc dt=15 heapalloc_value=4122352 +HeapAlloc dt=16 heapalloc_value=4130544 +HeapAlloc dt=16 heapalloc_value=4138736 +HeapAlloc dt=17 heapalloc_value=4146928 +HeapAlloc dt=15 heapalloc_value=4155120 +HeapAlloc dt=20 heapalloc_value=4163312 +HeapAlloc dt=18 heapalloc_value=4171504 +HeapAlloc dt=23 heapalloc_value=4179696 +HeapAlloc dt=18 heapalloc_value=4187888 +HeapAlloc dt=20 heapalloc_value=4196080 +HeapAlloc dt=19 heapalloc_value=4204272 +HeapAlloc dt=19 heapalloc_value=4212464 
+HeapAlloc dt=105 heapalloc_value=4220656 +HeapAlloc dt=45 heapalloc_value=4228848 +HeapAlloc dt=22 heapalloc_value=4237040 +HeapAlloc dt=23 heapalloc_value=4245232 +HeapAlloc dt=29 heapalloc_value=4253424 +HeapAlloc dt=21 heapalloc_value=4261616 +HeapAlloc dt=56 heapalloc_value=4269808 +HeapAlloc dt=21 heapalloc_value=4278000 +HeapAlloc dt=25 heapalloc_value=4286192 +HeapAlloc dt=15 heapalloc_value=4294384 +HeapAlloc dt=60 heapalloc_value=4302576 +HeapAlloc dt=40 heapalloc_value=4359920 +HeapAlloc dt=152 heapalloc_value=4368112 +HeapAlloc dt=30 heapalloc_value=4376304 +HeapAlloc dt=27 heapalloc_value=4384496 +HeapAlloc dt=20 heapalloc_value=4392688 +HeapAlloc dt=32 heapalloc_value=4400880 +HeapAlloc dt=25 heapalloc_value=4409072 +HeapAlloc dt=48 heapalloc_value=4417264 +HeapAlloc dt=58 heapalloc_value=4425456 +HeapAlloc dt=30 heapalloc_value=4433648 +HeapAlloc dt=23 heapalloc_value=4441840 +HeapAlloc dt=16 heapalloc_value=4450032 +HeapAlloc dt=17 heapalloc_value=4458224 +HeapAlloc dt=16 heapalloc_value=4466416 +HeapAlloc dt=19 heapalloc_value=4474608 +HeapAlloc dt=16 heapalloc_value=4482800 +HeapAlloc dt=15 heapalloc_value=4490992 +HeapAlloc dt=16 heapalloc_value=4499184 +HeapAlloc dt=16 heapalloc_value=4507376 +HeapAlloc dt=15 heapalloc_value=4515568 +HeapAlloc dt=16 heapalloc_value=4523760 +HeapAlloc dt=16 heapalloc_value=4531952 +HeapAlloc dt=21 heapalloc_value=4540144 +HeapAlloc dt=25 heapalloc_value=4548336 +HeapAlloc dt=22 heapalloc_value=4556528 +HeapAlloc dt=59 heapalloc_value=4564720 +HeapAlloc dt=21 heapalloc_value=4572912 +HeapAlloc dt=16 heapalloc_value=4581104 +HeapAlloc dt=16 heapalloc_value=4589296 +HeapAlloc dt=15 heapalloc_value=4597488 +HeapAlloc dt=24 heapalloc_value=4605680 +HeapAlloc dt=12 heapalloc_value=4613872 +HeapAlloc dt=8 heapalloc_value=4622064 +HeapAlloc dt=11 heapalloc_value=4630256 +HeapAlloc dt=7 heapalloc_value=4638448 +HeapAlloc dt=7 heapalloc_value=4646640 +HeapAlloc dt=7 heapalloc_value=4654832 +GoBlock dt=31 reason_string=19 
stack=21 +ProcStop dt=34 +ProcStart dt=6196 p=4 p_seq=2 +ProcStop dt=26 +ProcStart dt=1578 p=0 p_seq=7 +ProcStop dt=12 +ProcStart dt=16743 p=0 p_seq=8 +GoUnblock dt=21 g=1 g_seq=29 stack=0 +GoStart dt=147 g=1 g_seq=30 +HeapAlloc dt=51 heapalloc_value=5768944 +HeapAlloc dt=22 heapalloc_value=5777136 +HeapAlloc dt=16 heapalloc_value=5785328 +HeapAlloc dt=15 heapalloc_value=5793520 +HeapAlloc dt=16 heapalloc_value=5801712 +HeapAlloc dt=18 heapalloc_value=5809904 +HeapAlloc dt=15 heapalloc_value=5818096 +HeapAlloc dt=15 heapalloc_value=5826288 +HeapAlloc dt=12 heapalloc_value=5834480 +HeapAlloc dt=12 heapalloc_value=5842672 +HeapAlloc dt=15 heapalloc_value=5850864 +HeapAlloc dt=16 heapalloc_value=5859056 +HeapAlloc dt=12 heapalloc_value=5867248 +HeapAlloc dt=12 heapalloc_value=5875440 +HeapAlloc dt=6 heapalloc_value=5883632 +HeapAlloc dt=8 heapalloc_value=5891824 +HeapAlloc dt=6 heapalloc_value=5900016 +HeapAlloc dt=6 heapalloc_value=5908208 +HeapAlloc dt=98 heapalloc_value=5916400 +HeapAlloc dt=21 heapalloc_value=5924592 +HeapAlloc dt=5 heapalloc_value=5932784 +HeapAlloc dt=7 heapalloc_value=5940976 +HeapAlloc dt=6 heapalloc_value=5949168 +HeapAlloc dt=9 heapalloc_value=5957360 +HeapAlloc dt=6 heapalloc_value=5965552 +HeapAlloc dt=5 heapalloc_value=5973744 +HeapAlloc dt=7 heapalloc_value=5981936 +HeapAlloc dt=5 heapalloc_value=5990128 +HeapAlloc dt=6 heapalloc_value=5998320 +HeapAlloc dt=5 heapalloc_value=6006512 +HeapAlloc dt=6 heapalloc_value=6014704 +HeapAlloc dt=9 heapalloc_value=6022896 +HeapAlloc dt=5 heapalloc_value=6031088 +HeapAlloc dt=6 heapalloc_value=6039280 +HeapAlloc dt=6 heapalloc_value=6047472 +HeapAlloc dt=40 heapalloc_value=6055664 +HeapAlloc dt=6 heapalloc_value=6063856 +HeapAlloc dt=35 heapalloc_value=6072048 +HeapAlloc dt=8 heapalloc_value=6080240 +HeapAlloc dt=9 heapalloc_value=6088432 +HeapAlloc dt=5 heapalloc_value=6096624 +HeapAlloc dt=6 heapalloc_value=6104816 +HeapAlloc dt=5 heapalloc_value=6113008 +HeapAlloc dt=6 heapalloc_value=6121200 
+HeapAlloc dt=6 heapalloc_value=6129392 +HeapAlloc dt=6 heapalloc_value=6137584 +HeapAlloc dt=5 heapalloc_value=6145776 +HeapAlloc dt=9 heapalloc_value=6153968 +HeapAlloc dt=5 heapalloc_value=6162160 +HeapAlloc dt=6 heapalloc_value=6170352 +HeapAlloc dt=6 heapalloc_value=6178544 +HeapAlloc dt=8 heapalloc_value=6186736 +HeapAlloc dt=11 heapalloc_value=6301424 +HeapAlloc dt=2483 heapalloc_value=6309616 +HeapAlloc dt=9 heapalloc_value=6317808 +HeapAlloc dt=7 heapalloc_value=6326000 +HeapAlloc dt=11 heapalloc_value=6334192 +HeapAlloc dt=6 heapalloc_value=6342384 +HeapAlloc dt=6 heapalloc_value=6350576 +HeapAlloc dt=6 heapalloc_value=6358768 +HeapAlloc dt=7 heapalloc_value=6366960 +HeapAlloc dt=9 heapalloc_value=6375152 +HeapAlloc dt=5 heapalloc_value=6383344 +HeapAlloc dt=6 heapalloc_value=6391536 +HeapAlloc dt=6 heapalloc_value=6399728 +HeapAlloc dt=5 heapalloc_value=6407920 +HeapAlloc dt=5 heapalloc_value=6416112 +HeapAlloc dt=6 heapalloc_value=6424304 +HeapAlloc dt=9 heapalloc_value=6432496 +HeapAlloc dt=8 heapalloc_value=6440688 +HeapAlloc dt=9 heapalloc_value=6448880 +HeapAlloc dt=6 heapalloc_value=6457072 +HeapAlloc dt=13 heapalloc_value=6465264 +HeapAlloc dt=6 heapalloc_value=6473456 +HeapAlloc dt=5 heapalloc_value=6481648 +HeapAlloc dt=6 heapalloc_value=6489840 +HeapAlloc dt=5 heapalloc_value=6498032 +HeapAlloc dt=6 heapalloc_value=6506224 +HeapAlloc dt=8 heapalloc_value=6514416 +HeapAlloc dt=6 heapalloc_value=6522608 +HeapAlloc dt=6 heapalloc_value=6530800 +HeapAlloc dt=5 heapalloc_value=6538992 +HeapAlloc dt=81 heapalloc_value=6547184 +HeapAlloc dt=7 heapalloc_value=6555376 +HeapAlloc dt=6 heapalloc_value=6563568 +HeapAlloc dt=5 heapalloc_value=6571760 +HeapAlloc dt=20 heapalloc_value=6579952 +HeapAlloc dt=6 heapalloc_value=6588144 +HeapAlloc dt=56 heapalloc_value=6596336 +HeapAlloc dt=7 heapalloc_value=6604528 +HeapAlloc dt=7 heapalloc_value=6612720 +HeapAlloc dt=6 heapalloc_value=6620912 +HeapAlloc dt=5 heapalloc_value=6629104 +HeapAlloc dt=5 
heapalloc_value=6637296 +HeapAlloc dt=6 heapalloc_value=6645488 +HeapAlloc dt=5 heapalloc_value=6653680 +HeapAlloc dt=5 heapalloc_value=6661872 +HeapAlloc dt=6 heapalloc_value=6670064 +HeapAlloc dt=5 heapalloc_value=6678256 +HeapAlloc dt=5 heapalloc_value=6686448 +HeapAlloc dt=6 heapalloc_value=6694640 +HeapAlloc dt=5 heapalloc_value=6702832 +HeapAlloc dt=5 heapalloc_value=6711024 +HeapAlloc dt=6 heapalloc_value=6719216 +HeapAlloc dt=9 heapalloc_value=6727408 +HeapAlloc dt=7 heapalloc_value=6735600 +HeapAlloc dt=5 heapalloc_value=6743792 +HeapAlloc dt=5 heapalloc_value=6751984 +HeapAlloc dt=6 heapalloc_value=6760176 +HeapAlloc dt=5 heapalloc_value=6768368 +HeapAlloc dt=5 heapalloc_value=6776560 +HeapAlloc dt=6 heapalloc_value=6784752 +HeapAlloc dt=5 heapalloc_value=6792944 +HeapAlloc dt=6 heapalloc_value=6801136 +HeapAlloc dt=36 heapalloc_value=6809328 +HeapAlloc dt=7 heapalloc_value=6817520 +HeapAlloc dt=5 heapalloc_value=6825712 +HeapAlloc dt=6 heapalloc_value=6833904 +HeapAlloc dt=6 heapalloc_value=6842096 +HeapAlloc dt=5 heapalloc_value=6850288 +HeapAlloc dt=6 heapalloc_value=6858480 +HeapAlloc dt=5 heapalloc_value=6866672 +HeapAlloc dt=5 heapalloc_value=6874864 +HeapAlloc dt=5 heapalloc_value=6883056 +HeapAlloc dt=5 heapalloc_value=6891248 +HeapAlloc dt=6 heapalloc_value=6899440 +GoBlock dt=14 reason_string=19 stack=21 +ProcStop dt=198 +ProcStart dt=2996 p=0 p_seq=10 +GoUnblock dt=12 g=1 g_seq=31 stack=0 +GoStart dt=135 g=1 g_seq=32 +HeapAlloc dt=25 heapalloc_value=6907632 +HeapAlloc dt=9 heapalloc_value=6915824 +HeapAlloc dt=6 heapalloc_value=6924016 +HeapAlloc dt=5 heapalloc_value=6932208 +HeapAlloc dt=6 heapalloc_value=6940400 +HeapAlloc dt=5 heapalloc_value=6948592 +HeapAlloc dt=5 heapalloc_value=6956784 +HeapAlloc dt=6 heapalloc_value=6964976 +HeapAlloc dt=5 heapalloc_value=6973168 +HeapAlloc dt=6 heapalloc_value=6981360 +HeapAlloc dt=5 heapalloc_value=6989552 +HeapAlloc dt=5 heapalloc_value=6997744 +HeapAlloc dt=5 heapalloc_value=7005936 +HeapAlloc dt=97 
heapalloc_value=7014128 +HeapAlloc dt=7 heapalloc_value=7022320 +HeapAlloc dt=5 heapalloc_value=7030512 +HeapAlloc dt=6 heapalloc_value=7038704 +HeapAlloc dt=5 heapalloc_value=7046896 +HeapAlloc dt=5 heapalloc_value=7055088 +HeapAlloc dt=5 heapalloc_value=7063280 +HeapAlloc dt=50 heapalloc_value=7071472 +HeapAlloc dt=7 heapalloc_value=7079664 +HeapAlloc dt=6 heapalloc_value=7087856 +HeapAlloc dt=5 heapalloc_value=7096048 +HeapAlloc dt=20 heapalloc_value=7104240 +HeapAlloc dt=6 heapalloc_value=7112432 +HeapAlloc dt=8 heapalloc_value=7120624 +HeapAlloc dt=6 heapalloc_value=7128816 +HeapAlloc dt=5 heapalloc_value=7137008 +HeapAlloc dt=6 heapalloc_value=7145200 +HeapAlloc dt=8 heapalloc_value=7153392 +HeapAlloc dt=6 heapalloc_value=7161584 +HeapAlloc dt=5 heapalloc_value=7169776 +HeapAlloc dt=5 heapalloc_value=7177968 +HeapAlloc dt=6 heapalloc_value=7186160 +HeapAlloc dt=5 heapalloc_value=7194352 +HeapAlloc dt=5 heapalloc_value=7202544 +HeapAlloc dt=6 heapalloc_value=7210736 +HeapAlloc dt=5 heapalloc_value=7218928 +HeapAlloc dt=35 heapalloc_value=7227120 +HeapAlloc dt=10 heapalloc_value=7235312 +HeapAlloc dt=5 heapalloc_value=7243504 +HeapAlloc dt=5 heapalloc_value=7251696 +HeapAlloc dt=6 heapalloc_value=7259888 +HeapAlloc dt=5 heapalloc_value=7268080 +HeapAlloc dt=5 heapalloc_value=7276272 +HeapAlloc dt=5 heapalloc_value=7284464 +HeapAlloc dt=6 heapalloc_value=7292656 +HeapAlloc dt=6 heapalloc_value=7300848 +HeapAlloc dt=5 heapalloc_value=7309040 +HeapAlloc dt=13 heapalloc_value=7317232 +HeapAlloc dt=5 heapalloc_value=7325424 +HeapAlloc dt=6 heapalloc_value=7333616 +HeapAlloc dt=8 heapalloc_value=7341808 +HeapAlloc dt=5 heapalloc_value=7350000 +HeapAlloc dt=9 heapalloc_value=7358192 +HeapAlloc dt=5 heapalloc_value=7366384 +HeapAlloc dt=6 heapalloc_value=7374576 +HeapAlloc dt=5 heapalloc_value=7382768 +HeapAlloc dt=5 heapalloc_value=7390960 +HeapAlloc dt=5 heapalloc_value=7399152 +HeapAlloc dt=6 heapalloc_value=7407344 +HeapAlloc dt=5 heapalloc_value=7415536 +HeapAlloc 
dt=5 heapalloc_value=7423728 +HeapAlloc dt=6 heapalloc_value=7431920 +HeapAlloc dt=5 heapalloc_value=7440112 +HeapAlloc dt=5 heapalloc_value=7448304 +HeapAlloc dt=5 heapalloc_value=7456496 +HeapAlloc dt=6 heapalloc_value=7464688 +HeapAlloc dt=5 heapalloc_value=7472880 +HeapAlloc dt=5 heapalloc_value=7481072 +HeapAlloc dt=5 heapalloc_value=7489264 +HeapAlloc dt=6 heapalloc_value=7497456 +HeapAlloc dt=5 heapalloc_value=7505648 +HeapAlloc dt=5 heapalloc_value=7513840 +HeapAlloc dt=5 heapalloc_value=7522032 +HeapAlloc dt=5 heapalloc_value=7530224 +HeapAlloc dt=6 heapalloc_value=7538416 +HeapAlloc dt=5 heapalloc_value=7546608 +HeapAlloc dt=6 heapalloc_value=7554800 +HeapAlloc dt=5 heapalloc_value=7562992 +HeapAlloc dt=5 heapalloc_value=7571184 +HeapAlloc dt=6 heapalloc_value=7579376 +HeapAlloc dt=5 heapalloc_value=7587568 +HeapAlloc dt=45 heapalloc_value=7595760 +HeapAlloc dt=7 heapalloc_value=7603952 +HeapAlloc dt=5 heapalloc_value=7612144 +HeapAlloc dt=6 heapalloc_value=7620336 +HeapAlloc dt=376 heapalloc_value=7628528 +HeapAlloc dt=13 heapalloc_value=7636720 +HeapAlloc dt=7 heapalloc_value=7644912 +HeapAlloc dt=35 heapalloc_value=7653104 +GCBegin dt=23 gc_seq=3 stack=22 +STWBegin dt=73 kind_string=22 stack=28 +GoUnblock dt=258 g=4 g_seq=5 stack=29 +ProcsChange dt=80 procs_value=8 stack=30 +STWEnd dt=37 +GCMarkAssistBegin dt=96 stack=31 +GCMarkAssistEnd dt=4606 +HeapAlloc dt=187 heapalloc_value=7671600 +HeapAlloc dt=26 heapalloc_value=7679792 +HeapAlloc dt=17 heapalloc_value=7687984 +HeapAlloc dt=29 heapalloc_value=7696176 +HeapAlloc dt=16 heapalloc_value=7704368 +HeapAlloc dt=12 heapalloc_value=7712560 +HeapAlloc dt=48 heapalloc_value=7868208 +GoStop dt=4635 reason_string=16 stack=45 +GoStart dt=48 g=1 g_seq=33 +HeapAlloc dt=27 heapalloc_value=7884336 +HeapAlloc dt=11 heapalloc_value=7892528 +HeapAlloc dt=8 heapalloc_value=7900720 +HeapAlloc dt=12 heapalloc_value=7908912 +HeapAlloc dt=9 heapalloc_value=7917104 +HeapAlloc dt=9 heapalloc_value=7925296 +HeapAlloc dt=9 
heapalloc_value=7933488 +HeapAlloc dt=8 heapalloc_value=7941680 +HeapAlloc dt=10 heapalloc_value=7949872 +HeapAlloc dt=8 heapalloc_value=7958064 +HeapAlloc dt=10 heapalloc_value=7966256 +HeapAlloc dt=12 heapalloc_value=7974448 +HeapAlloc dt=8 heapalloc_value=7982640 +HeapAlloc dt=8 heapalloc_value=7990832 +HeapAlloc dt=9 heapalloc_value=7999024 +HeapAlloc dt=8 heapalloc_value=8007216 +HeapAlloc dt=54 heapalloc_value=8015408 +HeapAlloc dt=10 heapalloc_value=8023600 +HeapAlloc dt=8 heapalloc_value=8031792 +HeapAlloc dt=9 heapalloc_value=8039984 +HeapAlloc dt=8 heapalloc_value=8048176 +HeapAlloc dt=9 heapalloc_value=8056368 +HeapAlloc dt=8 heapalloc_value=8064560 +HeapAlloc dt=9 heapalloc_value=8072752 +HeapAlloc dt=8 heapalloc_value=8080944 +HeapAlloc dt=9 heapalloc_value=8089136 +HeapAlloc dt=8 heapalloc_value=8097328 +GoBlock dt=20 reason_string=19 stack=21 +ProcStop dt=35 +ProcStart dt=147580 p=3 p_seq=6 +GoStart dt=144 g=4 g_seq=10 +GoBlock dt=38 reason_string=15 stack=32 +GoUnblock dt=41 g=25 g_seq=4 stack=0 +GoStart dt=6 g=25 g_seq=5 +GoLabel dt=1 label_string=4 +GoBlock dt=5825 reason_string=15 stack=27 +ProcStop dt=299 +ProcStart dt=158874 p=3 p_seq=7 +GoStart dt=231 g=35 g_seq=1 +GoStop dt=305629 reason_string=16 stack=51 +GoStart dt=79 g=35 g_seq=2 +GoStop dt=315206 reason_string=16 stack=50 +GoStart dt=36 g=35 g_seq=3 +GoDestroy dt=160337 +ProcStop dt=68 +EventBatch gen=1 m=1709042 time=7689670149213 size=4550 +ProcStart dt=287 p=2 p_seq=1 +GoStart dt=328 g=7 g_seq=1 +HeapAlloc dt=7006 heapalloc_value=2793472 +HeapAlloc dt=74 heapalloc_value=2801664 +GoBlock dt=275 reason_string=12 stack=18 +ProcStop dt=34 +ProcStart dt=327698 p=0 p_seq=3 +ProcStop dt=7 +ProcStart dt=2124 p=2 p_seq=3 +GoUnblock dt=32 g=24 g_seq=2 stack=0 +HeapAlloc dt=302 heapalloc_value=4038656 +HeapAlloc dt=104 heapalloc_value=4046848 +HeapAlloc dt=52 heapalloc_value=4055040 +GoStart dt=1147 g=24 g_seq=3 +GoLabel dt=5 label_string=2 +GoBlock dt=128 reason_string=15 stack=27 +GoUnblock 
dt=72 g=1 g_seq=21 stack=0 +GoStart dt=11 g=1 g_seq=22 +HeapAlloc dt=44 heapalloc_value=4063232 +HeapAlloc dt=43 heapalloc_value=4071424 +HeapAlloc dt=28 heapalloc_value=4079616 +HeapAlloc dt=24 heapalloc_value=4087808 +HeapAlloc dt=84 heapalloc_value=4096000 +HeapAlloc dt=25 heapalloc_value=4104192 +HeapAlloc dt=20 heapalloc_value=4112384 +HeapAlloc dt=24 heapalloc_value=4120576 +HeapAlloc dt=20 heapalloc_value=4128768 +HeapAlloc dt=19 heapalloc_value=4136960 +HeapAlloc dt=24 heapalloc_value=4145152 +HeapAlloc dt=20 heapalloc_value=4153344 +HeapAlloc dt=19 heapalloc_value=4161536 +HeapAlloc dt=20 heapalloc_value=4169728 +HeapAlloc dt=24 heapalloc_value=4177920 +HeapAlloc dt=33 heapalloc_value=4186112 +HeapAlloc dt=26 heapalloc_value=4194304 +HeapAlloc dt=31 heapalloc_value=4235264 +HeapAlloc dt=363 heapalloc_value=4243456 +HeapAlloc dt=61 heapalloc_value=4251648 +HeapAlloc dt=14 heapalloc_value=4259840 +HeapAlloc dt=12 heapalloc_value=4268032 +HeapAlloc dt=9 heapalloc_value=4276224 +HeapAlloc dt=9 heapalloc_value=4284416 +HeapAlloc dt=9 heapalloc_value=4292608 +HeapAlloc dt=8 heapalloc_value=4300800 +HeapAlloc dt=162 heapalloc_value=4308992 +HeapAlloc dt=14 heapalloc_value=4317184 +HeapAlloc dt=8 heapalloc_value=4325376 +HeapAlloc dt=53 heapalloc_value=4333568 +HeapAlloc dt=10 heapalloc_value=4341760 +HeapAlloc dt=16 heapalloc_value=4349952 +HeapAlloc dt=14 heapalloc_value=4358144 +GCMarkAssistBegin dt=27 stack=31 +GCMarkAssistEnd dt=18 +GCMarkAssistBegin dt=4 stack=31 +GoBlock dt=198 reason_string=13 stack=33 +ProcStop dt=19 +ProcStart dt=387 p=2 p_seq=4 +GoUnblock dt=265 g=24 g_seq=4 stack=0 +GoStart dt=69 g=24 g_seq=5 +GoLabel dt=1 label_string=2 +GoBlock dt=132 reason_string=10 stack=35 +GoStart dt=20 g=1 g_seq=24 +GCMarkAssistEnd dt=2 +HeapAlloc dt=13 heapalloc_value=4366336 +GCMarkAssistBegin dt=7 stack=31 +GoBlock dt=25 reason_string=10 stack=36 +ProcStop dt=24 +ProcStart dt=4689 p=1 p_seq=7 +ProcStop dt=23 +ProcStart dt=36183 p=1 p_seq=8 +ProcStop dt=24 
+ProcStart dt=1076 p=1 p_seq=9 +GoUnblock dt=12 g=22 g_seq=4 stack=0 +GoStart dt=118 g=22 g_seq=5 +GoLabel dt=1 label_string=2 +GoBlock dt=7117 reason_string=15 stack=27 +ProcStop dt=41 +ProcStart dt=150567 p=4 p_seq=7 +GoUnblock dt=41 g=23 g_seq=4 stack=0 +HeapAlloc dt=108 heapalloc_value=17163592 +HeapAlloc dt=61 heapalloc_value=17166856 +HeapAlloc dt=2994 heapalloc_value=17608712 +GoStart dt=1008 g=23 g_seq=5 +GoLabel dt=4 label_string=4 +GoBlock dt=40 reason_string=15 stack=27 +GoUnblock dt=49 g=1 g_seq=52 stack=0 +GoStart dt=7 g=1 g_seq=53 +HeapAlloc dt=30 heapalloc_value=17616904 +HeapAlloc dt=52 heapalloc_value=17625096 +HeapAlloc dt=35 heapalloc_value=17633288 +HeapAlloc dt=27 heapalloc_value=17641480 +HeapAlloc dt=28 heapalloc_value=17649672 +HeapAlloc dt=87 heapalloc_value=17657864 +HeapAlloc dt=32 heapalloc_value=17666056 +HeapAlloc dt=24 heapalloc_value=17674248 +HeapAlloc dt=22 heapalloc_value=17682440 +HeapAlloc dt=16 heapalloc_value=17690632 +HeapAlloc dt=15 heapalloc_value=17698824 +HeapAlloc dt=20 heapalloc_value=17707016 +HeapAlloc dt=19 heapalloc_value=17715208 +HeapAlloc dt=15 heapalloc_value=17723400 +HeapAlloc dt=18 heapalloc_value=17731592 +HeapAlloc dt=20 heapalloc_value=17739784 +HeapAlloc dt=15 heapalloc_value=17747976 +HeapAlloc dt=17 heapalloc_value=17756168 +HeapAlloc dt=67 heapalloc_value=17764360 +HeapAlloc dt=28 heapalloc_value=17772552 +HeapAlloc dt=22 heapalloc_value=17780744 +HeapAlloc dt=19 heapalloc_value=17788936 +HeapAlloc dt=22 heapalloc_value=17797128 +HeapAlloc dt=19 heapalloc_value=17805320 +HeapAlloc dt=19 heapalloc_value=17813512 +HeapAlloc dt=19 heapalloc_value=17821704 +HeapAlloc dt=15 heapalloc_value=17829896 +HeapAlloc dt=21 heapalloc_value=17838088 +HeapAlloc dt=19 heapalloc_value=17846280 +HeapAlloc dt=16 heapalloc_value=17854472 +HeapAlloc dt=14 heapalloc_value=17862664 +HeapAlloc dt=18 heapalloc_value=17870856 +HeapAlloc dt=58 heapalloc_value=17879048 +HeapAlloc dt=19 heapalloc_value=17887240 +HeapAlloc dt=15 
heapalloc_value=17895432 +HeapAlloc dt=19 heapalloc_value=17903624 +HeapAlloc dt=21 heapalloc_value=17911816 +HeapAlloc dt=17 heapalloc_value=17920008 +HeapAlloc dt=19 heapalloc_value=17928200 +HeapAlloc dt=19 heapalloc_value=17936392 +HeapAlloc dt=16 heapalloc_value=17944584 +HeapAlloc dt=15 heapalloc_value=17952776 +HeapAlloc dt=15 heapalloc_value=17960968 +HeapAlloc dt=19 heapalloc_value=17969160 +HeapAlloc dt=16 heapalloc_value=17977352 +HeapAlloc dt=16 heapalloc_value=17985544 +HeapAlloc dt=16 heapalloc_value=17993736 +HeapAlloc dt=19 heapalloc_value=18001928 +HeapAlloc dt=15 heapalloc_value=18010120 +HeapAlloc dt=16 heapalloc_value=18018312 +HeapAlloc dt=15 heapalloc_value=18026504 +HeapAlloc dt=19 heapalloc_value=18034696 +HeapAlloc dt=14 heapalloc_value=18042888 +HeapAlloc dt=17 heapalloc_value=18051080 +HeapAlloc dt=18 heapalloc_value=18059272 +HeapAlloc dt=20 heapalloc_value=18067464 +HeapAlloc dt=17 heapalloc_value=18075656 +HeapAlloc dt=125 heapalloc_value=18083848 +GoStop dt=20 reason_string=16 stack=46 +GoUnblock dt=288 g=25 g_seq=6 stack=0 +GoStart dt=7 g=25 g_seq=7 +GoLabel dt=1 label_string=2 +HeapAlloc dt=255 heapalloc_value=18091752 +GoBlock dt=30 reason_string=10 stack=35 +GoStart dt=5 g=1 g_seq=54 +HeapAlloc dt=25 heapalloc_value=18099944 +HeapAlloc dt=19 heapalloc_value=18108136 +HeapAlloc dt=45 heapalloc_value=18116328 +HeapAlloc dt=9 heapalloc_value=18124520 +HeapAlloc dt=80 heapalloc_value=18132712 +HeapAlloc dt=11 heapalloc_value=18140904 +HeapAlloc dt=6 heapalloc_value=18149096 +HeapAlloc dt=7 heapalloc_value=18157288 +HeapAlloc dt=7 heapalloc_value=18165480 +HeapAlloc dt=12 heapalloc_value=18173672 +HeapAlloc dt=11 heapalloc_value=18181864 +HeapAlloc dt=11 heapalloc_value=18190056 +HeapAlloc dt=7 heapalloc_value=18198248 +HeapAlloc dt=62 heapalloc_value=18206440 +HeapAlloc dt=8 heapalloc_value=18214632 +HeapAlloc dt=7 heapalloc_value=18222824 +HeapAlloc dt=6 heapalloc_value=18231016 +HeapAlloc dt=7 heapalloc_value=18239208 +HeapAlloc 
dt=11 heapalloc_value=18247400 +HeapAlloc dt=6 heapalloc_value=18255592 +HeapAlloc dt=7 heapalloc_value=18263784 +HeapAlloc dt=11 heapalloc_value=18271976 +HeapAlloc dt=6 heapalloc_value=18280168 +HeapAlloc dt=7 heapalloc_value=18288360 +HeapAlloc dt=7 heapalloc_value=18296552 +HeapAlloc dt=6 heapalloc_value=18304744 +HeapAlloc dt=10 heapalloc_value=18312936 +HeapAlloc dt=7 heapalloc_value=18321128 +HeapAlloc dt=7 heapalloc_value=18329320 +HeapAlloc dt=7 heapalloc_value=18337512 +HeapAlloc dt=31 heapalloc_value=18345704 +HeapAlloc dt=17 heapalloc_value=18353896 +HeapAlloc dt=7 heapalloc_value=18362088 +HeapAlloc dt=13 heapalloc_value=18370280 +HeapAlloc dt=6 heapalloc_value=18378472 +HeapAlloc dt=7 heapalloc_value=18386664 +HeapAlloc dt=7 heapalloc_value=18394856 +HeapAlloc dt=11 heapalloc_value=18403048 +HeapAlloc dt=6 heapalloc_value=18411240 +HeapAlloc dt=7 heapalloc_value=18419432 +HeapAlloc dt=7 heapalloc_value=18427624 +HeapAlloc dt=6 heapalloc_value=18435816 +HeapAlloc dt=7 heapalloc_value=18444008 +HeapAlloc dt=7 heapalloc_value=18452200 +GCMarkAssistBegin dt=13 stack=31 +GoBlock dt=35 reason_string=10 stack=36 +ProcStop dt=22 +ProcStart dt=936 p=1 p_seq=13 +GoStart dt=212 g=25 g_seq=9 +GoUnblock dt=31 g=1 g_seq=55 stack=41 +GoBlock dt=7 reason_string=15 stack=27 +GoStart dt=13 g=1 g_seq=56 +GCMarkAssistEnd dt=4 +HeapAlloc dt=30 heapalloc_value=16971400 +GCSweepBegin dt=41 stack=42 +GCSweepEnd dt=310 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=23 heapalloc_value=16979592 +GCSweepBegin dt=30 stack=42 +GCSweepEnd dt=934 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=80 heapalloc_value=16987784 +GCSweepBegin dt=43 stack=42 +GCSweepEnd dt=1671 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=6 heapalloc_value=16995976 +GCSweepBegin dt=41 stack=42 +GCSweepEnd dt=1680 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=13 heapalloc_value=17004168 +GCSweepBegin dt=44 stack=42 +GCSweepEnd dt=1555 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=12 
heapalloc_value=17012360 +GCSweepBegin dt=46 stack=42 +GCSweepEnd dt=1914 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=16 heapalloc_value=17020552 +GCSweepBegin dt=47 stack=42 +GCSweepEnd dt=1545 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=10 heapalloc_value=17028744 +GCSweepBegin dt=37 stack=42 +GCSweepEnd dt=1763 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=9 heapalloc_value=17036936 +GCSweepBegin dt=37 stack=42 +GCSweepEnd dt=1712 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=18 heapalloc_value=17045128 +GCSweepBegin dt=34 stack=42 +GCSweepEnd dt=1009 swept_value=466944 reclaimed_value=0 +HeapAlloc dt=9 heapalloc_value=17053320 +HeapAlloc dt=28 heapalloc_value=17061512 +HeapAlloc dt=25 heapalloc_value=17069704 +HeapAlloc dt=34 heapalloc_value=17077896 +HeapAlloc dt=39 heapalloc_value=17086088 +HeapAlloc dt=72 heapalloc_value=17094280 +HeapAlloc dt=32 heapalloc_value=17102472 +HeapAlloc dt=16 heapalloc_value=17110664 +HeapAlloc dt=15 heapalloc_value=17118856 +HeapAlloc dt=14 heapalloc_value=17127048 +HeapAlloc dt=16 heapalloc_value=17135240 +HeapAlloc dt=15 heapalloc_value=17143432 +HeapAlloc dt=19 heapalloc_value=17151624 +HeapAlloc dt=15 heapalloc_value=17159816 +HeapAlloc dt=54 heapalloc_value=17585800 +GoBlock dt=482 reason_string=19 stack=21 +ProcStop dt=210 +ProcStart dt=17621 p=0 p_seq=26 +ProcStop dt=24 +ProcStart dt=5194 p=1 p_seq=16 +ProcStop dt=17 +ProcStart dt=16724 p=1 p_seq=17 +GoUnblock dt=27 g=1 g_seq=59 stack=0 +GoStart dt=127 g=1 g_seq=60 +HeapAlloc dt=55 heapalloc_value=18617992 +HeapAlloc dt=64 heapalloc_value=18626184 +HeapAlloc dt=65 heapalloc_value=18634376 +HeapAlloc dt=61 heapalloc_value=18642568 +HeapAlloc dt=54 heapalloc_value=18650760 +HeapAlloc dt=66 heapalloc_value=18658952 +HeapAlloc dt=67 heapalloc_value=18667144 +HeapAlloc dt=54 heapalloc_value=18675336 +HeapAlloc dt=57 heapalloc_value=18683528 +HeapAlloc dt=45 heapalloc_value=18691720 +HeapAlloc dt=84 heapalloc_value=18699912 +HeapAlloc dt=26 
heapalloc_value=18708104 +HeapAlloc dt=18 heapalloc_value=18716296 +HeapAlloc dt=15 heapalloc_value=18724488 +HeapAlloc dt=24 heapalloc_value=18732680 +HeapAlloc dt=26 heapalloc_value=18740872 +HeapAlloc dt=21 heapalloc_value=18749064 +HeapAlloc dt=15 heapalloc_value=18757256 +HeapAlloc dt=31 heapalloc_value=18765448 +HeapAlloc dt=7 heapalloc_value=18773640 +HeapAlloc dt=7 heapalloc_value=18781832 +HeapAlloc dt=113 heapalloc_value=18790024 +HeapAlloc dt=8 heapalloc_value=18798216 +HeapAlloc dt=6 heapalloc_value=18806408 +HeapAlloc dt=7 heapalloc_value=18814600 +HeapAlloc dt=6 heapalloc_value=18822792 +HeapAlloc dt=6 heapalloc_value=18830984 +HeapAlloc dt=7 heapalloc_value=18839176 +HeapAlloc dt=6 heapalloc_value=18847368 +HeapAlloc dt=6 heapalloc_value=18855560 +HeapAlloc dt=6 heapalloc_value=18863752 +HeapAlloc dt=6 heapalloc_value=18871944 +HeapAlloc dt=6 heapalloc_value=18880136 +HeapAlloc dt=6 heapalloc_value=18888328 +HeapAlloc dt=6 heapalloc_value=18896520 +HeapAlloc dt=7 heapalloc_value=18904712 +HeapAlloc dt=6 heapalloc_value=18912904 +HeapAlloc dt=38 heapalloc_value=18921096 +HeapAlloc dt=7 heapalloc_value=18929288 +HeapAlloc dt=6 heapalloc_value=18937480 +HeapAlloc dt=14 heapalloc_value=18945672 +HeapAlloc dt=6 heapalloc_value=18953864 +HeapAlloc dt=6 heapalloc_value=18962056 +HeapAlloc dt=6 heapalloc_value=18970248 +HeapAlloc dt=7 heapalloc_value=18978440 +HeapAlloc dt=6 heapalloc_value=18986632 +HeapAlloc dt=6 heapalloc_value=18994824 +HeapAlloc dt=13 heapalloc_value=19003016 +HeapAlloc dt=7 heapalloc_value=19011208 +HeapAlloc dt=6 heapalloc_value=19019400 +HeapAlloc dt=6 heapalloc_value=19027592 +HeapAlloc dt=6 heapalloc_value=19035784 +HeapAlloc dt=7 heapalloc_value=19043976 +HeapAlloc dt=6 heapalloc_value=19052168 +HeapAlloc dt=6 heapalloc_value=19060360 +HeapAlloc dt=6 heapalloc_value=19068552 +HeapAlloc dt=6 heapalloc_value=19076744 +HeapAlloc dt=7 heapalloc_value=19084936 +HeapAlloc dt=6 heapalloc_value=19093128 +HeapAlloc dt=6 
heapalloc_value=19101320 +HeapAlloc dt=6 heapalloc_value=19109512 +HeapAlloc dt=7 heapalloc_value=19117704 +HeapAlloc dt=5 heapalloc_value=19125896 +HeapAlloc dt=7 heapalloc_value=19134088 +HeapAlloc dt=6 heapalloc_value=19142280 +HeapAlloc dt=6 heapalloc_value=19150472 +HeapAlloc dt=6 heapalloc_value=19158664 +HeapAlloc dt=6 heapalloc_value=19166856 +HeapAlloc dt=7 heapalloc_value=19175048 +HeapAlloc dt=6 heapalloc_value=19183240 +HeapAlloc dt=6 heapalloc_value=19191432 +HeapAlloc dt=6 heapalloc_value=19199624 +HeapAlloc dt=7 heapalloc_value=19207816 +HeapAlloc dt=6 heapalloc_value=19216008 +HeapAlloc dt=6 heapalloc_value=19224200 +HeapAlloc dt=6 heapalloc_value=19232392 +HeapAlloc dt=7 heapalloc_value=19240584 +HeapAlloc dt=6 heapalloc_value=19248776 +HeapAlloc dt=6 heapalloc_value=19256968 +HeapAlloc dt=6 heapalloc_value=19265160 +HeapAlloc dt=6 heapalloc_value=19273352 +HeapAlloc dt=6 heapalloc_value=19281544 +HeapAlloc dt=6 heapalloc_value=19289736 +HeapAlloc dt=7 heapalloc_value=19297928 +HeapAlloc dt=6 heapalloc_value=19306120 +HeapAlloc dt=62 heapalloc_value=19314312 +HeapAlloc dt=7 heapalloc_value=19322504 +HeapAlloc dt=6 heapalloc_value=19330696 +HeapAlloc dt=6 heapalloc_value=19338888 +HeapAlloc dt=35 heapalloc_value=19347080 +HeapAlloc dt=7 heapalloc_value=19355272 +HeapAlloc dt=6 heapalloc_value=19363464 +HeapAlloc dt=6 heapalloc_value=19371656 +HeapAlloc dt=6 heapalloc_value=19379848 +HeapAlloc dt=6 heapalloc_value=19388040 +HeapAlloc dt=6 heapalloc_value=19396232 +HeapAlloc dt=7 heapalloc_value=19404424 +HeapAlloc dt=6 heapalloc_value=19412616 +HeapAlloc dt=7 heapalloc_value=19420808 +HeapAlloc dt=6 heapalloc_value=19429000 +HeapAlloc dt=6 heapalloc_value=19437192 +HeapAlloc dt=6 heapalloc_value=19445384 +HeapAlloc dt=7 heapalloc_value=19453576 +HeapAlloc dt=6 heapalloc_value=19461768 +HeapAlloc dt=10 heapalloc_value=19469960 +HeapAlloc dt=6 heapalloc_value=19478152 +HeapAlloc dt=6 heapalloc_value=19486344 +HeapAlloc dt=6 heapalloc_value=19494536 
+HeapAlloc dt=6 heapalloc_value=19502728 +HeapAlloc dt=7 heapalloc_value=19510920 +HeapAlloc dt=6 heapalloc_value=19519112 +HeapAlloc dt=6 heapalloc_value=19527304 +HeapAlloc dt=6 heapalloc_value=19535496 +HeapAlloc dt=6 heapalloc_value=19543688 +HeapAlloc dt=35 heapalloc_value=19551880 +HeapAlloc dt=7 heapalloc_value=19560072 +HeapAlloc dt=6 heapalloc_value=19568264 +HeapAlloc dt=6 heapalloc_value=19576456 +HeapAlloc dt=6 heapalloc_value=19584648 +HeapAlloc dt=7 heapalloc_value=19592840 +HeapAlloc dt=7 heapalloc_value=19601032 +HeapAlloc dt=6 heapalloc_value=19609224 +HeapAlloc dt=6 heapalloc_value=19617416 +HeapAlloc dt=6 heapalloc_value=19625608 +HeapAlloc dt=6 heapalloc_value=19633800 +GoBlock dt=12 reason_string=19 stack=21 +ProcStop dt=171 +ProcStart dt=17527 p=0 p_seq=28 +ProcStop dt=24 +ProcStart dt=1830 p=1 p_seq=20 +ProcStop dt=13 +ProcStart dt=16742 p=1 p_seq=21 +GoUnblock dt=20 g=1 g_seq=63 stack=0 +GoStart dt=121 g=1 g_seq=64 +HeapAlloc dt=62 heapalloc_value=20665992 +HeapAlloc dt=21 heapalloc_value=20674184 +HeapAlloc dt=25 heapalloc_value=20682376 +HeapAlloc dt=20 heapalloc_value=20690568 +HeapAlloc dt=12 heapalloc_value=20698760 +HeapAlloc dt=16 heapalloc_value=20706952 +HeapAlloc dt=15 heapalloc_value=20715144 +HeapAlloc dt=18 heapalloc_value=20723336 +HeapAlloc dt=12 heapalloc_value=20731528 +HeapAlloc dt=16 heapalloc_value=20739720 +HeapAlloc dt=12 heapalloc_value=20747912 +HeapAlloc dt=12 heapalloc_value=20756104 +HeapAlloc dt=12 heapalloc_value=20764296 +HeapAlloc dt=12 heapalloc_value=20772488 +HeapAlloc dt=9 heapalloc_value=20780680 +HeapAlloc dt=5 heapalloc_value=20788872 +HeapAlloc dt=6 heapalloc_value=20797064 +HeapAlloc dt=9 heapalloc_value=20805256 +HeapAlloc dt=5 heapalloc_value=20813448 +HeapAlloc dt=6 heapalloc_value=20821640 +HeapAlloc dt=5 heapalloc_value=20829832 +HeapAlloc dt=6 heapalloc_value=20838024 +HeapAlloc dt=15 heapalloc_value=20846216 +HeapAlloc dt=12 heapalloc_value=20854408 +HeapAlloc dt=11 heapalloc_value=20862600 
+HeapAlloc dt=13 heapalloc_value=20870792 +HeapAlloc dt=5 heapalloc_value=20878984 +HeapAlloc dt=106 heapalloc_value=20887176 +HeapAlloc dt=8 heapalloc_value=20895368 +HeapAlloc dt=5 heapalloc_value=20903560 +HeapAlloc dt=6 heapalloc_value=20911752 +HeapAlloc dt=6 heapalloc_value=20919944 +HeapAlloc dt=5 heapalloc_value=20928136 +HeapAlloc dt=9 heapalloc_value=20936328 +HeapAlloc dt=6 heapalloc_value=20944520 +HeapAlloc dt=5 heapalloc_value=20952712 +HeapAlloc dt=6 heapalloc_value=20960904 +HeapAlloc dt=5 heapalloc_value=20969096 +HeapAlloc dt=6 heapalloc_value=20977288 +HeapAlloc dt=5 heapalloc_value=20985480 +HeapAlloc dt=5 heapalloc_value=20993672 +HeapAlloc dt=10 heapalloc_value=21001864 +HeapAlloc dt=6 heapalloc_value=21010056 +HeapAlloc dt=37 heapalloc_value=21018248 +HeapAlloc dt=7 heapalloc_value=21026440 +HeapAlloc dt=6 heapalloc_value=21034632 +HeapAlloc dt=34 heapalloc_value=21042824 +HeapAlloc dt=6 heapalloc_value=21051016 +HeapAlloc dt=6 heapalloc_value=21059208 +HeapAlloc dt=11 heapalloc_value=21067400 +HeapAlloc dt=6 heapalloc_value=21075592 +HeapAlloc dt=5 heapalloc_value=21083784 +HeapAlloc dt=6 heapalloc_value=21091976 +HeapAlloc dt=5 heapalloc_value=21100168 +HeapAlloc dt=9 heapalloc_value=21108360 +HeapAlloc dt=6 heapalloc_value=21116552 +HeapAlloc dt=6 heapalloc_value=21124744 +HeapAlloc dt=10 heapalloc_value=21132936 +HeapAlloc dt=5 heapalloc_value=21141128 +HeapAlloc dt=6 heapalloc_value=21149320 +HeapAlloc dt=5 heapalloc_value=21157512 +HeapAlloc dt=6 heapalloc_value=21165704 +HeapAlloc dt=5 heapalloc_value=21173896 +HeapAlloc dt=6 heapalloc_value=21182088 +HeapAlloc dt=5 heapalloc_value=21190280 +HeapAlloc dt=9 heapalloc_value=21198472 +HeapAlloc dt=6 heapalloc_value=21206664 +HeapAlloc dt=6 heapalloc_value=21214856 +HeapAlloc dt=6 heapalloc_value=21223048 +HeapAlloc dt=5 heapalloc_value=21231240 +HeapAlloc dt=6 heapalloc_value=21239432 +HeapAlloc dt=5 heapalloc_value=21247624 +HeapAlloc dt=6 heapalloc_value=21255816 +HeapAlloc dt=5 
heapalloc_value=21264008 +HeapAlloc dt=6 heapalloc_value=21272200 +HeapAlloc dt=5 heapalloc_value=21280392 +HeapAlloc dt=6 heapalloc_value=21288584 +HeapAlloc dt=5 heapalloc_value=21296776 +HeapAlloc dt=6 heapalloc_value=21304968 +HeapAlloc dt=5 heapalloc_value=21313160 +HeapAlloc dt=6 heapalloc_value=21321352 +HeapAlloc dt=6 heapalloc_value=21329544 +HeapAlloc dt=6 heapalloc_value=21337736 +HeapAlloc dt=6 heapalloc_value=21345928 +HeapAlloc dt=6 heapalloc_value=21354120 +HeapAlloc dt=5 heapalloc_value=21362312 +HeapAlloc dt=6 heapalloc_value=21370504 +HeapAlloc dt=6 heapalloc_value=21378696 +HeapAlloc dt=6 heapalloc_value=21386888 +HeapAlloc dt=5 heapalloc_value=21395080 +HeapAlloc dt=6 heapalloc_value=21403272 +HeapAlloc dt=96 heapalloc_value=21411464 +HeapAlloc dt=7 heapalloc_value=21419656 +HeapAlloc dt=6 heapalloc_value=21427848 +HeapAlloc dt=21 heapalloc_value=21968520 +HeapAlloc dt=1835 heapalloc_value=21976712 +HeapAlloc dt=11 heapalloc_value=21984904 +HeapAlloc dt=8 heapalloc_value=21993096 +HeapAlloc dt=7 heapalloc_value=22001288 +HeapAlloc dt=8 heapalloc_value=22009480 +HeapAlloc dt=7 heapalloc_value=22017672 +HeapAlloc dt=8 heapalloc_value=22025864 +HeapAlloc dt=7 heapalloc_value=22034056 +HeapAlloc dt=8 heapalloc_value=22042248 +HeapAlloc dt=7 heapalloc_value=22050440 +HeapAlloc dt=7 heapalloc_value=22058632 +HeapAlloc dt=8 heapalloc_value=22066824 +HeapAlloc dt=7 heapalloc_value=22075016 +HeapAlloc dt=8 heapalloc_value=22083208 +HeapAlloc dt=7 heapalloc_value=22091400 +HeapAlloc dt=7 heapalloc_value=22099592 +HeapAlloc dt=14 heapalloc_value=22107784 +HeapAlloc dt=5 heapalloc_value=22115976 +HeapAlloc dt=6 heapalloc_value=22124168 +HeapAlloc dt=6 heapalloc_value=22132360 +HeapAlloc dt=5 heapalloc_value=22140552 +HeapAlloc dt=6 heapalloc_value=22148744 +HeapAlloc dt=5 heapalloc_value=22156936 +HeapAlloc dt=6 heapalloc_value=22165128 +HeapAlloc dt=6 heapalloc_value=22173320 +HeapAlloc dt=38 heapalloc_value=22181512 +HeapAlloc dt=7 
heapalloc_value=22189704 +HeapAlloc dt=5 heapalloc_value=22197896 +HeapAlloc dt=6 heapalloc_value=22206088 +HeapAlloc dt=6 heapalloc_value=22214280 +HeapAlloc dt=5 heapalloc_value=22222472 +GoBlock dt=9 reason_string=19 stack=21 +ProcStop dt=163 +ProcStart dt=16841 p=0 p_seq=30 +ProcStop dt=23 +ProcStart dt=1498 p=1 p_seq=24 +ProcStop dt=11 +ProcStart dt=16726 p=1 p_seq=25 +GoUnblock dt=19 g=1 g_seq=67 stack=0 +GoStart dt=117 g=1 g_seq=68 +HeapAlloc dt=46 heapalloc_value=23254664 +HeapAlloc dt=19 heapalloc_value=23262856 +HeapAlloc dt=20 heapalloc_value=23271048 +HeapAlloc dt=16 heapalloc_value=23279240 +HeapAlloc dt=12 heapalloc_value=23287432 +HeapAlloc dt=12 heapalloc_value=23295624 +HeapAlloc dt=13 heapalloc_value=23303816 +HeapAlloc dt=15 heapalloc_value=23312008 +HeapAlloc dt=13 heapalloc_value=23320200 +HeapAlloc dt=13 heapalloc_value=23328392 +HeapAlloc dt=12 heapalloc_value=23336584 +HeapAlloc dt=12 heapalloc_value=23344776 +HeapAlloc dt=5 heapalloc_value=23352968 +HeapAlloc dt=100 heapalloc_value=23361160 +HeapAlloc dt=14 heapalloc_value=23369352 +HeapAlloc dt=16 heapalloc_value=23377544 +HeapAlloc dt=13 heapalloc_value=23385736 +HeapAlloc dt=5 heapalloc_value=23393928 +HeapAlloc dt=6 heapalloc_value=23402120 +HeapAlloc dt=9 heapalloc_value=23410312 +HeapAlloc dt=6 heapalloc_value=23418504 +HeapAlloc dt=6 heapalloc_value=23426696 +HeapAlloc dt=5 heapalloc_value=23434888 +HeapAlloc dt=6 heapalloc_value=23443080 +HeapAlloc dt=5 heapalloc_value=23451272 +HeapAlloc dt=6 heapalloc_value=23459464 +HeapAlloc dt=6 heapalloc_value=23467656 +HeapAlloc dt=6 heapalloc_value=23475848 +HeapAlloc dt=6 heapalloc_value=23484040 +HeapAlloc dt=5 heapalloc_value=23492232 +HeapAlloc dt=6 heapalloc_value=23500424 +HeapAlloc dt=5 heapalloc_value=23508616 +HeapAlloc dt=83 heapalloc_value=23516808 +HeapAlloc dt=8 heapalloc_value=23525000 +HeapAlloc dt=5 heapalloc_value=23533192 +HeapAlloc dt=6 heapalloc_value=23541384 +HeapAlloc dt=6 heapalloc_value=23549576 +HeapAlloc dt=5 
heapalloc_value=23557768 +HeapAlloc dt=7 heapalloc_value=23565960 +HeapAlloc dt=7 heapalloc_value=23574152 +HeapAlloc dt=6 heapalloc_value=23582344 +HeapAlloc dt=5 heapalloc_value=23590536 +HeapAlloc dt=6 heapalloc_value=23598728 +HeapAlloc dt=6 heapalloc_value=23606920 +HeapAlloc dt=5 heapalloc_value=23615112 +HeapAlloc dt=6 heapalloc_value=23623304 +HeapAlloc dt=6 heapalloc_value=23631496 +HeapAlloc dt=5 heapalloc_value=23639688 +HeapAlloc dt=38 heapalloc_value=23647880 +HeapAlloc dt=8 heapalloc_value=23656072 +HeapAlloc dt=37 heapalloc_value=23664264 +HeapAlloc dt=6 heapalloc_value=23672456 +HeapAlloc dt=6 heapalloc_value=23680648 +HeapAlloc dt=6 heapalloc_value=23688840 +HeapAlloc dt=6 heapalloc_value=23697032 +HeapAlloc dt=6 heapalloc_value=23705224 +HeapAlloc dt=5 heapalloc_value=23713416 +HeapAlloc dt=6 heapalloc_value=23721608 +HeapAlloc dt=10 heapalloc_value=23729800 +HeapAlloc dt=5 heapalloc_value=23737992 +HeapAlloc dt=6 heapalloc_value=23746184 +HeapAlloc dt=6 heapalloc_value=23754376 +HeapAlloc dt=5 heapalloc_value=23762568 +HeapAlloc dt=6 heapalloc_value=23770760 +HeapAlloc dt=6 heapalloc_value=23778952 +HeapAlloc dt=5 heapalloc_value=23787144 +HeapAlloc dt=9 heapalloc_value=23795336 +HeapAlloc dt=6 heapalloc_value=23803528 +HeapAlloc dt=6 heapalloc_value=23811720 +HeapAlloc dt=5 heapalloc_value=23819912 +HeapAlloc dt=6 heapalloc_value=23828104 +HeapAlloc dt=6 heapalloc_value=23836296 +HeapAlloc dt=6 heapalloc_value=23844488 +HeapAlloc dt=5 heapalloc_value=23852680 +HeapAlloc dt=6 heapalloc_value=23860872 +HeapAlloc dt=6 heapalloc_value=23869064 +HeapAlloc dt=6 heapalloc_value=23877256 +HeapAlloc dt=6 heapalloc_value=23885448 +HeapAlloc dt=5 heapalloc_value=23893640 +HeapAlloc dt=6 heapalloc_value=23901832 +HeapAlloc dt=5 heapalloc_value=23910024 +HeapAlloc dt=6 heapalloc_value=23918216 +HeapAlloc dt=6 heapalloc_value=23926408 +HeapAlloc dt=6 heapalloc_value=23934600 +HeapAlloc dt=6 heapalloc_value=23942792 +HeapAlloc dt=5 heapalloc_value=23950984 
+HeapAlloc dt=6 heapalloc_value=23959176 +HeapAlloc dt=5 heapalloc_value=23967368 +HeapAlloc dt=6 heapalloc_value=23975560 +HeapAlloc dt=7 heapalloc_value=23983752 +HeapAlloc dt=5 heapalloc_value=23991944 +HeapAlloc dt=6 heapalloc_value=24000136 +HeapAlloc dt=5 heapalloc_value=24008328 +HeapAlloc dt=6 heapalloc_value=24016520 +HeapAlloc dt=6 heapalloc_value=24024712 +HeapAlloc dt=5 heapalloc_value=24032904 +HeapAlloc dt=50 heapalloc_value=24041096 +HeapAlloc dt=7 heapalloc_value=24049288 +HeapAlloc dt=6 heapalloc_value=24057480 +HeapAlloc dt=5 heapalloc_value=24065672 +HeapAlloc dt=34 heapalloc_value=24073864 +HeapAlloc dt=7 heapalloc_value=24082056 +HeapAlloc dt=6 heapalloc_value=24090248 +HeapAlloc dt=6 heapalloc_value=24098440 +HeapAlloc dt=6 heapalloc_value=24106632 +HeapAlloc dt=5 heapalloc_value=24114824 +HeapAlloc dt=6 heapalloc_value=24123016 +HeapAlloc dt=6 heapalloc_value=24131208 +HeapAlloc dt=6 heapalloc_value=24139400 +HeapAlloc dt=6 heapalloc_value=24147592 +HeapAlloc dt=5 heapalloc_value=24155784 +HeapAlloc dt=6 heapalloc_value=24163976 +HeapAlloc dt=5 heapalloc_value=24172168 +HeapAlloc dt=6 heapalloc_value=24180360 +HeapAlloc dt=365 heapalloc_value=24188552 +HeapAlloc dt=13 heapalloc_value=24196744 +HeapAlloc dt=6 heapalloc_value=24204936 +HeapAlloc dt=6 heapalloc_value=24213128 +HeapAlloc dt=5 heapalloc_value=24221320 +HeapAlloc dt=6 heapalloc_value=24229512 +HeapAlloc dt=6 heapalloc_value=24237704 +HeapAlloc dt=6 heapalloc_value=24245896 +HeapAlloc dt=6 heapalloc_value=24254088 +HeapAlloc dt=6 heapalloc_value=24262280 +HeapAlloc dt=6 heapalloc_value=24270472 +GoBlock dt=10 reason_string=19 stack=21 +ProcStop dt=157 +ProcStart dt=12778 p=1 p_seq=27 +GoUnblock dt=12 g=1 g_seq=69 stack=0 +GoStart dt=143 g=1 g_seq=70 +HeapAlloc dt=61 heapalloc_value=24278664 +HeapAlloc dt=11 heapalloc_value=24286856 +HeapAlloc dt=5 heapalloc_value=24295048 +HeapAlloc dt=6 heapalloc_value=24303240 +HeapAlloc dt=5 heapalloc_value=24311432 +HeapAlloc dt=6 
heapalloc_value=24319624 +HeapAlloc dt=6 heapalloc_value=24327816 +HeapAlloc dt=6 heapalloc_value=24336008 +HeapAlloc dt=7 heapalloc_value=24344200 +HeapAlloc dt=5 heapalloc_value=24352392 +HeapAlloc dt=7 heapalloc_value=24360584 +HeapAlloc dt=5 heapalloc_value=24368776 +HeapAlloc dt=6 heapalloc_value=24376968 +HeapAlloc dt=6 heapalloc_value=24385160 +HeapAlloc dt=5 heapalloc_value=24393352 +HeapAlloc dt=6 heapalloc_value=24401544 +HeapAlloc dt=6 heapalloc_value=24409736 +HeapAlloc dt=6 heapalloc_value=24417928 +HeapAlloc dt=6 heapalloc_value=24426120 +HeapAlloc dt=5 heapalloc_value=24434312 +HeapAlloc dt=6 heapalloc_value=24442504 +HeapAlloc dt=6 heapalloc_value=24450696 +HeapAlloc dt=6 heapalloc_value=24458888 +HeapAlloc dt=6 heapalloc_value=24467080 +HeapAlloc dt=5 heapalloc_value=24475272 +HeapAlloc dt=6 heapalloc_value=24483464 +HeapAlloc dt=5 heapalloc_value=24491656 +HeapAlloc dt=6 heapalloc_value=24499848 +HeapAlloc dt=6 heapalloc_value=24508040 +HeapAlloc dt=5 heapalloc_value=24516232 +HeapAlloc dt=6 heapalloc_value=24524424 +HeapAlloc dt=6 heapalloc_value=24532616 +HeapAlloc dt=5 heapalloc_value=24540808 +HeapAlloc dt=6 heapalloc_value=24549000 +HeapAlloc dt=5 heapalloc_value=24557192 +HeapAlloc dt=49 heapalloc_value=24565384 +HeapAlloc dt=7 heapalloc_value=24573576 +HeapAlloc dt=5 heapalloc_value=24581768 +HeapAlloc dt=6 heapalloc_value=24589960 +HeapAlloc dt=17 heapalloc_value=24598152 +HeapAlloc dt=12 heapalloc_value=24606344 +HeapAlloc dt=5 heapalloc_value=24614536 +HeapAlloc dt=6 heapalloc_value=24622728 +HeapAlloc dt=5 heapalloc_value=24630920 +HeapAlloc dt=6 heapalloc_value=24639112 +HeapAlloc dt=6 heapalloc_value=24647304 +HeapAlloc dt=5 heapalloc_value=24655496 +HeapAlloc dt=6 heapalloc_value=24663688 +HeapAlloc dt=37 heapalloc_value=24671880 +HeapAlloc dt=6 heapalloc_value=24680072 +HeapAlloc dt=6 heapalloc_value=24688264 +HeapAlloc dt=36 heapalloc_value=24696456 +HeapAlloc dt=7 heapalloc_value=24704648 +HeapAlloc dt=12 heapalloc_value=24712840 
+HeapAlloc dt=6 heapalloc_value=24721032 +HeapAlloc dt=17 heapalloc_value=24729224 +HeapAlloc dt=5 heapalloc_value=24737416 +HeapAlloc dt=6 heapalloc_value=24745608 +HeapAlloc dt=19 heapalloc_value=24753800 +HeapAlloc dt=5 heapalloc_value=24761992 +HeapAlloc dt=6 heapalloc_value=24770184 +HeapAlloc dt=79 heapalloc_value=24778376 +HeapAlloc dt=7 heapalloc_value=24786568 +HeapAlloc dt=6 heapalloc_value=24794760 +HeapAlloc dt=5 heapalloc_value=24802952 +HeapAlloc dt=6 heapalloc_value=24811144 +HeapAlloc dt=6 heapalloc_value=24819336 +HeapAlloc dt=6 heapalloc_value=24827528 +HeapAlloc dt=5 heapalloc_value=24835720 +HeapAlloc dt=6 heapalloc_value=24843912 +HeapAlloc dt=6 heapalloc_value=24852104 +HeapAlloc dt=6 heapalloc_value=24860296 +HeapAlloc dt=6 heapalloc_value=24868488 +HeapAlloc dt=5 heapalloc_value=24876680 +HeapAlloc dt=6 heapalloc_value=24884872 +HeapAlloc dt=6 heapalloc_value=24893064 +HeapAlloc dt=5 heapalloc_value=24901256 +HeapAlloc dt=6 heapalloc_value=24909448 +HeapAlloc dt=6 heapalloc_value=24917640 +HeapAlloc dt=5 heapalloc_value=24925832 +HeapAlloc dt=6 heapalloc_value=24934024 +HeapAlloc dt=5 heapalloc_value=24942216 +HeapAlloc dt=6 heapalloc_value=24950408 +HeapAlloc dt=6 heapalloc_value=24958600 +HeapAlloc dt=6 heapalloc_value=24966792 +HeapAlloc dt=5 heapalloc_value=24974984 +HeapAlloc dt=6 heapalloc_value=24983176 +HeapAlloc dt=6 heapalloc_value=24991368 +HeapAlloc dt=6 heapalloc_value=24999560 +HeapAlloc dt=5 heapalloc_value=25007752 +HeapAlloc dt=6 heapalloc_value=25015944 +HeapAlloc dt=5 heapalloc_value=25024136 +HeapAlloc dt=6 heapalloc_value=25032328 +HeapAlloc dt=6 heapalloc_value=25040520 +HeapAlloc dt=6 heapalloc_value=25048712 +HeapAlloc dt=6 heapalloc_value=25056904 +HeapAlloc dt=5 heapalloc_value=25065096 +HeapAlloc dt=6 heapalloc_value=25073288 +HeapAlloc dt=6 heapalloc_value=25081480 +HeapAlloc dt=46 heapalloc_value=25089672 +HeapAlloc dt=7 heapalloc_value=25097864 +HeapAlloc dt=6 heapalloc_value=25106056 +HeapAlloc dt=5 
heapalloc_value=25114248 +HeapAlloc dt=36 heapalloc_value=25122440 +HeapAlloc dt=7 heapalloc_value=25130632 +HeapAlloc dt=6 heapalloc_value=25138824 +HeapAlloc dt=6 heapalloc_value=25147016 +HeapAlloc dt=5 heapalloc_value=25155208 +HeapAlloc dt=6 heapalloc_value=25163400 +HeapAlloc dt=5 heapalloc_value=25171592 +HeapAlloc dt=6 heapalloc_value=25179784 +HeapAlloc dt=5 heapalloc_value=25187976 +HeapAlloc dt=6 heapalloc_value=25196168 +HeapAlloc dt=5 heapalloc_value=25204360 +HeapAlloc dt=6 heapalloc_value=25212552 +HeapAlloc dt=5 heapalloc_value=25220744 +HeapAlloc dt=6 heapalloc_value=25228936 +HeapAlloc dt=10 heapalloc_value=25237128 +HeapAlloc dt=5 heapalloc_value=25245320 +HeapAlloc dt=6 heapalloc_value=25253512 +HeapAlloc dt=5 heapalloc_value=25261704 +HeapAlloc dt=6 heapalloc_value=25269896 +HeapAlloc dt=6 heapalloc_value=25278088 +HeapAlloc dt=5 heapalloc_value=25286280 +HeapAlloc dt=6 heapalloc_value=25294472 +GoBlock dt=10 reason_string=19 stack=21 +ProcStop dt=14 +ProcStart dt=7152 p=1 p_seq=29 +GoStart dt=199 g=37 g_seq=1 +GoStop dt=306782 reason_string=16 stack=50 +GoStart dt=57 g=37 g_seq=2 +GoStop dt=315218 reason_string=16 stack=50 +GoStart dt=17 g=37 g_seq=3 +GoDestroy dt=159214 +ProcStop dt=60 +EventBatch gen=1 m=1709041 time=7689670150297 size=5255 +ProcStart dt=316 p=3 p_seq=1 +ProcStop dt=37 +ProcStart dt=311299 p=1 p_seq=5 +ProcStop dt=17 +ProcStart dt=16759 p=1 p_seq=6 +GoUnblock dt=47 g=1 g_seq=3 stack=0 +GoStart dt=137 g=1 g_seq=4 +HeapAlloc dt=56 heapalloc_value=2809856 +HeapAlloc dt=29 heapalloc_value=2818048 +HeapAlloc dt=19 heapalloc_value=2826240 +HeapAlloc dt=22 heapalloc_value=2834432 +HeapAlloc dt=91 heapalloc_value=2842624 +HeapAlloc dt=21 heapalloc_value=2850816 +HeapAlloc dt=24 heapalloc_value=2859008 +HeapAlloc dt=7 heapalloc_value=2867200 +HeapAlloc dt=15 heapalloc_value=2875392 +HeapAlloc dt=16 heapalloc_value=2883584 +HeapAlloc dt=12 heapalloc_value=2899968 +HeapAlloc dt=9 heapalloc_value=2908160 +HeapAlloc dt=16 
heapalloc_value=2916352 +HeapAlloc dt=15 heapalloc_value=2924544 +HeapAlloc dt=12 heapalloc_value=2932736 +HeapAlloc dt=12 heapalloc_value=2940928 +HeapAlloc dt=7 heapalloc_value=2949120 +HeapAlloc dt=18 heapalloc_value=2957312 +HeapAlloc dt=14 heapalloc_value=2965504 +HeapAlloc dt=12 heapalloc_value=2973696 +HeapAlloc dt=13 heapalloc_value=2981888 +HeapAlloc dt=12 heapalloc_value=2990080 +HeapAlloc dt=11 heapalloc_value=2998272 +HeapAlloc dt=12 heapalloc_value=3006464 +HeapAlloc dt=13 heapalloc_value=3014656 +HeapAlloc dt=12 heapalloc_value=3022848 +HeapAlloc dt=11 heapalloc_value=3031040 +HeapAlloc dt=11 heapalloc_value=3039232 +HeapAlloc dt=13 heapalloc_value=3047424 +HeapAlloc dt=11 heapalloc_value=3055616 +HeapAlloc dt=20 heapalloc_value=3063808 +HeapAlloc dt=12 heapalloc_value=3072000 +HeapAlloc dt=12 heapalloc_value=3080192 +HeapAlloc dt=11 heapalloc_value=3088384 +HeapAlloc dt=12 heapalloc_value=3096576 +HeapAlloc dt=11 heapalloc_value=3104768 +HeapAlloc dt=11 heapalloc_value=3112960 +HeapAlloc dt=12 heapalloc_value=3121152 +HeapAlloc dt=11 heapalloc_value=3129344 +HeapAlloc dt=15 heapalloc_value=3137536 +HeapAlloc dt=15 heapalloc_value=3145728 +HeapAlloc dt=18 heapalloc_value=3153920 +HeapAlloc dt=13 heapalloc_value=3162112 +HeapAlloc dt=12 heapalloc_value=3170304 +HeapAlloc dt=16 heapalloc_value=3178496 +HeapAlloc dt=11 heapalloc_value=3186688 +HeapAlloc dt=12 heapalloc_value=3194880 +HeapAlloc dt=11 heapalloc_value=3203072 +HeapAlloc dt=13 heapalloc_value=3211264 +HeapAlloc dt=12 heapalloc_value=3219456 +HeapAlloc dt=11 heapalloc_value=3227648 +HeapAlloc dt=13 heapalloc_value=3244032 +HeapAlloc dt=734 heapalloc_value=3252224 +HeapAlloc dt=16 heapalloc_value=3260416 +HeapAlloc dt=8 heapalloc_value=3268608 +HeapAlloc dt=5 heapalloc_value=3276800 +HeapAlloc dt=8 heapalloc_value=3284992 +HeapAlloc dt=88 heapalloc_value=3293184 +HeapAlloc dt=7 heapalloc_value=3301376 +HeapAlloc dt=5 heapalloc_value=3309568 +HeapAlloc dt=6 heapalloc_value=3317760 +HeapAlloc 
dt=5 heapalloc_value=3325952 +HeapAlloc dt=5 heapalloc_value=3334144 +HeapAlloc dt=5 heapalloc_value=3342336 +HeapAlloc dt=5 heapalloc_value=3350528 +HeapAlloc dt=6 heapalloc_value=3358720 +HeapAlloc dt=5 heapalloc_value=3366912 +HeapAlloc dt=5 heapalloc_value=3375104 +HeapAlloc dt=7 heapalloc_value=3383296 +HeapAlloc dt=6 heapalloc_value=3391488 +HeapAlloc dt=5 heapalloc_value=3399680 +HeapAlloc dt=5 heapalloc_value=3407872 +HeapAlloc dt=5 heapalloc_value=3416064 +HeapAlloc dt=6 heapalloc_value=3424256 +HeapAlloc dt=5 heapalloc_value=3432448 +HeapAlloc dt=5 heapalloc_value=3440640 +HeapAlloc dt=5 heapalloc_value=3448832 +HeapAlloc dt=6 heapalloc_value=3457024 +HeapAlloc dt=5 heapalloc_value=3465216 +HeapAlloc dt=38 heapalloc_value=3473408 +HeapAlloc dt=6 heapalloc_value=3481600 +HeapAlloc dt=5 heapalloc_value=3489792 +HeapAlloc dt=6 heapalloc_value=3497984 +HeapAlloc dt=5 heapalloc_value=3506176 +HeapAlloc dt=6 heapalloc_value=3514368 +HeapAlloc dt=5 heapalloc_value=3522560 +HeapAlloc dt=5 heapalloc_value=3530752 +HeapAlloc dt=5 heapalloc_value=3538944 +HeapAlloc dt=5 heapalloc_value=3547136 +HeapAlloc dt=6 heapalloc_value=3555328 +HeapAlloc dt=5 heapalloc_value=3563520 +HeapAlloc dt=5 heapalloc_value=3571712 +HeapAlloc dt=5 heapalloc_value=3579904 +HeapAlloc dt=5 heapalloc_value=3588096 +HeapAlloc dt=6 heapalloc_value=3596288 +HeapAlloc dt=10 heapalloc_value=3678208 +HeapAlloc dt=2433 heapalloc_value=3686400 +HeapAlloc dt=6 heapalloc_value=3694592 +HeapAlloc dt=6 heapalloc_value=3702784 +HeapAlloc dt=6 heapalloc_value=3710976 +HeapAlloc dt=5 heapalloc_value=3719168 +HeapAlloc dt=6 heapalloc_value=3727360 +HeapAlloc dt=5 heapalloc_value=3735552 +HeapAlloc dt=5 heapalloc_value=3743744 +HeapAlloc dt=5 heapalloc_value=3751936 +HeapAlloc dt=6 heapalloc_value=3760128 +HeapAlloc dt=5 heapalloc_value=3768320 +HeapAlloc dt=11 heapalloc_value=3776512 +HeapAlloc dt=31 heapalloc_value=3784704 +HeapAlloc dt=7 heapalloc_value=3792896 +HeapAlloc dt=6 heapalloc_value=3801088 
+HeapAlloc dt=5 heapalloc_value=3809280 +HeapAlloc dt=6 heapalloc_value=3817472 +HeapAlloc dt=5 heapalloc_value=3825664 +HeapAlloc dt=5 heapalloc_value=3833856 +HeapAlloc dt=6 heapalloc_value=3842048 +HeapAlloc dt=5 heapalloc_value=3850240 +HeapAlloc dt=5 heapalloc_value=3858432 +HeapAlloc dt=6 heapalloc_value=3866624 +HeapAlloc dt=5 heapalloc_value=3874816 +HeapAlloc dt=5 heapalloc_value=3883008 +HeapAlloc dt=78 heapalloc_value=3891200 +HeapAlloc dt=7 heapalloc_value=3899392 +HeapAlloc dt=6 heapalloc_value=3907584 +HeapAlloc dt=5 heapalloc_value=3915776 +HeapAlloc dt=5 heapalloc_value=3923968 +HeapAlloc dt=5 heapalloc_value=3932160 +HeapAlloc dt=6 heapalloc_value=3940352 +HeapAlloc dt=5 heapalloc_value=3948544 +HeapAlloc dt=5 heapalloc_value=3956736 +HeapAlloc dt=5 heapalloc_value=3964928 +HeapAlloc dt=5 heapalloc_value=3973120 +HeapAlloc dt=6 heapalloc_value=3981312 +HeapAlloc dt=5 heapalloc_value=3989504 +HeapAlloc dt=6 heapalloc_value=3997696 +GCBegin dt=38 gc_seq=1 stack=22 +HeapAlloc dt=42 heapalloc_value=4005888 +HeapAlloc dt=14 heapalloc_value=4014080 +GoCreate dt=73 new_g=18 new_stack=23 stack=24 +GoBlock dt=235 reason_string=12 stack=25 +GoStart dt=11 g=18 g_seq=1 +HeapAlloc dt=16 heapalloc_value=4022272 +GoUnblock dt=15 g=1 g_seq=5 stack=26 +GoBlock dt=9 reason_string=15 stack=27 +GoStart dt=12 g=1 g_seq=6 +GoCreate dt=44 new_g=19 new_stack=23 stack=24 +GoBlock dt=4 reason_string=12 stack=25 +GoStart dt=3 g=19 g_seq=1 +GoUnblock dt=5 g=1 g_seq=7 stack=26 +GoBlock dt=2 reason_string=15 stack=27 +GoStart dt=2 g=1 g_seq=8 +GoCreate dt=8 new_g=20 new_stack=23 stack=24 +GoBlock dt=3 reason_string=12 stack=25 +GoStart dt=2 g=20 g_seq=1 +GoUnblock dt=3 g=1 g_seq=9 stack=26 +GoBlock dt=1 reason_string=15 stack=27 +GoStart dt=2 g=1 g_seq=10 +GoCreate dt=6 new_g=21 new_stack=23 stack=24 +GoBlock dt=3 reason_string=12 stack=25 +GoStart dt=1 g=21 g_seq=1 +GoUnblock dt=6 g=1 g_seq=11 stack=26 +GoBlock dt=1 reason_string=15 stack=27 +GoStart dt=8 g=1 g_seq=12 
+GoCreate dt=7 new_g=22 new_stack=23 stack=24 +GoBlock dt=2 reason_string=12 stack=25 +GoStart dt=2 g=22 g_seq=1 +GoUnblock dt=2 g=1 g_seq=13 stack=26 +GoBlock dt=6 reason_string=15 stack=27 +GoStart dt=4 g=1 g_seq=14 +GoCreate dt=15 new_g=23 new_stack=23 stack=24 +GoBlock dt=166 reason_string=12 stack=25 +GoStart dt=4 g=23 g_seq=1 +GoUnblock dt=3 g=1 g_seq=15 stack=26 +GoBlock dt=3 reason_string=15 stack=27 +GoStart dt=3 g=1 g_seq=16 +HeapAlloc dt=18 heapalloc_value=4030464 +GoCreate dt=11 new_g=24 new_stack=23 stack=24 +GoBlock dt=3 reason_string=12 stack=25 +GoStart dt=1 g=24 g_seq=1 +GoUnblock dt=3 g=1 g_seq=17 stack=26 +GoBlock dt=2 reason_string=15 stack=27 +GoStart dt=1 g=1 g_seq=18 +GoCreate dt=6 new_g=25 new_stack=23 stack=24 +GoBlock dt=3 reason_string=12 stack=25 +GoStart dt=1 g=25 g_seq=1 +GoUnblock dt=2 g=1 g_seq=19 stack=26 +GoBlock dt=2 reason_string=15 stack=27 +GoStart dt=1 g=1 g_seq=20 +STWBegin dt=118 kind_string=22 stack=28 +GoStatus dt=1398 g=4 m=18446744073709551615 gstatus=4 +GoUnblock dt=83 g=4 g_seq=1 stack=29 +ProcsChange dt=91 procs_value=8 stack=30 +STWEnd dt=31 +GCMarkAssistBegin dt=149 stack=31 +GCMarkAssistEnd dt=1458 +GoBlock dt=23 reason_string=19 stack=21 +GoStart dt=166 g=4 g_seq=2 +GoBlock dt=22 reason_string=15 stack=32 +GoUnblock dt=35 g=23 g_seq=2 stack=0 +GoStart dt=4 g=23 g_seq=3 +GoLabel dt=1 label_string=4 +GoBlock dt=441 reason_string=15 stack=27 +ProcStop dt=23 +ProcStart dt=16781 p=0 p_seq=6 +GoUnblock dt=28 g=1 g_seq=27 stack=0 +GoStart dt=162 g=1 g_seq=28 +HeapAlloc dt=69 heapalloc_value=4663024 +HeapAlloc dt=23 heapalloc_value=4671216 +HeapAlloc dt=15 heapalloc_value=4679408 +HeapAlloc dt=10 heapalloc_value=4687600 +HeapAlloc dt=12 heapalloc_value=4695792 +HeapAlloc dt=8 heapalloc_value=4703984 +HeapAlloc dt=6 heapalloc_value=4712176 +HeapAlloc dt=12 heapalloc_value=4720368 +HeapAlloc dt=12 heapalloc_value=4728560 +HeapAlloc dt=12 heapalloc_value=4736752 +HeapAlloc dt=15 heapalloc_value=4744944 +HeapAlloc dt=9 
heapalloc_value=4753136 +HeapAlloc dt=9 heapalloc_value=4761328 +HeapAlloc dt=7 heapalloc_value=4769520 +HeapAlloc dt=8 heapalloc_value=4777712 +HeapAlloc dt=9 heapalloc_value=4785904 +HeapAlloc dt=112 heapalloc_value=4794096 +HeapAlloc dt=7 heapalloc_value=4802288 +HeapAlloc dt=9 heapalloc_value=4810480 +HeapAlloc dt=13 heapalloc_value=4818672 +HeapAlloc dt=14 heapalloc_value=4826864 +HeapAlloc dt=6 heapalloc_value=4835056 +HeapAlloc dt=5 heapalloc_value=4843248 +HeapAlloc dt=6 heapalloc_value=4851440 +HeapAlloc dt=14 heapalloc_value=4859632 +HeapAlloc dt=10 heapalloc_value=4867824 +HeapAlloc dt=10 heapalloc_value=4876016 +HeapAlloc dt=6 heapalloc_value=4884208 +HeapAlloc dt=9 heapalloc_value=4892400 +HeapAlloc dt=72 heapalloc_value=4900592 +HeapAlloc dt=6 heapalloc_value=4908784 +HeapAlloc dt=5 heapalloc_value=4916976 +HeapAlloc dt=6 heapalloc_value=4925168 +HeapAlloc dt=6 heapalloc_value=4933360 +HeapAlloc dt=9 heapalloc_value=4941552 +HeapAlloc dt=46 heapalloc_value=4949744 +HeapAlloc dt=10 heapalloc_value=4957936 +HeapAlloc dt=6 heapalloc_value=4966128 +HeapAlloc dt=6 heapalloc_value=4974320 +HeapAlloc dt=6 heapalloc_value=4982512 +HeapAlloc dt=5 heapalloc_value=4990704 +HeapAlloc dt=6 heapalloc_value=4998896 +HeapAlloc dt=45 heapalloc_value=5007088 +HeapAlloc dt=6 heapalloc_value=5015280 +HeapAlloc dt=9 heapalloc_value=5023472 +HeapAlloc dt=6 heapalloc_value=5031664 +HeapAlloc dt=5 heapalloc_value=5039856 +HeapAlloc dt=6 heapalloc_value=5048048 +HeapAlloc dt=6 heapalloc_value=5056240 +HeapAlloc dt=15 heapalloc_value=5138160 +HeapAlloc dt=81 heapalloc_value=5146352 +HeapAlloc dt=6 heapalloc_value=5154544 +HeapAlloc dt=6 heapalloc_value=5162736 +HeapAlloc dt=5 heapalloc_value=5170928 +HeapAlloc dt=6 heapalloc_value=5179120 +HeapAlloc dt=5 heapalloc_value=5187312 +HeapAlloc dt=6 heapalloc_value=5195504 +HeapAlloc dt=7 heapalloc_value=5203696 +HeapAlloc dt=5 heapalloc_value=5211888 +HeapAlloc dt=6 heapalloc_value=5220080 +HeapAlloc dt=6 heapalloc_value=5228272 
+HeapAlloc dt=37 heapalloc_value=5236464 +HeapAlloc dt=7 heapalloc_value=5244656 +HeapAlloc dt=6 heapalloc_value=5252848 +HeapAlloc dt=5 heapalloc_value=5261040 +HeapAlloc dt=8 heapalloc_value=5269232 +HeapAlloc dt=6 heapalloc_value=5277424 +HeapAlloc dt=6 heapalloc_value=5285616 +HeapAlloc dt=5 heapalloc_value=5293808 +HeapAlloc dt=7 heapalloc_value=5302000 +HeapAlloc dt=5 heapalloc_value=5310192 +HeapAlloc dt=5 heapalloc_value=5318384 +HeapAlloc dt=6 heapalloc_value=5326576 +HeapAlloc dt=7 heapalloc_value=5334768 +HeapAlloc dt=6 heapalloc_value=5342960 +HeapAlloc dt=5 heapalloc_value=5351152 +HeapAlloc dt=6 heapalloc_value=5359344 +HeapAlloc dt=5 heapalloc_value=5367536 +HeapAlloc dt=13 heapalloc_value=5375728 +HeapAlloc dt=6 heapalloc_value=5383920 +HeapAlloc dt=100 heapalloc_value=5392112 +HeapAlloc dt=8 heapalloc_value=5400304 +HeapAlloc dt=6 heapalloc_value=5408496 +HeapAlloc dt=6 heapalloc_value=5416688 +HeapAlloc dt=5 heapalloc_value=5424880 +HeapAlloc dt=6 heapalloc_value=5433072 +HeapAlloc dt=33 heapalloc_value=5441264 +HeapAlloc dt=7 heapalloc_value=5449456 +HeapAlloc dt=5 heapalloc_value=5457648 +HeapAlloc dt=8 heapalloc_value=5465840 +HeapAlloc dt=6 heapalloc_value=5474032 +HeapAlloc dt=5 heapalloc_value=5482224 +HeapAlloc dt=6 heapalloc_value=5490416 +HeapAlloc dt=5 heapalloc_value=5498608 +HeapAlloc dt=6 heapalloc_value=5506800 +HeapAlloc dt=6 heapalloc_value=5514992 +HeapAlloc dt=5 heapalloc_value=5523184 +HeapAlloc dt=12 heapalloc_value=5531376 +HeapAlloc dt=6 heapalloc_value=5539568 +HeapAlloc dt=6 heapalloc_value=5547760 +HeapAlloc dt=5 heapalloc_value=5555952 +HeapAlloc dt=6 heapalloc_value=5564144 +HeapAlloc dt=5 heapalloc_value=5572336 +HeapAlloc dt=6 heapalloc_value=5580528 +HeapAlloc dt=5 heapalloc_value=5588720 +HeapAlloc dt=7 heapalloc_value=5596912 +HeapAlloc dt=6 heapalloc_value=5605104 +HeapAlloc dt=5 heapalloc_value=5613296 +HeapAlloc dt=6 heapalloc_value=5621488 +HeapAlloc dt=5 heapalloc_value=5629680 +HeapAlloc dt=6 
heapalloc_value=5637872 +HeapAlloc dt=6 heapalloc_value=5646064 +HeapAlloc dt=37 heapalloc_value=5654256 +HeapAlloc dt=7 heapalloc_value=5662448 +HeapAlloc dt=6 heapalloc_value=5670640 +HeapAlloc dt=5 heapalloc_value=5678832 +HeapAlloc dt=6 heapalloc_value=5687024 +HeapAlloc dt=5 heapalloc_value=5695216 +HeapAlloc dt=6 heapalloc_value=5703408 +HeapAlloc dt=6 heapalloc_value=5711600 +HeapAlloc dt=5 heapalloc_value=5719792 +HeapAlloc dt=5 heapalloc_value=5727984 +HeapAlloc dt=6 heapalloc_value=5736176 +HeapAlloc dt=6 heapalloc_value=5744368 +HeapAlloc dt=5 heapalloc_value=5752560 +HeapAlloc dt=5 heapalloc_value=5760752 +GoBlock dt=15 reason_string=19 stack=21 +ProcStop dt=178 +ProcStart dt=17613 p=4 p_seq=3 +ProcStop dt=26 +ProcStart dt=3944 p=0 p_seq=9 +ProcStop dt=12 +ProcStart dt=16762 p=4 p_seq=6 +ProcStop dt=14 +ProcStart dt=9275 p=0 p_seq=12 +ProcStop dt=9 +ProcStart dt=16732 p=0 p_seq=13 +GoUnblock dt=9 g=1 g_seq=38 stack=0 +GoStart dt=84 g=1 g_seq=39 +HeapAlloc dt=23 heapalloc_value=9631048 +HeapAlloc dt=24 heapalloc_value=9639240 +HeapAlloc dt=15 heapalloc_value=9647432 +HeapAlloc dt=15 heapalloc_value=9655624 +HeapAlloc dt=15 heapalloc_value=9663816 +HeapAlloc dt=16 heapalloc_value=9672008 +HeapAlloc dt=14 heapalloc_value=9680200 +HeapAlloc dt=18 heapalloc_value=9688392 +HeapAlloc dt=14 heapalloc_value=9696584 +HeapAlloc dt=19 heapalloc_value=9704776 +HeapAlloc dt=15 heapalloc_value=9712968 +HeapAlloc dt=76 heapalloc_value=9721160 +HeapAlloc dt=18 heapalloc_value=9729352 +HeapAlloc dt=17 heapalloc_value=9737544 +HeapAlloc dt=14 heapalloc_value=9745736 +HeapAlloc dt=15 heapalloc_value=9753928 +HeapAlloc dt=16 heapalloc_value=9762120 +HeapAlloc dt=28 heapalloc_value=9770312 +HeapAlloc dt=23 heapalloc_value=9778504 +HeapAlloc dt=19 heapalloc_value=9786696 +HeapAlloc dt=14 heapalloc_value=9794888 +HeapAlloc dt=26 heapalloc_value=9803080 +HeapAlloc dt=18 heapalloc_value=9811272 +HeapAlloc dt=16 heapalloc_value=9819464 +HeapAlloc dt=15 heapalloc_value=9827656 
+HeapAlloc dt=19 heapalloc_value=9835848 +HeapAlloc dt=16 heapalloc_value=9844040 +HeapAlloc dt=15 heapalloc_value=9852232 +HeapAlloc dt=15 heapalloc_value=9860424 +HeapAlloc dt=15 heapalloc_value=9868616 +HeapAlloc dt=15 heapalloc_value=9876808 +HeapAlloc dt=15 heapalloc_value=9885000 +HeapAlloc dt=15 heapalloc_value=9893192 +HeapAlloc dt=15 heapalloc_value=9901384 +HeapAlloc dt=16 heapalloc_value=9909576 +HeapAlloc dt=16 heapalloc_value=9917768 +HeapAlloc dt=15 heapalloc_value=9925960 +HeapAlloc dt=15 heapalloc_value=9934152 +HeapAlloc dt=15 heapalloc_value=9942344 +HeapAlloc dt=15 heapalloc_value=9950536 +HeapAlloc dt=16 heapalloc_value=9958728 +HeapAlloc dt=15 heapalloc_value=9966920 +HeapAlloc dt=63 heapalloc_value=9975112 +HeapAlloc dt=20 heapalloc_value=9983304 +HeapAlloc dt=14 heapalloc_value=9991496 +HeapAlloc dt=15 heapalloc_value=9999688 +HeapAlloc dt=14 heapalloc_value=10007880 +HeapAlloc dt=15 heapalloc_value=10016072 +HeapAlloc dt=16 heapalloc_value=10024264 +HeapAlloc dt=16 heapalloc_value=10032456 +HeapAlloc dt=16 heapalloc_value=10040648 +HeapAlloc dt=16 heapalloc_value=10048840 +HeapAlloc dt=15 heapalloc_value=10057032 +HeapAlloc dt=16 heapalloc_value=10065224 +HeapAlloc dt=14 heapalloc_value=10073416 +HeapAlloc dt=16 heapalloc_value=10081608 +HeapAlloc dt=15 heapalloc_value=10089800 +HeapAlloc dt=16 heapalloc_value=10097992 +HeapAlloc dt=16 heapalloc_value=10106184 +HeapAlloc dt=17 heapalloc_value=10114376 +HeapAlloc dt=15 heapalloc_value=10122568 +HeapAlloc dt=33 heapalloc_value=10327368 +HeapAlloc dt=367 heapalloc_value=10335560 +HeapAlloc dt=21 heapalloc_value=10343752 +HeapAlloc dt=16 heapalloc_value=10351944 +HeapAlloc dt=15 heapalloc_value=10360136 +HeapAlloc dt=16 heapalloc_value=10368328 +HeapAlloc dt=16 heapalloc_value=10376520 +HeapAlloc dt=16 heapalloc_value=10384712 +HeapAlloc dt=15 heapalloc_value=10392904 +HeapAlloc dt=15 heapalloc_value=10401096 +HeapAlloc dt=15 heapalloc_value=10409288 +HeapAlloc dt=15 heapalloc_value=10417480 
+HeapAlloc dt=15 heapalloc_value=10425672 +HeapAlloc dt=17 heapalloc_value=10433864 +HeapAlloc dt=15 heapalloc_value=10442056 +HeapAlloc dt=15 heapalloc_value=10450248 +HeapAlloc dt=15 heapalloc_value=10458440 +HeapAlloc dt=15 heapalloc_value=10466632 +HeapAlloc dt=15 heapalloc_value=10474824 +HeapAlloc dt=15 heapalloc_value=10483016 +HeapAlloc dt=14 heapalloc_value=10491208 +HeapAlloc dt=22 heapalloc_value=10499400 +HeapAlloc dt=7 heapalloc_value=10507592 +HeapAlloc dt=9 heapalloc_value=10515784 +HeapAlloc dt=7 heapalloc_value=10523976 +HeapAlloc dt=6 heapalloc_value=10532168 +HeapAlloc dt=5 heapalloc_value=10540360 +HeapAlloc dt=6 heapalloc_value=10548552 +HeapAlloc dt=6 heapalloc_value=10556744 +HeapAlloc dt=5 heapalloc_value=10564936 +HeapAlloc dt=6 heapalloc_value=10573128 +HeapAlloc dt=6 heapalloc_value=10581320 +HeapAlloc dt=5 heapalloc_value=10589512 +HeapAlloc dt=6 heapalloc_value=10597704 +HeapAlloc dt=6 heapalloc_value=10605896 +HeapAlloc dt=5 heapalloc_value=10614088 +HeapAlloc dt=6 heapalloc_value=10622280 +HeapAlloc dt=5 heapalloc_value=10630472 +HeapAlloc dt=6 heapalloc_value=10638664 +HeapAlloc dt=6 heapalloc_value=10646856 +HeapAlloc dt=5 heapalloc_value=10655048 +HeapAlloc dt=6 heapalloc_value=10663240 +HeapAlloc dt=5 heapalloc_value=10671432 +HeapAlloc dt=6 heapalloc_value=10679624 +HeapAlloc dt=5 heapalloc_value=10687816 +HeapAlloc dt=221 heapalloc_value=10696008 +HeapAlloc dt=9 heapalloc_value=10704200 +HeapAlloc dt=6 heapalloc_value=10712392 +HeapAlloc dt=5 heapalloc_value=10720584 +HeapAlloc dt=6 heapalloc_value=10728776 +HeapAlloc dt=6 heapalloc_value=10736968 +HeapAlloc dt=5 heapalloc_value=10745160 +HeapAlloc dt=6 heapalloc_value=10753352 +HeapAlloc dt=5 heapalloc_value=10761544 +HeapAlloc dt=6 heapalloc_value=10769736 +HeapAlloc dt=5 heapalloc_value=10777928 +HeapAlloc dt=5 heapalloc_value=10786120 +HeapAlloc dt=6 heapalloc_value=10794312 +HeapAlloc dt=6 heapalloc_value=10802504 +HeapAlloc dt=5 heapalloc_value=10810696 +HeapAlloc dt=6 
heapalloc_value=10818888 +HeapAlloc dt=5 heapalloc_value=10827080 +HeapAlloc dt=6 heapalloc_value=10835272 +HeapAlloc dt=5 heapalloc_value=10843464 +HeapAlloc dt=6 heapalloc_value=10851656 +GoBlock dt=11 reason_string=19 stack=21 +ProcStop dt=119 +ProcStart dt=17350 p=2 p_seq=7 +ProcStop dt=13 +ProcStart dt=1133 p=0 p_seq=16 +ProcStop dt=8 +ProcStart dt=16748 p=0 p_seq=17 +GoUnblock dt=7 g=1 g_seq=42 stack=0 +GoStart dt=84 g=1 g_seq=43 +HeapAlloc dt=15 heapalloc_value=11883848 +HeapAlloc dt=10 heapalloc_value=11892040 +HeapAlloc dt=6 heapalloc_value=11900232 +HeapAlloc dt=6 heapalloc_value=11908424 +HeapAlloc dt=6 heapalloc_value=11916616 +HeapAlloc dt=6 heapalloc_value=11924808 +HeapAlloc dt=8 heapalloc_value=11933000 +HeapAlloc dt=5 heapalloc_value=11941192 +HeapAlloc dt=6 heapalloc_value=11949384 +HeapAlloc dt=62 heapalloc_value=11957576 +HeapAlloc dt=7 heapalloc_value=11965768 +HeapAlloc dt=6 heapalloc_value=11973960 +HeapAlloc dt=6 heapalloc_value=11982152 +HeapAlloc dt=5 heapalloc_value=11990344 +HeapAlloc dt=6 heapalloc_value=11998536 +HeapAlloc dt=6 heapalloc_value=12006728 +HeapAlloc dt=5 heapalloc_value=12014920 +HeapAlloc dt=6 heapalloc_value=12023112 +HeapAlloc dt=5 heapalloc_value=12031304 +HeapAlloc dt=6 heapalloc_value=12039496 +HeapAlloc dt=5 heapalloc_value=12047688 +HeapAlloc dt=6 heapalloc_value=12055880 +HeapAlloc dt=6 heapalloc_value=12064072 +HeapAlloc dt=6 heapalloc_value=12072264 +HeapAlloc dt=5 heapalloc_value=12080456 +HeapAlloc dt=352 heapalloc_value=12088648 +HeapAlloc dt=14 heapalloc_value=12096840 +HeapAlloc dt=7 heapalloc_value=12105032 +HeapAlloc dt=5 heapalloc_value=12113224 +HeapAlloc dt=6 heapalloc_value=12121416 +HeapAlloc dt=41 heapalloc_value=12129608 +HeapAlloc dt=7 heapalloc_value=12137800 +HeapAlloc dt=5 heapalloc_value=12145992 +HeapAlloc dt=6 heapalloc_value=12154184 +HeapAlloc dt=6 heapalloc_value=12162376 +HeapAlloc dt=6 heapalloc_value=12170568 +HeapAlloc dt=5 heapalloc_value=12178760 +HeapAlloc dt=6 
heapalloc_value=12186952 +HeapAlloc dt=5 heapalloc_value=12195144 +HeapAlloc dt=7 heapalloc_value=12203336 +HeapAlloc dt=5 heapalloc_value=12211528 +HeapAlloc dt=6 heapalloc_value=12219720 +HeapAlloc dt=5 heapalloc_value=12227912 +HeapAlloc dt=6 heapalloc_value=12236104 +HeapAlloc dt=6 heapalloc_value=12244296 +HeapAlloc dt=6 heapalloc_value=12252488 +HeapAlloc dt=5 heapalloc_value=12260680 +HeapAlloc dt=46 heapalloc_value=12268872 +HeapAlloc dt=6 heapalloc_value=12277064 +HeapAlloc dt=6 heapalloc_value=12285256 +HeapAlloc dt=6 heapalloc_value=12293448 +HeapAlloc dt=5 heapalloc_value=12301640 +HeapAlloc dt=6 heapalloc_value=12309832 +HeapAlloc dt=5 heapalloc_value=12318024 +HeapAlloc dt=6 heapalloc_value=12326216 +HeapAlloc dt=5 heapalloc_value=12334408 +HeapAlloc dt=6 heapalloc_value=12342600 +HeapAlloc dt=5 heapalloc_value=12350792 +HeapAlloc dt=6 heapalloc_value=12358984 +HeapAlloc dt=5 heapalloc_value=12367176 +HeapAlloc dt=6 heapalloc_value=12375368 +HeapAlloc dt=37 heapalloc_value=12383560 +HeapAlloc dt=7 heapalloc_value=12391752 +HeapAlloc dt=6 heapalloc_value=12399944 +HeapAlloc dt=5 heapalloc_value=12408136 +HeapAlloc dt=6 heapalloc_value=12416328 +HeapAlloc dt=6 heapalloc_value=12424520 +HeapAlloc dt=13 heapalloc_value=12686664 +HeapAlloc dt=2516 heapalloc_value=12694856 +HeapAlloc dt=9 heapalloc_value=12703048 +HeapAlloc dt=8 heapalloc_value=12711240 +HeapAlloc dt=7 heapalloc_value=12719432 +HeapAlloc dt=8 heapalloc_value=12727624 +HeapAlloc dt=7 heapalloc_value=12735816 +HeapAlloc dt=8 heapalloc_value=12744008 +HeapAlloc dt=7 heapalloc_value=12752200 +HeapAlloc dt=8 heapalloc_value=12760392 +HeapAlloc dt=7 heapalloc_value=12768584 +HeapAlloc dt=7 heapalloc_value=12776776 +HeapAlloc dt=8 heapalloc_value=12784968 +HeapAlloc dt=7 heapalloc_value=12793160 +HeapAlloc dt=8 heapalloc_value=12801352 +HeapAlloc dt=8 heapalloc_value=12809544 +HeapAlloc dt=7 heapalloc_value=12817736 +HeapAlloc dt=7 heapalloc_value=12825928 +HeapAlloc dt=8 heapalloc_value=12834120 
+HeapAlloc dt=7 heapalloc_value=12842312 +HeapAlloc dt=8 heapalloc_value=12850504 +HeapAlloc dt=8 heapalloc_value=12858696 +HeapAlloc dt=7 heapalloc_value=12866888 +HeapAlloc dt=13 heapalloc_value=12875080 +HeapAlloc dt=8 heapalloc_value=12883272 +HeapAlloc dt=5 heapalloc_value=12891464 +HeapAlloc dt=6 heapalloc_value=12899656 +HeapAlloc dt=6 heapalloc_value=12907848 +HeapAlloc dt=5 heapalloc_value=12916040 +HeapAlloc dt=6 heapalloc_value=12924232 +HeapAlloc dt=6 heapalloc_value=12932424 +HeapAlloc dt=5 heapalloc_value=12940616 +HeapAlloc dt=6 heapalloc_value=12948808 +HeapAlloc dt=5 heapalloc_value=12957000 +HeapAlloc dt=6 heapalloc_value=12965192 +HeapAlloc dt=5 heapalloc_value=12973384 +HeapAlloc dt=6 heapalloc_value=12981576 +HeapAlloc dt=6 heapalloc_value=12989768 +HeapAlloc dt=5 heapalloc_value=12997960 +HeapAlloc dt=6 heapalloc_value=13006152 +HeapAlloc dt=6 heapalloc_value=13014344 +HeapAlloc dt=5 heapalloc_value=13022536 +HeapAlloc dt=6 heapalloc_value=13030728 +HeapAlloc dt=5 heapalloc_value=13038920 +HeapAlloc dt=62 heapalloc_value=13047112 +HeapAlloc dt=39 heapalloc_value=13055304 +HeapAlloc dt=7 heapalloc_value=13063496 +HeapAlloc dt=6 heapalloc_value=13071688 +HeapAlloc dt=6 heapalloc_value=13079880 +HeapAlloc dt=6 heapalloc_value=13088072 +HeapAlloc dt=5 heapalloc_value=13096264 +HeapAlloc dt=5 heapalloc_value=13104456 +HeapAlloc dt=6 heapalloc_value=13112648 +HeapAlloc dt=6 heapalloc_value=13120840 +HeapAlloc dt=5 heapalloc_value=13129032 +HeapAlloc dt=10 heapalloc_value=13137224 +HeapAlloc dt=6 heapalloc_value=13145416 +HeapAlloc dt=5 heapalloc_value=13153608 +HeapAlloc dt=6 heapalloc_value=13161800 +GoBlock dt=12 reason_string=19 stack=21 +ProcStop dt=124 +ProcStart dt=17212 p=2 p_seq=9 +ProcStop dt=13 +ProcStart dt=1068 p=0 p_seq=20 +ProcStop dt=8 +ProcStart dt=16756 p=0 p_seq=21 +GoUnblock dt=11 g=1 g_seq=46 stack=0 +GoStart dt=92 g=1 g_seq=47 +HeapAlloc dt=19 heapalloc_value=14193992 +HeapAlloc dt=10 heapalloc_value=14202184 +HeapAlloc dt=6 
heapalloc_value=14210376 +HeapAlloc dt=6 heapalloc_value=14218568 +HeapAlloc dt=6 heapalloc_value=14226760 +HeapAlloc dt=6 heapalloc_value=14234952 +HeapAlloc dt=6 heapalloc_value=14243144 +HeapAlloc dt=6 heapalloc_value=14251336 +HeapAlloc dt=6 heapalloc_value=14259528 +HeapAlloc dt=6 heapalloc_value=14267720 +HeapAlloc dt=5 heapalloc_value=14275912 +HeapAlloc dt=6 heapalloc_value=14284104 +HeapAlloc dt=6 heapalloc_value=14292296 +HeapAlloc dt=6 heapalloc_value=14300488 +HeapAlloc dt=60 heapalloc_value=14308680 +HeapAlloc dt=8 heapalloc_value=14316872 +HeapAlloc dt=6 heapalloc_value=14325064 +HeapAlloc dt=6 heapalloc_value=14333256 +HeapAlloc dt=6 heapalloc_value=14341448 +HeapAlloc dt=5 heapalloc_value=14349640 +HeapAlloc dt=6 heapalloc_value=14357832 +HeapAlloc dt=6 heapalloc_value=14366024 +HeapAlloc dt=6 heapalloc_value=14374216 +HeapAlloc dt=6 heapalloc_value=14382408 +HeapAlloc dt=8 heapalloc_value=14390600 +HeapAlloc dt=6 heapalloc_value=14398792 +HeapAlloc dt=6 heapalloc_value=14406984 +HeapAlloc dt=6 heapalloc_value=14415176 +HeapAlloc dt=6 heapalloc_value=14423368 +HeapAlloc dt=5 heapalloc_value=14431560 +HeapAlloc dt=6 heapalloc_value=14439752 +HeapAlloc dt=7 heapalloc_value=14447944 +HeapAlloc dt=5 heapalloc_value=14456136 +HeapAlloc dt=6 heapalloc_value=14464328 +HeapAlloc dt=6 heapalloc_value=14472520 +HeapAlloc dt=5 heapalloc_value=14480712 +HeapAlloc dt=6 heapalloc_value=14488904 +HeapAlloc dt=6 heapalloc_value=14497096 +HeapAlloc dt=6 heapalloc_value=14505288 +HeapAlloc dt=6 heapalloc_value=14513480 +HeapAlloc dt=6 heapalloc_value=14521672 +HeapAlloc dt=6 heapalloc_value=14529864 +HeapAlloc dt=5 heapalloc_value=14538056 +HeapAlloc dt=6 heapalloc_value=14546248 +HeapAlloc dt=6 heapalloc_value=14554440 +HeapAlloc dt=5 heapalloc_value=14562632 +HeapAlloc dt=6 heapalloc_value=14570824 +HeapAlloc dt=6 heapalloc_value=14579016 +HeapAlloc dt=6 heapalloc_value=14587208 +HeapAlloc dt=6 heapalloc_value=14595400 +HeapAlloc dt=5 heapalloc_value=14603592 
+HeapAlloc dt=6 heapalloc_value=14611784 +HeapAlloc dt=45 heapalloc_value=14619976 +HeapAlloc dt=7 heapalloc_value=14628168 +HeapAlloc dt=6 heapalloc_value=14636360 +HeapAlloc dt=7 heapalloc_value=14644552 +HeapAlloc dt=5 heapalloc_value=14652744 +HeapAlloc dt=6 heapalloc_value=14660936 +HeapAlloc dt=6 heapalloc_value=14669128 +HeapAlloc dt=5 heapalloc_value=14677320 +HeapAlloc dt=6 heapalloc_value=14685512 +HeapAlloc dt=6 heapalloc_value=14693704 +HeapAlloc dt=6 heapalloc_value=14701896 +HeapAlloc dt=15 heapalloc_value=14710088 +HeapAlloc dt=6 heapalloc_value=14718280 +HeapAlloc dt=5 heapalloc_value=14726472 +HeapAlloc dt=35 heapalloc_value=14734664 +HeapAlloc dt=7 heapalloc_value=14742856 +HeapAlloc dt=6 heapalloc_value=14751048 +HeapAlloc dt=6 heapalloc_value=14759240 +HeapAlloc dt=6 heapalloc_value=14767432 +HeapAlloc dt=6 heapalloc_value=14775624 +HeapAlloc dt=6 heapalloc_value=14783816 +HeapAlloc dt=6 heapalloc_value=14792008 +HeapAlloc dt=5 heapalloc_value=14800200 +HeapAlloc dt=6 heapalloc_value=14808392 +HeapAlloc dt=5 heapalloc_value=14816584 +HeapAlloc dt=6 heapalloc_value=14824776 +HeapAlloc dt=6 heapalloc_value=14832968 +HeapAlloc dt=6 heapalloc_value=14841160 +HeapAlloc dt=6 heapalloc_value=14849352 +HeapAlloc dt=45 heapalloc_value=14857544 +HeapAlloc dt=6 heapalloc_value=14865736 +HeapAlloc dt=5 heapalloc_value=14873928 +HeapAlloc dt=6 heapalloc_value=14882120 +HeapAlloc dt=6 heapalloc_value=14890312 +HeapAlloc dt=6 heapalloc_value=14898504 +HeapAlloc dt=6 heapalloc_value=14906696 +HeapAlloc dt=6 heapalloc_value=14914888 +HeapAlloc dt=5 heapalloc_value=14923080 +HeapAlloc dt=6 heapalloc_value=14931272 +HeapAlloc dt=6 heapalloc_value=14939464 +HeapAlloc dt=5 heapalloc_value=14947656 +HeapAlloc dt=6 heapalloc_value=14955848 +HeapAlloc dt=6 heapalloc_value=14964040 +HeapAlloc dt=6 heapalloc_value=14972232 +HeapAlloc dt=5 heapalloc_value=14980424 +HeapAlloc dt=6 heapalloc_value=14988616 +HeapAlloc dt=6 heapalloc_value=14996808 +HeapAlloc dt=5 
heapalloc_value=15005000 +HeapAlloc dt=6 heapalloc_value=15013192 +HeapAlloc dt=6 heapalloc_value=15021384 +HeapAlloc dt=6 heapalloc_value=15029576 +HeapAlloc dt=6 heapalloc_value=15037768 +HeapAlloc dt=6 heapalloc_value=15045960 +HeapAlloc dt=5 heapalloc_value=15054152 +HeapAlloc dt=6 heapalloc_value=15062344 +HeapAlloc dt=6 heapalloc_value=15070536 +HeapAlloc dt=6 heapalloc_value=15078728 +HeapAlloc dt=5 heapalloc_value=15086920 +HeapAlloc dt=6 heapalloc_value=15095112 +HeapAlloc dt=6 heapalloc_value=15103304 +HeapAlloc dt=5 heapalloc_value=15111496 +HeapAlloc dt=6 heapalloc_value=15119688 +HeapAlloc dt=6 heapalloc_value=15127880 +HeapAlloc dt=5 heapalloc_value=15136072 +HeapAlloc dt=51 heapalloc_value=15471944 +HeapAlloc dt=2533 heapalloc_value=15480136 +HeapAlloc dt=11 heapalloc_value=15488328 +HeapAlloc dt=9 heapalloc_value=15496520 +HeapAlloc dt=7 heapalloc_value=15504712 +HeapAlloc dt=9 heapalloc_value=15512904 +HeapAlloc dt=9 heapalloc_value=15521096 +HeapAlloc dt=7 heapalloc_value=15529288 +HeapAlloc dt=8 heapalloc_value=15537480 +HeapAlloc dt=8 heapalloc_value=15545672 +GoBlock dt=13 reason_string=19 stack=21 +ProcStop dt=116 +ProcStart dt=17265 p=2 p_seq=11 +ProcStop dt=10 +ProcStart dt=1450 p=0 p_seq=24 +ProcStop dt=9 +ProcStart dt=17026 p=0 p_seq=25 +GoUnblock dt=12 g=1 g_seq=50 stack=0 +GoStart dt=148 g=1 g_seq=51 +HeapAlloc dt=20 heapalloc_value=16577864 +HeapAlloc dt=15 heapalloc_value=16586056 +HeapAlloc dt=10 heapalloc_value=16594248 +HeapAlloc dt=11 heapalloc_value=16602440 +HeapAlloc dt=9 heapalloc_value=16610632 +HeapAlloc dt=9 heapalloc_value=16618824 +HeapAlloc dt=10 heapalloc_value=16627016 +HeapAlloc dt=9 heapalloc_value=16635208 +HeapAlloc dt=11 heapalloc_value=16643400 +HeapAlloc dt=11 heapalloc_value=16651592 +HeapAlloc dt=9 heapalloc_value=16659784 +HeapAlloc dt=11 heapalloc_value=16667976 +HeapAlloc dt=9 heapalloc_value=16676168 +HeapAlloc dt=10 heapalloc_value=16684360 +HeapAlloc dt=10 heapalloc_value=16692552 +HeapAlloc dt=10 
heapalloc_value=16700744 +HeapAlloc dt=11 heapalloc_value=16708936 +HeapAlloc dt=11 heapalloc_value=16717128 +HeapAlloc dt=9 heapalloc_value=16725320 +HeapAlloc dt=78 heapalloc_value=16733512 +HeapAlloc dt=14 heapalloc_value=16741704 +HeapAlloc dt=10 heapalloc_value=16749896 +HeapAlloc dt=11 heapalloc_value=16758088 +HeapAlloc dt=11 heapalloc_value=16766280 +HeapAlloc dt=10 heapalloc_value=16774472 +HeapAlloc dt=9 heapalloc_value=16782664 +HeapAlloc dt=10 heapalloc_value=16790856 +HeapAlloc dt=9 heapalloc_value=16799048 +HeapAlloc dt=21 heapalloc_value=16807240 +HeapAlloc dt=11 heapalloc_value=16815432 +HeapAlloc dt=9 heapalloc_value=16823624 +HeapAlloc dt=9 heapalloc_value=16831816 +HeapAlloc dt=9 heapalloc_value=16840008 +HeapAlloc dt=10 heapalloc_value=16848200 +HeapAlloc dt=11 heapalloc_value=16856392 +HeapAlloc dt=9 heapalloc_value=16864584 +HeapAlloc dt=6 heapalloc_value=16872776 +HeapAlloc dt=9 heapalloc_value=16880968 +HeapAlloc dt=6 heapalloc_value=16889160 +HeapAlloc dt=6 heapalloc_value=16897352 +HeapAlloc dt=5 heapalloc_value=16905544 +HeapAlloc dt=6 heapalloc_value=16913736 +HeapAlloc dt=6 heapalloc_value=16921928 +HeapAlloc dt=5 heapalloc_value=16930120 +HeapAlloc dt=6 heapalloc_value=16938312 +HeapAlloc dt=5 heapalloc_value=16946504 +HeapAlloc dt=6 heapalloc_value=16954696 +HeapAlloc dt=5 heapalloc_value=16962888 +HeapAlloc dt=5 heapalloc_value=16971080 +HeapAlloc dt=5 heapalloc_value=16979272 +HeapAlloc dt=6 heapalloc_value=16987464 +HeapAlloc dt=5 heapalloc_value=16995656 +HeapAlloc dt=5 heapalloc_value=17003848 +HeapAlloc dt=6 heapalloc_value=17012040 +HeapAlloc dt=5 heapalloc_value=17020232 +HeapAlloc dt=6 heapalloc_value=17028424 +HeapAlloc dt=5 heapalloc_value=17036616 +HeapAlloc dt=53 heapalloc_value=17044808 +HeapAlloc dt=7 heapalloc_value=17053000 +HeapAlloc dt=5 heapalloc_value=17061192 +HeapAlloc dt=6 heapalloc_value=17069384 +HeapAlloc dt=11 heapalloc_value=17077576 +HeapAlloc dt=10 heapalloc_value=17085768 +HeapAlloc dt=5 
heapalloc_value=17093960 +HeapAlloc dt=5 heapalloc_value=17102152 +HeapAlloc dt=6 heapalloc_value=17110344 +HeapAlloc dt=5 heapalloc_value=17118536 +HeapAlloc dt=5 heapalloc_value=17126728 +HeapAlloc dt=6 heapalloc_value=17134920 +HeapAlloc dt=5 heapalloc_value=17143112 +HeapAlloc dt=6 heapalloc_value=17151304 +HeapAlloc dt=37 heapalloc_value=17159496 +GCBegin dt=15 gc_seq=5 stack=22 +STWBegin dt=37 kind_string=22 stack=28 +GoUnblock dt=288 g=4 g_seq=9 stack=29 +ProcsChange dt=56 procs_value=8 stack=30 +STWEnd dt=23 +GCMarkAssistBegin dt=90 stack=31 +GCMarkAssistEnd dt=3424 +HeapAlloc dt=523 heapalloc_value=17175048 +HeapAlloc dt=21 heapalloc_value=17183240 +HeapAlloc dt=46 heapalloc_value=17191432 +HeapAlloc dt=96 heapalloc_value=17199624 +HeapAlloc dt=12 heapalloc_value=17207816 +HeapAlloc dt=12 heapalloc_value=17216008 +HeapAlloc dt=13 heapalloc_value=17224200 +HeapAlloc dt=10 heapalloc_value=17232392 +HeapAlloc dt=12 heapalloc_value=17240584 +HeapAlloc dt=13 heapalloc_value=17248776 +HeapAlloc dt=12 heapalloc_value=17256968 +HeapAlloc dt=14 heapalloc_value=17265160 +HeapAlloc dt=12 heapalloc_value=17273352 +HeapAlloc dt=12 heapalloc_value=17281544 +HeapAlloc dt=11 heapalloc_value=17289736 +HeapAlloc dt=13 heapalloc_value=17297928 +HeapAlloc dt=36 heapalloc_value=17306120 +HeapAlloc dt=12 heapalloc_value=17314312 +HeapAlloc dt=10 heapalloc_value=17322504 +HeapAlloc dt=12 heapalloc_value=17330696 +HeapAlloc dt=10 heapalloc_value=17338888 +HeapAlloc dt=11 heapalloc_value=17347080 +HeapAlloc dt=10 heapalloc_value=17355272 +HeapAlloc dt=10 heapalloc_value=17363464 +HeapAlloc dt=10 heapalloc_value=17371656 +HeapAlloc dt=11 heapalloc_value=17379848 +HeapAlloc dt=8 heapalloc_value=17388040 +HeapAlloc dt=13 heapalloc_value=17396232 +HeapAlloc dt=10 heapalloc_value=17404424 +HeapAlloc dt=13 heapalloc_value=17412616 +HeapAlloc dt=13 heapalloc_value=17420808 +HeapAlloc dt=10 heapalloc_value=17429000 +HeapAlloc dt=31 heapalloc_value=17437192 +HeapAlloc dt=6 
heapalloc_value=17445384 +HeapAlloc dt=7 heapalloc_value=17453576 +HeapAlloc dt=6 heapalloc_value=17461768 +HeapAlloc dt=7 heapalloc_value=17469960 +HeapAlloc dt=7 heapalloc_value=17478152 +HeapAlloc dt=7 heapalloc_value=17486344 +HeapAlloc dt=7 heapalloc_value=17494536 +HeapAlloc dt=12 heapalloc_value=17502728 +HeapAlloc dt=7 heapalloc_value=17510920 +HeapAlloc dt=12 heapalloc_value=17519112 +HeapAlloc dt=13 heapalloc_value=17527304 +HeapAlloc dt=20 heapalloc_value=17535496 +HeapAlloc dt=15 heapalloc_value=17543688 +HeapAlloc dt=6 heapalloc_value=17551880 +HeapAlloc dt=7 heapalloc_value=17560072 +HeapAlloc dt=72 heapalloc_value=17568264 +HeapAlloc dt=37 heapalloc_value=17576456 +HeapAlloc dt=7 heapalloc_value=17584648 +HeapAlloc dt=7 heapalloc_value=17592840 +HeapAlloc dt=6 heapalloc_value=17601032 +GoBlock dt=13 reason_string=19 stack=21 +GoUnblock dt=157 g=24 g_seq=12 stack=0 +GoStart dt=7 g=24 g_seq=13 +GoLabel dt=1 label_string=2 +STWBegin dt=4128 kind_string=23 stack=37 +GoUnblock dt=64 g=25 g_seq=8 stack=38 +HeapAlloc dt=25 heapalloc_value=16970376 +GoUnblock dt=24 g=3 g_seq=5 stack=39 +GCEnd dt=6 gc_seq=6 +HeapGoal dt=7 heapgoal_value=34360936 +ProcsChange dt=46 procs_value=8 stack=40 +STWEnd dt=49 +GoBlock dt=756 reason_string=15 stack=27 +GoStart dt=10 g=3 g_seq=6 +GoBlock dt=14862 reason_string=14 stack=44 +ProcStop dt=25 +ProcStart dt=132428 p=0 p_seq=32 +GoStart dt=162 g=4 g_seq=12 +GoBlock dt=19 reason_string=15 stack=32 +ProcStop dt=20 +ProcStart dt=8304 p=0 p_seq=33 +GoStart dt=191 g=39 g_seq=1 +GoStop dt=306173 reason_string=16 stack=50 +GoStart dt=17 g=39 g_seq=2 +GoStop dt=315175 reason_string=16 stack=50 +GoStart dt=7 g=39 g_seq=3 +GoDestroy dt=159902 +ProcStop dt=50 +EventBatch gen=1 m=1709040 time=7689670148204 size=3534 +ProcStart dt=256 p=1 p_seq=1 +GoStart dt=186 g=6 g_seq=1 +HeapAlloc dt=320 heapalloc_value=2768896 +HeapAlloc dt=22 heapalloc_value=2777088 +GoBlock dt=229 reason_string=12 stack=15 +GoStart dt=12 g=8 g_seq=1 +HeapAlloc dt=15 
heapalloc_value=2785280 +GoSyscallBegin dt=16 p_seq=2 stack=16 +GoSyscallEnd dt=254 +GoBlock dt=293 reason_string=15 stack=17 +GoStart dt=19 g=9 g_seq=1 +GoDestroy dt=156265 +ProcStop dt=44 +ProcStart dt=67218 p=1 p_seq=3 +ProcStop dt=19 +ProcStart dt=88214 p=1 p_seq=4 +ProcStop dt=13 +ProcStart dt=17539 p=0 p_seq=1 +ProcStop dt=14 +ProcStart dt=9071 p=4 p_seq=1 +GoUnblock dt=33 g=22 g_seq=2 stack=0 +GoStart dt=6 g=22 g_seq=3 +GoLabel dt=1 label_string=4 +GoUnblock dt=2321 g=1 g_seq=23 stack=34 +STWBegin dt=1205 kind_string=23 stack=37 +GoUnblock dt=78 g=24 g_seq=6 stack=38 +HeapAlloc dt=26 heapalloc_value=3840752 +GoStatus dt=14 g=3 m=18446744073709551615 gstatus=4 +GoUnblock dt=7 g=3 g_seq=1 stack=39 +GCEnd dt=3 gc_seq=2 +HeapGoal dt=6 heapgoal_value=8101720 +ProcsChange dt=43 procs_value=8 stack=40 +STWEnd dt=31 +GoBlock dt=4030 reason_string=15 stack=27 +GoStart dt=12 g=3 g_seq=2 +GoBlock dt=1406 reason_string=14 stack=44 +ProcStop dt=24 +ProcStart dt=34332 p=4 p_seq=4 +GoStart dt=153 g=4 g_seq=4 +GoBlock dt=20 reason_string=15 stack=32 +ProcStop dt=19 +ProcStart dt=1832 p=2 p_seq=5 +GoUnblock dt=22 g=24 g_seq=8 stack=0 +GoStart dt=102 g=24 g_seq=9 +GoLabel dt=1 label_string=2 +STWBegin dt=11769 kind_string=23 stack=37 +GoUnblock dt=60 g=1 g_seq=36 stack=38 +HeapAlloc dt=23 heapalloc_value=8744264 +GoUnblock dt=17 g=3 g_seq=3 stack=39 +GCEnd dt=6 gc_seq=4 +HeapGoal dt=7 heapgoal_value=17908728 +ProcsChange dt=47 procs_value=8 stack=40 +STWEnd dt=28 +GoBlock dt=572 reason_string=15 stack=27 +GoStart dt=13 g=3 g_seq=4 +GoBlock dt=5707 reason_string=14 stack=44 +ProcStop dt=16 +ProcStart dt=136502 p=1 p_seq=11 +GoStart dt=17 g=4 g_seq=8 +GoBlock dt=12 reason_string=15 stack=32 +ProcStop dt=22 +ProcStart dt=5977 p=6 p_seq=1 +ProcStop dt=34 +ProcStart dt=16775 p=2 p_seq=15 +ProcStop dt=23 +ProcStart dt=3966 p=1 p_seq=14 +ProcStop dt=15 +ProcStart dt=16753 p=1 p_seq=15 +GoUnblock dt=35 g=1 g_seq=57 stack=0 +GoStart dt=139 g=1 g_seq=58 +HeapAlloc dt=71 
heapalloc_value=17593992 +HeapAlloc dt=47 heapalloc_value=17602184 +HeapAlloc dt=24 heapalloc_value=17610376 +HeapAlloc dt=97 heapalloc_value=17618568 +HeapAlloc dt=23 heapalloc_value=17626760 +HeapAlloc dt=18 heapalloc_value=17634952 +HeapAlloc dt=15 heapalloc_value=17643144 +HeapAlloc dt=18 heapalloc_value=17651336 +HeapAlloc dt=21 heapalloc_value=17659528 +HeapAlloc dt=28 heapalloc_value=17667720 +HeapAlloc dt=26 heapalloc_value=17675912 +HeapAlloc dt=23 heapalloc_value=17684104 +HeapAlloc dt=12 heapalloc_value=17692296 +HeapAlloc dt=12 heapalloc_value=17700488 +HeapAlloc dt=11 heapalloc_value=17708680 +HeapAlloc dt=15 heapalloc_value=17716872 +HeapAlloc dt=18 heapalloc_value=17725064 +HeapAlloc dt=15 heapalloc_value=17733256 +HeapAlloc dt=165 heapalloc_value=17741448 +HeapAlloc dt=16 heapalloc_value=17749640 +HeapAlloc dt=12 heapalloc_value=17757832 +HeapAlloc dt=15 heapalloc_value=17766024 +HeapAlloc dt=12 heapalloc_value=17774216 +HeapAlloc dt=12 heapalloc_value=17782408 +HeapAlloc dt=15 heapalloc_value=17790600 +HeapAlloc dt=11 heapalloc_value=17798792 +HeapAlloc dt=11 heapalloc_value=17806984 +HeapAlloc dt=12 heapalloc_value=17815176 +HeapAlloc dt=12 heapalloc_value=17823368 +HeapAlloc dt=15 heapalloc_value=17831560 +HeapAlloc dt=11 heapalloc_value=17839752 +HeapAlloc dt=12 heapalloc_value=17847944 +HeapAlloc dt=15 heapalloc_value=17856136 +HeapAlloc dt=11 heapalloc_value=17864328 +HeapAlloc dt=12 heapalloc_value=17872520 +HeapAlloc dt=12 heapalloc_value=17880712 +HeapAlloc dt=14 heapalloc_value=17888904 +HeapAlloc dt=42 heapalloc_value=17897096 +HeapAlloc dt=54 heapalloc_value=17905288 +HeapAlloc dt=49 heapalloc_value=17913480 +HeapAlloc dt=54 heapalloc_value=17921672 +HeapAlloc dt=56 heapalloc_value=17929864 +HeapAlloc dt=45 heapalloc_value=17938056 +HeapAlloc dt=57 heapalloc_value=17946248 +HeapAlloc dt=63 heapalloc_value=17954440 +HeapAlloc dt=57 heapalloc_value=17962632 +HeapAlloc dt=56 heapalloc_value=17970824 +HeapAlloc dt=62 heapalloc_value=17979016 
+HeapAlloc dt=109 heapalloc_value=17987208 +HeapAlloc dt=59 heapalloc_value=17995400 +HeapAlloc dt=45 heapalloc_value=18003592 +HeapAlloc dt=61 heapalloc_value=18011784 +HeapAlloc dt=35 heapalloc_value=18019976 +HeapAlloc dt=16 heapalloc_value=18028168 +HeapAlloc dt=15 heapalloc_value=18036360 +HeapAlloc dt=15 heapalloc_value=18044552 +HeapAlloc dt=21 heapalloc_value=18052744 +HeapAlloc dt=16 heapalloc_value=18060936 +HeapAlloc dt=16 heapalloc_value=18069128 +HeapAlloc dt=22 heapalloc_value=18077320 +HeapAlloc dt=43 heapalloc_value=18085512 +HeapAlloc dt=46 heapalloc_value=18093704 +HeapAlloc dt=43 heapalloc_value=18101896 +HeapAlloc dt=42 heapalloc_value=18110088 +HeapAlloc dt=44 heapalloc_value=18118280 +HeapAlloc dt=35 heapalloc_value=18126472 +HeapAlloc dt=39 heapalloc_value=18134664 +HeapAlloc dt=40 heapalloc_value=18142856 +HeapAlloc dt=43 heapalloc_value=18151048 +HeapAlloc dt=44 heapalloc_value=18159240 +HeapAlloc dt=38 heapalloc_value=18167432 +HeapAlloc dt=42 heapalloc_value=18175624 +HeapAlloc dt=40 heapalloc_value=18183816 +HeapAlloc dt=40 heapalloc_value=18192008 +HeapAlloc dt=36 heapalloc_value=18200200 +HeapAlloc dt=55 heapalloc_value=18208392 +HeapAlloc dt=54 heapalloc_value=18216584 +HeapAlloc dt=54 heapalloc_value=18224776 +HeapAlloc dt=41 heapalloc_value=18232968 +HeapAlloc dt=58 heapalloc_value=18241160 +HeapAlloc dt=61 heapalloc_value=18249352 +HeapAlloc dt=55 heapalloc_value=18257544 +HeapAlloc dt=141 heapalloc_value=18265736 +HeapAlloc dt=55 heapalloc_value=18273928 +HeapAlloc dt=54 heapalloc_value=18282120 +HeapAlloc dt=50 heapalloc_value=18290312 +HeapAlloc dt=82 heapalloc_value=18298504 +HeapAlloc dt=64 heapalloc_value=18306696 +HeapAlloc dt=55 heapalloc_value=18314888 +HeapAlloc dt=58 heapalloc_value=18323080 +HeapAlloc dt=54 heapalloc_value=18331272 +HeapAlloc dt=57 heapalloc_value=18339464 +HeapAlloc dt=46 heapalloc_value=18347656 +HeapAlloc dt=41 heapalloc_value=18355848 +HeapAlloc dt=56 heapalloc_value=18364040 +HeapAlloc dt=50 
heapalloc_value=18372232 +HeapAlloc dt=54 heapalloc_value=18380424 +HeapAlloc dt=56 heapalloc_value=18388616 +HeapAlloc dt=57 heapalloc_value=18396808 +HeapAlloc dt=55 heapalloc_value=18405000 +HeapAlloc dt=55 heapalloc_value=18413192 +HeapAlloc dt=51 heapalloc_value=18421384 +HeapAlloc dt=52 heapalloc_value=18429576 +HeapAlloc dt=67 heapalloc_value=18437768 +HeapAlloc dt=36 heapalloc_value=18445960 +HeapAlloc dt=28 heapalloc_value=18454152 +HeapAlloc dt=30 heapalloc_value=18462344 +HeapAlloc dt=40 heapalloc_value=18470536 +HeapAlloc dt=29 heapalloc_value=18478728 +HeapAlloc dt=37 heapalloc_value=18486920 +HeapAlloc dt=34 heapalloc_value=18495112 +HeapAlloc dt=73 heapalloc_value=18503304 +HeapAlloc dt=37 heapalloc_value=18511496 +HeapAlloc dt=38 heapalloc_value=18519688 +HeapAlloc dt=29 heapalloc_value=18527880 +HeapAlloc dt=35 heapalloc_value=18536072 +HeapAlloc dt=33 heapalloc_value=18544264 +HeapAlloc dt=40 heapalloc_value=18552456 +HeapAlloc dt=32 heapalloc_value=18560648 +HeapAlloc dt=42 heapalloc_value=18568840 +HeapAlloc dt=34 heapalloc_value=18577032 +HeapAlloc dt=37 heapalloc_value=18585224 +HeapAlloc dt=35 heapalloc_value=18593416 +HeapAlloc dt=39 heapalloc_value=18601608 +HeapAlloc dt=35 heapalloc_value=18609800 +GoBlock dt=51 reason_string=19 stack=21 +ProcStop dt=192 +ProcStart dt=17579 p=0 p_seq=27 +ProcStop dt=18 +ProcStart dt=1930 p=1 p_seq=18 +ProcStop dt=15 +ProcStart dt=16696 p=1 p_seq=19 +GoUnblock dt=22 g=1 g_seq=61 stack=0 +GoStart dt=125 g=1 g_seq=62 +HeapAlloc dt=53 heapalloc_value=19641992 +HeapAlloc dt=19 heapalloc_value=19650184 +HeapAlloc dt=20 heapalloc_value=19658376 +HeapAlloc dt=23 heapalloc_value=19666568 +HeapAlloc dt=16 heapalloc_value=19674760 +HeapAlloc dt=16 heapalloc_value=19682952 +HeapAlloc dt=19 heapalloc_value=19691144 +HeapAlloc dt=15 heapalloc_value=19699336 +HeapAlloc dt=12 heapalloc_value=19707528 +HeapAlloc dt=12 heapalloc_value=19715720 +HeapAlloc dt=13 heapalloc_value=19723912 +HeapAlloc dt=18 
heapalloc_value=19732104 +HeapAlloc dt=12 heapalloc_value=19740296 +HeapAlloc dt=12 heapalloc_value=19748488 +HeapAlloc dt=9 heapalloc_value=19756680 +HeapAlloc dt=6 heapalloc_value=19764872 +HeapAlloc dt=5 heapalloc_value=19773064 +HeapAlloc dt=6 heapalloc_value=19781256 +HeapAlloc dt=5 heapalloc_value=19789448 +HeapAlloc dt=10 heapalloc_value=19797640 +HeapAlloc dt=5 heapalloc_value=19805832 +HeapAlloc dt=6 heapalloc_value=19814024 +HeapAlloc dt=9 heapalloc_value=19822216 +HeapAlloc dt=6 heapalloc_value=19830408 +HeapAlloc dt=117 heapalloc_value=19838600 +HeapAlloc dt=17 heapalloc_value=19846792 +HeapAlloc dt=5 heapalloc_value=19854984 +HeapAlloc dt=10 heapalloc_value=19863176 +HeapAlloc dt=6 heapalloc_value=19871368 +HeapAlloc dt=6 heapalloc_value=19879560 +HeapAlloc dt=9 heapalloc_value=19887752 +HeapAlloc dt=6 heapalloc_value=19895944 +HeapAlloc dt=6 heapalloc_value=19904136 +HeapAlloc dt=5 heapalloc_value=19912328 +HeapAlloc dt=6 heapalloc_value=19920520 +HeapAlloc dt=10 heapalloc_value=19928712 +HeapAlloc dt=5 heapalloc_value=19936904 +HeapAlloc dt=6 heapalloc_value=19945096 +HeapAlloc dt=9 heapalloc_value=19953288 +HeapAlloc dt=6 heapalloc_value=19961480 +HeapAlloc dt=35 heapalloc_value=19969672 +HeapAlloc dt=7 heapalloc_value=19977864 +HeapAlloc dt=5 heapalloc_value=19986056 +HeapAlloc dt=468 heapalloc_value=19994248 +HeapAlloc dt=14 heapalloc_value=20002440 +HeapAlloc dt=6 heapalloc_value=20010632 +HeapAlloc dt=10 heapalloc_value=20018824 +HeapAlloc dt=5 heapalloc_value=20027016 +HeapAlloc dt=6 heapalloc_value=20035208 +HeapAlloc dt=11 heapalloc_value=20043400 +HeapAlloc dt=6 heapalloc_value=20051592 +HeapAlloc dt=5 heapalloc_value=20059784 +HeapAlloc dt=6 heapalloc_value=20067976 +HeapAlloc dt=5 heapalloc_value=20076168 +HeapAlloc dt=7 heapalloc_value=20084360 +HeapAlloc dt=6 heapalloc_value=20092552 +HeapAlloc dt=5 heapalloc_value=20100744 +HeapAlloc dt=6 heapalloc_value=20108936 +HeapAlloc dt=6 heapalloc_value=20117128 +HeapAlloc dt=5 
heapalloc_value=20125320 +HeapAlloc dt=6 heapalloc_value=20133512 +HeapAlloc dt=6 heapalloc_value=20141704 +HeapAlloc dt=7 heapalloc_value=20149896 +HeapAlloc dt=5 heapalloc_value=20158088 +HeapAlloc dt=6 heapalloc_value=20166280 +HeapAlloc dt=5 heapalloc_value=20174472 +HeapAlloc dt=6 heapalloc_value=20182664 +HeapAlloc dt=6 heapalloc_value=20190856 +HeapAlloc dt=5 heapalloc_value=20199048 +HeapAlloc dt=5 heapalloc_value=20207240 +HeapAlloc dt=6 heapalloc_value=20215432 +HeapAlloc dt=6 heapalloc_value=20223624 +HeapAlloc dt=5 heapalloc_value=20231816 +HeapAlloc dt=6 heapalloc_value=20240008 +HeapAlloc dt=5 heapalloc_value=20248200 +HeapAlloc dt=5 heapalloc_value=20256392 +HeapAlloc dt=6 heapalloc_value=20264584 +HeapAlloc dt=5 heapalloc_value=20272776 +HeapAlloc dt=6 heapalloc_value=20280968 +HeapAlloc dt=5 heapalloc_value=20289160 +HeapAlloc dt=6 heapalloc_value=20297352 +HeapAlloc dt=5 heapalloc_value=20305544 +HeapAlloc dt=6 heapalloc_value=20313736 +HeapAlloc dt=5 heapalloc_value=20321928 +HeapAlloc dt=6 heapalloc_value=20330120 +HeapAlloc dt=5 heapalloc_value=20338312 +HeapAlloc dt=6 heapalloc_value=20346504 +HeapAlloc dt=6 heapalloc_value=20354696 +HeapAlloc dt=62 heapalloc_value=20362888 +HeapAlloc dt=7 heapalloc_value=20371080 +HeapAlloc dt=5 heapalloc_value=20379272 +HeapAlloc dt=6 heapalloc_value=20387464 +HeapAlloc dt=37 heapalloc_value=20395656 +HeapAlloc dt=7 heapalloc_value=20403848 +HeapAlloc dt=6 heapalloc_value=20412040 +HeapAlloc dt=5 heapalloc_value=20420232 +HeapAlloc dt=6 heapalloc_value=20428424 +HeapAlloc dt=5 heapalloc_value=20436616 +HeapAlloc dt=6 heapalloc_value=20444808 +HeapAlloc dt=5 heapalloc_value=20453000 +HeapAlloc dt=6 heapalloc_value=20461192 +HeapAlloc dt=5 heapalloc_value=20469384 +HeapAlloc dt=6 heapalloc_value=20477576 +HeapAlloc dt=5 heapalloc_value=20485768 +HeapAlloc dt=6 heapalloc_value=20493960 +HeapAlloc dt=5 heapalloc_value=20502152 +HeapAlloc dt=6 heapalloc_value=20510344 +HeapAlloc dt=9 heapalloc_value=20518536 
+HeapAlloc dt=6 heapalloc_value=20526728 +HeapAlloc dt=5 heapalloc_value=20534920 +HeapAlloc dt=6 heapalloc_value=20543112 +HeapAlloc dt=5 heapalloc_value=20551304 +HeapAlloc dt=6 heapalloc_value=20559496 +HeapAlloc dt=5 heapalloc_value=20567688 +HeapAlloc dt=6 heapalloc_value=20575880 +HeapAlloc dt=5 heapalloc_value=20584072 +HeapAlloc dt=6 heapalloc_value=20592264 +HeapAlloc dt=38 heapalloc_value=20600456 +HeapAlloc dt=7 heapalloc_value=20608648 +HeapAlloc dt=5 heapalloc_value=20616840 +HeapAlloc dt=6 heapalloc_value=20625032 +HeapAlloc dt=5 heapalloc_value=20633224 +HeapAlloc dt=6 heapalloc_value=20641416 +HeapAlloc dt=5 heapalloc_value=20649608 +HeapAlloc dt=6 heapalloc_value=20657800 +GoBlock dt=12 reason_string=19 stack=21 +ProcStop dt=167 +ProcStart dt=17576 p=0 p_seq=29 +ProcStop dt=20 +ProcStart dt=3256 p=1 p_seq=22 +ProcStop dt=17 +ProcStart dt=16071 p=1 p_seq=23 +GoUnblock dt=21 g=1 g_seq=65 stack=0 +GoStart dt=124 g=1 g_seq=66 +HeapAlloc dt=51 heapalloc_value=22230664 +HeapAlloc dt=26 heapalloc_value=22238856 +HeapAlloc dt=16 heapalloc_value=22247048 +HeapAlloc dt=19 heapalloc_value=22255240 +HeapAlloc dt=19 heapalloc_value=22263432 +HeapAlloc dt=16 heapalloc_value=22271624 +HeapAlloc dt=16 heapalloc_value=22279816 +HeapAlloc dt=19 heapalloc_value=22288008 +HeapAlloc dt=18 heapalloc_value=22296200 +HeapAlloc dt=16 heapalloc_value=22304392 +HeapAlloc dt=12 heapalloc_value=22312584 +HeapAlloc dt=13 heapalloc_value=22320776 +HeapAlloc dt=15 heapalloc_value=22328968 +HeapAlloc dt=12 heapalloc_value=22337160 +HeapAlloc dt=6 heapalloc_value=22345352 +HeapAlloc dt=8 heapalloc_value=22353544 +HeapAlloc dt=6 heapalloc_value=22361736 +HeapAlloc dt=5 heapalloc_value=22369928 +HeapAlloc dt=25 heapalloc_value=22378120 +HeapAlloc dt=23 heapalloc_value=22386312 +HeapAlloc dt=9 heapalloc_value=22394504 +HeapAlloc dt=6 heapalloc_value=22402696 +HeapAlloc dt=5 heapalloc_value=22410888 +HeapAlloc dt=10 heapalloc_value=22419080 +HeapAlloc dt=5 heapalloc_value=22427272 
+HeapAlloc dt=6 heapalloc_value=22435464 +HeapAlloc dt=5 heapalloc_value=22443656 +HeapAlloc dt=6 heapalloc_value=22451848 +HeapAlloc dt=8 heapalloc_value=22460040 +HeapAlloc dt=135 heapalloc_value=22468232 +HeapAlloc dt=8 heapalloc_value=22476424 +HeapAlloc dt=9 heapalloc_value=22484616 +HeapAlloc dt=6 heapalloc_value=22492808 +HeapAlloc dt=6 heapalloc_value=22501000 +HeapAlloc dt=6 heapalloc_value=22509192 +HeapAlloc dt=5 heapalloc_value=22517384 +HeapAlloc dt=9 heapalloc_value=22525576 +HeapAlloc dt=6 heapalloc_value=22533768 +HeapAlloc dt=6 heapalloc_value=22541960 +HeapAlloc dt=5 heapalloc_value=22550152 +HeapAlloc dt=6 heapalloc_value=22558344 +HeapAlloc dt=5 heapalloc_value=22566536 +HeapAlloc dt=6 heapalloc_value=22574728 +HeapAlloc dt=5 heapalloc_value=22582920 +HeapAlloc dt=9 heapalloc_value=22591112 +HeapAlloc dt=44 heapalloc_value=22599304 +HeapAlloc dt=7 heapalloc_value=22607496 +HeapAlloc dt=38 heapalloc_value=22615688 +HeapAlloc dt=6 heapalloc_value=22623880 +HeapAlloc dt=6 heapalloc_value=22632072 +HeapAlloc dt=6 heapalloc_value=22640264 +HeapAlloc dt=6 heapalloc_value=22648456 +HeapAlloc dt=6 heapalloc_value=22656648 +HeapAlloc dt=5 heapalloc_value=22664840 +HeapAlloc dt=6 heapalloc_value=22673032 +HeapAlloc dt=5 heapalloc_value=22681224 +HeapAlloc dt=6 heapalloc_value=22689416 +HeapAlloc dt=5 heapalloc_value=22697608 +HeapAlloc dt=6 heapalloc_value=22705800 +HeapAlloc dt=6 heapalloc_value=22713992 +HeapAlloc dt=5 heapalloc_value=22722184 +HeapAlloc dt=5 heapalloc_value=22730376 +HeapAlloc dt=6 heapalloc_value=22738568 +HeapAlloc dt=6 heapalloc_value=22746760 +HeapAlloc dt=5 heapalloc_value=22754952 +HeapAlloc dt=6 heapalloc_value=22763144 +HeapAlloc dt=6 heapalloc_value=22771336 +HeapAlloc dt=6 heapalloc_value=22779528 +HeapAlloc dt=5 heapalloc_value=22787720 +HeapAlloc dt=5 heapalloc_value=22795912 +HeapAlloc dt=6 heapalloc_value=22804104 +HeapAlloc dt=75 heapalloc_value=22812296 +HeapAlloc dt=7 heapalloc_value=22820488 +HeapAlloc dt=5 
heapalloc_value=22828680 +HeapAlloc dt=6 heapalloc_value=22836872 +HeapAlloc dt=5 heapalloc_value=22845064 +HeapAlloc dt=6 heapalloc_value=22853256 +HeapAlloc dt=6 heapalloc_value=22861448 +HeapAlloc dt=5 heapalloc_value=22869640 +HeapAlloc dt=6 heapalloc_value=22877832 +HeapAlloc dt=5 heapalloc_value=22886024 +HeapAlloc dt=5 heapalloc_value=22894216 +HeapAlloc dt=6 heapalloc_value=22902408 +HeapAlloc dt=7 heapalloc_value=22910600 +HeapAlloc dt=6 heapalloc_value=22918792 +HeapAlloc dt=5 heapalloc_value=22926984 +HeapAlloc dt=6 heapalloc_value=22935176 +HeapAlloc dt=6 heapalloc_value=22943368 +HeapAlloc dt=6 heapalloc_value=22951560 +HeapAlloc dt=5 heapalloc_value=22959752 +HeapAlloc dt=6 heapalloc_value=22967944 +HeapAlloc dt=7 heapalloc_value=22976136 +HeapAlloc dt=5 heapalloc_value=22984328 +HeapAlloc dt=43 heapalloc_value=22992520 +HeapAlloc dt=7 heapalloc_value=23000712 +HeapAlloc dt=5 heapalloc_value=23008904 +HeapAlloc dt=6 heapalloc_value=23017096 +HeapAlloc dt=35 heapalloc_value=23025288 +HeapAlloc dt=7 heapalloc_value=23033480 +HeapAlloc dt=5 heapalloc_value=23041672 +HeapAlloc dt=5 heapalloc_value=23049864 +HeapAlloc dt=6 heapalloc_value=23058056 +HeapAlloc dt=5 heapalloc_value=23066248 +HeapAlloc dt=6 heapalloc_value=23074440 +HeapAlloc dt=5 heapalloc_value=23082632 +HeapAlloc dt=6 heapalloc_value=23090824 +HeapAlloc dt=5 heapalloc_value=23099016 +HeapAlloc dt=6 heapalloc_value=23107208 +HeapAlloc dt=5 heapalloc_value=23115400 +HeapAlloc dt=6 heapalloc_value=23123592 +HeapAlloc dt=5 heapalloc_value=23131784 +HeapAlloc dt=12 heapalloc_value=23139976 +HeapAlloc dt=5 heapalloc_value=23148168 +HeapAlloc dt=6 heapalloc_value=23156360 +HeapAlloc dt=5 heapalloc_value=23164552 +HeapAlloc dt=6 heapalloc_value=23172744 +HeapAlloc dt=5 heapalloc_value=23180936 +HeapAlloc dt=6 heapalloc_value=23189128 +HeapAlloc dt=5 heapalloc_value=23197320 +HeapAlloc dt=7 heapalloc_value=23205512 +HeapAlloc dt=5 heapalloc_value=23213704 +HeapAlloc dt=6 heapalloc_value=23221896 
+HeapAlloc dt=38 heapalloc_value=23230088 +HeapAlloc dt=7 heapalloc_value=23238280 +HeapAlloc dt=5 heapalloc_value=23246472 +GoBlock dt=9 reason_string=19 stack=21 +ProcStop dt=164 +ProcStart dt=17494 p=0 p_seq=31 +ProcStop dt=25 +ProcStart dt=1701 p=1 p_seq=26 +ProcStop dt=16 +ProcStart dt=16748 p=2 p_seq=17 +GoUnblock dt=36 g=1 g_seq=71 stack=0 +GoStart dt=149 g=1 g_seq=72 +HeapAlloc dt=67 heapalloc_value=25302664 +HeapAlloc dt=38 heapalloc_value=25310856 +HeapAlloc dt=23 heapalloc_value=25319048 +HeapAlloc dt=17 heapalloc_value=25327240 +HeapAlloc dt=21 heapalloc_value=25335432 +HeapAlloc dt=17 heapalloc_value=25343624 +HeapAlloc dt=17 heapalloc_value=25351816 +HeapAlloc dt=16 heapalloc_value=25360008 +HeapAlloc dt=19 heapalloc_value=25368200 +HeapAlloc dt=16 heapalloc_value=25376392 +HeapAlloc dt=16 heapalloc_value=25384584 +HeapAlloc dt=16 heapalloc_value=25392776 +HeapAlloc dt=17 heapalloc_value=25400968 +HeapAlloc dt=16 heapalloc_value=25409160 +HeapAlloc dt=9 heapalloc_value=25417352 +HeapAlloc dt=9 heapalloc_value=25425544 +HeapAlloc dt=9 heapalloc_value=25433736 +HeapAlloc dt=10 heapalloc_value=25441928 +HeapAlloc dt=9 heapalloc_value=25450120 +HeapAlloc dt=10 heapalloc_value=25458312 +HeapAlloc dt=9 heapalloc_value=25466504 +HeapAlloc dt=6 heapalloc_value=25474696 +HeapAlloc dt=5 heapalloc_value=25482888 +HeapAlloc dt=6 heapalloc_value=25491080 +HeapAlloc dt=9 heapalloc_value=25499272 +HeapAlloc dt=6 heapalloc_value=25507464 +HeapAlloc dt=8 heapalloc_value=25515656 +HeapAlloc dt=7 heapalloc_value=25523848 +HeapAlloc dt=10 heapalloc_value=25532040 +HeapAlloc dt=9 heapalloc_value=25540232 +HeapAlloc dt=102 heapalloc_value=25548424 +HeapAlloc dt=7 heapalloc_value=25556616 +HeapAlloc dt=10 heapalloc_value=25564808 +HeapAlloc dt=5 heapalloc_value=25573000 +HeapAlloc dt=5 heapalloc_value=25581192 +HeapAlloc dt=36 heapalloc_value=25589384 +HeapAlloc dt=8 heapalloc_value=25597576 +HeapAlloc dt=5 heapalloc_value=25605768 +HeapAlloc dt=43 heapalloc_value=25613960 
+HeapAlloc dt=7 heapalloc_value=25622152 +HeapAlloc dt=10 heapalloc_value=25630344 +HeapAlloc dt=6 heapalloc_value=25638536 +HeapAlloc dt=6 heapalloc_value=25646728 +HeapAlloc dt=6 heapalloc_value=25654920 +HeapAlloc dt=7 heapalloc_value=25663112 +HeapAlloc dt=5 heapalloc_value=25671304 +HeapAlloc dt=6 heapalloc_value=25679496 +HeapAlloc dt=41 heapalloc_value=25687688 +HeapAlloc dt=13 heapalloc_value=25695880 +HeapAlloc dt=5 heapalloc_value=25704072 +HeapAlloc dt=6 heapalloc_value=25712264 +HeapAlloc dt=13 heapalloc_value=25720456 +HeapAlloc dt=13 heapalloc_value=25728648 +HeapAlloc dt=5 heapalloc_value=25736840 +HeapAlloc dt=6 heapalloc_value=25745032 +HeapAlloc dt=6 heapalloc_value=25753224 +HeapAlloc dt=9 heapalloc_value=25761416 +HeapAlloc dt=6 heapalloc_value=25769608 +HeapAlloc dt=5 heapalloc_value=25777800 +HeapAlloc dt=6 heapalloc_value=25785992 +HeapAlloc dt=5 heapalloc_value=25794184 +HeapAlloc dt=6 heapalloc_value=25802376 +HeapAlloc dt=5 heapalloc_value=25810568 +HeapAlloc dt=6 heapalloc_value=25818760 +HeapAlloc dt=10 heapalloc_value=25826952 +HeapAlloc dt=6 heapalloc_value=25835144 +HeapAlloc dt=6 heapalloc_value=25843336 +HeapAlloc dt=5 heapalloc_value=25851528 +HeapAlloc dt=6 heapalloc_value=25859720 +HeapAlloc dt=5 heapalloc_value=25867912 +HeapAlloc dt=6 heapalloc_value=25876104 +HeapAlloc dt=6 heapalloc_value=25884296 +HeapAlloc dt=7 heapalloc_value=25892488 +HeapAlloc dt=6 heapalloc_value=25900680 +HeapAlloc dt=5 heapalloc_value=25908872 +HeapAlloc dt=6 heapalloc_value=25917064 +HeapAlloc dt=6 heapalloc_value=25925256 +HeapAlloc dt=5 heapalloc_value=25933448 +HeapAlloc dt=6 heapalloc_value=25941640 +HeapAlloc dt=6 heapalloc_value=25949832 +HeapAlloc dt=6 heapalloc_value=25958024 +HeapAlloc dt=5 heapalloc_value=25966216 +HeapAlloc dt=6 heapalloc_value=25974408 +HeapAlloc dt=5 heapalloc_value=25982600 +HeapAlloc dt=6 heapalloc_value=25990792 +HeapAlloc dt=6 heapalloc_value=25998984 +HeapAlloc dt=5 heapalloc_value=26007176 +HeapAlloc dt=6 
heapalloc_value=26015368 +HeapAlloc dt=6 heapalloc_value=26023560 +HeapAlloc dt=6 heapalloc_value=26031752 +HeapAlloc dt=5 heapalloc_value=26039944 +HeapAlloc dt=6 heapalloc_value=26048136 +HeapAlloc dt=5 heapalloc_value=26056328 +HeapAlloc dt=6 heapalloc_value=26064520 +HeapAlloc dt=94 heapalloc_value=26072712 +HeapAlloc dt=7 heapalloc_value=26080904 +HeapAlloc dt=5 heapalloc_value=26089096 +HeapAlloc dt=6 heapalloc_value=26097288 +HeapAlloc dt=6 heapalloc_value=26105480 +HeapAlloc dt=5 heapalloc_value=26113672 +HeapAlloc dt=6 heapalloc_value=26121864 +HeapAlloc dt=6 heapalloc_value=26130056 +HeapAlloc dt=5 heapalloc_value=26138248 +HeapAlloc dt=6 heapalloc_value=26146440 +HeapAlloc dt=6 heapalloc_value=26154632 +HeapAlloc dt=5 heapalloc_value=26162824 +HeapAlloc dt=1696 heapalloc_value=26171016 +HeapAlloc dt=7 heapalloc_value=26179208 +HeapAlloc dt=6 heapalloc_value=26187400 +HeapAlloc dt=5 heapalloc_value=26195592 +HeapAlloc dt=6 heapalloc_value=26203784 +HeapAlloc dt=5 heapalloc_value=26211976 +HeapAlloc dt=47 heapalloc_value=26220168 +HeapAlloc dt=8 heapalloc_value=26228360 +HeapAlloc dt=5 heapalloc_value=26236552 +HeapAlloc dt=6 heapalloc_value=26244744 +HeapAlloc dt=6 heapalloc_value=26252936 +HeapAlloc dt=5 heapalloc_value=26261128 +HeapAlloc dt=6 heapalloc_value=26269320 +HeapAlloc dt=5 heapalloc_value=26277512 +HeapAlloc dt=6 heapalloc_value=26285704 +HeapAlloc dt=6 heapalloc_value=26293896 +HeapAlloc dt=5 heapalloc_value=26302088 +HeapAlloc dt=6 heapalloc_value=26310280 +HeapAlloc dt=6 heapalloc_value=26318472 +HeapAlloc dt=30 heapalloc_value=26326360 +HeapAlloc dt=30 heapalloc_value=26334536 +HeapAlloc dt=24 heapalloc_value=26336904 +GoCreate dt=72 new_g=34 new_stack=47 stack=48 +GoCreate dt=183 new_g=35 new_stack=47 stack=48 +GoCreate dt=15 new_g=36 new_stack=47 stack=48 +GoCreate dt=12 new_g=37 new_stack=47 stack=48 +GoCreate dt=14 new_g=38 new_stack=47 stack=48 +HeapAlloc dt=25 heapalloc_value=26344200 +GoCreate dt=9 new_g=39 new_stack=47 stack=48 
+GoCreate dt=13 new_g=40 new_stack=47 stack=48 +GoCreate dt=4 new_g=41 new_stack=47 stack=48 +HeapAlloc dt=17 heapalloc_value=26351912 +GoBlock dt=15 reason_string=10 stack=49 +GoStart dt=5 g=41 g_seq=1 +GoStop dt=307427 reason_string=16 stack=51 +GoStart dt=34 g=41 g_seq=2 +GoStop dt=315328 reason_string=16 stack=50 +GoStart dt=10 g=41 g_seq=3 +GoDestroy dt=158464 +ProcStop dt=40 +EventBatch gen=1 m=1709039 time=7689670530705 size=53 +GoUnblock dt=117 g=4 g_seq=3 stack=0 +GoUnblock dt=157408 g=4 g_seq=7 stack=0 +GoUnblock dt=157553 g=4 g_seq=11 stack=0 +ProcSteal dt=947714 p=7 p_seq=9 m=1709048 +ProcSteal dt=646055 p=7 p_seq=13 m=1709046 +ProcSteal dt=5677 p=5 p_seq=11 m=1709046 +ProcSteal dt=1312 p=6 p_seq=9 m=1709048 +EventBatch gen=1 m=1709038 time=7689670147327 size=336 +ProcStatus dt=56 p=0 pstatus=1 +GoStatus dt=4 g=1 m=1709038 gstatus=2 +ProcsChange dt=184 procs_value=8 stack=1 +STWBegin dt=81 kind_string=21 stack=2 +HeapGoal dt=5 heapgoal_value=4194304 +ProcStatus dt=2 p=1 pstatus=2 +ProcStatus dt=1 p=2 pstatus=2 +ProcStatus dt=1 p=3 pstatus=2 +ProcStatus dt=1 p=4 pstatus=2 +ProcStatus dt=1 p=5 pstatus=2 +ProcStatus dt=1 p=6 pstatus=2 +ProcStatus dt=1 p=7 pstatus=2 +ProcsChange dt=51 procs_value=8 stack=3 +STWEnd dt=74 +GoCreate dt=216 new_g=6 new_stack=4 stack=5 +HeapAlloc dt=174 heapalloc_value=2752512 +GoCreate dt=140 new_g=7 new_stack=6 stack=7 +HeapAlloc dt=16 heapalloc_value=2760704 +GoCreate dt=11 new_g=8 new_stack=8 stack=9 +GoCreate dt=197 new_g=9 new_stack=10 stack=11 +GoCreate dt=18 new_g=10 new_stack=12 stack=13 +GoBlock dt=159 reason_string=10 stack=14 +GoStart dt=10 g=10 g_seq=1 +GoStop dt=224159 reason_string=16 stack=19 +GoStart dt=105 g=10 g_seq=2 +GoUnblock dt=88262 g=1 g_seq=1 stack=20 +GoDestroy dt=111 +GoStart dt=10 g=1 g_seq=2 +GoBlock dt=18 reason_string=19 stack=21 +ProcStop dt=177 +ProcStart dt=22598 p=0 p_seq=2 +ProcStop dt=20 +ProcStart dt=30 p=2 p_seq=2 +ProcStop dt=1158 +ProcStart dt=1116 p=0 p_seq=4 +GoUnblock dt=19 g=25 
g_seq=2 stack=0 +GoStart dt=130 g=25 g_seq=3 +GoLabel dt=1 label_string=2 +GoBlock dt=1809 reason_string=15 stack=27 +ProcStop dt=35 +ProcStart dt=45680 p=3 p_seq=4 +HeapAlloc dt=46 heapalloc_value=7659248 +HeapAlloc dt=48 heapalloc_value=7663408 +HeapAlloc dt=6065 heapalloc_value=7876144 +GoStart dt=2865 g=4 g_seq=6 +GoBlock dt=31 reason_string=15 stack=32 +ProcStop dt=49 +ProcStart dt=1490 p=3 p_seq=5 +ProcStop dt=29 +ProcStart dt=2071 p=1 p_seq=10 +ProcStop dt=21 +ProcStart dt=143297 p=2 p_seq=13 +GoUnblock dt=21 g=22 g_seq=6 stack=0 +GoStart dt=177 g=22 g_seq=7 +GoLabel dt=2 label_string=2 +GoBlock dt=2058 reason_string=15 stack=27 +ProcStop dt=2352 +ProcStart dt=162401 p=5 p_seq=2 +HeapAlloc dt=51 heapalloc_value=26353960 +HeapAlloc dt=42 heapalloc_value=26360360 +HeapAlloc dt=6510 heapalloc_value=26367784 +GoStart dt=1039 g=40 g_seq=1 +GoStop dt=297000 reason_string=16 stack=50 +GoStart dt=15 g=40 g_seq=2 +GoStop dt=315522 reason_string=16 stack=50 +GoStart dt=7 g=40 g_seq=3 +GoDestroy dt=168735 +ProcStop dt=43 +ProcStart dt=799345 p=6 p_seq=6 +ProcStop dt=33 +ProcStart dt=1506 p=6 p_seq=10 +ProcStop dt=26 +ProcStart dt=18634 p=7 p_seq=33 +ProcStop dt=34 +EventBatch gen=1 m=18446744073709551615 time=7689672466616 size=28 +GoStatus dt=61 g=2 m=18446744073709551615 gstatus=4 +GoStatus dt=3 g=5 m=18446744073709551615 gstatus=4 +EventBatch gen=1 m=18446744073709551615 time=7689672467258 size=4540 +Stacks +Stack id=86 nframes=7 + pc=4754167 func=24 file=25 line=736 + pc=4814861 func=26 file=27 line=181 + pc=4814837 func=28 file=29 line=736 + pc=4814480 func=30 file=29 line=160 + pc=4996132 func=31 file=32 line=55 + pc=5032836 func=33 file=34 line=179 + pc=5078635 func=35 file=36 line=73 +Stack id=77 nframes=16 + pc=4756520 func=37 file=25 line=1442 + pc=4751813 func=38 file=27 line=298 + pc=4996815 func=39 file=40 line=59 + pc=5049499 func=41 file=42 line=124 + pc=5048282 func=43 file=42 line=70 + pc=5021687 func=44 file=45 line=154 + pc=5057739 func=46 file=47 
line=85 + pc=5057380 func=48 file=47 line=75 + pc=5057381 func=49 file=47 line=71 + pc=4965884 func=50 file=51 line=651 + pc=4964173 func=52 file=51 line=616 + pc=4961811 func=53 file=51 line=517 + pc=4960409 func=54 file=51 line=508 + pc=4958646 func=55 file=51 line=434 + pc=4958647 func=56 file=51 line=401 + pc=5078500 func=35 file=36 line=68 +Stack id=13 nframes=1 + pc=5077820 func=35 file=36 line=28 +Stack id=65 nframes=2 + pc=4224086 func=57 file=58 line=145 + pc=5080123 func=59 file=36 line=94 +Stack id=21 nframes=3 + pc=4640852 func=60 file=61 line=195 + pc=5081128 func=62 file=36 line=125 + pc=5077843 func=35 file=36 line=32 +Stack id=11 nframes=1 + pc=5077754 func=35 file=36 line=27 +Stack id=10 nframes=1 + pc=5080288 func=63 file=36 line=97 +Stack id=44 nframes=2 + pc=4354430 func=64 file=65 line=408 + pc=4354396 func=66 file=67 line=318 +Stack id=51 nframes=3 + pc=4658586 func=68 file=69 line=53 + pc=5080816 func=70 file=36 line=110 + pc=5079149 func=71 file=36 line=40 +Stack id=36 nframes=7 + pc=4310007 func=72 file=73 line=806 + pc=4326610 func=74 file=75 line=562 + pc=4258131 func=76 file=77 line=1353 + pc=4255947 func=78 file=77 line=1025 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=57 nframes=5 + pc=4753924 func=81 file=25 line=432 + pc=4744496 func=82 file=83 line=118 + pc=4823012 func=84 file=85 line=218 + pc=4824373 func=86 file=87 line=21 + pc=5079543 func=59 file=36 line=82 +Stack id=16 nframes=7 + pc=4754618 func=88 file=25 line=964 + pc=4816103 func=89 file=27 line=209 + pc=4816095 func=28 file=29 line=736 + pc=4815648 func=90 file=29 line=380 + pc=4821008 func=91 file=92 line=46 + pc=4821000 func=93 file=94 line=189 + pc=5077114 func=95 file=96 line=134 +Stack id=63 nframes=1 + pc=5080224 func=97 file=36 line=89 +Stack id=2 nframes=3 + pc=4567556 func=98 file=99 line=239 + pc=5076805 func=100 file=96 line=125 + pc=5077595 func=35 file=36 line=20 +Stack id=80 
nframes=15 + pc=4998478 func=101 file=29 line=683 + pc=4998507 func=39 file=40 line=141 + pc=5049499 func=41 file=42 line=124 + pc=5048282 func=43 file=42 line=70 + pc=5021687 func=44 file=45 line=154 + pc=5057739 func=46 file=47 line=85 + pc=5057380 func=48 file=47 line=75 + pc=5057381 func=49 file=47 line=71 + pc=4965884 func=50 file=51 line=651 + pc=4964173 func=52 file=51 line=616 + pc=4961811 func=53 file=51 line=517 + pc=4960409 func=54 file=51 line=508 + pc=4958646 func=55 file=51 line=434 + pc=4958647 func=56 file=51 line=401 + pc=5078500 func=35 file=36 line=68 +Stack id=47 nframes=1 + pc=5079072 func=71 file=36 line=38 +Stack id=55 nframes=2 + pc=4227441 func=102 file=58 line=442 + pc=5078106 func=35 file=36 line=48 +Stack id=5 nframes=4 + pc=4576789 func=103 file=104 line=44 + pc=4567832 func=98 file=99 line=258 + pc=5076805 func=100 file=96 line=125 + pc=5077595 func=35 file=36 line=20 +Stack id=46 nframes=3 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=8 nframes=1 + pc=5077056 func=95 file=96 line=128 +Stack id=24 nframes=6 + pc=4315620 func=105 file=73 line=1249 + pc=4308860 func=106 file=73 line=662 + pc=4257811 func=78 file=77 line=1308 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=37 nframes=1 + pc=4316644 func=107 file=73 line=1469 +Stack id=79 nframes=5 + pc=4817209 func=108 file=29 line=611 + pc=5000296 func=109 file=40 line=172 + pc=5058941 func=110 file=47 line=159 + pc=5055951 func=111 file=112 line=327 + pc=5078747 func=113 file=36 line=59 +Stack id=17 nframes=1 + pc=5077124 func=95 file=96 line=130 +Stack id=41 nframes=2 + pc=4310763 func=72 file=73 line=816 + pc=4316644 func=107 file=73 line=1469 +Stack id=33 nframes=7 + pc=4328420 func=114 file=75 line=747 + pc=4326674 func=74 file=75 line=587 + pc=4258131 func=76 file=77 line=1353 + pc=4255947 func=78 file=77 line=1025 + pc=4528840 
func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=29 nframes=6 + pc=4644903 func=115 file=116 line=474 + pc=4309092 func=106 file=73 line=683 + pc=4257811 func=78 file=77 line=1308 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=73 nframes=10 + pc=4756296 func=117 file=25 line=1432 + pc=4751685 func=118 file=27 line=290 + pc=5051812 func=119 file=42 line=167 + pc=5048051 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=92 nframes=2 + pc=4640852 func=60 file=61 line=195 + pc=5078782 func=113 file=36 line=63 +Stack id=32 nframes=2 + pc=4344589 func=124 file=125 line=425 + pc=4346072 func=126 file=125 line=658 +Stack id=45 nframes=1 + pc=5077843 func=35 file=36 line=32 +Stack id=62 nframes=3 + pc=4754167 func=24 file=25 line=736 + pc=5079848 func=26 file=27 line=181 + pc=5079785 func=59 file=36 line=90 +Stack id=15 nframes=3 + pc=4227441 func=102 file=58 line=442 + pc=4574090 func=127 file=99 line=937 + pc=4576964 func=128 file=104 line=56 +Stack id=28 nframes=4 + pc=4257811 func=78 file=77 line=1308 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=64 nframes=7 + pc=4754618 func=88 file=25 line=964 + pc=4816103 func=89 file=27 line=209 + pc=4816095 func=28 file=29 line=736 + pc=4815648 func=90 file=29 line=380 + pc=4821008 func=91 file=92 line=46 + pc=4821000 func=93 file=94 line=189 + pc=5080260 func=97 file=36 line=89 +Stack id=91 nframes=8 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5060022 func=133 file=134 line=21 + pc=5055784 func=135 file=112 line=257 + pc=5058972 
func=110 file=47 line=163 + pc=5055951 func=111 file=112 line=327 + pc=5078747 func=113 file=36 line=59 +Stack id=95 nframes=8 + pc=4753732 func=136 file=25 line=335 + pc=4813424 func=137 file=138 line=24 + pc=4813394 func=139 file=29 line=81 + pc=4811154 func=140 file=141 line=213 + pc=4813572 func=142 file=29 line=104 + pc=4996049 func=143 file=32 line=37 + pc=5033653 func=144 file=34 line=203 + pc=5078651 func=35 file=36 line=74 +Stack id=22 nframes=4 + pc=4257811 func=78 file=77 line=1308 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=56 nframes=5 + pc=4753924 func=81 file=25 line=432 + pc=4744422 func=82 file=83 line=106 + pc=4823012 func=84 file=85 line=218 + pc=4824373 func=86 file=87 line=21 + pc=5079543 func=59 file=36 line=82 +Stack id=60 nframes=5 + pc=4753924 func=81 file=25 line=432 + pc=4744422 func=82 file=83 line=106 + pc=4813961 func=145 file=29 line=129 + pc=5079772 func=146 file=85 line=90 + pc=5079785 func=59 file=36 line=90 +Stack id=38 nframes=2 + pc=4310679 func=72 file=73 line=914 + pc=4316644 func=107 file=73 line=1469 +Stack id=52 nframes=3 + pc=4708004 func=147 file=148 line=81 + pc=5079238 func=149 file=148 line=87 + pc=5079164 func=71 file=36 line=41 +Stack id=20 nframes=3 + pc=4708004 func=147 file=148 line=81 + pc=5080678 func=149 file=148 line=87 + pc=5080600 func=150 file=36 line=105 +Stack id=67 nframes=19 + pc=4752943 func=151 file=25 line=98 + pc=4822218 func=152 file=153 line=280 + pc=4822195 func=154 file=155 line=15 + pc=4823409 func=156 file=85 line=272 + pc=4821405 func=157 file=94 line=374 + pc=5042404 func=158 file=94 line=354 + pc=5042391 func=159 file=160 line=76 + pc=5047095 func=161 file=162 line=35 + pc=5068462 func=163 file=34 line=373 + pc=4703265 func=164 file=165 line=74 + pc=5034315 func=166 file=165 line=65 + pc=5034286 func=167 file=34 line=373 + pc=5047998 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 
func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=84 nframes=15 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5059867 func=133 file=134 line=18 + pc=5055784 func=135 file=112 line=257 + pc=5058352 func=46 file=47 line=121 + pc=5057380 func=48 file=47 line=75 + pc=5057381 func=49 file=47 line=71 + pc=4965884 func=50 file=51 line=651 + pc=4964173 func=52 file=51 line=616 + pc=4961811 func=53 file=51 line=517 + pc=4960409 func=54 file=51 line=508 + pc=4958646 func=55 file=51 line=434 + pc=4958647 func=56 file=51 line=401 + pc=5078500 func=35 file=36 line=68 +Stack id=74 nframes=9 + pc=4755428 func=168 file=25 line=1213 + pc=5051952 func=119 file=42 line=170 + pc=5048051 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=50 nframes=1 + pc=5079149 func=71 file=36 line=40 +Stack id=14 nframes=2 + pc=4708263 func=169 file=148 line=116 + pc=5077833 func=35 file=36 line=29 +Stack id=27 nframes=2 + pc=4437613 func=170 file=65 line=402 + pc=4316040 func=107 file=73 line=1333 +Stack id=30 nframes=5 + pc=4309402 func=106 file=73 line=745 + pc=4257811 func=78 file=77 line=1308 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=75 nframes=1 + pc=5078720 func=113 file=36 line=58 +Stack id=88 nframes=8 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5059594 func=171 file=172 line=15 + pc=5055722 func=135 file=112 line=251 + pc=5058972 func=110 file=47 line=163 + pc=5055951 func=111 file=112 line=327 + pc=5078747 
func=113 file=36 line=59 +Stack id=70 nframes=21 + pc=4754167 func=24 file=25 line=736 + pc=4814861 func=26 file=27 line=181 + pc=4814837 func=28 file=29 line=736 + pc=4814480 func=30 file=29 line=160 + pc=4820817 func=173 file=92 line=29 + pc=4820809 func=174 file=94 line=118 + pc=4742703 func=175 file=176 line=335 + pc=5041967 func=177 file=176 line=354 + pc=5041927 func=178 file=160 line=55 + pc=5047143 func=161 file=162 line=40 + pc=5068462 func=163 file=34 line=373 + pc=4703265 func=164 file=165 line=74 + pc=5034315 func=166 file=165 line=65 + pc=5034286 func=167 file=34 line=373 + pc=5047998 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=25 nframes=7 + pc=4227441 func=102 file=58 line=442 + pc=4315507 func=105 file=73 line=1259 + pc=4308860 func=106 file=73 line=662 + pc=4257811 func=78 file=77 line=1308 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=58 nframes=5 + pc=4753924 func=81 file=25 line=432 + pc=4744422 func=82 file=83 line=106 + pc=4823012 func=84 file=85 line=218 + pc=4824408 func=86 file=87 line=21 + pc=5079543 func=59 file=36 line=82 +Stack id=69 nframes=19 + pc=4753924 func=81 file=25 line=432 + pc=4744496 func=82 file=83 line=118 + pc=4823012 func=84 file=85 line=218 + pc=4823631 func=156 file=85 line=301 + pc=4821405 func=157 file=94 line=374 + pc=5042404 func=158 file=94 line=354 + pc=5042391 func=159 file=160 line=76 + pc=5047095 func=161 file=162 line=35 + pc=5068462 func=163 file=34 line=373 + pc=4703265 func=164 file=165 line=74 + pc=5034315 func=166 file=165 line=65 + pc=5034286 func=167 file=34 line=373 + pc=5047998 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 
line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=83 nframes=15 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5054762 func=179 file=180 line=88 + pc=5055769 func=135 file=112 line=256 + pc=5058352 func=46 file=47 line=121 + pc=5057380 func=48 file=47 line=75 + pc=5057381 func=49 file=47 line=71 + pc=4965884 func=50 file=51 line=651 + pc=4964173 func=52 file=51 line=616 + pc=4961811 func=53 file=51 line=517 + pc=4960409 func=54 file=51 line=508 + pc=4958646 func=55 file=51 line=434 + pc=4958647 func=56 file=51 line=401 + pc=5078500 func=35 file=36 line=68 +Stack id=43 nframes=9 + pc=4368154 func=181 file=182 line=958 + pc=4293585 func=183 file=184 line=254 + pc=4293175 func=185 file=184 line=170 + pc=4290674 func=186 file=187 line=182 + pc=4255364 func=188 file=77 line=948 + pc=4256932 func=78 file=77 line=1149 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=78 nframes=8 + pc=4756062 func=189 file=25 line=1421 + pc=4750293 func=190 file=153 line=684 + pc=4818215 func=191 file=192 line=17 + pc=4816989 func=108 file=29 line=602 + pc=5000296 func=109 file=40 line=172 + pc=5058941 func=110 file=47 line=159 + pc=5055951 func=111 file=112 line=327 + pc=5078747 func=113 file=36 line=59 +Stack id=71 nframes=20 + pc=4753732 func=136 file=25 line=335 + pc=4813424 func=137 file=138 line=24 + pc=4813394 func=139 file=29 line=81 + pc=4811154 func=140 file=141 line=213 + pc=4813572 func=142 file=29 line=104 + pc=4823895 func=193 file=85 line=315 + pc=5047564 func=194 file=92 line=23 + pc=5047547 func=195 file=160 line=23 + pc=5047406 func=161 file=162 line=53 + pc=5068462 func=163 file=34 line=373 + pc=4703265 func=164 file=165 line=74 + pc=5034315 func=166 file=165 line=65 + pc=5034286 func=167 file=34 line=373 + pc=5047998 func=43 file=42 line=57 
+ pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=3 nframes=4 + pc=4446827 func=196 file=65 line=1369 + pc=4567827 func=98 file=99 line=256 + pc=5076805 func=100 file=96 line=125 + pc=5077595 func=35 file=36 line=20 +Stack id=35 nframes=2 + pc=4310007 func=72 file=73 line=806 + pc=4316644 func=107 file=73 line=1469 +Stack id=6 nframes=1 + pc=4573664 func=197 file=99 line=877 +Stack id=19 nframes=1 + pc=5080585 func=150 file=36 line=104 +Stack id=54 nframes=1 + pc=5078085 func=35 file=36 line=47 +Stack id=82 nframes=15 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5059594 func=171 file=172 line=15 + pc=5055722 func=135 file=112 line=251 + pc=5058352 func=46 file=47 line=121 + pc=5057380 func=48 file=47 line=75 + pc=5057381 func=49 file=47 line=71 + pc=4965884 func=50 file=51 line=651 + pc=4964173 func=52 file=51 line=616 + pc=4961811 func=53 file=51 line=517 + pc=4960409 func=54 file=51 line=508 + pc=4958646 func=55 file=51 line=434 + pc=4958647 func=56 file=51 line=401 + pc=5078500 func=35 file=36 line=68 +Stack id=90 nframes=8 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5059867 func=133 file=134 line=18 + pc=5055784 func=135 file=112 line=257 + pc=5058972 func=110 file=47 line=163 + pc=5055951 func=111 file=112 line=327 + pc=5078747 func=113 file=36 line=59 +Stack id=61 nframes=5 + pc=4753924 func=81 file=25 line=432 + pc=4744496 func=82 file=83 line=118 + pc=4813961 func=145 file=29 line=129 + pc=5079772 func=146 file=85 line=90 + pc=5079785 func=59 file=36 line=90 +Stack id=23 nframes=1 + pc=4315808 func=107 file=73 line=1298 +Stack id=12 nframes=1 + pc=5080512 func=150 file=36 line=102 +Stack id=68 nframes=19 + 
pc=4753924 func=81 file=25 line=432 + pc=4744422 func=82 file=83 line=106 + pc=4823012 func=84 file=85 line=218 + pc=4823631 func=156 file=85 line=301 + pc=4821405 func=157 file=94 line=374 + pc=5042404 func=158 file=94 line=354 + pc=5042391 func=159 file=160 line=76 + pc=5047095 func=161 file=162 line=35 + pc=5068462 func=163 file=34 line=373 + pc=4703265 func=164 file=165 line=74 + pc=5034315 func=166 file=165 line=65 + pc=5034286 func=167 file=34 line=373 + pc=5047998 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=4 nframes=1 + pc=4576896 func=128 file=104 line=44 +Stack id=66 nframes=6 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=81 nframes=16 + pc=4757147 func=198 file=25 line=1478 + pc=4752076 func=199 file=27 line=313 + pc=4998549 func=39 file=40 line=149 + pc=5049499 func=41 file=42 line=124 + pc=5048282 func=43 file=42 line=70 + pc=5021687 func=44 file=45 line=154 + pc=5057739 func=46 file=47 line=85 + pc=5057380 func=48 file=47 line=75 + pc=5057381 func=49 file=47 line=71 + pc=4965884 func=50 file=51 line=651 + pc=4964173 func=52 file=51 line=616 + pc=4961811 func=53 file=51 line=517 + pc=4960409 func=54 file=51 line=508 + pc=4958646 func=55 file=51 line=434 + pc=4958647 func=56 file=51 line=401 + pc=5078500 func=35 file=36 line=68 +Stack id=87 nframes=4 + pc=4814791 func=30 file=29 line=164 + pc=4996132 func=31 file=32 line=55 + pc=5032836 func=33 file=34 line=179 + pc=5078635 func=35 file=36 line=73 +Stack id=85 nframes=15 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + 
pc=5060022 func=133 file=134 line=21 + pc=5055784 func=135 file=112 line=257 + pc=5058352 func=46 file=47 line=121 + pc=5057380 func=48 file=47 line=75 + pc=5057381 func=49 file=47 line=71 + pc=4965884 func=50 file=51 line=651 + pc=4964173 func=52 file=51 line=616 + pc=4961811 func=53 file=51 line=517 + pc=4960409 func=54 file=51 line=508 + pc=4958646 func=55 file=51 line=434 + pc=4958647 func=56 file=51 line=401 + pc=5078500 func=35 file=36 line=68 +Stack id=39 nframes=4 + pc=4644903 func=115 file=116 line=474 + pc=4311677 func=200 file=73 line=964 + pc=4310756 func=72 file=73 line=926 + pc=4316644 func=107 file=73 line=1469 +Stack id=31 nframes=7 + pc=4585153 func=201 file=202 line=383 + pc=4326396 func=74 file=75 line=534 + pc=4258131 func=76 file=77 line=1353 + pc=4255947 func=78 file=77 line=1025 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=89 nframes=8 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5054762 func=179 file=180 line=88 + pc=5055769 func=135 file=112 line=256 + pc=5058972 func=110 file=47 line=163 + pc=5055951 func=111 file=112 line=327 + pc=5078747 func=113 file=36 line=59 +Stack id=53 nframes=1 + pc=5079488 func=59 file=36 line=81 +Stack id=18 nframes=3 + pc=4227441 func=102 file=58 line=442 + pc=4574090 func=127 file=99 line=937 + pc=4573703 func=197 file=99 line=880 +Stack id=48 nframes=1 + pc=5077881 func=35 file=36 line=38 +Stack id=94 nframes=8 + pc=4753732 func=136 file=25 line=335 + pc=4813424 func=137 file=138 line=24 + pc=4813394 func=139 file=29 line=81 + pc=4811154 func=140 file=141 line=213 + pc=4813572 func=142 file=29 line=104 + pc=4996049 func=143 file=32 line=37 + pc=5033653 func=144 file=34 line=203 + pc=5078837 func=113 file=36 line=66 +Stack id=42 nframes=9 + pc=4584693 func=203 file=202 line=357 + pc=4355940 func=204 file=67 line=522 + pc=4292956 func=185 file=184 line=147 + 
pc=4290674 func=186 file=187 line=182 + pc=4255364 func=188 file=77 line=948 + pc=4256932 func=78 file=77 line=1149 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=93 nframes=7 + pc=4754618 func=88 file=25 line=964 + pc=4816103 func=89 file=27 line=209 + pc=4816095 func=28 file=29 line=736 + pc=4815648 func=90 file=29 line=380 + pc=4996388 func=205 file=32 line=96 + pc=5033284 func=206 file=34 line=191 + pc=5078821 func=113 file=36 line=65 +Stack id=34 nframes=2 + pc=4644903 func=115 file=116 line=474 + pc=4316309 func=107 file=73 line=1393 +Stack id=49 nframes=2 + pc=4708263 func=169 file=148 line=116 + pc=5078001 func=35 file=36 line=43 +Stack id=7 nframes=4 + pc=4573636 func=207 file=99 line=877 + pc=4567844 func=98 file=99 line=259 + pc=5076805 func=100 file=96 line=125 + pc=5077595 func=35 file=36 line=20 +Stack id=76 nframes=1 + pc=5078444 func=35 file=36 line=58 +Stack id=1 nframes=4 + pc=4583115 func=208 file=202 line=260 + pc=4567535 func=98 file=99 line=238 + pc=5076805 func=100 file=96 line=125 + pc=5077595 func=35 file=36 line=20 +Stack id=26 nframes=2 + pc=4224086 func=57 file=58 line=145 + pc=4316011 func=107 file=73 line=1312 +Stack id=40 nframes=3 + pc=4312646 func=200 file=73 line=1086 + pc=4310756 func=72 file=73 line=926 + pc=4316644 func=107 file=73 line=1469 +Stack id=72 nframes=11 + pc=4757394 func=129 file=25 line=1488 + pc=5054386 func=130 file=27 line=462 + pc=5054396 func=209 file=210 line=28 + pc=5051349 func=119 file=42 line=152 + pc=5048051 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=59 nframes=5 + pc=4753924 func=81 file=25 line=432 + pc=4744496 func=82 file=83 line=118 + pc=4823012 func=84 file=85 line=218 + pc=4824408 func=86 file=87 line=21 + 
pc=5079543 func=59 file=36 line=82 +Stack id=9 nframes=2 + pc=5076879 func=100 file=96 line=128 + pc=5077595 func=35 file=36 line=20 +EventBatch gen=1 m=18446744073709551615 time=7689670146021 size=6980 +Strings +String id=1 + data="Not worker" +String id=2 + data="GC (dedicated)" +String id=3 + data="GC (fractional)" +String id=4 + data="GC (idle)" +String id=5 + data="unspecified" +String id=6 + data="forever" +String id=7 + data="network" +String id=8 + data="select" +String id=9 + data="sync.(*Cond).Wait" +String id=10 + data="sync" +String id=11 + data="chan send" +String id=12 + data="chan receive" +String id=13 + data="GC mark assist wait for work" +String id=14 + data="GC background sweeper wait" +String id=15 + data="system goroutine wait" +String id=16 + data="preempted" +String id=17 + data="wait for debug call" +String id=18 + data="wait until GC ends" +String id=19 + data="sleep" +String id=20 + data="runtime.Gosched" +String id=21 + data="start trace" +String id=22 + data="GC sweep termination" +String id=23 + data="GC mark termination" +String id=24 + data="syscall.read" +String id=25 + data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/zsyscall_linux_amd64.go" +String id=26 + data="syscall.Read" +String id=27 + data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/syscall_unix.go" +String id=28 + data="internal/poll.ignoringEINTRIO" +String id=29 + data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/fd_unix.go" +String id=30 + data="internal/poll.(*FD).Read" +String id=31 + data="net.(*netFD).Read" +String id=32 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/fd_posix.go" +String id=33 + data="net.(*conn).Read" +String id=34 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/net.go" +String id=35 + data="main.main" +String id=36 + data="/usr/local/google/home/mknyszek/work/go-1/src/cmd/trace/v2/testdata/testprog/main.go" +String id=37 + data="syscall.connect" +String id=38 + data="syscall.Connect" 
+String id=39 + data="net.(*netFD).connect" +String id=40 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/fd_unix.go" +String id=41 + data="net.(*netFD).dial" +String id=42 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/sock_posix.go" +String id=43 + data="net.socket" +String id=44 + data="net.internetSocket" +String id=45 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/ipsock_posix.go" +String id=46 + data="net.(*sysDialer).doDialTCPProto" +String id=47 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/tcpsock_posix.go" +String id=48 + data="net.(*sysDialer).doDialTCP" +String id=49 + data="net.(*sysDialer).dialTCP" +String id=50 + data="net.(*sysDialer).dialSingle" +String id=51 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/dial.go" +String id=52 + data="net.(*sysDialer).dialSerial" +String id=53 + data="net.(*sysDialer).dialParallel" +String id=54 + data="net.(*Dialer).DialContext" +String id=55 + data="net.(*Dialer).Dial" +String id=56 + data="net.Dial" +String id=57 + data="runtime.chansend1" +String id=58 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/chan.go" +String id=59 + data="main.blockingSyscall" +String id=60 + data="time.Sleep" +String id=61 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/time.go" +String id=62 + data="main.allocHog" +String id=63 + data="main.cpu10" +String id=64 + data="runtime.goparkunlock" +String id=65 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/proc.go" +String id=66 + data="runtime.bgsweep" +String id=67 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgcsweep.go" +String id=68 + data="runtime.asyncPreempt" +String id=69 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/preempt_amd64.s" +String id=70 + data="main.cpuHog" +String id=71 + data="main.main.func1" +String id=72 + data="runtime.gcMarkDone" +String id=73 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgc.go" +String id=74 + 
data="runtime.gcAssistAlloc" +String id=75 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgcmark.go" +String id=76 + data="runtime.deductAssistCredit" +String id=77 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/malloc.go" +String id=78 + data="runtime.mallocgc" +String id=79 + data="runtime.makeslice" +String id=80 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/slice.go" +String id=81 + data="syscall.fcntl" +String id=82 + data="syscall.SetNonblock" +String id=83 + data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/exec_unix.go" +String id=84 + data="os.newFile" +String id=85 + data="/usr/local/google/home/mknyszek/work/go-1/src/os/file_unix.go" +String id=86 + data="os.Pipe" +String id=87 + data="/usr/local/google/home/mknyszek/work/go-1/src/os/pipe2_unix.go" +String id=88 + data="syscall.write" +String id=89 + data="syscall.Write" +String id=90 + data="internal/poll.(*FD).Write" +String id=91 + data="os.(*File).write" +String id=92 + data="/usr/local/google/home/mknyszek/work/go-1/src/os/file_posix.go" +String id=93 + data="os.(*File).Write" +String id=94 + data="/usr/local/google/home/mknyszek/work/go-1/src/os/file.go" +String id=95 + data="runtime/trace.Start.func1" +String id=96 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace/trace.go" +String id=97 + data="main.blockingSyscall.func1" +String id=98 + data="runtime.StartTrace" +String id=99 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2.go" +String id=100 + data="runtime/trace.Start" +String id=101 + data="internal/poll.(*FD).WaitWrite" +String id=102 + data="runtime.chanrecv1" +String id=103 + data="runtime.traceStartReadCPU" +String id=104 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2cpu.go" +String id=105 + data="runtime.gcBgMarkStartWorkers" +String id=106 + data="runtime.gcStart" +String id=107 + data="runtime.gcBgMarkWorker" +String id=108 + data="internal/poll.(*FD).Accept" +String 
id=109 + data="net.(*netFD).accept" +String id=110 + data="net.(*TCPListener).accept" +String id=111 + data="net.(*TCPListener).Accept" +String id=112 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/tcpsock.go" +String id=113 + data="main.main.func2" +String id=114 + data="runtime.gcParkAssist" +String id=115 + data="runtime.systemstack_switch" +String id=116 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/asm_amd64.s" +String id=117 + data="syscall.bind" +String id=118 + data="syscall.Bind" +String id=119 + data="net.(*netFD).listenStream" +String id=120 + data="net.(*sysListener).listenTCPProto" +String id=121 + data="net.(*sysListener).listenTCP" +String id=122 + data="net.(*ListenConfig).Listen" +String id=123 + data="net.Listen" +String id=124 + data="runtime.(*scavengerState).park" +String id=125 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgcscavenge.go" +String id=126 + data="runtime.bgscavenge" +String id=127 + data="runtime.(*wakeableSleep).sleep" +String id=128 + data="runtime.traceStartReadCPU.func1" +String id=129 + data="syscall.setsockopt" +String id=130 + data="syscall.SetsockoptInt" +String id=131 + data="internal/poll.(*FD).SetsockoptInt" +String id=132 + data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/sockopt.go" +String id=133 + data="net.setKeepAlivePeriod" +String id=134 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/tcpsockopt_unix.go" +String id=135 + data="net.newTCPConn" +String id=136 + data="syscall.Close" +String id=137 + data="internal/poll.(*SysFile).destroy" +String id=138 + data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/fd_unixjs.go" +String id=139 + data="internal/poll.(*FD).destroy" +String id=140 + data="internal/poll.(*FD).decref" +String id=141 + data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/fd_mutex.go" +String id=142 + data="internal/poll.(*FD).Close" +String id=143 + data="net.(*netFD).Close" +String id=144 + 
data="net.(*conn).Close" +String id=145 + data="internal/poll.(*FD).SetBlocking" +String id=146 + data="os.(*File).Fd" +String id=147 + data="sync.(*WaitGroup).Add" +String id=148 + data="/usr/local/google/home/mknyszek/work/go-1/src/sync/waitgroup.go" +String id=149 + data="sync.(*WaitGroup).Done" +String id=150 + data="main.cpu20" +String id=151 + data="syscall.openat" +String id=152 + data="syscall.Open" +String id=153 + data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/syscall_linux.go" +String id=154 + data="os.open" +String id=155 + data="/usr/local/google/home/mknyszek/work/go-1/src/os/file_open_unix.go" +String id=156 + data="os.openFileNolog" +String id=157 + data="os.OpenFile" +String id=158 + data="os.Open" +String id=159 + data="net.open" +String id=160 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/parse.go" +String id=161 + data="net.maxListenerBacklog" +String id=162 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/sock_linux.go" +String id=163 + data="net.listenerBacklog.func1" +String id=164 + data="sync.(*Once).doSlow" +String id=165 + data="/usr/local/google/home/mknyszek/work/go-1/src/sync/once.go" +String id=166 + data="sync.(*Once).Do" +String id=167 + data="net.listenerBacklog" +String id=168 + data="syscall.Listen" +String id=169 + data="sync.(*WaitGroup).Wait" +String id=170 + data="runtime.gopark" +String id=171 + data="net.setNoDelay" +String id=172 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/tcpsockopt_posix.go" +String id=173 + data="os.(*File).read" +String id=174 + data="os.(*File).Read" +String id=175 + data="io.ReadAtLeast" +String id=176 + data="/usr/local/google/home/mknyszek/work/go-1/src/io/io.go" +String id=177 + data="io.ReadFull" +String id=178 + data="net.(*file).readLine" +String id=179 + data="net.setKeepAlive" +String id=180 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/sockopt_posix.go" +String id=181 + data="runtime.(*mheap).alloc" +String id=182 + 
data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mheap.go" +String id=183 + data="runtime.(*mcentral).grow" +String id=184 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mcentral.go" +String id=185 + data="runtime.(*mcentral).cacheSpan" +String id=186 + data="runtime.(*mcache).refill" +String id=187 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mcache.go" +String id=188 + data="runtime.(*mcache).nextFree" +String id=189 + data="syscall.accept4" +String id=190 + data="syscall.Accept4" +String id=191 + data="internal/poll.accept" +String id=192 + data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/sock_cloexec.go" +String id=193 + data="os.(*file).close" +String id=194 + data="os.(*File).Close" +String id=195 + data="net.(*file).close" +String id=196 + data="runtime.startTheWorld" +String id=197 + data="runtime.(*traceAdvancerState).start.func1" +String id=198 + data="syscall.getsockopt" +String id=199 + data="syscall.GetsockoptInt" +String id=200 + data="runtime.gcMarkTermination" +String id=201 + data="runtime.traceLocker.GCMarkAssistStart" +String id=202 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2runtime.go" +String id=203 + data="runtime.traceLocker.GCSweepSpan" +String id=204 + data="runtime.(*sweepLocked).sweep" +String id=205 + data="net.(*netFD).Write" +String id=206 + data="net.(*conn).Write" +String id=207 + data="runtime.(*traceAdvancerState).start" +String id=208 + data="runtime.traceLocker.Gomaxprocs" +String id=209 + data="net.setDefaultListenerSockopts" +String id=210 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/sockopt_linux.go" diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/testdata/mktests.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/testdata/mktests.go new file mode 100644 index 0000000000000000000000000000000000000000..143e8ece35f28c4cc0a020887d3a1476f037d1dd --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/trace/v2/testdata/mktests.go @@ -0,0 +1,60 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +import ( + "bytes" + "fmt" + "internal/trace/v2/raw" + "internal/trace/v2/version" + "io" + "log" + "os" + "os/exec" +) + +func main() { + // Create command. + var trace, stderr bytes.Buffer + cmd := exec.Command("go", "run", "./testprog/main.go") + // TODO(mknyszek): Remove if goexperiment.Exectracer2 becomes the default. + cmd.Env = append(os.Environ(), "GOEXPERIMENT=exectracer2") + cmd.Stdout = &trace + cmd.Stderr = &stderr + + // Run trace program; the trace will appear in stdout. + fmt.Fprintln(os.Stderr, "running trace program...") + if err := cmd.Run(); err != nil { + log.Fatalf("running trace program: %v:\n%s", err, stderr.String()) + } + + // Create file. + f, err := os.Create(fmt.Sprintf("./go1%d.test", version.Current)) + if err != nil { + log.Fatalf("creating output file: %v", err) + } + defer f.Close() + + // Write out the trace. + r, err := raw.NewReader(&trace) + if err != nil { + log.Fatalf("reading trace: %v", err) + } + w, err := raw.NewTextWriter(f, version.Current) + for { + ev, err := r.ReadEvent() + if err == io.EOF { + break + } + if err != nil { + log.Fatalf("reading trace: %v", err) + } + if err := w.WriteEvent(ev); err != nil { + log.Fatalf("writing trace: %v", err) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/testdata/testprog/main.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/testdata/testprog/main.go new file mode 100644 index 0000000000000000000000000000000000000000..fcf4dc156cd45d996789f97f7fb78c7e2e4bab9f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/testdata/testprog/main.go @@ -0,0 +1,129 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "log" + "net" + "os" + "runtime" + "runtime/trace" + "sync" + "syscall" + "time" +) + +func main() { + if err := trace.Start(os.Stdout); err != nil { + log.Fatal(err) + } + + // checkExecutionTimes relies on this. + var wg sync.WaitGroup + wg.Add(2) + go cpu10(&wg) + go cpu20(&wg) + wg.Wait() + + // checkHeapMetrics relies on this. + allocHog(25 * time.Millisecond) + + // checkProcStartStop relies on this. + var wg2 sync.WaitGroup + for i := 0; i < runtime.GOMAXPROCS(0); i++ { + wg2.Add(1) + go func() { + defer wg2.Done() + cpuHog(50 * time.Millisecond) + }() + } + wg2.Wait() + + // checkSyscalls relies on this. + done := make(chan error) + go blockingSyscall(50*time.Millisecond, done) + if err := <-done; err != nil { + log.Fatal(err) + } + + // checkNetworkUnblock relies on this. + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + log.Fatalf("listen failed: %v", err) + } + defer ln.Close() + go func() { + c, err := ln.Accept() + if err != nil { + return + } + time.Sleep(time.Millisecond) + var buf [1]byte + c.Write(buf[:]) + c.Close() + }() + c, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + log.Fatalf("dial failed: %v", err) + } + var tmp [1]byte + c.Read(tmp[:]) + c.Close() + + trace.Stop() +} + +// blockingSyscall blocks the current goroutine for duration d in a syscall and +// sends a message to done when it is done or if the syscall failed. 
+func blockingSyscall(d time.Duration, done chan<- error) { + r, w, err := os.Pipe() + if err != nil { + done <- err + return + } + start := time.Now() + msg := []byte("hello") + time.AfterFunc(d, func() { w.Write(msg) }) + _, err = syscall.Read(int(r.Fd()), make([]byte, len(msg))) + if err == nil && time.Since(start) < d { + err = fmt.Errorf("syscall returned too early: want=%s got=%s", d, time.Since(start)) + } + done <- err +} + +func cpu10(wg *sync.WaitGroup) { + defer wg.Done() + cpuHog(10 * time.Millisecond) +} + +func cpu20(wg *sync.WaitGroup) { + defer wg.Done() + cpuHog(20 * time.Millisecond) +} + +func cpuHog(dt time.Duration) { + start := time.Now() + for i := 0; ; i++ { + if i%1000 == 0 && time.Since(start) > dt { + return + } + } +} + +func allocHog(dt time.Duration) { + start := time.Now() + var s [][]byte + for i := 0; ; i++ { + if i%1000 == 0 { + if time.Since(start) > dt { + return + } + // Take a break... this will generate a ton of events otherwise. + time.Sleep(50 * time.Microsecond) + } + s = append(s, make([]byte, 1024)) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/threadgen.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/threadgen.go new file mode 100644 index 0000000000000000000000000000000000000000..e1cae2b2cf93bf9d447b760a7dc17cdb333d07d5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/threadgen.go @@ -0,0 +1,204 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package trace + +import ( + "fmt" + "internal/trace/traceviewer" + "internal/trace/traceviewer/format" + tracev2 "internal/trace/v2" +) + +var _ generator = &threadGenerator{} + +type threadGenerator struct { + globalRangeGenerator + globalMetricGenerator + stackSampleGenerator[tracev2.ThreadID] + logEventGenerator[tracev2.ThreadID] + + gStates map[tracev2.GoID]*gState[tracev2.ThreadID] + threads map[tracev2.ThreadID]struct{} +} + +func newThreadGenerator() *threadGenerator { + tg := new(threadGenerator) + rg := func(ev *tracev2.Event) tracev2.ThreadID { + return ev.Thread() + } + tg.stackSampleGenerator.getResource = rg + tg.logEventGenerator.getResource = rg + tg.gStates = make(map[tracev2.GoID]*gState[tracev2.ThreadID]) + tg.threads = make(map[tracev2.ThreadID]struct{}) + return tg +} + +func (g *threadGenerator) Sync() { + g.globalRangeGenerator.Sync() +} + +func (g *threadGenerator) GoroutineLabel(ctx *traceContext, ev *tracev2.Event) { + l := ev.Label() + g.gStates[l.Resource.Goroutine()].setLabel(l.Label) +} + +func (g *threadGenerator) GoroutineRange(ctx *traceContext, ev *tracev2.Event) { + r := ev.Range() + switch ev.Kind() { + case tracev2.EventRangeBegin: + g.gStates[r.Scope.Goroutine()].rangeBegin(ev.Time(), r.Name, ev.Stack()) + case tracev2.EventRangeActive: + g.gStates[r.Scope.Goroutine()].rangeActive(r.Name) + case tracev2.EventRangeEnd: + gs := g.gStates[r.Scope.Goroutine()] + gs.rangeEnd(ev.Time(), r.Name, ev.Stack(), ctx) + } +} + +func (g *threadGenerator) GoroutineTransition(ctx *traceContext, ev *tracev2.Event) { + if ev.Thread() != tracev2.NoThread { + if _, ok := g.threads[ev.Thread()]; !ok { + g.threads[ev.Thread()] = struct{}{} + } + } + + st := ev.StateTransition() + goID := st.Resource.Goroutine() + + // If we haven't seen this goroutine before, create a new + // gState for it. 
+ gs, ok := g.gStates[goID] + if !ok { + gs = newGState[tracev2.ThreadID](goID) + g.gStates[goID] = gs + } + // If we haven't already named this goroutine, try to name it. + gs.augmentName(st.Stack) + + // Handle the goroutine state transition. + from, to := st.Goroutine() + if from == to { + // Filter out no-op events. + return + } + if from.Executing() && !to.Executing() { + if to == tracev2.GoWaiting { + // Goroutine started blocking. + gs.block(ev.Time(), ev.Stack(), st.Reason, ctx) + } else { + gs.stop(ev.Time(), ev.Stack(), ctx) + } + } + if !from.Executing() && to.Executing() { + start := ev.Time() + if from == tracev2.GoUndetermined { + // Back-date the event to the start of the trace. + start = ctx.startTime + } + gs.start(start, ev.Thread(), ctx) + } + + if from == tracev2.GoWaiting { + // Goroutine was unblocked. + gs.unblock(ev.Time(), ev.Stack(), ev.Thread(), ctx) + } + if from == tracev2.GoNotExist && to == tracev2.GoRunnable { + // Goroutine was created. + gs.created(ev.Time(), ev.Thread(), ev.Stack()) + } + if from == tracev2.GoSyscall { + // Exiting syscall. + gs.syscallEnd(ev.Time(), to != tracev2.GoRunning, ctx) + } + + // Handle syscalls. + if to == tracev2.GoSyscall { + start := ev.Time() + if from == tracev2.GoUndetermined { + // Back-date the event to the start of the trace. + start = ctx.startTime + } + // Write down that we've entered a syscall. Note: we might have no P here + // if we're in a cgo callback or this is a transition from GoUndetermined + // (i.e. the G has been blocked in a syscall). + gs.syscallBegin(start, ev.Thread(), ev.Stack()) + } + + // Note down the goroutine transition. 
+ _, inMarkAssist := gs.activeRanges["GC mark assist"] + ctx.GoroutineTransition(ctx.elapsed(ev.Time()), viewerGState(from, inMarkAssist), viewerGState(to, inMarkAssist)) +} + +func (g *threadGenerator) ProcTransition(ctx *traceContext, ev *tracev2.Event) { + if ev.Thread() != tracev2.NoThread { + if _, ok := g.threads[ev.Thread()]; !ok { + g.threads[ev.Thread()] = struct{}{} + } + } + + type procArg struct { + Proc uint64 `json:"proc,omitempty"` + } + st := ev.StateTransition() + viewerEv := traceviewer.InstantEvent{ + Resource: uint64(ev.Thread()), + Stack: ctx.Stack(viewerFrames(ev.Stack())), + Arg: procArg{Proc: uint64(st.Resource.Proc())}, + } + + from, to := st.Proc() + if from == to { + // Filter out no-op events. + return + } + if to.Executing() { + start := ev.Time() + if from == tracev2.ProcUndetermined { + start = ctx.startTime + } + viewerEv.Name = "proc start" + viewerEv.Arg = format.ThreadIDArg{ThreadID: uint64(ev.Thread())} + viewerEv.Ts = ctx.elapsed(start) + // TODO(mknyszek): We don't have a state machine for threads, so approximate + // running threads with running Ps. + ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, 1) + } + if from.Executing() { + start := ev.Time() + viewerEv.Name = "proc stop" + viewerEv.Ts = ctx.elapsed(start) + // TODO(mknyszek): We don't have a state machine for threads, so approximate + // running threads with running Ps. + ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, -1) + } + // TODO(mknyszek): Consider modeling procs differently and have them be + // transition to and from NotExist when GOMAXPROCS changes. We can emit + // events for this to clearly delineate GOMAXPROCS changes. + + if viewerEv.Name != "" { + ctx.Instant(viewerEv) + } +} + +func (g *threadGenerator) ProcRange(ctx *traceContext, ev *tracev2.Event) { + // TODO(mknyszek): Extend procRangeGenerator to support rendering proc ranges on threads. 
+} + +func (g *threadGenerator) Finish(ctx *traceContext) { + ctx.SetResourceType("OS THREADS") + + // Finish off global ranges. + g.globalRangeGenerator.Finish(ctx) + + // Finish off all the goroutine slices. + for _, gs := range g.gStates { + gs.finish(ctx) + } + + // Name all the threads to the emitter. + for id := range g.threads { + ctx.Resource(uint64(id), fmt.Sprintf("Thread %d", id)) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/v2/viewer.go b/platform/dbops/binaries/go/go/src/cmd/trace/v2/viewer.go new file mode 100644 index 0000000000000000000000000000000000000000..de67fc4e0e7e10a212234285bf0db866831cce55 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/v2/viewer.go @@ -0,0 +1,56 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "fmt" + "internal/trace" + "internal/trace/traceviewer" + tracev2 "internal/trace/v2" + "time" +) + +// viewerFrames returns the frames of the stack of ev. The given frame slice is +// used to store the frames to reduce allocations. +func viewerFrames(stk tracev2.Stack) []*trace.Frame { + var frames []*trace.Frame + stk.Frames(func(f tracev2.StackFrame) bool { + frames = append(frames, &trace.Frame{ + PC: f.PC, + Fn: f.Func, + File: f.File, + Line: int(f.Line), + }) + return true + }) + return frames +} + +func viewerGState(state tracev2.GoState, inMarkAssist bool) traceviewer.GState { + switch state { + case tracev2.GoUndetermined: + return traceviewer.GDead + case tracev2.GoNotExist: + return traceviewer.GDead + case tracev2.GoRunnable: + return traceviewer.GRunnable + case tracev2.GoRunning: + return traceviewer.GRunning + case tracev2.GoWaiting: + if inMarkAssist { + return traceviewer.GWaitingGC + } + return traceviewer.GWaiting + case tracev2.GoSyscall: + // N.B. A goroutine in a syscall is considered "executing" (state.Executing() == true). 
+ return traceviewer.GRunning + default: + panic(fmt.Sprintf("unknown GoState: %s", state.String())) + } +} + +func viewerTime(t time.Duration) float64 { + return float64(t) / float64(time.Microsecond) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/AUTHORS b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/AUTHORS new file mode 100644 index 0000000000000000000000000000000000000000..fd736cb1cfb437fc33c1f49905a6290238abcebd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/AUTHORS @@ -0,0 +1,7 @@ +# This is the official list of pprof authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. +Google Inc. \ No newline at end of file diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/CONTRIBUTORS b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/CONTRIBUTORS new file mode 100644 index 0000000000000000000000000000000000000000..8c8c37d2c8f59a065e52f72bdebde10d7db6bf2e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/CONTRIBUTORS @@ -0,0 +1,16 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. 
+# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name +Raul Silvera +Tipp Moseley +Hyoun Kyu Cho +Martin Spier +Taco de Wolff +Andrew Hunter diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/LICENSE b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/driver/driver.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/driver/driver.go new file mode 100644 index 0000000000000000000000000000000000000000..5a8222f70ac69fde701e4810a91f65e58a5ec201 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/driver/driver.go @@ -0,0 +1,298 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package driver provides an external entry point to the pprof driver. +package driver + +import ( + "io" + "net/http" + "regexp" + "time" + + internaldriver "github.com/google/pprof/internal/driver" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/profile" +) + +// PProf acquires a profile, and symbolizes it using a profile +// manager. 
Then it generates a report formatted according to the +// options selected through the flags package. +func PProf(o *Options) error { + return internaldriver.PProf(o.internalOptions()) +} + +func (o *Options) internalOptions() *plugin.Options { + var obj plugin.ObjTool + if o.Obj != nil { + obj = &internalObjTool{o.Obj} + } + var sym plugin.Symbolizer + if o.Sym != nil { + sym = &internalSymbolizer{o.Sym} + } + var httpServer func(args *plugin.HTTPServerArgs) error + if o.HTTPServer != nil { + httpServer = func(args *plugin.HTTPServerArgs) error { + return o.HTTPServer(((*HTTPServerArgs)(args))) + } + } + return &plugin.Options{ + Writer: o.Writer, + Flagset: o.Flagset, + Fetch: o.Fetch, + Sym: sym, + Obj: obj, + UI: o.UI, + HTTPServer: httpServer, + HTTPTransport: o.HTTPTransport, + } +} + +// HTTPServerArgs contains arguments needed by an HTTP server that +// is exporting a pprof web interface. +type HTTPServerArgs plugin.HTTPServerArgs + +// Options groups all the optional plugins into pprof. +type Options struct { + Writer Writer + Flagset FlagSet + Fetch Fetcher + Sym Symbolizer + Obj ObjTool + UI UI + HTTPServer func(*HTTPServerArgs) error + HTTPTransport http.RoundTripper +} + +// Writer provides a mechanism to write data under a certain name, +// typically a filename. +type Writer interface { + Open(name string) (io.WriteCloser, error) +} + +// A FlagSet creates and parses command-line flags. +// It is similar to the standard flag.FlagSet. +type FlagSet interface { + // Bool, Int, Float64, and String define new flags, + // like the functions of the same name in package flag. 
+ Bool(name string, def bool, usage string) *bool + Int(name string, def int, usage string) *int + Float64(name string, def float64, usage string) *float64 + String(name string, def string, usage string) *string + + // StringList is similar to String but allows multiple values for a + // single flag + StringList(name string, def string, usage string) *[]*string + + // ExtraUsage returns any additional text that should be printed after the + // standard usage message. The extra usage message returned includes all text + // added with AddExtraUsage(). + // The typical use of ExtraUsage is to show any custom flags defined by the + // specific pprof plugins being used. + ExtraUsage() string + + // AddExtraUsage appends additional text to the end of the extra usage message. + AddExtraUsage(eu string) + + // Parse initializes the flags with their values for this run + // and returns the non-flag command line arguments. + // If an unknown flag is encountered or there are no arguments, + // Parse should call usage and return nil. + Parse(usage func()) []string +} + +// A Fetcher reads and returns the profile named by src, using +// the specified duration and timeout. It returns the fetched +// profile and a string indicating a URL from where the profile +// was fetched, which may be different than src. +type Fetcher interface { + Fetch(src string, duration, timeout time.Duration) (*profile.Profile, string, error) +} + +// A Symbolizer introduces symbol information into a profile. +type Symbolizer interface { + Symbolize(mode string, srcs MappingSources, prof *profile.Profile) error +} + +// MappingSources map each profile.Mapping to the source of the profile. +// The key is either Mapping.File or Mapping.BuildId. 
+type MappingSources map[string][]struct { + Source string // URL of the source the mapping was collected from + Start uint64 // delta applied to addresses from this source (to represent Merge adjustments) +} + +// An ObjTool inspects shared libraries and executable files. +type ObjTool interface { + // Open opens the named object file. If the object is a shared + // library, start/limit/offset are the addresses where it is mapped + // into memory in the address space being inspected. If the object + // is a linux kernel, relocationSymbol is the name of the symbol + // corresponding to the start address. + Open(file string, start, limit, offset uint64, relocationSymbol string) (ObjFile, error) + + // Disasm disassembles the named object file, starting at + // the start address and stopping at (before) the end address. + Disasm(file string, start, end uint64, intelSyntax bool) ([]Inst, error) +} + +// An Inst is a single instruction in an assembly listing. +type Inst struct { + Addr uint64 // virtual address of instruction + Text string // instruction text + Function string // function name + File string // source file + Line int // source line +} + +// An ObjFile is a single object file: a shared library or executable. +type ObjFile interface { + // Name returns the underlying file name, if available. + Name() string + + // ObjAddr returns the objdump address corresponding to a runtime address. + ObjAddr(addr uint64) (uint64, error) + + // BuildID returns the GNU build ID of the file, or an empty string. + BuildID() string + + // SourceLine reports the source line information for a given + // address in the file. Due to inlining, the source line information + // is in general a list of positions representing a call stack, + // with the leaf function first. + SourceLine(addr uint64) ([]Frame, error) + + // Symbols returns a list of symbols in the object file. + // If r is not nil, Symbols restricts the list to symbols + // with names matching the regular expression. 
+ // If addr is not zero, Symbols restricts the list to symbols + // containing that address. + Symbols(r *regexp.Regexp, addr uint64) ([]*Sym, error) + + // Close closes the file, releasing associated resources. + Close() error +} + +// A Frame describes a single line in a source file. +type Frame struct { + Func string // name of function + File string // source file name + Line int // line in file +} + +// A Sym describes a single symbol in an object file. +type Sym struct { + Name []string // names of symbol (many if symbol was dedup'ed) + File string // object file containing symbol + Start uint64 // start virtual address + End uint64 // virtual address of last byte in sym (Start+size-1) +} + +// A UI manages user interactions. +type UI interface { + // Read returns a line of text (a command) read from the user. + // prompt is printed before reading the command. + ReadLine(prompt string) (string, error) + + // Print shows a message to the user. + // It formats the text as fmt.Print would and adds a final \n if not already present. + // For line-based UI, Print writes to standard error. + // (Standard output is reserved for report data.) + Print(...interface{}) + + // PrintErr shows an error message to the user. + // It formats the text as fmt.Print would and adds a final \n if not already present. + // For line-based UI, PrintErr writes to standard error. + PrintErr(...interface{}) + + // IsTerminal returns whether the UI is known to be tied to an + // interactive terminal (as opposed to being redirected to a file). + IsTerminal() bool + + // WantBrowser indicates whether browser should be opened with the -http option. + WantBrowser() bool + + // SetAutoComplete instructs the UI to call complete(cmd) to obtain + // the auto-completion of cmd, if the UI supports auto-completion at all. + SetAutoComplete(complete func(string) string) +} + +// internalObjTool is a wrapper to map from the pprof external +// interface to the internal interface. 
+type internalObjTool struct { + ObjTool +} + +func (o *internalObjTool) Open(file string, start, limit, offset uint64, relocationSymbol string) (plugin.ObjFile, error) { + f, err := o.ObjTool.Open(file, start, limit, offset, relocationSymbol) + if err != nil { + return nil, err + } + return &internalObjFile{f}, err +} + +type internalObjFile struct { + ObjFile +} + +func (f *internalObjFile) SourceLine(frame uint64) ([]plugin.Frame, error) { + frames, err := f.ObjFile.SourceLine(frame) + if err != nil { + return nil, err + } + var pluginFrames []plugin.Frame + for _, f := range frames { + pluginFrames = append(pluginFrames, plugin.Frame(f)) + } + return pluginFrames, nil +} + +func (f *internalObjFile) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) { + syms, err := f.ObjFile.Symbols(r, addr) + if err != nil { + return nil, err + } + var pluginSyms []*plugin.Sym + for _, s := range syms { + ps := plugin.Sym(*s) + pluginSyms = append(pluginSyms, &ps) + } + return pluginSyms, nil +} + +func (o *internalObjTool) Disasm(file string, start, end uint64, intelSyntax bool) ([]plugin.Inst, error) { + insts, err := o.ObjTool.Disasm(file, start, end, intelSyntax) + if err != nil { + return nil, err + } + var pluginInst []plugin.Inst + for _, inst := range insts { + pluginInst = append(pluginInst, plugin.Inst(inst)) + } + return pluginInst, nil +} + +// internalSymbolizer is a wrapper to map from the pprof external +// interface to the internal interface. 
+type internalSymbolizer struct { + Symbolizer +} + +func (s *internalSymbolizer) Symbolize(mode string, srcs plugin.MappingSources, prof *profile.Profile) error { + isrcs := MappingSources{} + for m, s := range srcs { + isrcs[m] = s + } + return s.Symbolizer.Symbolize(mode, isrcs, prof) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go new file mode 100644 index 0000000000000000000000000000000000000000..c2e45c6a83e720115d39644c51364ef8776e1cba --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go @@ -0,0 +1,238 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package binutils + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "strconv" + "strings" + "sync" + + "github.com/google/pprof/internal/plugin" +) + +const ( + defaultAddr2line = "addr2line" + + // addr2line may produce multiple lines of output. We + // use this sentinel to identify the end of the output. + sentinel = ^uint64(0) +) + +// addr2Liner is a connection to an addr2line command for obtaining +// address and line number information from a binary. +type addr2Liner struct { + mu sync.Mutex + rw lineReaderWriter + base uint64 + + // nm holds an addr2Liner using nm tool. 
Certain versions of addr2line + // produce incomplete names due to + // https://sourceware.org/bugzilla/show_bug.cgi?id=17541. As a workaround, + // the names from nm are used when they look more complete. See addrInfo() + // code below for the exact heuristic. + nm *addr2LinerNM +} + +// lineReaderWriter is an interface to abstract the I/O to an addr2line +// process. It writes a line of input to the job, and reads its output +// one line at a time. +type lineReaderWriter interface { + write(string) error + readLine() (string, error) + close() +} + +type addr2LinerJob struct { + cmd *exec.Cmd + in io.WriteCloser + out *bufio.Reader +} + +func (a *addr2LinerJob) write(s string) error { + _, err := fmt.Fprint(a.in, s+"\n") + return err +} + +func (a *addr2LinerJob) readLine() (string, error) { + s, err := a.out.ReadString('\n') + if err != nil { + return "", err + } + return strings.TrimSpace(s), nil +} + +// close releases any resources used by the addr2liner object. +func (a *addr2LinerJob) close() { + a.in.Close() + a.cmd.Wait() +} + +// newAddr2Liner starts the given addr2liner command reporting +// information about the given executable file. If file is a shared +// library, base should be the address at which it was mapped in the +// program under consideration. +func newAddr2Liner(cmd, file string, base uint64) (*addr2Liner, error) { + if cmd == "" { + cmd = defaultAddr2line + } + + j := &addr2LinerJob{ + cmd: exec.Command(cmd, "-aif", "-e", file), + } + + var err error + if j.in, err = j.cmd.StdinPipe(); err != nil { + return nil, err + } + + outPipe, err := j.cmd.StdoutPipe() + if err != nil { + return nil, err + } + + j.out = bufio.NewReader(outPipe) + if err := j.cmd.Start(); err != nil { + return nil, err + } + + a := &addr2Liner{ + rw: j, + base: base, + } + + return a, nil +} + +// readFrame parses the addr2line output for a single address. It +// returns a populated plugin.Frame and whether it has reached the end of the +// data. 
+func (d *addr2Liner) readFrame() (plugin.Frame, bool) { + funcname, err := d.rw.readLine() + if err != nil { + return plugin.Frame{}, true + } + if strings.HasPrefix(funcname, "0x") { + // If addr2line returns a hex address we can assume it is the + // sentinel. Read and ignore next two lines of output from + // addr2line + d.rw.readLine() + d.rw.readLine() + return plugin.Frame{}, true + } + + fileline, err := d.rw.readLine() + if err != nil { + return plugin.Frame{}, true + } + + linenumber := 0 + + if funcname == "??" { + funcname = "" + } + + if fileline == "??:0" { + fileline = "" + } else { + if i := strings.LastIndex(fileline, ":"); i >= 0 { + // Remove discriminator, if present + if disc := strings.Index(fileline, " (discriminator"); disc > 0 { + fileline = fileline[:disc] + } + // If we cannot parse a number after the last ":", keep it as + // part of the filename. + if line, err := strconv.Atoi(fileline[i+1:]); err == nil { + linenumber = line + fileline = fileline[:i] + } + } + } + + return plugin.Frame{ + Func: funcname, + File: fileline, + Line: linenumber}, false +} + +func (d *addr2Liner) rawAddrInfo(addr uint64) ([]plugin.Frame, error) { + d.mu.Lock() + defer d.mu.Unlock() + + if err := d.rw.write(fmt.Sprintf("%x", addr-d.base)); err != nil { + return nil, err + } + + if err := d.rw.write(fmt.Sprintf("%x", sentinel)); err != nil { + return nil, err + } + + resp, err := d.rw.readLine() + if err != nil { + return nil, err + } + + if !strings.HasPrefix(resp, "0x") { + return nil, fmt.Errorf("unexpected addr2line output: %s", resp) + } + + var stack []plugin.Frame + for { + frame, end := d.readFrame() + if end { + break + } + + if frame != (plugin.Frame{}) { + stack = append(stack, frame) + } + } + return stack, err +} + +// addrInfo returns the stack frame information for a specific program +// address. It returns nil if the address could not be identified. 
+func (d *addr2Liner) addrInfo(addr uint64) ([]plugin.Frame, error) { + stack, err := d.rawAddrInfo(addr) + if err != nil { + return nil, err + } + + // Certain versions of addr2line produce incomplete names due to + // https://sourceware.org/bugzilla/show_bug.cgi?id=17541. Attempt to replace + // the name with a better one from nm. + if len(stack) > 0 && d.nm != nil { + nm, err := d.nm.addrInfo(addr) + if err == nil && len(nm) > 0 { + // Last entry in frame list should match since it is non-inlined. As a + // simple heuristic, we only switch to the nm-based name if it is longer + // by 2 or more characters. We consider nm names that are longer by 1 + // character insignificant to avoid replacing foo with _foo on MacOS (for + // unknown reasons addr2line produces the former and nm produces the + // latter on MacOS even though both tools are asked to produce mangled + // names). + nmName := nm[len(nm)-1].Func + a2lName := stack[len(stack)-1].Func + if len(nmName) > len(a2lName)+1 { + stack[len(stack)-1].Func = nmName + } + } + } + + return stack, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go new file mode 100644 index 0000000000000000000000000000000000000000..491422fcda140fa887ab005380ed57610d3f39ab --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go @@ -0,0 +1,181 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package binutils + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "strconv" + "strings" + "sync" + + "github.com/google/pprof/internal/plugin" +) + +const ( + defaultLLVMSymbolizer = "llvm-symbolizer" +) + +// llvmSymbolizer is a connection to an llvm-symbolizer command for +// obtaining address and line number information from a binary. +type llvmSymbolizer struct { + sync.Mutex + filename string + rw lineReaderWriter + base uint64 +} + +type llvmSymbolizerJob struct { + cmd *exec.Cmd + in io.WriteCloser + out *bufio.Reader + // llvm-symbolizer requires the symbol type, CODE or DATA, for symbolization. + symType string +} + +func (a *llvmSymbolizerJob) write(s string) error { + _, err := fmt.Fprintln(a.in, a.symType, s) + return err +} + +func (a *llvmSymbolizerJob) readLine() (string, error) { + s, err := a.out.ReadString('\n') + if err != nil { + return "", err + } + return strings.TrimSpace(s), nil +} + +// close releases any resources used by the llvmSymbolizer object. +func (a *llvmSymbolizerJob) close() { + a.in.Close() + a.cmd.Wait() +} + +// newLLVMSymbolizer starts the given llvmSymbolizer command reporting +// information about the given executable file. If file is a shared +// library, base should be the address at which it was mapped in the +// program under consideration. 
+func newLLVMSymbolizer(cmd, file string, base uint64, isData bool) (*llvmSymbolizer, error) { + if cmd == "" { + cmd = defaultLLVMSymbolizer + } + + j := &llvmSymbolizerJob{ + cmd: exec.Command(cmd, "--inlining", "-demangle=false"), + symType: "CODE", + } + if isData { + j.symType = "DATA" + } + + var err error + if j.in, err = j.cmd.StdinPipe(); err != nil { + return nil, err + } + + outPipe, err := j.cmd.StdoutPipe() + if err != nil { + return nil, err + } + + j.out = bufio.NewReader(outPipe) + if err := j.cmd.Start(); err != nil { + return nil, err + } + + a := &llvmSymbolizer{ + filename: file, + rw: j, + base: base, + } + + return a, nil +} + +// readFrame parses the llvm-symbolizer output for a single address. It +// returns a populated plugin.Frame and whether it has reached the end of the +// data. +func (d *llvmSymbolizer) readFrame() (plugin.Frame, bool) { + funcname, err := d.rw.readLine() + if err != nil { + return plugin.Frame{}, true + } + + switch funcname { + case "": + return plugin.Frame{}, true + case "??": + funcname = "" + } + + fileline, err := d.rw.readLine() + if err != nil { + return plugin.Frame{Func: funcname}, true + } + + linenumber := 0 + // The llvm-symbolizer outputs the <file_name>:<line_number>:<column_number>. + // When it cannot identify the source code location, it outputs "??:0:0". + // Older versions output just the filename and line number, so we check for + // both conditions here. + if fileline == "??:0" || fileline == "??:0:0" { + fileline = "" + } else { + switch split := strings.Split(fileline, ":"); len(split) { + case 1: + // filename + fileline = split[0] + case 2, 3: + // filename:line , or + // filename:line:disc , or + fileline = split[0] + if line, err := strconv.Atoi(split[1]); err == nil { + linenumber = line + } + default: + // Unrecognized, ignore + } + } + + return plugin.Frame{Func: funcname, File: fileline, Line: linenumber}, false +} + +// addrInfo returns the stack frame information for a specific program +// address.
It returns nil if the address could not be identified. +func (d *llvmSymbolizer) addrInfo(addr uint64) ([]plugin.Frame, error) { + d.Lock() + defer d.Unlock() + + if err := d.rw.write(fmt.Sprintf("%s 0x%x", d.filename, addr-d.base)); err != nil { + return nil, err + } + + var stack []plugin.Frame + for { + frame, end := d.readFrame() + if end { + break + } + + if frame != (plugin.Frame{}) { + stack = append(stack, frame) + } + } + + return stack, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go new file mode 100644 index 0000000000000000000000000000000000000000..8e0ccc728d5fde14501690555fda6374bbd07361 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go @@ -0,0 +1,144 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package binutils + +import ( + "bufio" + "bytes" + "io" + "os/exec" + "strconv" + "strings" + + "github.com/google/pprof/internal/plugin" +) + +const ( + defaultNM = "nm" +) + +// addr2LinerNM is a connection to an nm command for obtaining symbol +// information from a binary. +type addr2LinerNM struct { + m []symbolInfo // Sorted list of symbol addresses from binary. 
+} + +type symbolInfo struct { + address uint64 + size uint64 + name string + symType string +} + +// isData returns if the symbol has a known data object symbol type. +func (s *symbolInfo) isData() bool { + // The following symbol types are taken from https://linux.die.net/man/1/nm: + // Lowercase letter means local symbol, uppercase denotes a global symbol. + // - b or B: the symbol is in the uninitialized data section, e.g. .bss; + // - d or D: the symbol is in the initialized data section; + // - r or R: the symbol is in a read only data section; + // - v or V: the symbol is a weak object; + // - W: the symbol is a weak symbol that has not been specifically tagged as a + // weak object symbol. Experiments with some binaries, showed these to be + // mostly data objects. + return strings.ContainsAny(s.symType, "bBdDrRvVW") +} + +// newAddr2LinerNM starts the given nm command reporting information about the +// given executable file. If file is a shared library, base should be the +// address at which it was mapped in the program under consideration. +func newAddr2LinerNM(cmd, file string, base uint64) (*addr2LinerNM, error) { + if cmd == "" { + cmd = defaultNM + } + var b bytes.Buffer + c := exec.Command(cmd, "--numeric-sort", "--print-size", "--format=posix", file) + c.Stdout = &b + if err := c.Run(); err != nil { + return nil, err + } + return parseAddr2LinerNM(base, &b) +} + +func parseAddr2LinerNM(base uint64, nm io.Reader) (*addr2LinerNM, error) { + a := &addr2LinerNM{ + m: []symbolInfo{}, + } + + // Parse nm output and populate symbol map. + // Skip lines we fail to parse. 
+ buf := bufio.NewReader(nm) + for { + line, err := buf.ReadString('\n') + if line == "" && err != nil { + if err == io.EOF { + break + } + return nil, err + } + line = strings.TrimSpace(line) + fields := strings.Split(line, " ") + if len(fields) != 4 { + continue + } + address, err := strconv.ParseUint(fields[2], 16, 64) + if err != nil { + continue + } + size, err := strconv.ParseUint(fields[3], 16, 64) + if err != nil { + continue + } + a.m = append(a.m, symbolInfo{ + address: address + base, + size: size, + name: fields[0], + symType: fields[1], + }) + } + + return a, nil +} + +// addrInfo returns the stack frame information for a specific program +// address. It returns nil if the address could not be identified. +func (a *addr2LinerNM) addrInfo(addr uint64) ([]plugin.Frame, error) { + if len(a.m) == 0 || addr < a.m[0].address || addr >= (a.m[len(a.m)-1].address+a.m[len(a.m)-1].size) { + return nil, nil + } + + // Binary search. Search until low, high are separated by 1. + low, high := 0, len(a.m) + for low+1 < high { + mid := (low + high) / 2 + v := a.m[mid].address + if addr == v { + low = mid + break + } else if addr > v { + low = mid + } else { + high = mid + } + } + + // Address is between a.m[low] and a.m[high]. Pick low, as it represents + // [low, high). For data symbols, we use a strict check that the address is in + // the [start, start + size) range of a.m[low]. 
+ if a.m[low].isData() && addr >= (a.m[low].address+a.m[low].size) { + return nil, nil + } + return []plugin.Frame{{Func: a.m[low].name}}, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go new file mode 100644 index 0000000000000000000000000000000000000000..efa9167af7d643fb26ae6c6a1a16e6dbaa448ded --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go @@ -0,0 +1,738 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package binutils provides access to the GNU binutils. +package binutils + +import ( + "debug/elf" + "debug/macho" + "debug/pe" + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + + "github.com/google/pprof/internal/elfexec" + "github.com/google/pprof/internal/plugin" +) + +// A Binutils implements plugin.ObjTool by invoking the GNU binutils. +type Binutils struct { + mu sync.Mutex + rep *binrep +} + +var ( + objdumpLLVMVerRE = regexp.MustCompile(`LLVM version (?:(\d*)\.(\d*)\.(\d*)|.*(trunk).*)`) + + // Defined for testing + elfOpen = elf.Open +) + +// binrep is an immutable representation for Binutils. 
It is atomically +// replaced on every mutation to provide thread-safe access. +type binrep struct { + // Commands to invoke. + llvmSymbolizer string + llvmSymbolizerFound bool + addr2line string + addr2lineFound bool + nm string + nmFound bool + objdump string + objdumpFound bool + isLLVMObjdump bool + + // if fast, perform symbolization using nm (symbol names only), + // instead of file-line detail from the slower addr2line. + fast bool +} + +// get returns the current representation for bu, initializing it if necessary. +func (bu *Binutils) get() *binrep { + bu.mu.Lock() + r := bu.rep + if r == nil { + r = &binrep{} + initTools(r, "") + bu.rep = r + } + bu.mu.Unlock() + return r +} + +// update modifies the rep for bu via the supplied function. +func (bu *Binutils) update(fn func(r *binrep)) { + r := &binrep{} + bu.mu.Lock() + defer bu.mu.Unlock() + if bu.rep == nil { + initTools(r, "") + } else { + *r = *bu.rep + } + fn(r) + bu.rep = r +} + +// String returns string representation of the binutils state for debug logging. +func (bu *Binutils) String() string { + r := bu.get() + var llvmSymbolizer, addr2line, nm, objdump string + if r.llvmSymbolizerFound { + llvmSymbolizer = r.llvmSymbolizer + } + if r.addr2lineFound { + addr2line = r.addr2line + } + if r.nmFound { + nm = r.nm + } + if r.objdumpFound { + objdump = r.objdump + } + return fmt.Sprintf("llvm-symbolizer=%q addr2line=%q nm=%q objdump=%q fast=%t", + llvmSymbolizer, addr2line, nm, objdump, r.fast) +} + +// SetFastSymbolization sets a toggle that makes binutils use fast +// symbolization (using nm), which is much faster than addr2line but +// provides only symbol name information (no file/line). +func (bu *Binutils) SetFastSymbolization(fast bool) { + bu.update(func(r *binrep) { r.fast = fast }) +} + +// SetTools processes the contents of the tools option. 
It +// expects a set of entries separated by commas; each entry is a pair +// of the form t:path, where cmd will be used to look only for the +// tool named t. If t is not specified, the path is searched for all +// tools. +func (bu *Binutils) SetTools(config string) { + bu.update(func(r *binrep) { initTools(r, config) }) +} + +func initTools(b *binrep, config string) { + // paths collect paths per tool; Key "" contains the default. + paths := make(map[string][]string) + for _, t := range strings.Split(config, ",") { + name, path := "", t + if ct := strings.SplitN(t, ":", 2); len(ct) == 2 { + name, path = ct[0], ct[1] + } + paths[name] = append(paths[name], path) + } + + defaultPath := paths[""] + b.llvmSymbolizer, b.llvmSymbolizerFound = chooseExe([]string{"llvm-symbolizer"}, []string{}, append(paths["llvm-symbolizer"], defaultPath...)) + b.addr2line, b.addr2lineFound = chooseExe([]string{"addr2line"}, []string{"gaddr2line"}, append(paths["addr2line"], defaultPath...)) + // The "-n" option is supported by LLVM since 2011. The output of llvm-nm + // and GNU nm with "-n" option is interchangeable for our purposes, so we do + // not need to differentiate them. + b.nm, b.nmFound = chooseExe([]string{"llvm-nm", "nm"}, []string{"gnm"}, append(paths["nm"], defaultPath...)) + b.objdump, b.objdumpFound, b.isLLVMObjdump = findObjdump(append(paths["objdump"], defaultPath...)) +} + +// findObjdump finds and returns path to preferred objdump binary. +// Order of preference is: llvm-objdump, objdump. +// On MacOS only, also looks for gobjdump with least preference. +// Accepts a list of paths and returns: +// a string with path to the preferred objdump binary if found, +// or an empty string if not found; +// a boolean if any acceptable objdump was found; +// a boolean indicating if it is an LLVM objdump.
+func findObjdump(paths []string) (string, bool, bool) { + objdumpNames := []string{"llvm-objdump", "objdump"} + if runtime.GOOS == "darwin" { + objdumpNames = append(objdumpNames, "gobjdump") + } + + for _, objdumpName := range objdumpNames { + if objdump, objdumpFound := findExe(objdumpName, paths); objdumpFound { + cmdOut, err := exec.Command(objdump, "--version").Output() + if err != nil { + continue + } + if isLLVMObjdump(string(cmdOut)) { + return objdump, true, true + } + if isBuObjdump(string(cmdOut)) { + return objdump, true, false + } + } + } + return "", false, false +} + +// chooseExe finds and returns path to preferred binary. names is a list of +// names to search on both Linux and OSX. osxNames is a list of names specific +// to OSX. names always has a higher priority than osxNames. The order of +// the name within each list decides its priority (e.g. the first name has a +// higher priority than the second name in the list). +// +// It returns a string with path to the binary and a boolean indicating if any +// acceptable binary was found. +func chooseExe(names, osxNames []string, paths []string) (string, bool) { + if runtime.GOOS == "darwin" { + names = append(names, osxNames...) + } + for _, name := range names { + if binary, found := findExe(name, paths); found { + return binary, true + } + } + return "", false +} + +// isLLVMObjdump accepts a string with path to an objdump binary, +// and returns a boolean indicating if the given binary is an LLVM +// objdump binary of an acceptable version. +func isLLVMObjdump(output string) bool { + fields := objdumpLLVMVerRE.FindStringSubmatch(output) + if len(fields) != 5 { + return false + } + if fields[4] == "trunk" { + return true + } + verMajor, err := strconv.Atoi(fields[1]) + if err != nil { + return false + } + verPatch, err := strconv.Atoi(fields[3]) + if err != nil { + return false + } + if runtime.GOOS == "linux" && verMajor >= 8 { + // Ensure LLVM objdump is at least version 8.0 on Linux. 
+ // Some flags, like --demangle, and double dashes for options are + // not supported by previous versions. + return true + } + if runtime.GOOS == "darwin" { + // Ensure LLVM objdump is at least version 10.0.1 on MacOS. + return verMajor > 10 || (verMajor == 10 && verPatch >= 1) + } + return false +} + +// isBuObjdump accepts a string with path to an objdump binary, +// and returns a boolean indicating if the given binary is a GNU +// binutils objdump binary. No version check is performed. +func isBuObjdump(output string) bool { + return strings.Contains(output, "GNU objdump") +} + +// findExe looks for an executable command on a set of paths. +// If it cannot find it, returns cmd. +func findExe(cmd string, paths []string) (string, bool) { + for _, p := range paths { + cp := filepath.Join(p, cmd) + if c, err := exec.LookPath(cp); err == nil { + return c, true + } + } + return cmd, false +} + +// Disasm returns the assembly instructions for the specified address range +// of a binary. +func (bu *Binutils) Disasm(file string, start, end uint64, intelSyntax bool) ([]plugin.Inst, error) { + b := bu.get() + if !b.objdumpFound { + return nil, errors.New("cannot disasm: no objdump tool available") + } + args := []string{"--disassemble", "--demangle", "--no-show-raw-insn", + "--line-numbers", fmt.Sprintf("--start-address=%#x", start), + fmt.Sprintf("--stop-address=%#x", end)} + + if intelSyntax { + if b.isLLVMObjdump { + args = append(args, "--x86-asm-syntax=intel") + } else { + args = append(args, "-M", "intel") + } + } + + args = append(args, file) + cmd := exec.Command(b.objdump, args...) + out, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("%v: %v", cmd.Args, err) + } + + return disassemble(out) +} + +// Open satisfies the plugin.ObjTool interface. +func (bu *Binutils) Open(name string, start, limit, offset uint64, relocationSymbol string) (plugin.ObjFile, error) { + b := bu.get() + + // Make sure file is a supported executable. 
+ // This uses magic numbers, mainly to provide better error messages but + // it should also help speed. + + if _, err := os.Stat(name); err != nil { + // For testing, do not require file name to exist. + if strings.Contains(b.addr2line, "testdata/") { + return &fileAddr2Line{file: file{b: b, name: name}}, nil + } + return nil, err + } + + // Read the first 4 bytes of the file. + + f, err := os.Open(name) + if err != nil { + return nil, fmt.Errorf("error opening %s: %v", name, err) + } + defer f.Close() + + var header [4]byte + if _, err = io.ReadFull(f, header[:]); err != nil { + return nil, fmt.Errorf("error reading magic number from %s: %v", name, err) + } + + elfMagic := string(header[:]) + + // Match against supported file types. + if elfMagic == elf.ELFMAG { + f, err := b.openELF(name, start, limit, offset, relocationSymbol) + if err != nil { + return nil, fmt.Errorf("error reading ELF file %s: %v", name, err) + } + return f, nil + } + + // Mach-O magic numbers can be big or little endian. 
+ machoMagicLittle := binary.LittleEndian.Uint32(header[:]) + machoMagicBig := binary.BigEndian.Uint32(header[:]) + + if machoMagicLittle == macho.Magic32 || machoMagicLittle == macho.Magic64 || + machoMagicBig == macho.Magic32 || machoMagicBig == macho.Magic64 { + f, err := b.openMachO(name, start, limit, offset) + if err != nil { + return nil, fmt.Errorf("error reading Mach-O file %s: %v", name, err) + } + return f, nil + } + if machoMagicLittle == macho.MagicFat || machoMagicBig == macho.MagicFat { + f, err := b.openFatMachO(name, start, limit, offset) + if err != nil { + return nil, fmt.Errorf("error reading fat Mach-O file %s: %v", name, err) + } + return f, nil + } + + peMagic := string(header[:2]) + if peMagic == "MZ" { + f, err := b.openPE(name, start, limit, offset) + if err != nil { + return nil, fmt.Errorf("error reading PE file %s: %v", name, err) + } + return f, nil + } + + return nil, fmt.Errorf("unrecognized binary format: %s", name) +} + +func (b *binrep) openMachOCommon(name string, of *macho.File, start, limit, offset uint64) (plugin.ObjFile, error) { + + // Subtract the load address of the __TEXT section. Usually 0 for shared + // libraries or 0x100000000 for executables. You can check this value by + // running `objdump -private-headers `. 
+ + textSegment := of.Segment("__TEXT") + if textSegment == nil { + return nil, fmt.Errorf("could not identify base for %s: no __TEXT segment", name) + } + if textSegment.Addr > start { + return nil, fmt.Errorf("could not identify base for %s: __TEXT segment address (0x%x) > mapping start address (0x%x)", + name, textSegment.Addr, start) + } + + base := start - textSegment.Addr + + if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) { + return &fileNM{file: file{b: b, name: name, base: base}}, nil + } + return &fileAddr2Line{file: file{b: b, name: name, base: base}}, nil +} + +func (b *binrep) openFatMachO(name string, start, limit, offset uint64) (plugin.ObjFile, error) { + of, err := macho.OpenFat(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer of.Close() + + if len(of.Arches) == 0 { + return nil, fmt.Errorf("empty fat Mach-O file: %s", name) + } + + var arch macho.Cpu + // Use the host architecture. + // TODO: This is not ideal because the host architecture may not be the one + // that was profiled. E.g. an amd64 host can profile a 386 program. 
+ switch runtime.GOARCH { + case "386": + arch = macho.Cpu386 + case "amd64", "amd64p32": + arch = macho.CpuAmd64 + case "arm", "armbe", "arm64", "arm64be": + arch = macho.CpuArm + case "ppc": + arch = macho.CpuPpc + case "ppc64", "ppc64le": + arch = macho.CpuPpc64 + default: + return nil, fmt.Errorf("unsupported host architecture for %s: %s", name, runtime.GOARCH) + } + for i := range of.Arches { + if of.Arches[i].Cpu == arch { + return b.openMachOCommon(name, of.Arches[i].File, start, limit, offset) + } + } + return nil, fmt.Errorf("architecture not found in %s: %s", name, runtime.GOARCH) +} + +func (b *binrep) openMachO(name string, start, limit, offset uint64) (plugin.ObjFile, error) { + of, err := macho.Open(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer of.Close() + + return b.openMachOCommon(name, of, start, limit, offset) +} + +func (b *binrep) openELF(name string, start, limit, offset uint64, relocationSymbol string) (plugin.ObjFile, error) { + ef, err := elfOpen(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer ef.Close() + + buildID := "" + if f, err := os.Open(name); err == nil { + if id, err := elfexec.GetBuildID(f); err == nil { + buildID = fmt.Sprintf("%x", id) + } + } + + var ( + kernelOffset *uint64 + pageAligned = func(addr uint64) bool { return addr%4096 == 0 } + ) + if strings.Contains(name, "vmlinux") || !pageAligned(start) || !pageAligned(limit) || !pageAligned(offset) { + // Reading all Symbols is expensive, and we only rarely need it so + // we don't want to do it every time. But if _stext happens to be + // page-aligned but isn't the same as Vaddr, we would symbolize + // wrong. So if the name the addresses aren't page aligned, or if + // the name is "vmlinux" we read _stext. 
We can be wrong if: (1) + // someone passes a kernel path that doesn't contain "vmlinux" AND + // (2) _stext is page-aligned AND (3) _stext is not at Vaddr + symbols, err := ef.Symbols() + if err != nil && err != elf.ErrNoSymbols { + return nil, err + } + + // The kernel relocation symbol (the mapping start address) can be either + // _text or _stext. When profiles are generated by `perf`, which one was used is + // distinguished by the mapping name for the kernel image: + // '[kernel.kallsyms]_text' or '[kernel.kallsyms]_stext', respectively. If we haven't + // been able to parse it from the mapping, we default to _stext. + if relocationSymbol == "" { + relocationSymbol = "_stext" + } + for _, s := range symbols { + if s.Name == relocationSymbol { + kernelOffset = &s.Value + break + } + } + } + + // Check that we can compute a base for the binary. This may not be the + // correct base value, so we don't save it. We delay computing the actual base + // value until we have a sample address for this mapping, so that we can + // correctly identify the associated program segment that is needed to compute + // the base. 
+ if _, err := elfexec.GetBase(&ef.FileHeader, elfexec.FindTextProgHeader(ef), kernelOffset, start, limit, offset); err != nil { + return nil, fmt.Errorf("could not identify base for %s: %v", name, err) + } + + if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) { + return &fileNM{file: file{ + b: b, + name: name, + buildID: buildID, + m: &elfMapping{start: start, limit: limit, offset: offset, kernelOffset: kernelOffset}, + }}, nil + } + return &fileAddr2Line{file: file{ + b: b, + name: name, + buildID: buildID, + m: &elfMapping{start: start, limit: limit, offset: offset, kernelOffset: kernelOffset}, + }}, nil +} + +func (b *binrep) openPE(name string, start, limit, offset uint64) (plugin.ObjFile, error) { + pf, err := pe.Open(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer pf.Close() + + var imageBase uint64 + switch h := pf.OptionalHeader.(type) { + case *pe.OptionalHeader32: + imageBase = uint64(h.ImageBase) + case *pe.OptionalHeader64: + imageBase = uint64(h.ImageBase) + default: + return nil, fmt.Errorf("unknown OptionalHeader %T", pf.OptionalHeader) + } + + var base uint64 + if start > 0 { + base = start - imageBase + } + if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) { + return &fileNM{file: file{b: b, name: name, base: base}}, nil + } + return &fileAddr2Line{file: file{b: b, name: name, base: base}}, nil +} + +// elfMapping stores the parameters of a runtime mapping that are needed to +// identify the ELF segment associated with a mapping. +type elfMapping struct { + // Runtime mapping parameters. + start, limit, offset uint64 + // Offset of kernel relocation symbol. Only defined for kernel images, nil otherwise. + kernelOffset *uint64 +} + +// findProgramHeader returns the program segment that matches the current +// mapping and the given address, or an error if it cannot find a unique program +// header. 
func (m *elfMapping) findProgramHeader(ef *elf.File, addr uint64) (*elf.ProgHeader, error) {
	// For user space executables, we try to find the actual program segment that
	// is associated with the given mapping. Skip this search if limit <= start.
	// We cannot use just a check on the start address of the mapping to tell if
	// it's a kernel / .ko module mapping, because with quipper address remapping
	// enabled, the address would be in the lower half of the address space.

	if m.kernelOffset != nil || m.start >= m.limit || m.limit >= (uint64(1)<<63) {
		// For the kernel, find the program segment that includes the .text section.
		return elfexec.FindTextProgHeader(ef), nil
	}

	// Fetch all the loadable segments.
	var phdrs []elf.ProgHeader
	for i := range ef.Progs {
		if ef.Progs[i].Type == elf.PT_LOAD {
			phdrs = append(phdrs, ef.Progs[i].ProgHeader)
		}
	}
	// Some ELF files don't contain any loadable program segments, e.g. .ko
	// kernel modules. It's not an error to have no header in such cases.
	if len(phdrs) == 0 {
		return nil, nil
	}
	// Get all program headers associated with the mapping.
	headers := elfexec.ProgramHeadersForMapping(phdrs, m.offset, m.limit-m.start)
	if len(headers) == 0 {
		return nil, errors.New("no program header matches mapping info")
	}
	if len(headers) == 1 {
		// Unambiguous: the mapping overlaps exactly one loadable segment.
		return headers[0], nil
	}

	// Use the file offset corresponding to the address to symbolize, to narrow
	// down the header.
	return elfexec.HeaderForFileOffset(headers, addr-m.start+m.offset)
}

// file implements the binutils.ObjFile interface.
type file struct {
	b       *binrep // back-pointer to the tool locations (nm, addr2line, ...)
	name    string  // path of the object file on disk
	buildID string

	baseOnce sync.Once // Ensures the base, baseErr and isData are computed once.
	base     uint64
	baseErr  error // Any eventual error while computing the base.
	isData   bool
	// Mapping information. Relevant only for ELF files, nil otherwise.
	m *elfMapping
}

// computeBase computes the relocation base for the given binary file only if
// the elfMapping field is set. It populates the base and isData fields and
// returns an error.
// Note: a nil receiver or nil mapping is a no-op (base stays 0), and the
// mapping range check treats limit as exclusive.
func (f *file) computeBase(addr uint64) error {
	if f == nil || f.m == nil {
		return nil
	}
	if addr < f.m.start || addr >= f.m.limit {
		return fmt.Errorf("specified address %x is outside the mapping range [%x, %x] for file %q", addr, f.m.start, f.m.limit, f.name)
	}
	ef, err := elfOpen(f.name)
	if err != nil {
		return fmt.Errorf("error parsing %s: %v", f.name, err)
	}
	defer ef.Close()

	ph, err := f.m.findProgramHeader(ef, addr)
	if err != nil {
		return fmt.Errorf("failed to find program header for file %q, ELF mapping %#v, address %x: %v", f.name, *f.m, addr, err)
	}

	base, err := elfexec.GetBase(&ef.FileHeader, ph, f.m.kernelOffset, f.m.start, f.m.limit, f.m.offset)
	if err != nil {
		return err
	}
	f.base = base
	// A matching non-executable segment means addresses resolve into a data
	// section rather than text.
	f.isData = ph != nil && ph.Flags&elf.PF_X == 0
	return nil
}

// Name returns the path of the object file.
func (f *file) Name() string {
	return f.name
}

// ObjAddr translates a runtime address to an object-file address, computing
// the relocation base lazily on first use.
func (f *file) ObjAddr(addr uint64) (uint64, error) {
	f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) })
	if f.baseErr != nil {
		return 0, f.baseErr
	}
	return addr - f.base, nil
}

// BuildID returns the build id recorded for this file, if any.
func (f *file) BuildID() string {
	return f.buildID
}

// SourceLine is a stub for the plain file type: it only validates/computes the
// base and reports no source information. Subtypes override this.
func (f *file) SourceLine(addr uint64) ([]plugin.Frame, error) {
	f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) })
	if f.baseErr != nil {
		return nil, f.baseErr
	}
	return nil, nil
}

// Close releases resources; the plain file type holds none.
func (f *file) Close() error {
	return nil
}

// Symbols returns symbols matching r (or near addr) by invoking nm on the file.
func (f *file) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) {
	// Get from nm a list of symbols sorted by address.
	cmd := exec.Command(f.b.nm, "-n", f.name)
	out, err := cmd.Output()
	if err != nil {
		return nil, fmt.Errorf("%v: %v", cmd.Args, err)
	}

	return findSymbols(out, f.name, r, addr)
}

// fileNM implements the binutils.ObjFile interface, using 'nm' to map
// addresses to symbols (without file/line number information). It is
// faster than fileAddr2Line.
type fileNM struct {
	file
	addr2linernm *addr2LinerNM // lazily created on first SourceLine call
}

// SourceLine maps addr to a symbol name using nm (no file/line info).
func (f *fileNM) SourceLine(addr uint64) ([]plugin.Frame, error) {
	f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) })
	if f.baseErr != nil {
		return nil, f.baseErr
	}
	if f.addr2linernm == nil {
		addr2liner, err := newAddr2LinerNM(f.b.nm, f.name, f.base)
		if err != nil {
			return nil, err
		}
		f.addr2linernm = addr2liner
	}
	return f.addr2linernm.addrInfo(addr)
}

// fileAddr2Line implements the binutils.ObjFile interface, using
// llvm-symbolizer, if that's available, or addr2line to map addresses to
// symbols (with file/line number information). It can be slow for large
// binaries with debug information.
+type fileAddr2Line struct { + once sync.Once + file + addr2liner *addr2Liner + llvmSymbolizer *llvmSymbolizer + isData bool +} + +func (f *fileAddr2Line) SourceLine(addr uint64) ([]plugin.Frame, error) { + f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) }) + if f.baseErr != nil { + return nil, f.baseErr + } + f.once.Do(f.init) + if f.llvmSymbolizer != nil { + return f.llvmSymbolizer.addrInfo(addr) + } + if f.addr2liner != nil { + return f.addr2liner.addrInfo(addr) + } + return nil, fmt.Errorf("could not find local addr2liner") +} + +func (f *fileAddr2Line) init() { + if llvmSymbolizer, err := newLLVMSymbolizer(f.b.llvmSymbolizer, f.name, f.base, f.isData); err == nil { + f.llvmSymbolizer = llvmSymbolizer + return + } + + if addr2liner, err := newAddr2Liner(f.b.addr2line, f.name, f.base); err == nil { + f.addr2liner = addr2liner + + // When addr2line encounters some gcc compiled binaries, it + // drops interesting parts of names in anonymous namespaces. + // Fallback to NM for better function names. + if nm, err := newAddr2LinerNM(f.b.nm, f.name, f.base); err == nil { + f.addr2liner.nm = nm + } + } +} + +func (f *fileAddr2Line) Close() error { + if f.llvmSymbolizer != nil { + f.llvmSymbolizer.rw.close() + f.llvmSymbolizer = nil + } + if f.addr2liner != nil { + f.addr2liner.rw.close() + f.addr2liner = nil + } + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go new file mode 100644 index 0000000000000000000000000000000000000000..2709ef877c3c4549b5fc0e83e346ed6d40e5f290 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go @@ -0,0 +1,180 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package binutils

import (
	"bytes"
	"io"
	"regexp"
	"strconv"
	"strings"

	"github.com/google/pprof/internal/plugin"
	"github.com/ianlancetaylor/demangle"
)

// Regexps for the line formats produced by nm and objdump (GNU and LLVM).
var (
	nmOutputRE                = regexp.MustCompile(`^\s*([[:xdigit:]]+)\s+(.)\s+(.*)`)
	objdumpAsmOutputRE        = regexp.MustCompile(`^\s*([[:xdigit:]]+):\s+(.*)`)
	objdumpOutputFileLine     = regexp.MustCompile(`^;?\s?(.*):([0-9]+)`)
	objdumpOutputFunction     = regexp.MustCompile(`^;?\s?(\S.*)\(\):`)
	objdumpOutputFunctionLLVM = regexp.MustCompile(`^([[:xdigit:]]+)?\s?(.*):`)
)

// findSymbols scans nm output (sorted by address) and returns the symbols
// selected by matchSymbol. Consecutive names at the same address are grouped
// into a single symbol; each group ends one byte before the next address.
func findSymbols(syms []byte, file string, r *regexp.Regexp, address uint64) ([]*plugin.Sym, error) {
	// Collect all symbols from the nm output, grouping names mapped to
	// the same address into a single symbol.

	// The symbols to return.
	var symbols []*plugin.Sym

	// The current group of symbol names, and the address they are all at.
	names, start := []string{}, uint64(0)

	buf := bytes.NewBuffer(syms)

	for {
		symAddr, name, err := nextSymbol(buf)
		if err == io.EOF {
			// Done. If there was an unfinished group, append it.
			// NOTE(review): symAddr is 0 at EOF, so symAddr-1 wraps to the
			// max uint64 — the last group has an effectively open end.
			if len(names) != 0 {
				if match := matchSymbol(names, start, symAddr-1, r, address); match != nil {
					symbols = append(symbols, &plugin.Sym{Name: match, File: file, Start: start, End: symAddr - 1})
				}
			}

			// And return the symbols.
			return symbols, nil
		}

		if err != nil {
			// There was some kind of serious error reading nm's output.
			return nil, err
		}

		// If this symbol is at the same address as the current group, add it to the group.
		if symAddr == start {
			names = append(names, name)
			continue
		}

		// Otherwise append the current group to the list of symbols.
		if match := matchSymbol(names, start, symAddr-1, r, address); match != nil {
			symbols = append(symbols, &plugin.Sym{Name: match, File: file, Start: start, End: symAddr - 1})
		}

		// And start a new group.
		names, start = []string{name}, symAddr
	}
}

// matchSymbol checks if a symbol is to be selected by checking its
// name to the regexp and optionally its address. It returns the name(s)
// to be used for the matched symbol, or nil if no match.
// An address inside [start, end] selects the whole group; otherwise the
// first name whose raw or demangled form matches r is returned alone.
func matchSymbol(names []string, start, end uint64, r *regexp.Regexp, address uint64) []string {
	if address != 0 && address >= start && address <= end {
		return names
	}
	for _, name := range names {
		if r == nil || r.MatchString(name) {
			return []string{name}
		}

		// Match all possible demangled versions of the name.
		for _, o := range [][]demangle.Option{
			{demangle.NoClones},
			{demangle.NoParams, demangle.NoEnclosingParams},
			{demangle.NoParams, demangle.NoEnclosingParams, demangle.NoTemplateParams},
		} {
			if demangled, err := demangle.ToString(name, o...); err == nil && r.MatchString(demangled) {
				return []string{demangled}
			}
		}
	}
	return nil
}

// disassemble parses the output of the objdump command and returns
// the assembly instructions in a slice.
+func disassemble(asm []byte) ([]plugin.Inst, error) { + buf := bytes.NewBuffer(asm) + function, file, line := "", "", 0 + var assembly []plugin.Inst + for { + input, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF { + return nil, err + } + if input == "" { + break + } + } + input = strings.TrimSpace(input) + + if fields := objdumpAsmOutputRE.FindStringSubmatch(input); len(fields) == 3 { + if address, err := strconv.ParseUint(fields[1], 16, 64); err == nil { + assembly = append(assembly, + plugin.Inst{ + Addr: address, + Text: fields[2], + Function: function, + File: file, + Line: line, + }) + continue + } + } + if fields := objdumpOutputFileLine.FindStringSubmatch(input); len(fields) == 3 { + if l, err := strconv.ParseUint(fields[2], 10, 32); err == nil { + file, line = fields[1], int(l) + } + continue + } + if fields := objdumpOutputFunction.FindStringSubmatch(input); len(fields) == 2 { + function = fields[1] + continue + } else { + if fields := objdumpOutputFunctionLLVM.FindStringSubmatch(input); len(fields) == 3 { + function = fields[2] + continue + } + } + // Reset on unrecognized lines. + function, file, line = "", "", 0 + } + + return assembly, nil +} + +// nextSymbol parses the nm output to find the next symbol listed. +// Skips over any output it cannot recognize. 
+func nextSymbol(buf *bytes.Buffer) (uint64, string, error) { + for { + line, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF || line == "" { + return 0, "", err + } + } + line = strings.TrimSpace(line) + + if fields := nmOutputRE.FindStringSubmatch(line); len(fields) == 4 { + if address, err := strconv.ParseUint(fields[1], 16, 64); err == nil { + return address, fields[3], nil + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go new file mode 100644 index 0000000000000000000000000000000000000000..b97ef8516934ae157ccdd068fa85e316ddd40b62 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go @@ -0,0 +1,360 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package driver

import (
	"errors"
	"fmt"
	"os"

	"github.com/google/pprof/internal/binutils"
	"github.com/google/pprof/internal/plugin"
)

// source describes where profiles come from and how they were requested on
// the command line.
type source struct {
	Sources   []string // profile paths/URLs remaining after flag parsing
	ExecName  string   // optional executable recognized as the first argument
	BuildID   string
	Base      []string // base profiles for subtraction or diffing
	DiffBase  bool     // true when Base holds diff bases rather than subtraction bases
	Normalize bool

	Seconds            int
	Timeout            int
	Symbolize          string
	HTTPHostport       string
	HTTPDisableBrowser bool
	Comment            string
}

// parseFlags parses the command lines through the specified flags package
// and returns the source of the profile and optionally the command
// for the kind of report to generate (nil for interactive use).
func parseFlags(o *plugin.Options) (*source, []string, error) {
	flag := o.Flagset
	// Comparisons.
	flagDiffBase := flag.StringList("diff_base", "", "Source of base profile for comparison")
	flagBase := flag.StringList("base", "", "Source of base profile for profile subtraction")
	// Source options.
	flagSymbolize := flag.String("symbolize", "", "Options for profile symbolization")
	flagBuildID := flag.String("buildid", "", "Override build id for first mapping")
	flagTimeout := flag.Int("timeout", -1, "Timeout in seconds for fetching a profile")
	flagAddComment := flag.String("add_comment", "", "Annotation string to record in the profile")
	// CPU profile options
	flagSeconds := flag.Int("seconds", -1, "Length of time for dynamic profiles")
	// Heap profile options
	flagInUseSpace := flag.Bool("inuse_space", false, "Display in-use memory size")
	flagInUseObjects := flag.Bool("inuse_objects", false, "Display in-use object counts")
	flagAllocSpace := flag.Bool("alloc_space", false, "Display allocated memory size")
	flagAllocObjects := flag.Bool("alloc_objects", false, "Display allocated object counts")
	// Contention profile options
	flagTotalDelay := flag.Bool("total_delay", false, "Display total delay at each region")
	flagContentions := flag.Bool("contentions", false, "Display number of delays at each region")
	flagMeanDelay := flag.Bool("mean_delay", false, "Display mean delay at each region")
	flagTools := flag.String("tools", os.Getenv("PPROF_TOOLS"), "Path for object tool pathnames")

	flagHTTP := flag.String("http", "", "Present interactive web UI at the specified http host:port")
	flagNoBrowser := flag.Bool("no_browser", false, "Skip opening a browser for the interactive web UI")

	// Flags that set configuration properties.
	cfg := currentConfig()
	configFlagSetter := installConfigFlags(flag, &cfg)

	// One flag per known report command; commands taking a regexp parameter
	// become string flags, the rest booleans.
	flagCommands := make(map[string]*bool)
	flagParamCommands := make(map[string]*string)
	for name, cmd := range pprofCommands {
		if cmd.hasParam {
			flagParamCommands[name] = flag.String(name, "", "Generate a report in "+name+" format, matching regexp")
		} else {
			flagCommands[name] = flag.Bool(name, false, "Generate a report in "+name+" format")
		}
	}

	args := flag.Parse(func() {
		o.UI.Print(usageMsgHdr +
			usage(true) +
			usageMsgSrc +
			flag.ExtraUsage() +
			usageMsgVars)
	})
	if len(args) == 0 {
		return nil, nil, errors.New("no profile source specified")
	}

	var execName string
	// Recognize first argument as an executable or buildid override.
	if len(args) > 1 {
		arg0 := args[0]
		if file, err := o.Obj.Open(arg0, 0, ^uint64(0), 0, ""); err == nil {
			file.Close()
			execName = arg0
			args = args[1:]
		}
	}

	// Apply any specified flags to cfg.
	if err := configFlagSetter(); err != nil {
		return nil, nil, err
	}

	cmd, err := outputFormat(flagCommands, flagParamCommands)
	if err != nil {
		return nil, nil, err
	}
	if cmd != nil && *flagHTTP != "" {
		return nil, nil, errors.New("-http is not compatible with an output format on the command line")
	}

	if *flagNoBrowser && *flagHTTP == "" {
		return nil, nil, errors.New("-no_browser only makes sense with -http")
	}

	// Resolve the legacy convenience flags into a single sample index;
	// later calls warn and are ignored if one was already chosen.
	si := cfg.SampleIndex
	si = sampleIndex(flagTotalDelay, si, "delay", "-total_delay", o.UI)
	si = sampleIndex(flagMeanDelay, si, "delay", "-mean_delay", o.UI)
	si = sampleIndex(flagContentions, si, "contentions", "-contentions", o.UI)
	si = sampleIndex(flagInUseSpace, si, "inuse_space", "-inuse_space", o.UI)
	si = sampleIndex(flagInUseObjects, si, "inuse_objects", "-inuse_objects", o.UI)
	si = sampleIndex(flagAllocSpace, si, "alloc_space", "-alloc_space", o.UI)
	si = sampleIndex(flagAllocObjects, si, "alloc_objects", "-alloc_objects", o.UI)
	cfg.SampleIndex = si

	if *flagMeanDelay {
		cfg.Mean = true
	}

	source := &source{
		Sources:            args,
		ExecName:           execName,
		BuildID:            *flagBuildID,
		Seconds:            *flagSeconds,
		Timeout:            *flagTimeout,
		Symbolize:          *flagSymbolize,
		HTTPHostport:       *flagHTTP,
		HTTPDisableBrowser: *flagNoBrowser,
		Comment:            *flagAddComment,
	}

	if err := source.addBaseProfiles(*flagBase, *flagDiffBase); err != nil {
		return nil, nil, err
	}

	normalize := cfg.Normalize
	if normalize && len(source.Base) == 0 {
		return nil, nil, errors.New("must have base profile to normalize by")
	}
	source.Normalize = normalize

	if bu, ok := o.Obj.(*binutils.Binutils); ok {
		bu.SetTools(*flagTools)
	}

	setCurrentConfig(cfg)
	return source, cmd, nil
}

// addBaseProfiles adds the list of base profiles or diff base profiles to
// the source. This function will return an error if both base and diff base
// profiles are specified.
+func (source *source) addBaseProfiles(flagBase, flagDiffBase []*string) error { + base, diffBase := dropEmpty(flagBase), dropEmpty(flagDiffBase) + if len(base) > 0 && len(diffBase) > 0 { + return errors.New("-base and -diff_base flags cannot both be specified") + } + + source.Base = base + if len(diffBase) > 0 { + source.Base, source.DiffBase = diffBase, true + } + return nil +} + +// dropEmpty list takes a slice of string pointers, and outputs a slice of +// non-empty strings associated with the flag. +func dropEmpty(list []*string) []string { + var l []string + for _, s := range list { + if *s != "" { + l = append(l, *s) + } + } + return l +} + +// installConfigFlags creates command line flags for configuration +// fields and returns a function which can be called after flags have +// been parsed to copy any flags specified on the command line to +// *cfg. +func installConfigFlags(flag plugin.FlagSet, cfg *config) func() error { + // List of functions for setting the different parts of a config. + var setters []func() + var err error // Holds any errors encountered while running setters. + + for _, field := range configFields { + n := field.name + help := configHelp[n] + var setter func() + switch ptr := cfg.fieldPtr(field).(type) { + case *bool: + f := flag.Bool(n, *ptr, help) + setter = func() { *ptr = *f } + case *int: + f := flag.Int(n, *ptr, help) + setter = func() { *ptr = *f } + case *float64: + f := flag.Float64(n, *ptr, help) + setter = func() { *ptr = *f } + case *string: + if len(field.choices) == 0 { + f := flag.String(n, *ptr, help) + setter = func() { *ptr = *f } + } else { + // Make a separate flag per possible choice. + // Set all flags to initially false so we can + // identify conflicts. 
+ bools := make(map[string]*bool) + for _, choice := range field.choices { + bools[choice] = flag.Bool(choice, false, configHelp[choice]) + } + setter = func() { + var set []string + for k, v := range bools { + if *v { + set = append(set, k) + } + } + switch len(set) { + case 0: + // Leave as default value. + case 1: + *ptr = set[0] + default: + err = fmt.Errorf("conflicting options set: %v", set) + } + } + } + } + setters = append(setters, setter) + } + + return func() error { + // Apply the setter for every flag. + for _, setter := range setters { + setter() + if err != nil { + return err + } + } + return nil + } +} + +func sampleIndex(flag *bool, si string, sampleType, option string, ui plugin.UI) string { + if *flag { + if si == "" { + return sampleType + } + ui.PrintErr("Multiple value selections, ignoring ", option) + } + return si +} + +func outputFormat(bcmd map[string]*bool, acmd map[string]*string) (cmd []string, err error) { + for n, b := range bcmd { + if *b { + if cmd != nil { + return nil, errors.New("must set at most one output format") + } + cmd = []string{n} + } + } + for n, s := range acmd { + if *s != "" { + if cmd != nil { + return nil, errors.New("must set at most one output format") + } + cmd = []string{n, *s} + } + } + return cmd, nil +} + +var usageMsgHdr = `usage: + +Produce output in the specified format. + + pprof [options] [binary] ... + +Omit the format to get an interactive shell whose commands can be used +to generate various views of a profile + + pprof [options] [binary] ... + +Omit the format and provide the "-http" flag to get an interactive web +interface at the specified host:port that can be used to navigate through +various views of a profile. + + pprof -http [host]:[port] [options] [binary] ... 
+ +Details: +` + +var usageMsgSrc = "\n\n" + + " Source options:\n" + + " -seconds Duration for time-based profile collection\n" + + " -timeout Timeout in seconds for profile collection\n" + + " -buildid Override build id for main binary\n" + + " -add_comment Free-form annotation to add to the profile\n" + + " Displayed on some reports or with pprof -comments\n" + + " -diff_base source Source of base profile for comparison\n" + + " -base source Source of base profile for profile subtraction\n" + + " profile.pb.gz Profile in compressed protobuf format\n" + + " legacy_profile Profile in legacy pprof format\n" + + " http://host/profile URL for profile handler to retrieve\n" + + " -symbolize= Controls source of symbol information\n" + + " none Do not attempt symbolization\n" + + " local Examine only local binaries\n" + + " fastlocal Only get function names from local binaries\n" + + " remote Do not examine local binaries\n" + + " force Force re-symbolization\n" + + " Binary Local path or build id of binary for symbolization\n" + +var usageMsgVars = "\n\n" + + " Misc options:\n" + + " -http Provide web interface at host:port.\n" + + " Host is optional and 'localhost' by default.\n" + + " Port is optional and a randomly available port by default.\n" + + " -no_browser Skip opening a browser for the interactive web UI.\n" + + " -tools Search path for object tools\n" + + "\n" + + " Legacy convenience options:\n" + + " -inuse_space Same as -sample_index=inuse_space\n" + + " -inuse_objects Same as -sample_index=inuse_objects\n" + + " -alloc_space Same as -sample_index=alloc_space\n" + + " -alloc_objects Same as -sample_index=alloc_objects\n" + + " -total_delay Same as -sample_index=delay\n" + + " -contentions Same as -sample_index=contentions\n" + + " -mean_delay Same as -mean -sample_index=delay\n" + + "\n" + + " Environment Variables:\n" + + " PPROF_TMPDIR Location for saved profiles (default $HOME/pprof)\n" + + " PPROF_TOOLS Search path for object-level tools\n" + + " 
PPROF_BINARY_PATH Search path for local binary files\n" + + " default: $HOME/pprof/binaries\n" + + " searches $buildid/$name, $buildid/*, $path/$buildid,\n" + + " ${buildid:0:2}/${buildid:2}.debug, $name, $path,\n" + + " ${name}.debug, $dir/.debug/${name}.debug,\n" + + " usr/lib/debug/$dir/${name}.debug\n" + + " * On Windows, %USERPROFILE% is used instead of $HOME" diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go new file mode 100644 index 0000000000000000000000000000000000000000..c9edf10bb43787815aa1b58fa8f2b65bfefab1b7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go @@ -0,0 +1,459 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "runtime" + "sort" + "strings" + "time" + + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/internal/report" +) + +// commands describes the commands accepted by pprof. +type commands map[string]*command + +// command describes the actions for a pprof command. 
Includes a +// function for command-line completion, the report format to use +// during report generation, any postprocessing functions, and whether +// the command expects a regexp parameter (typically a function name). +type command struct { + format int // report format to generate + postProcess PostProcessor // postprocessing to run on report + visualizer PostProcessor // display output using some callback + hasParam bool // collect a parameter from the CLI + description string // single-line description text saying what the command does + usage string // multi-line help text saying how the command is used +} + +// help returns a help string for a command. +func (c *command) help(name string) string { + message := c.description + "\n" + if c.usage != "" { + message += " Usage:\n" + lines := strings.Split(c.usage, "\n") + for _, line := range lines { + message += fmt.Sprintf(" %s\n", line) + } + } + return message + "\n" +} + +// AddCommand adds an additional command to the set of commands +// accepted by pprof. This enables extensions to add new commands for +// specialized visualization formats. If the command specified already +// exists, it is overwritten. +func AddCommand(cmd string, format int, post PostProcessor, desc, usage string) { + pprofCommands[cmd] = &command{format, post, nil, false, desc, usage} +} + +// SetVariableDefault sets the default value for a pprof +// variable. This enables extensions to set their own defaults. +func SetVariableDefault(variable, value string) { + configure(variable, value) +} + +// PostProcessor is a function that applies post-processing to the report output +type PostProcessor func(input io.Reader, output io.Writer, ui plugin.UI) error + +// interactiveMode is true if pprof is running on interactive mode, reading +// commands from its shell. +var interactiveMode = false + +// pprofCommands are the report generation commands recognized by pprof. 
var pprofCommands = commands{
	// Commands that require no post-processing.
	"comments": {report.Comments, nil, nil, false, "Output all profile comments", ""},
	"disasm":   {report.Dis, nil, nil, true, "Output assembly listings annotated with samples", listHelp("disasm", true)},
	"dot":      {report.Dot, nil, nil, false, "Outputs a graph in DOT format", reportHelp("dot", false, true)},
	"list":     {report.List, nil, nil, true, "Output annotated source for functions matching regexp", listHelp("list", false)},
	"peek":     {report.Tree, nil, nil, true, "Output callers/callees of functions matching regexp", "peek func_regex\nDisplay callers and callees of functions matching func_regex."},
	"raw":      {report.Raw, nil, nil, false, "Outputs a text representation of the raw profile", ""},
	"tags":     {report.Tags, nil, nil, false, "Outputs all tags in the profile", "tags [tag_regex]* [-ignore_regex]* [>file]\nList tags with key:value matching tag_regex and exclude ignore_regex."},
	"text":     {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("text", true, true)},
	"top":      {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("top", true, true)},
	"traces":   {report.Traces, nil, nil, false, "Outputs all profile samples in text form", ""},
	"tree":     {report.Tree, nil, nil, false, "Outputs a text rendering of call graph", reportHelp("tree", true, true)},

	// Save binary formats to a file
	"callgrind": {report.Callgrind, nil, awayFromTTY("callgraph.out"), false, "Outputs a graph in callgrind format", reportHelp("callgrind", false, true)},
	"proto":     {report.Proto, nil, awayFromTTY("pb.gz"), false, "Outputs the profile in compressed protobuf format", ""},
	"topproto":  {report.TopProto, nil, awayFromTTY("pb.gz"), false, "Outputs top entries in compressed protobuf format", ""},

	// Generate report in DOT format and postprocess with dot
	"gif": {report.Dot, invokeDot("gif"), awayFromTTY("gif"), false, "Outputs a graph image in GIF format", reportHelp("gif", false, true)},
	"pdf": {report.Dot, invokeDot("pdf"), awayFromTTY("pdf"), false, "Outputs a graph in PDF format", reportHelp("pdf", false, true)},
	"png": {report.Dot, invokeDot("png"), awayFromTTY("png"), false, "Outputs a graph image in PNG format", reportHelp("png", false, true)},
	"ps":  {report.Dot, invokeDot("ps"), awayFromTTY("ps"), false, "Outputs a graph in PS format", reportHelp("ps", false, true)},

	// Save SVG output into a file
	"svg": {report.Dot, massageDotSVG(), awayFromTTY("svg"), false, "Outputs a graph in SVG format", reportHelp("svg", false, true)},

	// Visualize postprocessed dot output
	"eog":    {report.Dot, invokeDot("svg"), invokeVisualizer("svg", []string{"eog"}), false, "Visualize graph through eog", reportHelp("eog", false, false)},
	"evince": {report.Dot, invokeDot("pdf"), invokeVisualizer("pdf", []string{"evince"}), false, "Visualize graph through evince", reportHelp("evince", false, false)},
	"gv":     {report.Dot, invokeDot("ps"), invokeVisualizer("ps", []string{"gv --noantialias"}), false, "Visualize graph through gv", reportHelp("gv", false, false)},
	"web":    {report.Dot, massageDotSVG(), invokeVisualizer("svg", browsers()), false, "Visualize graph through web browser", reportHelp("web", false, false)},

	// Visualize callgrind output
	"kcachegrind": {report.Callgrind, nil, invokeVisualizer("grind", kcachegrind), false, "Visualize report in KCachegrind", reportHelp("kcachegrind", false, false)},

	// Visualize HTML directly generated by report.
	"weblist": {report.WebList, nil, invokeVisualizer("html", browsers()), true, "Display annotated source in a web browser", listHelp("weblist", false)},
}

// configHelp contains help text per configuration parameter.
var configHelp = map[string]string{
	// Filename for file-based output formats, stdout by default.
	"output": helpText("Output filename for file-based outputs"),

	// Comparisons.
	"drop_negative": helpText(
		"Ignore negative differences",
		"Do not show any locations with values <0."),

	// Graph handling options.
	"call_tree": helpText(
		"Create a context-sensitive call tree",
		"Treat locations reached through different paths as separate."),

	// Display options.
	"relative_percentages": helpText(
		"Show percentages relative to focused subgraph",
		"If unset, percentages are relative to full graph before focusing",
		"to facilitate comparison with original graph."),
	"unit": helpText(
		"Measurement units to display",
		"Scale the sample values to this unit.",
		"For time-based profiles, use seconds, milliseconds, nanoseconds, etc.",
		"For memory profiles, use megabytes, kilobytes, bytes, etc.",
		"Using auto will scale each value independently to the most natural unit."),
	"compact_labels": "Show minimal headers",
	"source_path":    "Search path for source files",
	"trim_path":      "Path to trim from source paths before search",
	"intel_syntax": helpText(
		"Show assembly in Intel syntax",
		"Only applicable to commands `disasm` and `weblist`"),

	// Filtering options
	"nodecount": helpText(
		"Max number of nodes to show",
		"Uses heuristics to limit the number of locations to be displayed.",
		"On graphs, dotted edges represent paths through nodes that have been removed."),
	// NOTE(review): these two strings read "below *total"; a fraction
	// placeholder (e.g. "<f>") looks like it was lost in transit — confirm
	// against the upstream source.
	"nodefraction": "Hide nodes below *total",
	"edgefraction": "Hide edges below *total",
	"trim": helpText(
		"Honor nodefraction/edgefraction/nodecount defaults",
		"Set to false to get the full profile, without any trimming."),
	"focus": helpText(
		"Restricts to samples going through a node matching regexp",
		"Discard samples that do not include a node matching this regexp.",
		"Matching includes the function name, filename or object name."),
	"ignore": helpText(
		"Skips paths going through any nodes matching regexp",
		"If set, discard samples that include a node matching this regexp.",
		"Matching includes the function name, filename or object name."),
	"prune_from": helpText(
		"Drops any functions below the matched frame.",
		"If set, any frames matching the specified regexp and any frames",
		"below it will be dropped from each sample."),
	"hide": helpText(
		"Skips nodes matching regexp",
		"Discard nodes that match this location.",
		"Other nodes from samples that include this location will be shown.",
		"Matching includes the function name, filename or object name."),
	"show": helpText(
		"Only show nodes matching regexp",
		"If set, only show nodes that match this location.",
		"Matching includes the function name, filename or object name."),
	"show_from": helpText(
		"Drops functions above the highest matched frame.",
		"If set, all frames above the highest match are dropped from every sample.",
		"Matching includes the function name, filename or object name."),
	"tagroot": helpText(
		"Adds pseudo stack frames for labels key/value pairs at the callstack root.",
		"A comma-separated list of label keys.",
		"The first key creates frames at the new root."),
	"tagleaf": helpText(
		"Adds pseudo stack frames for labels key/value pairs at the callstack leaf.",
		"A comma-separated list of label keys.",
		"The last key creates frames at the new leaf."),
	"tagfocus": helpText(
		"Restricts to samples with tags in range or matched by regexp",
		"Use name=value syntax to limit the matching to a specific tag.",
		"Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:",
		"String tag filter examples: foo, foo.*bar, mytag=foo.*bar"),
	"tagignore": helpText(
		"Discard samples with tags in range or matched by regexp",
		"Use name=value syntax to limit the matching to a specific tag.",
		"Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:",
		"String tag filter examples: foo, foo.*bar, mytag=foo.*bar"),
	"tagshow": helpText(
		"Only consider tags matching this regexp",
		"Discard tags that do not match this regexp"),
	"taghide": helpText(
		"Skip tags matching this regexp",
		"Discard tags that match this regexp"),
	// Heap profile options
	"divide_by": helpText(
		"Ratio to divide all samples before visualization",
		"Divide all samples values by a constant, eg the number of processors or jobs."),
	"mean": helpText(
		"Average sample value over first value (count)",
		"For memory profiles, report average memory per allocation.",
		"For time-based profiles, report average time per event."),
	"sample_index": helpText(
		"Sample value to report (0-based index or name)",
		"Profiles contain multiple values per sample.",
		"Use sample_index=i to select the ith value (starting at 0)."),
	"normalize": helpText(
		"Scales profile based on the base profile."),

	// Data sorting criteria
	"flat": helpText("Sort entries based on own weight"),
	"cum":  helpText("Sort entries based on cumulative weight"),

	// Output granularity
	"functions": helpText(
		"Aggregate at the function level.",
		"Ignores the filename where the function was defined."),
	"filefunctions": helpText(
		"Aggregate at the function level.",
		"Takes into account the filename where the function was defined."),
	"files": "Aggregate at the file level.",
	"lines": "Aggregate at the source code line level.",
	"addresses": helpText(
		"Aggregate at the address level.",
		"Includes functions' addresses in the output."),
	"noinlines": helpText(
		"Ignore inlines.",
		"Attributes inlined functions to their first out-of-line caller."),
}

// helpText joins the given lines with newlines and appends a trailing newline.
func helpText(s ...string) string {
	return strings.Join(s, "\n") + "\n"
}

// usage returns a string describing the pprof commands and configuration
// options. If commandLine is set, the output reflects CLI usage.
+func usage(commandLine bool) string { + var prefix string + if commandLine { + prefix = "-" + } + fmtHelp := func(c, d string) string { + return fmt.Sprintf(" %-16s %s", c, strings.SplitN(d, "\n", 2)[0]) + } + + var commands []string + for name, cmd := range pprofCommands { + commands = append(commands, fmtHelp(prefix+name, cmd.description)) + } + sort.Strings(commands) + + var help string + if commandLine { + help = " Output formats (select at most one):\n" + } else { + help = " Commands:\n" + commands = append(commands, fmtHelp("o/options", "List options and their current values")) + commands = append(commands, fmtHelp("q/quit/exit/^D", "Exit pprof")) + } + + help = help + strings.Join(commands, "\n") + "\n\n" + + " Options:\n" + + // Print help for configuration options after sorting them. + // Collect choices for multi-choice options print them together. + var variables []string + var radioStrings []string + for _, f := range configFields { + if len(f.choices) == 0 { + variables = append(variables, fmtHelp(prefix+f.name, configHelp[f.name])) + continue + } + // Format help for for this group. 
+ s := []string{fmtHelp(f.name, "")} + for _, choice := range f.choices { + s = append(s, " "+fmtHelp(prefix+choice, configHelp[choice])) + } + radioStrings = append(radioStrings, strings.Join(s, "\n")) + } + sort.Strings(variables) + sort.Strings(radioStrings) + return help + strings.Join(variables, "\n") + "\n\n" + + " Option groups (only set one per group):\n" + + strings.Join(radioStrings, "\n") +} + +func reportHelp(c string, cum, redirect bool) string { + h := []string{ + c + " [n] [focus_regex]* [-ignore_regex]*", + "Include up to n samples", + "Include samples matching focus_regex, and exclude ignore_regex.", + } + if cum { + h[0] += " [-cum]" + h = append(h, "-cum sorts the output by cumulative weight") + } + if redirect { + h[0] += " >f" + h = append(h, "Optionally save the report on the file f") + } + return strings.Join(h, "\n") +} + +func listHelp(c string, redirect bool) string { + h := []string{ + c + " [-focus_regex]* [-ignore_regex]*", + "Include functions matching func_regex, or including the address specified.", + "Include samples matching focus_regex, and exclude ignore_regex.", + } + if redirect { + h[0] += " >f" + h = append(h, "Optionally save the report on the file f") + } + return strings.Join(h, "\n") +} + +// browsers returns a list of commands to attempt for web visualization. +func browsers() []string { + var cmds []string + if userBrowser := os.Getenv("BROWSER"); userBrowser != "" { + cmds = append(cmds, userBrowser) + } + switch runtime.GOOS { + case "darwin": + cmds = append(cmds, "/usr/bin/open") + case "windows": + cmds = append(cmds, "cmd /c start") + default: + // Commands opening browsers are prioritized over xdg-open, so browser() + // command can be used on linux to open the .svg file generated by the -web + // command (the .svg file includes embedded javascript so is best viewed in + // a browser). + cmds = append(cmds, []string{"chrome", "google-chrome", "chromium", "firefox", "sensible-browser"}...) 
+ if os.Getenv("DISPLAY") != "" { + // xdg-open is only for use in a desktop environment. + cmds = append(cmds, "xdg-open") + } + } + return cmds +} + +var kcachegrind = []string{"kcachegrind"} + +// awayFromTTY saves the output in a file if it would otherwise go to +// the terminal screen. This is used to avoid dumping binary data on +// the screen. +func awayFromTTY(format string) PostProcessor { + return func(input io.Reader, output io.Writer, ui plugin.UI) error { + if output == os.Stdout && (ui.IsTerminal() || interactiveMode) { + tempFile, err := newTempFile("", "profile", "."+format) + if err != nil { + return err + } + ui.PrintErr("Generating report in ", tempFile.Name()) + output = tempFile + } + _, err := io.Copy(output, input) + return err + } +} + +func invokeDot(format string) PostProcessor { + return func(input io.Reader, output io.Writer, ui plugin.UI) error { + cmd := exec.Command("dot", "-T"+format) + cmd.Stdin, cmd.Stdout, cmd.Stderr = input, output, os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to execute dot. Is Graphviz installed? Error: %v", err) + } + return nil + } +} + +// massageDotSVG invokes the dot tool to generate an SVG image and alters +// the image to have panning capabilities when viewed in a browser. 
+func massageDotSVG() PostProcessor {
+	generateSVG := invokeDot("svg")
+	return func(input io.Reader, output io.Writer, ui plugin.UI) error {
+		baseSVG := new(bytes.Buffer)
+		if err := generateSVG(input, baseSVG, ui); err != nil {
+			return err
+		}
+		_, err := output.Write([]byte(massageSVG(baseSVG.String())))
+		return err
+	}
+}
+
+// invokeVisualizer returns a PostProcessor that writes the report to a
+// temporary file with the given suffix and launches the first visualizer
+// command that starts successfully.
+func invokeVisualizer(suffix string, visualizers []string) PostProcessor {
+	return func(input io.Reader, output io.Writer, ui plugin.UI) error {
+		tempFile, err := newTempFile(os.TempDir(), "pprof", "."+suffix)
+		if err != nil {
+			return err
+		}
+		deferDeleteTempFile(tempFile.Name())
+		if _, err := io.Copy(tempFile, input); err != nil {
+			return err
+		}
+		tempFile.Close()
+		// Try visualizers until one is successful
+		for _, v := range visualizers {
+			// Separate command and arguments for exec.Command.
+			args := strings.Split(v, " ")
+			if len(args) == 0 {
+				continue
+			}
+			viewer := exec.Command(args[0], append(args[1:], tempFile.Name())...)
+			viewer.Stderr = os.Stderr
+			if err = viewer.Start(); err == nil {
+				// Wait for a second so that the visualizer has a chance to
+				// open the input file. This needs to be done even if we're
+				// waiting for the visualizer as it can be just a wrapper that
+				// spawns a browser tab and returns right away.
+				defer func(t <-chan time.Time) {
+					<-t
+				}(time.After(time.Second))
+				// On interactive mode, let the visualizer run in the background
+				// so other commands can be issued.
+				if !interactiveMode {
+					return viewer.Wait()
+				}
+				return nil
+			}
+		}
+		return err
+	}
+}
+
+// stringToBool is a custom parser for bools. We avoid using strconv.ParseBool
+// to remain compatible with old pprof behavior (e.g., treating "" as true).
+func stringToBool(s string) (bool, error) {
+	switch strings.ToLower(s) {
+	case "true", "t", "yes", "y", "1", "":
+		return true, nil
+	case "false", "f", "no", "n", "0":
+		return false, nil
+	default:
+		return false, fmt.Errorf(`illegal value "%s" for bool variable`, s)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..9fcdd459b271fa89499815d20e77257aeb35faa4
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go
@@ -0,0 +1,371 @@
+package driver
+
+import (
+	"fmt"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+// config holds settings for a single named config.
+// The JSON tag name for a field is used both for JSON encoding and as
+// a named variable.
+type config struct {
+	// Filename for file-based output formats, stdout by default.
+	Output string `json:"-"`
+
+	// Display options.
+	CallTree            bool    `json:"call_tree,omitempty"`
+	RelativePercentages bool    `json:"relative_percentages,omitempty"`
+	Unit                string  `json:"unit,omitempty"`
+	CompactLabels       bool    `json:"compact_labels,omitempty"`
+	SourcePath          string  `json:"-"`
+	TrimPath            string  `json:"-"`
+	IntelSyntax         bool    `json:"intel_syntax,omitempty"`
+	Mean                bool    `json:"mean,omitempty"`
+	SampleIndex         string  `json:"-"`
+	DivideBy            float64 `json:"-"`
+	Normalize           bool    `json:"normalize,omitempty"`
+	Sort                string  `json:"sort,omitempty"`
+
+	// Label pseudo stack frame generation options
+	TagRoot string `json:"tagroot,omitempty"`
+	TagLeaf string `json:"tagleaf,omitempty"`
+
+	// Filtering options
+	DropNegative bool    `json:"drop_negative,omitempty"`
+	NodeCount    int     `json:"nodecount,omitempty"`
+	NodeFraction float64 `json:"nodefraction,omitempty"`
+	EdgeFraction float64 `json:"edgefraction,omitempty"`
+	Trim         bool    `json:"trim,omitempty"`
+	Focus        string  `json:"focus,omitempty"`
+	Ignore       string  `json:"ignore,omitempty"`
+	PruneFrom    string  `json:"prune_from,omitempty"`
+	Hide         string  `json:"hide,omitempty"`
+	Show         string  `json:"show,omitempty"`
+	ShowFrom     string  `json:"show_from,omitempty"`
+	TagFocus     string  `json:"tagfocus,omitempty"`
+	TagIgnore    string  `json:"tagignore,omitempty"`
+	TagShow      string  `json:"tagshow,omitempty"`
+	TagHide      string  `json:"taghide,omitempty"`
+	NoInlines    bool    `json:"noinlines,omitempty"`
+
+	// Output granularity
+	Granularity string `json:"granularity,omitempty"`
+}
+
+// defaultConfig returns the default configuration values; it is unaffected by
+// flags and interactive assignments.
+func defaultConfig() config {
+	return config{
+		Unit:         "minimum",
+		NodeCount:    -1,
+		NodeFraction: 0.005,
+		EdgeFraction: 0.001,
+		Trim:         true,
+		DivideBy:     1.0,
+		Sort:         "flat",
+		Granularity:  "functions",
+	}
+}
+
+// currentConfig holds the current configuration values; it is affected by
+// flags and interactive assignments.
+var currentCfg = defaultConfig() +var currentMu sync.Mutex + +func currentConfig() config { + currentMu.Lock() + defer currentMu.Unlock() + return currentCfg +} + +func setCurrentConfig(cfg config) { + currentMu.Lock() + defer currentMu.Unlock() + currentCfg = cfg +} + +// configField contains metadata for a single configuration field. +type configField struct { + name string // JSON field name/key in variables + urlparam string // URL parameter name + saved bool // Is field saved in settings? + field reflect.StructField // Field in config + choices []string // Name Of variables in group + defaultValue string // Default value for this field. +} + +var ( + configFields []configField // Precomputed metadata per config field + + // configFieldMap holds an entry for every config field as well as an + // entry for every valid choice for a multi-choice field. + configFieldMap map[string]configField +) + +func init() { + // Config names for fields that are not saved in settings and therefore + // do not have a JSON name. + notSaved := map[string]string{ + // Not saved in settings, but present in URLs. + "SampleIndex": "sample_index", + + // Following fields are also not placed in URLs. + "Output": "output", + "SourcePath": "source_path", + "TrimPath": "trim_path", + "DivideBy": "divide_by", + } + + // choices holds the list of allowed values for config fields that can + // take on one of a bounded set of values. + choices := map[string][]string{ + "sort": {"cum", "flat"}, + "granularity": {"functions", "filefunctions", "files", "lines", "addresses"}, + } + + // urlparam holds the mapping from a config field name to the URL + // parameter used to hold that config field. If no entry is present for + // a name, the corresponding field is not saved in URLs. 
+ urlparam := map[string]string{ + "drop_negative": "dropneg", + "call_tree": "calltree", + "relative_percentages": "rel", + "unit": "unit", + "compact_labels": "compact", + "intel_syntax": "intel", + "nodecount": "n", + "nodefraction": "nf", + "edgefraction": "ef", + "trim": "trim", + "focus": "f", + "ignore": "i", + "prune_from": "prunefrom", + "hide": "h", + "show": "s", + "show_from": "sf", + "tagfocus": "tf", + "tagignore": "ti", + "tagshow": "ts", + "taghide": "th", + "mean": "mean", + "sample_index": "si", + "normalize": "norm", + "sort": "sort", + "granularity": "g", + "noinlines": "noinlines", + } + + def := defaultConfig() + configFieldMap = map[string]configField{} + t := reflect.TypeOf(config{}) + for i, n := 0, t.NumField(); i < n; i++ { + field := t.Field(i) + js := strings.Split(field.Tag.Get("json"), ",") + if len(js) == 0 { + continue + } + // Get the configuration name for this field. + name := js[0] + if name == "-" { + name = notSaved[field.Name] + if name == "" { + // Not a configurable field. + continue + } + } + f := configField{ + name: name, + urlparam: urlparam[name], + saved: (name == js[0]), + field: field, + choices: choices[name], + } + f.defaultValue = def.get(f) + configFields = append(configFields, f) + configFieldMap[f.name] = f + for _, choice := range f.choices { + configFieldMap[choice] = f + } + } +} + +// fieldPtr returns a pointer to the field identified by f in *cfg. +func (cfg *config) fieldPtr(f configField) interface{} { + // reflect.ValueOf: converts to reflect.Value + // Elem: dereferences cfg to make *cfg + // FieldByIndex: fetches the field + // Addr: takes address of field + // Interface: converts back from reflect.Value to a regular value + return reflect.ValueOf(cfg).Elem().FieldByIndex(f.field.Index).Addr().Interface() +} + +// get returns the value of field f in cfg. 
+func (cfg *config) get(f configField) string { + switch ptr := cfg.fieldPtr(f).(type) { + case *string: + return *ptr + case *int: + return fmt.Sprint(*ptr) + case *float64: + return fmt.Sprint(*ptr) + case *bool: + return fmt.Sprint(*ptr) + } + panic(fmt.Sprintf("unsupported config field type %v", f.field.Type)) +} + +// set sets the value of field f in cfg to value. +func (cfg *config) set(f configField, value string) error { + switch ptr := cfg.fieldPtr(f).(type) { + case *string: + if len(f.choices) > 0 { + // Verify that value is one of the allowed choices. + for _, choice := range f.choices { + if choice == value { + *ptr = value + return nil + } + } + return fmt.Errorf("invalid %q value %q", f.name, value) + } + *ptr = value + case *int: + v, err := strconv.Atoi(value) + if err != nil { + return err + } + *ptr = v + case *float64: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return err + } + *ptr = v + case *bool: + v, err := stringToBool(value) + if err != nil { + return err + } + *ptr = v + default: + panic(fmt.Sprintf("unsupported config field type %v", f.field.Type)) + } + return nil +} + +// isConfigurable returns true if name is either the name of a config field, or +// a valid value for a multi-choice config field. +func isConfigurable(name string) bool { + _, ok := configFieldMap[name] + return ok +} + +// isBoolConfig returns true if name is either name of a boolean config field, +// or a valid value for a multi-choice config field. +func isBoolConfig(name string) bool { + f, ok := configFieldMap[name] + if !ok { + return false + } + if name != f.name { + return true // name must be one possible value for the field + } + var cfg config + _, ok = cfg.fieldPtr(f).(*bool) + return ok +} + +// completeConfig returns the list of configurable names starting with prefix. 
+func completeConfig(prefix string) []string { + var result []string + for v := range configFieldMap { + if strings.HasPrefix(v, prefix) { + result = append(result, v) + } + } + return result +} + +// configure stores the name=value mapping into the current config, correctly +// handling the case when name identifies a particular choice in a field. +func configure(name, value string) error { + currentMu.Lock() + defer currentMu.Unlock() + f, ok := configFieldMap[name] + if !ok { + return fmt.Errorf("unknown config field %q", name) + } + if f.name == name { + return currentCfg.set(f, value) + } + // name must be one of the choices. If value is true, set field-value + // to name. + if v, err := strconv.ParseBool(value); v && err == nil { + return currentCfg.set(f, name) + } + return fmt.Errorf("unknown config field %q", name) +} + +// resetTransient sets all transient fields in *cfg to their currently +// configured values. +func (cfg *config) resetTransient() { + current := currentConfig() + cfg.Output = current.Output + cfg.SourcePath = current.SourcePath + cfg.TrimPath = current.TrimPath + cfg.DivideBy = current.DivideBy + cfg.SampleIndex = current.SampleIndex +} + +// applyURL updates *cfg based on params. +func (cfg *config) applyURL(params url.Values) error { + for _, f := range configFields { + var value string + if f.urlparam != "" { + value = params.Get(f.urlparam) + } + if value == "" { + continue + } + if err := cfg.set(f, value); err != nil { + return fmt.Errorf("error setting config field %s: %v", f.name, err) + } + } + return nil +} + +// makeURL returns a URL based on initialURL that contains the config contents +// as parameters. The second result is true iff a parameter value was changed. 
+func (cfg *config) makeURL(initialURL url.URL) (url.URL, bool) { + q := initialURL.Query() + changed := false + for _, f := range configFields { + if f.urlparam == "" || !f.saved { + continue + } + v := cfg.get(f) + if v == f.defaultValue { + v = "" // URL for of default value is the empty string. + } else if f.field.Type.Kind() == reflect.Bool { + // Shorten bool values to "f" or "t" + v = v[:1] + } + if q.Get(f.urlparam) == v { + continue + } + changed = true + if v == "" { + q.Del(f.urlparam) + } else { + q.Set(f.urlparam, v) + } + } + if changed { + initialURL.RawQuery = q.Encode() + } + return initialURL, changed +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go new file mode 100644 index 0000000000000000000000000000000000000000..27681c540fdcdb2f2911731125def455098491a7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go @@ -0,0 +1,386 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package driver implements the core pprof functionality. It can be +// parameterized with a flag implementation, fetch and symbolize +// mechanisms. 
+package driver
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/google/pprof/internal/plugin"
+	"github.com/google/pprof/internal/report"
+	"github.com/google/pprof/profile"
+)
+
+// PProf acquires a profile, and symbolizes it using a profile
+// manager. Then it generates a report formatted according to the
+// options selected through the flags package.
+func PProf(eo *plugin.Options) error {
+	// Remove any temporary files created during pprof processing.
+	defer cleanupTempFiles()
+
+	o := setDefaults(eo)
+
+	src, cmd, err := parseFlags(o)
+	if err != nil {
+		return err
+	}
+
+	p, err := fetchProfiles(src, o)
+	if err != nil {
+		return err
+	}
+
+	if cmd != nil {
+		return generateReport(p, cmd, currentConfig(), o)
+	}
+
+	if src.HTTPHostport != "" {
+		return serveWebInterface(src.HTTPHostport, p, o, src.HTTPDisableBrowser)
+	}
+	return interactive(p, o)
+}
+
+// generateRawReport is allowed to modify p.
+func generateRawReport(p *profile.Profile, cmd []string, cfg config, o *plugin.Options) (*command, *report.Report, error) {
+	// Identify units of numeric tags in profile.
+	numLabelUnits := identifyNumLabelUnits(p, o.UI)
+
+	// Get report output format
+	c := pprofCommands[cmd[0]]
+	if c == nil {
+		panic("unexpected nil command")
+	}
+
+	cfg = applyCommandOverrides(cmd[0], c.format, cfg)
+
+	// Create label pseudo nodes before filtering, in case the filters use
+	// the generated nodes.
+	generateTagRootsLeaves(p, cfg, o.UI)
+
+	// Delay focus after configuring report to get percentages on all samples.
+	relative := cfg.RelativePercentages
+	if relative {
+		if err := applyFocus(p, numLabelUnits, cfg, o.UI); err != nil {
+			return nil, nil, err
+		}
+	}
+	ropt, err := reportOptions(p, numLabelUnits, cfg)
+	if err != nil {
+		return nil, nil, err
+	}
+	ropt.OutputFormat = c.format
+	if len(cmd) == 2 {
+		s, err := regexp.Compile(cmd[1])
+		if err != nil {
+			return nil, nil, fmt.Errorf("parsing argument regexp %s: %v", cmd[1], err)
+		}
+		ropt.Symbol = s
+	}
+
+	rpt := report.New(p, ropt)
+	if !relative {
+		if err := applyFocus(p, numLabelUnits, cfg, o.UI); err != nil {
+			return nil, nil, err
+		}
+	}
+	if err := aggregate(p, cfg); err != nil {
+		return nil, nil, err
+	}
+
+	return c, rpt, nil
+}
+
+// generateReport is allowed to modify p.
+func generateReport(p *profile.Profile, cmd []string, cfg config, o *plugin.Options) error {
+	c, rpt, err := generateRawReport(p, cmd, cfg, o)
+	if err != nil {
+		return err
+	}
+
+	// Generate the report.
+	dst := new(bytes.Buffer)
+	if err := report.Generate(dst, rpt, o.Obj); err != nil {
+		return err
+	}
+	src := dst
+
+	// If necessary, perform any data post-processing.
+	if c.postProcess != nil {
+		dst = new(bytes.Buffer)
+		if err := c.postProcess(src, dst, o.UI); err != nil {
+			return err
+		}
+		src = dst
+	}
+
+	// If no output is specified, use default visualizer.
+	output := cfg.Output
+	if output == "" {
+		if c.visualizer != nil {
+			return c.visualizer(src, os.Stdout, o.UI)
+		}
+		_, err := src.WriteTo(os.Stdout)
+		return err
+	}
+
+	// Output to specified file.
+	o.UI.PrintErr("Generating report in ", output)
+	out, err := o.Writer.Open(output)
+	if err != nil {
+		return err
+	}
+	if _, err := src.WriteTo(out); err != nil {
+		out.Close()
+		return err
+	}
+	return out.Close()
+}
+
+// applyCommandOverrides adjusts cfg for the requirements of the given command
+// and output format, returning the adjusted copy.
+func applyCommandOverrides(cmd string, outputFormat int, cfg config) config {
+	// Some report types override the trim flag to false below. This is to make
+	// sure the default heuristics of excluding insignificant nodes and edges
+	// from the call graph do not apply. One example where it is important is
+	// annotated source or disassembly listing. Those reports run on a specific
+	// function (or functions), but the trimming is applied before the function
+	// data is selected. So, with trimming enabled, the report could end up
+	// showing no data if the specified function is "uninteresting" as far as the
+	// trimming is concerned.
+	trim := cfg.Trim
+
+	switch cmd {
+	case "disasm":
+		trim = false
+		cfg.Granularity = "addresses"
+		// Force the 'noinlines' mode so that source locations for a given address
+		// collapse and there is only one for the given address. Without this
+		// cumulative metrics would be double-counted when annotating the assembly.
+		// This is because the merge is done by address and in case of an inlined
+		// stack each of the inlined entries is a separate callgraph node.
+		cfg.NoInlines = true
+	case "weblist":
+		trim = false
+		cfg.Granularity = "addresses"
+		cfg.NoInlines = false // Need inline info to support call expansion
+	case "peek":
+		trim = false
+	case "list":
+		trim = false
+		cfg.Granularity = "lines"
+		// Do not force 'noinlines' to be false so that specifying
+		// "-list foo -noinlines" is supported and works as expected.
+	case "text", "top", "topproto":
+		if cfg.NodeCount == -1 {
+			cfg.NodeCount = 0
+		}
+	default:
+		if cfg.NodeCount == -1 {
+			cfg.NodeCount = 80
+		}
+	}
+
+	switch outputFormat {
+	case report.Proto, report.Raw, report.Callgrind:
+		trim = false
+		cfg.Granularity = "addresses"
+	}
+
+	if !trim {
+		cfg.NodeCount = 0
+		cfg.NodeFraction = 0
+		cfg.EdgeFraction = 0
+	}
+	return cfg
+}
+
+// generateTagRootsLeaves generates extra nodes from the tagroot and tagleaf options.
+func generateTagRootsLeaves(prof *profile.Profile, cfg config, ui plugin.UI) { + tagRootLabelKeys := dropEmptyStrings(strings.Split(cfg.TagRoot, ",")) + tagLeafLabelKeys := dropEmptyStrings(strings.Split(cfg.TagLeaf, ",")) + rootm, leafm := addLabelNodes(prof, tagRootLabelKeys, tagLeafLabelKeys, cfg.Unit) + warnNoMatches(cfg.TagRoot == "" || rootm, "TagRoot", ui) + warnNoMatches(cfg.TagLeaf == "" || leafm, "TagLeaf", ui) +} + +// dropEmptyStrings filters a slice to only non-empty strings +func dropEmptyStrings(in []string) (out []string) { + for _, s := range in { + if s != "" { + out = append(out, s) + } + } + return +} + +func aggregate(prof *profile.Profile, cfg config) error { + var function, filename, linenumber, address bool + inlines := !cfg.NoInlines + switch cfg.Granularity { + case "addresses": + if inlines { + return nil + } + function = true + filename = true + linenumber = true + address = true + case "lines": + function = true + filename = true + linenumber = true + case "files": + filename = true + case "functions": + function = true + case "filefunctions": + function = true + filename = true + default: + return fmt.Errorf("unexpected granularity") + } + return prof.Aggregate(inlines, function, filename, linenumber, address) +} + +func reportOptions(p *profile.Profile, numLabelUnits map[string]string, cfg config) (*report.Options, error) { + si, mean := cfg.SampleIndex, cfg.Mean + value, meanDiv, sample, err := sampleFormat(p, si, mean) + if err != nil { + return nil, err + } + + stype := sample.Type + if mean { + stype = "mean_" + stype + } + + if cfg.DivideBy == 0 { + return nil, fmt.Errorf("zero divisor specified") + } + + var filters []string + addFilter := func(k string, v string) { + if v != "" { + filters = append(filters, k+"="+v) + } + } + addFilter("focus", cfg.Focus) + addFilter("ignore", cfg.Ignore) + addFilter("hide", cfg.Hide) + addFilter("show", cfg.Show) + addFilter("show_from", cfg.ShowFrom) + addFilter("tagfocus", cfg.TagFocus) + 
addFilter("tagignore", cfg.TagIgnore) + addFilter("tagshow", cfg.TagShow) + addFilter("taghide", cfg.TagHide) + + ropt := &report.Options{ + CumSort: cfg.Sort == "cum", + CallTree: cfg.CallTree, + DropNegative: cfg.DropNegative, + + CompactLabels: cfg.CompactLabels, + Ratio: 1 / cfg.DivideBy, + + NodeCount: cfg.NodeCount, + NodeFraction: cfg.NodeFraction, + EdgeFraction: cfg.EdgeFraction, + + ActiveFilters: filters, + NumLabelUnits: numLabelUnits, + + SampleValue: value, + SampleMeanDivisor: meanDiv, + SampleType: stype, + SampleUnit: sample.Unit, + + OutputUnit: cfg.Unit, + + SourcePath: cfg.SourcePath, + TrimPath: cfg.TrimPath, + + IntelSyntax: cfg.IntelSyntax, + } + + if len(p.Mapping) > 0 && p.Mapping[0].File != "" { + ropt.Title = filepath.Base(p.Mapping[0].File) + } + + return ropt, nil +} + +// identifyNumLabelUnits returns a map of numeric label keys to the units +// associated with those keys. +func identifyNumLabelUnits(p *profile.Profile, ui plugin.UI) map[string]string { + numLabelUnits, ignoredUnits := p.NumLabelUnits() + + // Print errors for tags with multiple units associated with + // a single key. + for k, units := range ignoredUnits { + ui.PrintErr(fmt.Sprintf("For tag %s used unit %s, also encountered unit(s) %s", k, numLabelUnits[k], strings.Join(units, ", "))) + } + return numLabelUnits +} + +type sampleValueFunc func([]int64) int64 + +// sampleFormat returns a function to extract values out of a profile.Sample, +// and the type/units of those values. 
+func sampleFormat(p *profile.Profile, sampleIndex string, mean bool) (value, meanDiv sampleValueFunc, v *profile.ValueType, err error) { + if len(p.SampleType) == 0 { + return nil, nil, nil, fmt.Errorf("profile has no samples") + } + index, err := p.SampleIndexByName(sampleIndex) + if err != nil { + return nil, nil, nil, err + } + value = valueExtractor(index) + if mean { + meanDiv = valueExtractor(0) + } + v = p.SampleType[index] + return +} + +func valueExtractor(ix int) sampleValueFunc { + return func(v []int64) int64 { + return v[ix] + } +} + +// profileCopier can be used to obtain a fresh copy of a profile. +// It is useful since reporting code may mutate the profile handed to it. +type profileCopier []byte + +func makeProfileCopier(src *profile.Profile) profileCopier { + // Pre-serialize the profile. We will deserialize every time a fresh copy is needed. + var buf bytes.Buffer + src.WriteUncompressed(&buf) + return profileCopier(buf.Bytes()) +} + +// newCopy returns a new copy of the profile. +func (c profileCopier) newCopy() *profile.Profile { + p, err := profile.ParseUncompressed([]byte(c)) + if err != nil { + panic(err) + } + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go new file mode 100644 index 0000000000000000000000000000000000000000..fd05adb1469993d1214ddf576a652768b68c6199 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go @@ -0,0 +1,219 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/google/pprof/internal/measurement" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/profile" +) + +var tagFilterRangeRx = regexp.MustCompile("([+-]?[[:digit:]]+)([[:alpha:]]+)?") + +// applyFocus filters samples based on the focus/ignore options +func applyFocus(prof *profile.Profile, numLabelUnits map[string]string, cfg config, ui plugin.UI) error { + focus, err := compileRegexOption("focus", cfg.Focus, nil) + ignore, err := compileRegexOption("ignore", cfg.Ignore, err) + hide, err := compileRegexOption("hide", cfg.Hide, err) + show, err := compileRegexOption("show", cfg.Show, err) + showfrom, err := compileRegexOption("show_from", cfg.ShowFrom, err) + tagfocus, err := compileTagFilter("tagfocus", cfg.TagFocus, numLabelUnits, ui, err) + tagignore, err := compileTagFilter("tagignore", cfg.TagIgnore, numLabelUnits, ui, err) + prunefrom, err := compileRegexOption("prune_from", cfg.PruneFrom, err) + if err != nil { + return err + } + + fm, im, hm, hnm := prof.FilterSamplesByName(focus, ignore, hide, show) + warnNoMatches(focus == nil || fm, "Focus", ui) + warnNoMatches(ignore == nil || im, "Ignore", ui) + warnNoMatches(hide == nil || hm, "Hide", ui) + warnNoMatches(show == nil || hnm, "Show", ui) + + sfm := prof.ShowFrom(showfrom) + warnNoMatches(showfrom == nil || sfm, "ShowFrom", ui) + + tfm, tim := prof.FilterSamplesByTag(tagfocus, tagignore) + warnNoMatches(tagfocus == nil || tfm, "TagFocus", ui) + warnNoMatches(tagignore == nil || tim, 
"TagIgnore", ui) + + tagshow, err := compileRegexOption("tagshow", cfg.TagShow, err) + taghide, err := compileRegexOption("taghide", cfg.TagHide, err) + tns, tnh := prof.FilterTagsByName(tagshow, taghide) + warnNoMatches(tagshow == nil || tns, "TagShow", ui) + warnNoMatches(taghide == nil || tnh, "TagHide", ui) + + if prunefrom != nil { + prof.PruneFrom(prunefrom) + } + return err +} + +func compileRegexOption(name, value string, err error) (*regexp.Regexp, error) { + if value == "" || err != nil { + return nil, err + } + rx, err := regexp.Compile(value) + if err != nil { + return nil, fmt.Errorf("parsing %s regexp: %v", name, err) + } + return rx, nil +} + +func compileTagFilter(name, value string, numLabelUnits map[string]string, ui plugin.UI, err error) (func(*profile.Sample) bool, error) { + if value == "" || err != nil { + return nil, err + } + + tagValuePair := strings.SplitN(value, "=", 2) + var wantKey string + if len(tagValuePair) == 2 { + wantKey = tagValuePair[0] + value = tagValuePair[1] + } + + if numFilter := parseTagFilterRange(value); numFilter != nil { + ui.PrintErr(name, ":Interpreted '", value, "' as range, not regexp") + labelFilter := func(vals []int64, unit string) bool { + for _, val := range vals { + if numFilter(val, unit) { + return true + } + } + return false + } + numLabelUnit := func(key string) string { + return numLabelUnits[key] + } + if wantKey == "" { + return func(s *profile.Sample) bool { + for key, vals := range s.NumLabel { + if labelFilter(vals, numLabelUnit(key)) { + return true + } + } + return false + }, nil + } + return func(s *profile.Sample) bool { + if vals, ok := s.NumLabel[wantKey]; ok { + return labelFilter(vals, numLabelUnit(wantKey)) + } + return false + }, nil + } + + var rfx []*regexp.Regexp + for _, tagf := range strings.Split(value, ",") { + fx, err := regexp.Compile(tagf) + if err != nil { + return nil, fmt.Errorf("parsing %s regexp: %v", name, err) + } + rfx = append(rfx, fx) + } + if wantKey == "" { + return 
func(s *profile.Sample) bool { + matchedrx: + for _, rx := range rfx { + for key, vals := range s.Label { + for _, val := range vals { + // TODO: Match against val, not key:val in future + if rx.MatchString(key + ":" + val) { + continue matchedrx + } + } + } + return false + } + return true + }, nil + } + return func(s *profile.Sample) bool { + if vals, ok := s.Label[wantKey]; ok { + for _, rx := range rfx { + for _, val := range vals { + if rx.MatchString(val) { + return true + } + } + } + } + return false + }, nil +} + +// parseTagFilterRange returns a function to checks if a value is +// contained on the range described by a string. It can recognize +// strings of the form: +// "32kb" -- matches values == 32kb +// ":64kb" -- matches values <= 64kb +// "4mb:" -- matches values >= 4mb +// "12kb:64mb" -- matches values between 12kb and 64mb (both included). +func parseTagFilterRange(filter string) func(int64, string) bool { + ranges := tagFilterRangeRx.FindAllStringSubmatch(filter, 2) + if len(ranges) == 0 { + return nil // No ranges were identified + } + v, err := strconv.ParseInt(ranges[0][1], 10, 64) + if err != nil { + panic(fmt.Errorf("failed to parse int %s: %v", ranges[0][1], err)) + } + scaledValue, unit := measurement.Scale(v, ranges[0][2], ranges[0][2]) + if len(ranges) == 1 { + switch match := ranges[0][0]; filter { + case match: + return func(v int64, u string) bool { + sv, su := measurement.Scale(v, u, unit) + return su == unit && sv == scaledValue + } + case match + ":": + return func(v int64, u string) bool { + sv, su := measurement.Scale(v, u, unit) + return su == unit && sv >= scaledValue + } + case ":" + match: + return func(v int64, u string) bool { + sv, su := measurement.Scale(v, u, unit) + return su == unit && sv <= scaledValue + } + } + return nil + } + if filter != ranges[0][0]+":"+ranges[1][0] { + return nil + } + if v, err = strconv.ParseInt(ranges[1][1], 10, 64); err != nil { + panic(fmt.Errorf("failed to parse int %s: %v", ranges[1][1], 
err)) + } + scaledValue2, unit2 := measurement.Scale(v, ranges[1][2], unit) + if unit != unit2 { + return nil + } + return func(v int64, u string) bool { + sv, su := measurement.Scale(v, u, unit) + return su == unit && sv >= scaledValue && sv <= scaledValue2 + } +} + +func warnNoMatches(match bool, option string, ui plugin.UI) { + if !match { + ui.PrintErr(option + " expression matched no samples") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go new file mode 100644 index 0000000000000000000000000000000000000000..584c5d85e0e123c1c8dae0190054ca7bf15a78da --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go @@ -0,0 +1,616 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/google/pprof/internal/measurement" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/profile" +) + +// fetchProfiles fetches and symbolizes the profiles specified by s. +// It will merge all the profiles it is able to retrieve, even if +// there are some failures. 
It will return an error if it is unable to +// fetch any profiles. +func fetchProfiles(s *source, o *plugin.Options) (*profile.Profile, error) { + sources := make([]profileSource, 0, len(s.Sources)) + for _, src := range s.Sources { + sources = append(sources, profileSource{ + addr: src, + source: s, + }) + } + + bases := make([]profileSource, 0, len(s.Base)) + for _, src := range s.Base { + bases = append(bases, profileSource{ + addr: src, + source: s, + }) + } + + p, pbase, m, mbase, save, err := grabSourcesAndBases(sources, bases, o.Fetch, o.Obj, o.UI, o.HTTPTransport) + if err != nil { + return nil, err + } + + if pbase != nil { + if s.DiffBase { + pbase.SetLabel("pprof::base", []string{"true"}) + } + if s.Normalize { + err := p.Normalize(pbase) + if err != nil { + return nil, err + } + } + pbase.Scale(-1) + p, m, err = combineProfiles([]*profile.Profile{p, pbase}, []plugin.MappingSources{m, mbase}) + if err != nil { + return nil, err + } + } + + // Symbolize the merged profile. + if err := o.Sym.Symbolize(s.Symbolize, m, p); err != nil { + return nil, err + } + p.RemoveUninteresting() + unsourceMappings(p) + + if s.Comment != "" { + p.Comments = append(p.Comments, s.Comment) + } + + // Save a copy of the merged profile if there is at least one remote source. + if save { + dir, err := setTmpDir(o.UI) + if err != nil { + return nil, err + } + + prefix := "pprof." + if len(p.Mapping) > 0 && p.Mapping[0].File != "" { + prefix += filepath.Base(p.Mapping[0].File) + "." + } + for _, s := range p.SampleType { + prefix += s.Type + "." 
+ } + + tempFile, err := newTempFile(dir, prefix, ".pb.gz") + if err == nil { + if err = p.Write(tempFile); err == nil { + o.UI.PrintErr("Saved profile in ", tempFile.Name()) + } + } + if err != nil { + o.UI.PrintErr("Could not save profile: ", err) + } + } + + if err := p.CheckValid(); err != nil { + return nil, err + } + + return p, nil +} + +func grabSourcesAndBases(sources, bases []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (*profile.Profile, *profile.Profile, plugin.MappingSources, plugin.MappingSources, bool, error) { + wg := sync.WaitGroup{} + wg.Add(2) + var psrc, pbase *profile.Profile + var msrc, mbase plugin.MappingSources + var savesrc, savebase bool + var errsrc, errbase error + var countsrc, countbase int + go func() { + defer wg.Done() + psrc, msrc, savesrc, countsrc, errsrc = chunkedGrab(sources, fetch, obj, ui, tr) + }() + go func() { + defer wg.Done() + pbase, mbase, savebase, countbase, errbase = chunkedGrab(bases, fetch, obj, ui, tr) + }() + wg.Wait() + save := savesrc || savebase + + if errsrc != nil { + return nil, nil, nil, nil, false, fmt.Errorf("problem fetching source profiles: %v", errsrc) + } + if errbase != nil { + return nil, nil, nil, nil, false, fmt.Errorf("problem fetching base profiles: %v,", errbase) + } + if countsrc == 0 { + return nil, nil, nil, nil, false, fmt.Errorf("failed to fetch any source profiles") + } + if countbase == 0 && len(bases) > 0 { + return nil, nil, nil, nil, false, fmt.Errorf("failed to fetch any base profiles") + } + if want, got := len(sources), countsrc; want != got { + ui.PrintErr(fmt.Sprintf("Fetched %d source profiles out of %d", got, want)) + } + if want, got := len(bases), countbase; want != got { + ui.PrintErr(fmt.Sprintf("Fetched %d base profiles out of %d", got, want)) + } + + return psrc, pbase, msrc, mbase, save, nil +} + +// chunkedGrab fetches the profiles described in source and merges them into +// a single profile. 
It fetches a chunk of profiles concurrently, with a maximum +// chunk size to limit its memory usage. +func chunkedGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (*profile.Profile, plugin.MappingSources, bool, int, error) { + const chunkSize = 128 + + var p *profile.Profile + var msrc plugin.MappingSources + var save bool + var count int + + for start := 0; start < len(sources); start += chunkSize { + end := start + chunkSize + if end > len(sources) { + end = len(sources) + } + chunkP, chunkMsrc, chunkSave, chunkCount, chunkErr := concurrentGrab(sources[start:end], fetch, obj, ui, tr) + switch { + case chunkErr != nil: + return nil, nil, false, 0, chunkErr + case chunkP == nil: + continue + case p == nil: + p, msrc, save, count = chunkP, chunkMsrc, chunkSave, chunkCount + default: + p, msrc, chunkErr = combineProfiles([]*profile.Profile{p, chunkP}, []plugin.MappingSources{msrc, chunkMsrc}) + if chunkErr != nil { + return nil, nil, false, 0, chunkErr + } + if chunkSave { + save = true + } + count += chunkCount + } + } + + return p, msrc, save, count, nil +} + +// concurrentGrab fetches multiple profiles concurrently +func concurrentGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (*profile.Profile, plugin.MappingSources, bool, int, error) { + wg := sync.WaitGroup{} + wg.Add(len(sources)) + for i := range sources { + go func(s *profileSource) { + defer wg.Done() + s.p, s.msrc, s.remote, s.err = grabProfile(s.source, s.addr, fetch, obj, ui, tr) + }(&sources[i]) + } + wg.Wait() + + var save bool + profiles := make([]*profile.Profile, 0, len(sources)) + msrcs := make([]plugin.MappingSources, 0, len(sources)) + for i := range sources { + s := &sources[i] + if err := s.err; err != nil { + ui.PrintErr(s.addr + ": " + err.Error()) + continue + } + save = save || s.remote + profiles = append(profiles, s.p) + msrcs = append(msrcs, s.msrc) + *s = 
profileSource{} + } + + if len(profiles) == 0 { + return nil, nil, false, 0, nil + } + + p, msrc, err := combineProfiles(profiles, msrcs) + if err != nil { + return nil, nil, false, 0, err + } + return p, msrc, save, len(profiles), nil +} + +func combineProfiles(profiles []*profile.Profile, msrcs []plugin.MappingSources) (*profile.Profile, plugin.MappingSources, error) { + // Merge profiles. + // + // The merge call below only treats exactly matching sample type lists as + // compatible and will fail otherwise. Make the profiles' sample types + // compatible for the merge, see CompatibilizeSampleTypes() doc for details. + if err := profile.CompatibilizeSampleTypes(profiles); err != nil { + return nil, nil, err + } + if err := measurement.ScaleProfiles(profiles); err != nil { + return nil, nil, err + } + + // Avoid expensive work for the common case of a single profile/src. + if len(profiles) == 1 && len(msrcs) == 1 { + return profiles[0], msrcs[0], nil + } + + p, err := profile.Merge(profiles) + if err != nil { + return nil, nil, err + } + + // Combine mapping sources. + msrc := make(plugin.MappingSources) + for _, ms := range msrcs { + for m, s := range ms { + msrc[m] = append(msrc[m], s...) + } + } + return p, msrc, nil +} + +type profileSource struct { + addr string + source *source + + p *profile.Profile + msrc plugin.MappingSources + remote bool + err error +} + +func homeEnv() string { + switch runtime.GOOS { + case "windows": + return "USERPROFILE" + case "plan9": + return "home" + default: + return "HOME" + } +} + +// setTmpDir prepares the directory to use to save profiles retrieved +// remotely. It is selected from PPROF_TMPDIR, defaults to $HOME/pprof, and, if +// $HOME is not set, falls back to os.TempDir(). 
+func setTmpDir(ui plugin.UI) (string, error) { + var dirs []string + if profileDir := os.Getenv("PPROF_TMPDIR"); profileDir != "" { + dirs = append(dirs, profileDir) + } + if homeDir := os.Getenv(homeEnv()); homeDir != "" { + dirs = append(dirs, filepath.Join(homeDir, "pprof")) + } + dirs = append(dirs, os.TempDir()) + for _, tmpDir := range dirs { + if err := os.MkdirAll(tmpDir, 0755); err != nil { + ui.PrintErr("Could not use temp dir ", tmpDir, ": ", err.Error()) + continue + } + return tmpDir, nil + } + return "", fmt.Errorf("failed to identify temp dir") +} + +const testSourceAddress = "pproftest.local" + +// grabProfile fetches a profile. Returns the profile, sources for the +// profile mappings, a bool indicating if the profile was fetched +// remotely, and an error. +func grabProfile(s *source, source string, fetcher plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (p *profile.Profile, msrc plugin.MappingSources, remote bool, err error) { + var src string + duration, timeout := time.Duration(s.Seconds)*time.Second, time.Duration(s.Timeout)*time.Second + if fetcher != nil { + p, src, err = fetcher.Fetch(source, duration, timeout) + if err != nil { + return + } + } + if err != nil || p == nil { + // Fetch the profile over HTTP or from a file. + p, src, err = fetch(source, duration, timeout, ui, tr) + if err != nil { + return + } + } + + if err = p.CheckValid(); err != nil { + return + } + + // Update the binary locations from command line and paths. + locateBinaries(p, s, obj, ui) + + // Collect the source URL for all mappings. + if src != "" { + msrc = collectMappingSources(p, src) + remote = true + if strings.HasPrefix(src, "http://"+testSourceAddress) { + // Treat test inputs as local to avoid saving + // testcase profiles during driver testing. + remote = false + } + } + return +} + +// collectMappingSources saves the mapping sources of a profile. 
+func collectMappingSources(p *profile.Profile, source string) plugin.MappingSources { + ms := plugin.MappingSources{} + for _, m := range p.Mapping { + src := struct { + Source string + Start uint64 + }{ + source, m.Start, + } + key := m.BuildID + if key == "" { + key = m.File + } + if key == "" { + // If there is no build id or source file, use the source as the + // mapping file. This will enable remote symbolization for this + // mapping, in particular for Go profiles on the legacy format. + // The source is reset back to empty string by unsourceMapping + // which is called after symbolization is finished. + m.File = source + key = source + } + ms[key] = append(ms[key], src) + } + return ms +} + +// unsourceMappings iterates over the mappings in a profile and replaces file +// set to the remote source URL by collectMappingSources back to empty string. +func unsourceMappings(p *profile.Profile) { + for _, m := range p.Mapping { + if m.BuildID == "" && filepath.VolumeName(m.File) == "" { + if u, err := url.Parse(m.File); err == nil && u.IsAbs() { + m.File = "" + } + } + } +} + +// locateBinaries searches for binary files listed in the profile and, if found, +// updates the profile accordingly. 
+func locateBinaries(p *profile.Profile, s *source, obj plugin.ObjTool, ui plugin.UI) { + // Construct search path to examine + searchPath := os.Getenv("PPROF_BINARY_PATH") + if searchPath == "" { + // Use $HOME/pprof/binaries as default directory for local symbolization binaries + searchPath = filepath.Join(os.Getenv(homeEnv()), "pprof", "binaries") + } +mapping: + for _, m := range p.Mapping { + var noVolumeFile string + var baseName string + var dirName string + if m.File != "" { + noVolumeFile = strings.TrimPrefix(m.File, filepath.VolumeName(m.File)) + baseName = filepath.Base(m.File) + dirName = filepath.Dir(noVolumeFile) + } + + for _, path := range filepath.SplitList(searchPath) { + var fileNames []string + if m.BuildID != "" { + fileNames = []string{filepath.Join(path, m.BuildID, baseName)} + if matches, err := filepath.Glob(filepath.Join(path, m.BuildID, "*")); err == nil { + fileNames = append(fileNames, matches...) + } + fileNames = append(fileNames, filepath.Join(path, noVolumeFile, m.BuildID)) // perf path format + // Llvm buildid protocol: the first two characters of the build id + // are used as directory, and the remaining part is in the filename. + // e.g. `/ab/cdef0123456.debug` + fileNames = append(fileNames, filepath.Join(path, m.BuildID[:2], m.BuildID[2:]+".debug")) + } + if m.File != "" { + // Try both the basename and the full path, to support the same directory + // structure as the perf symfs option. 
+ fileNames = append(fileNames, filepath.Join(path, baseName)) + fileNames = append(fileNames, filepath.Join(path, noVolumeFile)) + // Other locations: use the same search paths as GDB, according to + // https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html + fileNames = append(fileNames, filepath.Join(path, noVolumeFile+".debug")) + fileNames = append(fileNames, filepath.Join(path, dirName, ".debug", baseName+".debug")) + fileNames = append(fileNames, filepath.Join(path, "usr", "lib", "debug", dirName, baseName+".debug")) + } + for _, name := range fileNames { + if f, err := obj.Open(name, m.Start, m.Limit, m.Offset, m.KernelRelocationSymbol); err == nil { + defer f.Close() + fileBuildID := f.BuildID() + if m.BuildID != "" && m.BuildID != fileBuildID { + ui.PrintErr("Ignoring local file " + name + ": build-id mismatch (" + m.BuildID + " != " + fileBuildID + ")") + } else { + // Explicitly do not update KernelRelocationSymbol -- + // the new local file name is most likely missing it. + m.File = name + continue mapping + } + } + } + } + } + if len(p.Mapping) == 0 { + // If there are no mappings, add a fake mapping to attempt symbolization. + // This is useful for some profiles generated by the golang runtime, which + // do not include any mappings. Symbolization with a fake mapping will only + // be successful against a non-PIE binary. + m := &profile.Mapping{ID: 1} + p.Mapping = []*profile.Mapping{m} + for _, l := range p.Location { + l.Mapping = m + } + } + // If configured, apply executable filename override and (maybe, see below) + // build ID override from source. Assume the executable is the first mapping. + if execName, buildID := s.ExecName, s.BuildID; execName != "" || buildID != "" { + m := p.Mapping[0] + if execName != "" { + // Explicitly do not update KernelRelocationSymbol -- + // the source override is most likely missing it. 
+ m.File = execName + } + // Only apply the build ID override if the build ID in the main mapping is + // missing. Overwriting the build ID in case it's present is very likely a + // wrong thing to do so we refuse to do that. + if buildID != "" && m.BuildID == "" { + m.BuildID = buildID + } + } +} + +// fetch fetches a profile from source, within the timeout specified, +// producing messages through the ui. It returns the profile and the +// url of the actual source of the profile for remote profiles. +func fetch(source string, duration, timeout time.Duration, ui plugin.UI, tr http.RoundTripper) (p *profile.Profile, src string, err error) { + var f io.ReadCloser + + if sourceURL, timeout := adjustURL(source, duration, timeout); sourceURL != "" { + ui.Print("Fetching profile over HTTP from " + sourceURL) + if duration > 0 { + ui.Print(fmt.Sprintf("Please wait... (%v)", duration)) + } + f, err = fetchURL(sourceURL, timeout, tr) + src = sourceURL + } else if isPerfFile(source) { + f, err = convertPerfData(source, ui) + } else { + f, err = os.Open(source) + } + if err == nil { + defer f.Close() + p, err = profile.Parse(f) + } + return +} + +// fetchURL fetches a profile from a URL using HTTP. 
+func fetchURL(source string, timeout time.Duration, tr http.RoundTripper) (io.ReadCloser, error) { + client := &http.Client{ + Transport: tr, + Timeout: timeout + 5*time.Second, + } + resp, err := client.Get(source) + if err != nil { + return nil, fmt.Errorf("http fetch: %v", err) + } + if resp.StatusCode != http.StatusOK { + defer resp.Body.Close() + return nil, statusCodeError(resp) + } + + return resp.Body, nil +} + +func statusCodeError(resp *http.Response) error { + if resp.Header.Get("X-Go-Pprof") != "" && strings.Contains(resp.Header.Get("Content-Type"), "text/plain") { + // error is from pprof endpoint + if body, err := io.ReadAll(resp.Body); err == nil { + return fmt.Errorf("server response: %s - %s", resp.Status, body) + } + } + return fmt.Errorf("server response: %s", resp.Status) +} + +// isPerfFile checks if a file is in perf.data format. It also returns false +// if it encounters an error during the check. +func isPerfFile(path string) bool { + sourceFile, openErr := os.Open(path) + if openErr != nil { + return false + } + defer sourceFile.Close() + + // If the file is the output of a perf record command, it should begin + // with the string PERFILE2. + perfHeader := []byte("PERFILE2") + actualHeader := make([]byte, len(perfHeader)) + if _, readErr := sourceFile.Read(actualHeader); readErr != nil { + return false + } + return bytes.Equal(actualHeader, perfHeader) +} + +// convertPerfData converts the file at path which should be in perf.data format +// using the perf_to_profile tool and returns the file containing the +// profile.proto formatted data. +func convertPerfData(perfPath string, ui plugin.UI) (*os.File, error) { + ui.Print(fmt.Sprintf( + "Converting %s to a profile.proto... 
(May take a few minutes)", + perfPath)) + profile, err := newTempFile(os.TempDir(), "pprof_", ".pb.gz") + if err != nil { + return nil, err + } + deferDeleteTempFile(profile.Name()) + cmd := exec.Command("perf_to_profile", "-i", perfPath, "-o", profile.Name(), "-f") + cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr + if err := cmd.Run(); err != nil { + profile.Close() + return nil, fmt.Errorf("failed to convert perf.data file. Try github.com/google/perf_data_converter: %v", err) + } + return profile, nil +} + +// adjustURL validates if a profile source is a URL and returns an +// cleaned up URL and the timeout to use for retrieval over HTTP. +// If the source cannot be recognized as a URL it returns an empty string. +func adjustURL(source string, duration, timeout time.Duration) (string, time.Duration) { + u, err := url.Parse(source) + if err != nil || (u.Host == "" && u.Scheme != "" && u.Scheme != "file") { + // Try adding http:// to catch sources of the form hostname:port/path. + // url.Parse treats "hostname" as the scheme. + u, err = url.Parse("http://" + source) + } + if err != nil || u.Host == "" { + return "", 0 + } + + // Apply duration/timeout overrides to URL. 
+ values := u.Query() + if duration > 0 { + values.Set("seconds", fmt.Sprint(int(duration.Seconds()))) + } else { + if urlSeconds := values.Get("seconds"); urlSeconds != "" { + if us, err := strconv.ParseInt(urlSeconds, 10, 32); err == nil { + duration = time.Duration(us) * time.Second + } + } + } + if timeout <= 0 { + if duration > 0 { + timeout = duration + duration/2 + } else { + timeout = 60 * time.Second + } + } + u.RawQuery = values.Encode() + return u.String(), timeout +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/flags.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/flags.go new file mode 100644 index 0000000000000000000000000000000000000000..539031916630b66bf0c01e1f74f293e5a28a0d2b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/flags.go @@ -0,0 +1,71 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "flag" + "strings" +) + +// GoFlags implements the plugin.FlagSet interface. +type GoFlags struct { + UsageMsgs []string +} + +// Bool implements the plugin.FlagSet interface. +func (*GoFlags) Bool(o string, d bool, c string) *bool { + return flag.Bool(o, d, c) +} + +// Int implements the plugin.FlagSet interface. 
+func (*GoFlags) Int(o string, d int, c string) *int { + return flag.Int(o, d, c) +} + +// Float64 implements the plugin.FlagSet interface. +func (*GoFlags) Float64(o string, d float64, c string) *float64 { + return flag.Float64(o, d, c) +} + +// String implements the plugin.FlagSet interface. +func (*GoFlags) String(o, d, c string) *string { + return flag.String(o, d, c) +} + +// StringList implements the plugin.FlagSet interface. +func (*GoFlags) StringList(o, d, c string) *[]*string { + return &[]*string{flag.String(o, d, c)} +} + +// ExtraUsage implements the plugin.FlagSet interface. +func (f *GoFlags) ExtraUsage() string { + return strings.Join(f.UsageMsgs, "\n") +} + +// AddExtraUsage implements the plugin.FlagSet interface. +func (f *GoFlags) AddExtraUsage(eu string) { + f.UsageMsgs = append(f.UsageMsgs, eu) +} + +// Parse implements the plugin.FlagSet interface. +func (*GoFlags) Parse(usage func()) []string { + flag.Usage = usage + flag.Parse() + args := flag.Args() + if len(args) == 0 { + usage() + } + return args +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go new file mode 100644 index 0000000000000000000000000000000000000000..fbeb765dbcb227a249211627b8d99e00e4aa8f28 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go @@ -0,0 +1,106 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "encoding/json" + "html/template" + "net/http" + "strings" + + "github.com/google/pprof/internal/graph" + "github.com/google/pprof/internal/measurement" + "github.com/google/pprof/internal/report" +) + +type treeNode struct { + Name string `json:"n"` + FullName string `json:"f"` + Cum int64 `json:"v"` + CumFormat string `json:"l"` + Percent string `json:"p"` + Children []*treeNode `json:"c"` +} + +// flamegraph generates a web page containing a flamegraph. +func (ui *webInterface) flamegraph(w http.ResponseWriter, req *http.Request) { + // Force the call tree so that the graph is a tree. + // Also do not trim the tree so that the flame graph contains all functions. + rpt, errList := ui.makeReport(w, req, []string{"svg"}, func(cfg *config) { + cfg.CallTree = true + cfg.Trim = false + }) + if rpt == nil { + return // error already reported + } + + // Generate dot graph. + g, config := report.GetDOT(rpt) + var nodes []*treeNode + nroots := 0 + rootValue := int64(0) + nodeArr := []string{} + nodeMap := map[*graph.Node]*treeNode{} + // Make all nodes and the map, collect the roots. 
+ for _, n := range g.Nodes { + v := n.CumValue() + fullName := n.Info.PrintableName() + node := &treeNode{ + Name: graph.ShortenFunctionName(fullName), + FullName: fullName, + Cum: v, + CumFormat: config.FormatValue(v), + Percent: strings.TrimSpace(measurement.Percentage(v, config.Total)), + } + nodes = append(nodes, node) + if len(n.In) == 0 { + nodes[nroots], nodes[len(nodes)-1] = nodes[len(nodes)-1], nodes[nroots] + nroots++ + rootValue += v + } + nodeMap[n] = node + // Get all node names into an array. + nodeArr = append(nodeArr, n.Info.Name) + } + // Populate the child links. + for _, n := range g.Nodes { + node := nodeMap[n] + for child := range n.Out { + node.Children = append(node.Children, nodeMap[child]) + } + } + + rootNode := &treeNode{ + Name: "root", + FullName: "root", + Cum: rootValue, + CumFormat: config.FormatValue(rootValue), + Percent: strings.TrimSpace(measurement.Percentage(rootValue, config.Total)), + Children: nodes[0:nroots], + } + + // JSON marshalling flame graph + b, err := json.Marshal(rootNode) + if err != nil { + http.Error(w, "error serializing flame graph", http.StatusInternalServerError) + ui.options.UI.PrintErr(err) + return + } + + ui.render(w, req, "flamegraph", rpt, errList, config.Labels, webArgs{ + FlameGraph: template.JS(b), + Nodes: nodeArr, + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.css b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.css new file mode 100644 index 0000000000000000000000000000000000000000..e0de53c1e1406c3c639902d7d302b6d5d7269730 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.css @@ -0,0 +1,273 @@ +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} +html, body { + height: 100%; +} +body { + font-family: 'Roboto', -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color 
Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; + font-size: 13px; + line-height: 1.4; + display: flex; + flex-direction: column; +} +a { + color: #2a66d9; +} +.header { + display: flex; + align-items: center; + height: 44px; + min-height: 44px; + background-color: #eee; + color: #212121; + padding: 0 1rem; +} +.header > div { + margin: 0 0.125em; +} +.header .title h1 { + font-size: 1.75em; + margin-right: 1rem; + margin-bottom: 4px; +} +.header .title a { + color: #212121; + text-decoration: none; +} +.header .title a:hover { + text-decoration: underline; +} +.header .description { + width: 100%; + text-align: right; + white-space: nowrap; +} +@media screen and (max-width: 799px) { + .header input { + display: none; + } +} +#detailsbox { + display: none; + z-index: 1; + position: fixed; + top: 40px; + right: 20px; + background-color: #ffffff; + box-shadow: 0 1px 5px rgba(0,0,0,.3); + line-height: 24px; + padding: 1em; + text-align: left; +} +.header input { + background: white url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' style='pointer-events:none;display:block;width:100%25;height:100%25;fill:%23757575'%3E%3Cpath d='M15.5 14h-.79l-.28-.27C15.41 12.59 16 11.11 16 9.5 16 5.91 13.09 3 9.5 3S3 5.91 3 9.5 5.91 16 9.5 16c1.61.0 3.09-.59 4.23-1.57l.27.28v.79l5 4.99L20.49 19l-4.99-5zm-6 0C7.01 14 5 11.99 5 9.5S7.01 5 9.5 5 14 7.01 14 9.5 11.99 14 9.5 14z'/%3E%3C/svg%3E") no-repeat 4px center/20px 20px; + border: 1px solid #d1d2d3; + border-radius: 2px 0 0 2px; + padding: 0.25em; + padding-left: 28px; + margin-left: 1em; + font-family: 'Roboto', 'Noto', sans-serif; + font-size: 1em; + line-height: 24px; + color: #212121; +} +.downArrow { + border-top: .36em solid #ccc; + border-left: .36em solid transparent; + border-right: .36em solid transparent; + margin-bottom: .05em; + margin-left: .5em; + transition: border-top-color 200ms; +} +.menu-item { + height: 100%; + text-transform: uppercase; + font-family: 'Roboto Medium', 
-apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; + position: relative; +} +.menu-item .menu-name:hover { + opacity: 0.75; +} +.menu-item .menu-name:hover .downArrow { + border-top-color: #666; +} +.menu-name { + height: 100%; + padding: 0 0.5em; + display: flex; + align-items: center; + justify-content: center; +} +.menu-name a { + text-decoration: none; + color: #212121; +} +.submenu { + display: none; + z-index: 1; + margin-top: -4px; + min-width: 10em; + position: absolute; + left: 0px; + background-color: white; + box-shadow: 0 1px 5px rgba(0,0,0,.3); + font-size: 100%; + text-transform: none; + white-space: nowrap; +} +.menu-item, .submenu { + user-select: none; + -moz-user-select: none; + -ms-user-select: none; + -webkit-user-select: none; +} +.submenu hr { + border: 0; + border-top: 2px solid #eee; +} +.submenu a { + display: block; + padding: .5em 1em; + text-decoration: none; +} +.submenu a:hover, .submenu a.active { + color: white; + background-color: #6b82d6; +} +.submenu a.disabled { + color: gray; + pointer-events: none; +} +.menu-check-mark { + position: absolute; + left: 2px; +} +.menu-delete-btn { + position: absolute; + right: 2px; +} + +{{/* Used to disable events when a modal dialog is displayed */}} +#dialog-overlay { + display: none; + position: fixed; + left: 0px; + top: 0px; + width: 100%; + height: 100%; + background-color: rgba(1,1,1,0.1); +} + +.dialog { + {{/* Displayed centered horizontally near the top */}} + display: none; + position: fixed; + margin: 0px; + top: 60px; + left: 50%; + transform: translateX(-50%); + + z-index: 3; + font-size: 125%; + background-color: #ffffff; + box-shadow: 0 1px 5px rgba(0,0,0,.3); +} +.dialog-header { + font-size: 120%; + border-bottom: 1px solid #CCCCCC; + width: 100%; + text-align: center; + background: #EEEEEE; + user-select: none; +} +.dialog-footer { + border-top: 1px solid #CCCCCC; + width: 100%; + text-align: 
right; + padding: 10px; +} +.dialog-error { + margin: 10px; + color: red; +} +.dialog input { + margin: 10px; + font-size: inherit; +} +.dialog button { + margin-left: 10px; + font-size: inherit; +} +#save-dialog, #delete-dialog { + width: 50%; + max-width: 20em; +} +#delete-prompt { + padding: 10px; +} + +#content { + overflow-y: scroll; + padding: 1em; +} +#top { + overflow-y: scroll; +} +#graph { + overflow: hidden; +} +#graph svg { + width: 100%; + height: auto; + padding: 10px; +} +#content.source .filename { + margin-top: 0; + margin-bottom: 1em; + font-size: 120%; +} +#content.source pre { + margin-bottom: 3em; +} +table { + border-spacing: 0px; + width: 100%; + padding-bottom: 1em; + white-space: nowrap; +} +table thead { + font-family: 'Roboto Medium', -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; +} +table tr th { + position: sticky; + top: 0; + background-color: #ddd; + text-align: right; + padding: .3em .5em; +} +table tr td { + padding: .3em .5em; + text-align: right; +} +#top table tr th:nth-child(6), +#top table tr th:nth-child(7), +#top table tr td:nth-child(6), +#top table tr td:nth-child(7) { + text-align: left; +} +#top table tr td:nth-child(6) { + width: 100%; + text-overflow: ellipsis; + overflow: hidden; + white-space: nowrap; +} +#flathdr1, #flathdr2, #cumhdr1, #cumhdr2, #namehdr { + cursor: ns-resize; +} +.hilite { + background-color: #ebf5fb; + font-weight: bold; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js new file mode 100644 index 0000000000000000000000000000000000000000..ff980f66de4254e157241beb070f4e2ceca6cd67 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js @@ -0,0 +1,714 @@ +// Make svg pannable and 
zoomable. +// Call clickHandler(t) if a click event is caught by the pan event handlers. +function initPanAndZoom(svg, clickHandler) { + 'use strict'; + + // Current mouse/touch handling mode + const IDLE = 0; + const MOUSEPAN = 1; + const TOUCHPAN = 2; + const TOUCHZOOM = 3; + let mode = IDLE; + + // State needed to implement zooming. + let currentScale = 1.0; + const initWidth = svg.viewBox.baseVal.width; + const initHeight = svg.viewBox.baseVal.height; + + // State needed to implement panning. + let panLastX = 0; // Last event X coordinate + let panLastY = 0; // Last event Y coordinate + let moved = false; // Have we seen significant movement + let touchid = null; // Current touch identifier + + // State needed for pinch zooming + let touchid2 = null; // Second id for pinch zooming + let initGap = 1.0; // Starting gap between two touches + let initScale = 1.0; // currentScale when pinch zoom started + let centerPoint = null; // Center point for scaling + + // Convert event coordinates to svg coordinates. + function toSvg(x, y) { + const p = svg.createSVGPoint(); + p.x = x; + p.y = y; + let m = svg.getCTM(); + if (m == null) m = svg.getScreenCTM(); // Firefox workaround. + return p.matrixTransform(m.inverse()); + } + + // Change the scaling for the svg to s, keeping the point denoted + // by u (in svg coordinates]) fixed at the same screen location. + function rescale(s, u) { + // Limit to a good range. + if (s < 0.2) s = 0.2; + if (s > 10.0) s = 10.0; + + currentScale = s; + + // svg.viewBox defines the visible portion of the user coordinate + // system. So to magnify by s, divide the visible portion by s, + // which will then be stretched to fit the viewport. + const vb = svg.viewBox; + const w1 = vb.baseVal.width; + const w2 = initWidth / s; + const h1 = vb.baseVal.height; + const h2 = initHeight / s; + vb.baseVal.width = w2; + vb.baseVal.height = h2; + + // We also want to adjust vb.baseVal.x so that u.x remains at same + // screen X coordinate. 
In other words, want to change it from x1 to x2 + // so that: + // (u.x - x1) / w1 = (u.x - x2) / w2 + // Simplifying that, we get + // (u.x - x1) * (w2 / w1) = u.x - x2 + // x2 = u.x - (u.x - x1) * (w2 / w1) + vb.baseVal.x = u.x - (u.x - vb.baseVal.x) * (w2 / w1); + vb.baseVal.y = u.y - (u.y - vb.baseVal.y) * (h2 / h1); + } + + function handleWheel(e) { + if (e.deltaY == 0) return; + // Change scale factor by 1.1 or 1/1.1 + rescale(currentScale * (e.deltaY < 0 ? 1.1 : (1/1.1)), + toSvg(e.offsetX, e.offsetY)); + } + + function setMode(m) { + mode = m; + touchid = null; + touchid2 = null; + } + + function panStart(x, y) { + moved = false; + panLastX = x; + panLastY = y; + } + + function panMove(x, y) { + let dx = x - panLastX; + let dy = y - panLastY; + if (Math.abs(dx) <= 2 && Math.abs(dy) <= 2) return; // Ignore tiny moves + + moved = true; + panLastX = x; + panLastY = y; + + // Firefox workaround: get dimensions from parentNode. + const swidth = svg.clientWidth || svg.parentNode.clientWidth; + const sheight = svg.clientHeight || svg.parentNode.clientHeight; + + // Convert deltas from screen space to svg space. + dx *= (svg.viewBox.baseVal.width / swidth); + dy *= (svg.viewBox.baseVal.height / sheight); + + svg.viewBox.baseVal.x -= dx; + svg.viewBox.baseVal.y -= dy; + } + + function handleScanStart(e) { + if (e.button != 0) return; // Do not catch right-clicks etc. + setMode(MOUSEPAN); + panStart(e.clientX, e.clientY); + e.preventDefault(); + svg.addEventListener('mousemove', handleScanMove); + } + + function handleScanMove(e) { + if (e.buttons == 0) { + // Missed an end event, perhaps because mouse moved outside window. 
+ setMode(IDLE); + svg.removeEventListener('mousemove', handleScanMove); + return; + } + if (mode == MOUSEPAN) panMove(e.clientX, e.clientY); + } + + function handleScanEnd(e) { + if (mode == MOUSEPAN) panMove(e.clientX, e.clientY); + setMode(IDLE); + svg.removeEventListener('mousemove', handleScanMove); + if (!moved) clickHandler(e.target); + } + + // Find touch object with specified identifier. + function findTouch(tlist, id) { + for (const t of tlist) { + if (t.identifier == id) return t; + } + return null; + } + + // Return distance between two touch points + function touchGap(t1, t2) { + const dx = t1.clientX - t2.clientX; + const dy = t1.clientY - t2.clientY; + return Math.hypot(dx, dy); + } + + function handleTouchStart(e) { + if (mode == IDLE && e.changedTouches.length == 1) { + // Start touch based panning + const t = e.changedTouches[0]; + setMode(TOUCHPAN); + touchid = t.identifier; + panStart(t.clientX, t.clientY); + e.preventDefault(); + } else if (mode == TOUCHPAN && e.touches.length == 2) { + // Start pinch zooming + setMode(TOUCHZOOM); + const t1 = e.touches[0]; + const t2 = e.touches[1]; + touchid = t1.identifier; + touchid2 = t2.identifier; + initScale = currentScale; + initGap = touchGap(t1, t2); + centerPoint = toSvg((t1.clientX + t2.clientX) / 2, + (t1.clientY + t2.clientY) / 2); + e.preventDefault(); + } + } + + function handleTouchMove(e) { + if (mode == TOUCHPAN) { + const t = findTouch(e.changedTouches, touchid); + if (t == null) return; + if (e.touches.length != 1) { + setMode(IDLE); + return; + } + panMove(t.clientX, t.clientY); + e.preventDefault(); + } else if (mode == TOUCHZOOM) { + // Get two touches; new gap; rescale to ratio. 
+ const t1 = findTouch(e.touches, touchid); + const t2 = findTouch(e.touches, touchid2); + if (t1 == null || t2 == null) return; + const gap = touchGap(t1, t2); + rescale(initScale * gap / initGap, centerPoint); + e.preventDefault(); + } + } + + function handleTouchEnd(e) { + if (mode == TOUCHPAN) { + const t = findTouch(e.changedTouches, touchid); + if (t == null) return; + panMove(t.clientX, t.clientY); + setMode(IDLE); + e.preventDefault(); + if (!moved) clickHandler(t.target); + } else if (mode == TOUCHZOOM) { + setMode(IDLE); + e.preventDefault(); + } + } + + svg.addEventListener('mousedown', handleScanStart); + svg.addEventListener('mouseup', handleScanEnd); + svg.addEventListener('touchstart', handleTouchStart); + svg.addEventListener('touchmove', handleTouchMove); + svg.addEventListener('touchend', handleTouchEnd); + svg.addEventListener('wheel', handleWheel, true); +} + +function initMenus() { + 'use strict'; + + let activeMenu = null; + let activeMenuHdr = null; + + function cancelActiveMenu() { + if (activeMenu == null) return; + activeMenu.style.display = 'none'; + activeMenu = null; + activeMenuHdr = null; + } + + // Set click handlers on every menu header. + for (const menu of document.getElementsByClassName('submenu')) { + const hdr = menu.parentElement; + if (hdr == null) return; + if (hdr.classList.contains('disabled')) return; + function showMenu(e) { + // menu is a child of hdr, so this event can fire for clicks + // inside menu. Ignore such clicks. + if (e.target.parentElement != hdr) return; + activeMenu = menu; + activeMenuHdr = hdr; + menu.style.display = 'block'; + } + hdr.addEventListener('mousedown', showMenu); + hdr.addEventListener('touchstart', showMenu); + } + + // If there is an active menu and a down event outside, retract the menu. 
+ for (const t of ['mousedown', 'touchstart']) { + document.addEventListener(t, (e) => { + // Note: to avoid unnecessary flicker, if the down event is inside + // the active menu header, do not retract the menu. + if (activeMenuHdr != e.target.closest('.menu-item')) { + cancelActiveMenu(); + } + }, { passive: true, capture: true }); + } + + // If there is an active menu and an up event inside, retract the menu. + document.addEventListener('mouseup', (e) => { + if (activeMenu == e.target.closest('.submenu')) { + cancelActiveMenu(); + } + }, { passive: true, capture: true }); +} + +function sendURL(method, url, done) { + fetch(url.toString(), {method: method}) + .then((response) => { done(response.ok); }) + .catch((error) => { done(false); }); +} + +// Initialize handlers for saving/loading configurations. +function initConfigManager() { + 'use strict'; + + // Initialize various elements. + function elem(id) { + const result = document.getElementById(id); + if (!result) console.warn('element ' + id + ' not found'); + return result; + } + const overlay = elem('dialog-overlay'); + const saveDialog = elem('save-dialog'); + const saveInput = elem('save-name'); + const saveError = elem('save-error'); + const delDialog = elem('delete-dialog'); + const delPrompt = elem('delete-prompt'); + const delError = elem('delete-error'); + + let currentDialog = null; + let currentDeleteTarget = null; + + function showDialog(dialog) { + if (currentDialog != null) { + overlay.style.display = 'none'; + currentDialog.style.display = 'none'; + } + currentDialog = dialog; + if (dialog != null) { + overlay.style.display = 'block'; + dialog.style.display = 'block'; + } + } + + function cancelDialog(e) { + showDialog(null); + } + + // Show dialog for saving the current config. + function showSaveDialog(e) { + saveError.innerText = ''; + showDialog(saveDialog); + saveInput.focus(); + } + + // Commit save config. 
+ function commitSave(e) { + const name = saveInput.value; + const url = new URL(document.URL); + // Set path relative to existing path. + url.pathname = new URL('./saveconfig', document.URL).pathname; + url.searchParams.set('config', name); + saveError.innerText = ''; + sendURL('POST', url, (ok) => { + if (!ok) { + saveError.innerText = 'Save failed'; + } else { + showDialog(null); + location.reload(); // Reload to show updated config menu + } + }); + } + + function handleSaveInputKey(e) { + if (e.key === 'Enter') commitSave(e); + } + + function deleteConfig(e, elem) { + e.preventDefault(); + const config = elem.dataset.config; + delPrompt.innerText = 'Delete ' + config + '?'; + currentDeleteTarget = elem; + showDialog(delDialog); + } + + function commitDelete(e, elem) { + if (!currentDeleteTarget) return; + const config = currentDeleteTarget.dataset.config; + const url = new URL('./deleteconfig', document.URL); + url.searchParams.set('config', config); + delError.innerText = ''; + sendURL('DELETE', url, (ok) => { + if (!ok) { + delError.innerText = 'Delete failed'; + return; + } + showDialog(null); + // Remove menu entry for this config. + if (currentDeleteTarget && currentDeleteTarget.parentElement) { + currentDeleteTarget.parentElement.remove(); + } + }); + } + + // Bind event on elem to fn. + function bind(event, elem, fn) { + if (elem == null) return; + elem.addEventListener(event, fn); + if (event == 'click') { + // Also enable via touch. + elem.addEventListener('touchstart', fn); + } + } + + bind('click', elem('save-config'), showSaveDialog); + bind('click', elem('save-cancel'), cancelDialog); + bind('click', elem('save-confirm'), commitSave); + bind('keydown', saveInput, handleSaveInputKey); + + bind('click', elem('delete-cancel'), cancelDialog); + bind('click', elem('delete-confirm'), commitDelete); + + // Activate deletion button for all config entries in menu. 
+ for (const del of Array.from(document.getElementsByClassName('menu-delete-btn'))) { + bind('click', del, (e) => { + deleteConfig(e, del); + }); + } +} + +// options if present can contain: +// hiliter: function(Number, Boolean): Boolean +// Overridable mechanism for highlighting/unhighlighting specified node. +// current: function() Map[Number,Boolean] +// Overridable mechanism for fetching set of currently selected nodes. +function viewer(baseUrl, nodes, options) { + 'use strict'; + + // Elements + const search = document.getElementById('search'); + const graph0 = document.getElementById('graph0'); + const svg = (graph0 == null ? null : graph0.parentElement); + const toptable = document.getElementById('toptable'); + + let regexpActive = false; + let selected = new Map(); + let origFill = new Map(); + let searchAlarm = null; + let buttonsEnabled = true; + + // Return current selection. + function getSelection() { + if (selected.size > 0) { + return selected; + } else if (options && options.current) { + return options.current(); + } + return new Map(); + } + + function handleDetails(e) { + e.preventDefault(); + const detailsText = document.getElementById('detailsbox'); + if (detailsText != null) { + if (detailsText.style.display === 'block') { + detailsText.style.display = 'none'; + } else { + detailsText.style.display = 'block'; + } + } + } + + function handleKey(e) { + if (e.keyCode != 13) return; + setHrefParams(window.location, function (params) { + params.set('f', search.value); + }); + e.preventDefault(); + } + + function handleSearch() { + // Delay expensive processing so a flurry of key strokes is handled once. 
+ if (searchAlarm != null) { + clearTimeout(searchAlarm); + } + searchAlarm = setTimeout(selectMatching, 300); + + regexpActive = true; + updateButtons(); + } + + function selectMatching() { + searchAlarm = null; + let re = null; + if (search.value != '') { + try { + re = new RegExp(search.value); + } catch (e) { + // TODO: Display error state in search box + return; + } + } + + function match(text) { + return re != null && re.test(text); + } + + // drop currently selected items that do not match re. + selected.forEach(function(v, n) { + if (!match(nodes[n])) { + unselect(n); + } + }) + + // add matching items that are not currently selected. + if (nodes) { + for (let n = 0; n < nodes.length; n++) { + if (!selected.has(n) && match(nodes[n])) { + select(n); + } + } + } + + updateButtons(); + } + + function toggleSvgSelect(elem) { + // Walk up to immediate child of graph0 + while (elem != null && elem.parentElement != graph0) { + elem = elem.parentElement; + } + if (!elem) return; + + // Disable regexp mode. + regexpActive = false; + + const n = nodeId(elem); + if (n < 0) return; + if (selected.has(n)) { + unselect(n); + } else { + select(n); + } + updateButtons(); + } + + function unselect(n) { + if (setNodeHighlight(n, false)) selected.delete(n); + } + + function select(n, elem) { + if (setNodeHighlight(n, true)) selected.set(n, true); + } + + function nodeId(elem) { + const id = elem.id; + if (!id) return -1; + if (!id.startsWith('node')) return -1; + const n = parseInt(id.slice(4), 10); + if (isNaN(n)) return -1; + if (n < 0 || n >= nodes.length) return -1; + return n; + } + + // Change highlighting of node (returns true if node was found). + function setNodeHighlight(n, set) { + if (options && options.hiliter) return options.hiliter(n, set); + + const elem = document.getElementById('node' + n); + if (!elem) return false; + + // Handle table row highlighting. 
+ if (elem.nodeName == 'TR') { + elem.classList.toggle('hilite', set); + return true; + } + + // Handle svg element highlighting. + const p = findPolygon(elem); + if (p != null) { + if (set) { + origFill.set(p, p.style.fill); + p.style.fill = '#ccccff'; + } else if (origFill.has(p)) { + p.style.fill = origFill.get(p); + } + } + + return true; + } + + function findPolygon(elem) { + if (elem.localName == 'polygon') return elem; + for (const c of elem.children) { + const p = findPolygon(c); + if (p != null) return p; + } + return null; + } + + // convert a string to a regexp that matches that string. + function quotemeta(str) { + return str.replace(/([\\\.?+*\[\](){}|^$])/g, '\\$1'); + } + + function setSampleIndexLink(si) { + const elem = document.getElementById('sampletype-' + si); + if (elem != null) { + setHrefParams(elem, function (params) { + params.set("si", si); + }); + } + } + + // Update id's href to reflect current selection whenever it is + // liable to be followed. + function makeSearchLinkDynamic(id) { + const elem = document.getElementById(id); + if (elem == null) return; + + // Most links copy current selection into the 'f' parameter, + // but Refine menu links are different. + let param = 'f'; + if (id == 'ignore') param = 'i'; + if (id == 'hide') param = 'h'; + if (id == 'show') param = 's'; + if (id == 'show-from') param = 'sf'; + + // We update on mouseenter so middle-click/right-click work properly. + elem.addEventListener('mouseenter', updater); + elem.addEventListener('touchstart', updater); + + function updater() { + // The selection can be in one of two modes: regexp-based or + // list-based. Construct regular expression depending on mode. + let re = regexpActive + ? search.value + : Array.from(getSelection().keys()).map(key => quotemeta(nodes[key])).join('|'); + + setHrefParams(elem, function (params) { + if (re != '') { + // For focus/show/show-from, forget old parameter. For others, add to re. 
+ if (param != 'f' && param != 's' && param != 'sf' && params.has(param)) { + const old = params.get(param); + if (old != '') { + re += '|' + old; + } + } + params.set(param, re); + } else { + params.delete(param); + } + }); + } + } + + function setHrefParams(elem, paramSetter) { + let url = new URL(elem.href); + url.hash = ''; + + // Copy params from this page's URL. + const params = url.searchParams; + for (const p of new URLSearchParams(window.location.search)) { + params.set(p[0], p[1]); + } + + // Give the params to the setter to modify. + paramSetter(params); + + elem.href = url.toString(); + } + + function handleTopClick(e) { + // Walk back until we find TR and then get the Name column (index 5) + let elem = e.target; + while (elem != null && elem.nodeName != 'TR') { + elem = elem.parentElement; + } + if (elem == null || elem.children.length < 6) return; + + e.preventDefault(); + const tr = elem; + const td = elem.children[5]; + if (td.nodeName != 'TD') return; + const name = td.innerText; + const index = nodes.indexOf(name); + if (index < 0) return; + + // Disable regexp mode. 
+ regexpActive = false; + + if (selected.has(index)) { + unselect(index, elem); + } else { + select(index, elem); + } + updateButtons(); + } + + function updateButtons() { + const enable = (search.value != '' || getSelection().size != 0); + if (buttonsEnabled == enable) return; + buttonsEnabled = enable; + for (const id of ['focus', 'ignore', 'hide', 'show', 'show-from']) { + const link = document.getElementById(id); + if (link != null) { + link.classList.toggle('disabled', !enable); + } + } + } + + // Initialize button states + updateButtons(); + + // Setup event handlers + initMenus(); + if (svg != null) { + initPanAndZoom(svg, toggleSvgSelect); + } + if (toptable != null) { + toptable.addEventListener('mousedown', handleTopClick); + toptable.addEventListener('touchstart', handleTopClick); + } + + const ids = ['topbtn', 'graphbtn', + 'flamegraph', 'flamegraph2', 'flamegraphold', + 'peek', 'list', + 'disasm', 'focus', 'ignore', 'hide', 'show', 'show-from']; + ids.forEach(makeSearchLinkDynamic); + + const sampleIDs = [{{range .SampleTypes}}'{{.}}', {{end}}]; + sampleIDs.forEach(setSampleIndexLink); + + // Bind action to button with specified id. + function addAction(id, action) { + const btn = document.getElementById(id); + if (btn != null) { + btn.addEventListener('click', action); + btn.addEventListener('touchstart', action); + } + } + + addAction('details', handleDetails); + initConfigManager(); + + search.addEventListener('input', handleSearch); + search.addEventListener('keydown', handleKey); + + // Give initial focus to main container so it can be scrolled using keys. 
+ const main = document.getElementById('bodycontainer'); + if (main) { + main.focus(); + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/flamegraph.html b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/flamegraph.html new file mode 100644 index 0000000000000000000000000000000000000000..9866755bcd7e9ac3ecd4078fb4363163af810c03 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/flamegraph.html @@ -0,0 +1,103 @@ + + + + + {{.Title}} + {{template "css" .}} + + + + + {{template "header" .}} +
+
+
+
+
+
+ {{template "script" .}} + + + + + diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/graph.html b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/graph.html new file mode 100644 index 0000000000000000000000000000000000000000..a113549fc4c7a82ca073ef9083f45856016f4cdc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/graph.html @@ -0,0 +1,16 @@ + + + + + {{.Title}} + {{template "css" .}} + + + {{template "header" .}} +
+ {{.HTMLBody}} +
+ {{template "script" .}} + + + diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html new file mode 100644 index 0000000000000000000000000000000000000000..42cb7960e6325d6d313ab276ea89c6579ffae487 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html @@ -0,0 +1,114 @@ +
+
+

pprof

+
+ + + + {{$sampleLen := len .SampleTypes}} + {{if gt $sampleLen 1}} + + {{end}} + + + + + + + +
+ +
+ +
+ {{.Title}} +
+ {{range .Legend}}
{{.}}
{{end}} +
+
+
+ +
+ +
+
Save options as
+ + {{range .Configs}}{{if .UserConfig}} + + +
+ +
+
Delete config
+
+ +
+ +
{{range .Errors}}
{{.}}
{{end}}
diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/plaintext.html b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/plaintext.html new file mode 100644 index 0000000000000000000000000000000000000000..9791cc7718981c238224b2fb3459e5df813611f0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/plaintext.html @@ -0,0 +1,18 @@ + + + + + {{.Title}} + {{template "css" .}} + + + {{template "header" .}} +
+
+      {{.TextBody}}
+    
+
+ {{template "script" .}} + + + diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/source.html b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/source.html new file mode 100644 index 0000000000000000000000000000000000000000..3212bee4a084d4a2d402719dc888c87e0b4cea99 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/source.html @@ -0,0 +1,18 @@ + + + + + {{.Title}} + {{template "css" .}} + {{template "weblistcss" .}} + {{template "weblistjs" .}} + + + {{template "header" .}} +
+ {{.HTMLBody}} +
+ {{template "script" .}} + + + diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.css b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.css new file mode 100644 index 0000000000000000000000000000000000000000..f5aeb9857a6d54f46bd90d129c0f49e4a989df6a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.css @@ -0,0 +1,78 @@ +body { + overflow: hidden; /* Want scrollbar not here, but in #stack-holder */ +} +/* Scrollable container for flame graph */ +#stack-holder { + width: 100%; + flex-grow: 1; + overflow-y: auto; + background: #eee; /* Light grey gives better contrast with boxes */ + position: relative; /* Allows absolute positioning of child boxes */ +} +/* Flame graph */ +#stack-chart { + width: 100%; + position: relative; /* Allows absolute positioning of child boxes */ +} +/* Shows details of frame that is under the mouse */ +#current-details { + position: absolute; + top: 5px; + right: 5px; + z-index: 2; + font-size: 12pt; +} +/* Background of a single flame-graph frame */ +.boxbg { + border-width: 0px; + position: absolute; + overflow: hidden; + box-sizing: border-box; + background: #d8d8d8; +} +.positive { position: absolute; background: #caa; } +.negative { position: absolute; background: #aca; } +/* Not-inlined frames are visually separated from their caller. 
*/ +.not-inlined { + border-top: 1px solid black; +} +/* Function name */ +.boxtext { + position: absolute; + width: 100%; + padding-left: 2px; + line-height: 18px; + cursor: default; + font-family: "Google Sans", Arial, sans-serif; + font-size: 12pt; + z-index: 2; +} +/* Box highlighting via shadows to avoid size changes */ +.hilite { box-shadow: 0px 0px 0px 2px #000; z-index: 1; } +.hilite2 { box-shadow: 0px 0px 0px 2px #000; z-index: 1; } +/* Gap left between callers and callees */ +.separator { + position: absolute; + text-align: center; + font-size: 12pt; + font-weight: bold; +} +/* Ensure that pprof menu is above boxes */ +.submenu { z-index: 3; } +/* Right-click menu */ +#action-menu { + max-width: 15em; +} +/* Right-click menu title */ +#action-title { + display: block; + padding: 0.5em 1em; + background: #888; + text-overflow: ellipsis; + overflow: hidden; +} +/* Internal canvas used to measure text size when picking fonts */ +#textsizer { + position: absolute; + bottom: -100px; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.html b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.html new file mode 100644 index 0000000000000000000000000000000000000000..1ddb7a3a1cfcf35daefea3ed2be7c0ab3e46a0c2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.html @@ -0,0 +1,32 @@ + + + + + {{.Title}} + {{template "css" .}} + {{template "stacks_css"}} + + + {{template "header" .}} +
+
+
+
+ + {{template "script" .}} + {{template "stacks_js"}} + + + diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js new file mode 100644 index 0000000000000000000000000000000000000000..be78edd553f7456aff9c8ef2c4acfad28f68e155 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js @@ -0,0 +1,600 @@ +// stackViewer displays a flame-graph like view (extended to show callers). +// stacks - report.StackSet +// nodes - List of names for each source in report.StackSet +function stackViewer(stacks, nodes) { + 'use strict'; + + // Constants used in rendering. + const ROW = 20; + const PADDING = 2; + const MIN_WIDTH = 4; + const MIN_TEXT_WIDTH = 16; + const TEXT_MARGIN = 2; + const FONT_SIZE = 12; + const MIN_FONT_SIZE = 8; + + // Mapping from unit to a list of display scales/labels. + // List should be ordered by increasing unit size. + const UNITS = new Map([ + ['B', [ + ['B', 1], + ['kB', Math.pow(2, 10)], + ['MB', Math.pow(2, 20)], + ['GB', Math.pow(2, 30)], + ['TB', Math.pow(2, 40)], + ['PB', Math.pow(2, 50)]]], + ['s', [ + ['ns', 1e-9], + ['µs', 1e-6], + ['ms', 1e-3], + ['s', 1], + ['hrs', 60*60]]]]); + + // Fields + let pivots = []; // Indices of currently selected data.Sources entries. + let matches = new Set(); // Indices of sources that match search + let elems = new Map(); // Mapping from source index to display elements + let displayList = []; // List of boxes to display. + let actionMenuOn = false; // Is action menu visible? + let actionTarget = null; // Box on which action menu is operating. + let diff = false; // Are we displaying a diff? + + for (const stack of stacks.Stacks) { + if (stack.Value < 0) { + diff = true; + break; + } + } + + // Setup to allow measuring text width. 
+ const textSizer = document.createElement('canvas'); + textSizer.id = 'textsizer'; + const textContext = textSizer.getContext('2d'); + + // Get DOM elements. + const chart = find('stack-chart'); + const search = find('search'); + const actions = find('action-menu'); + const actionTitle = find('action-title'); + const detailBox = find('current-details'); + + window.addEventListener('resize', render); + window.addEventListener('popstate', render); + search.addEventListener('keydown', handleSearchKey); + + // Withdraw action menu when clicking outside, or when item selected. + document.addEventListener('mousedown', (e) => { + if (!actions.contains(e.target)) { + hideActionMenu(); + } + }); + actions.addEventListener('click', hideActionMenu); + + // Initialize menus and other general UI elements. + viewer(new URL(window.location.href), nodes, { + hiliter: (n, on) => { return hilite(n, on); }, + current: () => { + let r = new Map(); + for (let p of pivots) { + r.set(p, true); + } + return r; + }}); + + render(); + + // Helper functions follow: + + // hilite changes the highlighting of elements corresponding to specified src. + function hilite(src, on) { + if (on) { + matches.add(src); + } else { + matches.delete(src); + } + toggleClass(src, 'hilite', on); + return true; + } + + // Display action menu (triggered by right-click on a frame) + function showActionMenu(e, box) { + if (box.src == 0) return; // No action menu for root + e.preventDefault(); // Disable browser context menu + const src = stacks.Sources[box.src]; + actionTitle.innerText = src.Display[src.Display.length-1]; + const menu = actions; + menu.style.display = 'block'; + // Compute position so menu stays visible and near the mouse. + const x = Math.min(e.clientX - 10, document.body.clientWidth - menu.clientWidth); + const y = Math.min(e.clientY - 10, document.body.clientHeight - menu.clientHeight); + menu.style.left = x + 'px'; + menu.style.top = y + 'px'; + // Set menu links to operate on clicked box. 
+ setHrefParam('action-source', 'f', box.src); + setHrefParam('action-source-tab', 'f', box.src); + setHrefParam('action-focus', 'f', box.src); + setHrefParam('action-ignore', 'i', box.src); + setHrefParam('action-hide', 'h', box.src); + setHrefParam('action-showfrom', 'sf', box.src); + toggleClass(box.src, 'hilite2', true); + actionTarget = box; + actionMenuOn = true; + } + + function hideActionMenu() { + actions.style.display = 'none'; + actionMenuOn = false; + if (actionTarget != null) { + toggleClass(actionTarget.src, 'hilite2', false); + } + } + + // setHrefParam updates the specified parameter in the href of an + // element to make it operate on the specified src. + function setHrefParam(id, param, src) { + const elem = document.getElementById(id); + if (!elem) return; + + let url = new URL(elem.href); + url.hash = ''; + + // Copy params from this page's URL. + const params = url.searchParams; + for (const p of new URLSearchParams(window.location.search)) { + params.set(p[0], p[1]); + } + + // Update params to include src. + let v = stacks.Sources[src].RE; + if (param != 'f' && param != 'sf') { // old f,sf values are overwritten + // Add new source to current parameter value. + const old = params.get(param); + if (old && old != '') { + v += '|' + old; + } + } + params.set(param, v); + + elem.href = url.toString(); + } + + // Capture Enter key in the search box to make it pivot instead of focus. + function handleSearchKey(e) { + if (e.key != 'Enter') return; + e.stopImmediatePropagation(); // Disable normal enter key handling + const val = search.value; + try { + new RegExp(search.value); + } catch (error) { + return; // TODO: Display error state in search box + } + switchPivots(val); + } + + function switchPivots(regexp) { + // Switch URL without hitting the server. 
+ const url = new URL(document.URL); + url.searchParams.set('p', regexp); + history.pushState('', '', url.toString()); // Makes back-button work + matches = new Set(); + search.value = ''; + render(); + } + + function handleEnter(box, div) { + if (actionMenuOn) return; + const src = stacks.Sources[box.src]; + div.title = details(box) + ' │ ' + src.FullName + (src.Inlined ? "\n(inlined)" : ""); + detailBox.innerText = summary(box.sumpos, box.sumneg); + // Highlight all boxes that have the same source as box. + toggleClass(box.src, 'hilite2', true); + } + + function handleLeave(box) { + if (actionMenuOn) return; + detailBox.innerText = ''; + toggleClass(box.src, 'hilite2', false); + } + + // Return list of sources that match the regexp given by the 'p' URL parameter. + function urlPivots() { + const pivots = []; + const params = (new URL(document.URL)).searchParams; + const val = params.get('p'); + if (val !== null && val != '') { + try { + const re = new RegExp(val); + for (let i = 0; i < stacks.Sources.length; i++) { + const src = stacks.Sources[i]; + if (re.test(src.UniqueName) || re.test(src.FileName)) { + pivots.push(i); + } + } + } catch (error) {} + } + if (pivots.length == 0) { + pivots.push(0); + } + return pivots; + } + + // render re-generates the stack display. + function render() { + pivots = urlPivots(); + + // Get places where pivots occur. 
+ let places = []; + for (let pivot of pivots) { + const src = stacks.Sources[pivot]; + for (let p of src.Places) { + places.push(p); + } + } + + const width = chart.clientWidth; + elems.clear(); + actionTarget = null; + const [pos, neg] = totalValue(places); + const total = pos + neg; + const xscale = (width-2*PADDING) / total; // Converts from profile value to X pixels + const x = PADDING; + const y = 0; + + displayList.length = 0; + renderStacks(0, xscale, x, y, places, +1); // Callees + renderStacks(0, xscale, x, y-ROW, places, -1); // Callers (ROW left for separator) + display(xscale, pos, neg, displayList); + } + + // renderStacks creates boxes with top-left at x,y with children drawn as + // nested stacks (below or above based on the sign of direction). + // Returns the largest y coordinate filled. + function renderStacks(depth, xscale, x, y, places, direction) { + // Example: suppose we are drawing the following stacks: + // a->b->c + // a->b->d + // a->e->f + // After rendering a, we will call renderStacks, with places pointing to + // the preceding stacks. + // + // We first group all places with the same leading entry. In this example + // we get [b->c, b->d] and [e->f]. We render the two groups side-by-side. + const groups = partitionPlaces(places); + for (const g of groups) { + renderGroup(depth, xscale, x, y, g, direction); + x += groupWidth(xscale, g); + } + } + + // Some of the types used below: + // + // // Group represents a displayed (sub)tree. + // interface Group { + // name: string; // Full name of source + // src: number; // Index in stacks.Sources + // self: number; // Contribution as leaf (may be < 0 for diffs) + // sumpos: number; // Sum of |self| of positive nodes in tree (>= 0) + // sumneg: number; // Sum of |self| of negative nodes in tree (>= 0) + // places: Place[]; // Stack slots that contributed to this group + // } + // + // // Box is a rendered item. 
+ // interface Box { + // x: number; // X coordinate of top-left + // y: number; // Y coordinate of top-left + // width: number; // Width of box to display + // src: number; // Index in stacks.Sources + // sumpos: number; // From corresponding Group + // sumneg: number; // From corresponding Group + // self: number; // From corresponding Group + // }; + + function groupWidth(xscale, g) { + return xscale * (g.sumpos + g.sumneg); + } + + function renderGroup(depth, xscale, x, y, g, direction) { + // Skip if not wide enough. + const width = groupWidth(xscale, g); + if (width < MIN_WIDTH) return; + + // Draw the box for g.src (except for selected element in upwards direction + // since that duplicates the box we added in downwards direction). + if (depth != 0 || direction > 0) { + const box = { + x: x, + y: y, + width: width, + src: g.src, + sumpos: g.sumpos, + sumneg: g.sumneg, + self: g.self, + }; + displayList.push(box); + if (direction > 0) { + // Leave gap on left hand side to indicate self contribution. + x += xscale*Math.abs(g.self); + } + } + y += direction * ROW; + + // Find child or parent stacks. + const next = []; + for (const place of g.places) { + const stack = stacks.Stacks[place.Stack]; + const nextSlot = place.Pos + direction; + if (nextSlot >= 0 && nextSlot < stack.Sources.length) { + next.push({Stack: place.Stack, Pos: nextSlot}); + } + } + renderStacks(depth+1, xscale, x, y, next, direction); + } + + // partitionPlaces partitions a set of places into groups where each group + // contains places with the same source. If a stack occurs multiple times + // in places, only the outer-most occurrence is kept. + function partitionPlaces(places) { + // Find outer-most slot per stack (used later to elide duplicate stacks). 
+ const stackMap = new Map(); // Map from stack index to outer-most slot# + for (const place of places) { + const prevSlot = stackMap.get(place.Stack); + if (prevSlot && prevSlot <= place.Pos) { + // We already have a higher slot in this stack. + } else { + stackMap.set(place.Stack, place.Pos); + } + } + + // Now partition the stacks. + const groups = []; // Array of Group {name, src, sum, self, places} + const groupMap = new Map(); // Map from Source to Group + for (const place of places) { + if (stackMap.get(place.Stack) != place.Pos) { + continue; + } + + const stack = stacks.Stacks[place.Stack]; + const src = stack.Sources[place.Pos]; + let group = groupMap.get(src); + if (!group) { + const name = stacks.Sources[src].FullName; + group = {name: name, src: src, sumpos: 0, sumneg: 0, self: 0, places: []}; + groupMap.set(src, group); + groups.push(group); + } + if (stack.Value < 0) { + group.sumneg += -stack.Value; + } else { + group.sumpos += stack.Value; + } + group.self += (place.Pos == stack.Sources.length-1) ? stack.Value : 0; + group.places.push(place); + } + + // Order by decreasing cost (makes it easier to spot heavy functions). + // Though alphabetical ordering is a potential alternative that will make + // profile comparisons easier. + groups.sort(function(a, b) { + return (b.sumpos + b.sumneg) - (a.sumpos + a.sumneg); + }); + + return groups; + } + + function display(xscale, posTotal, negTotal, list) { + // Sort boxes so that text selection follows a predictable order. + list.sort(function(a, b) { + if (a.y != b.y) return a.y - b.y; + return a.x - b.x; + }); + + // Adjust Y coordinates so that zero is at top. + let adjust = (list.length > 0) ? list[0].y : 0; + adjust -= ROW + 2*PADDING; // Room for details + + const divs = []; + for (const box of list) { + box.y -= adjust; + divs.push(drawBox(xscale, box)); + } + divs.push(drawSep(-adjust, posTotal, negTotal)); + + const h = (list.length > 0 ? 
list[list.length-1].y : 0) + 4*ROW; + chart.style.height = h+'px'; + chart.replaceChildren(...divs); + } + + function drawBox(xscale, box) { + const srcIndex = box.src; + const src = stacks.Sources[srcIndex]; + + function makeRect(cl, x, y, w, h) { + const r = document.createElement('div'); + r.style.left = x+'px'; + r.style.top = y+'px'; + r.style.width = w+'px'; + r.style.height = h+'px'; + r.classList.add(cl); + return r; + } + + // Background + const w = box.width - 1; // Leave 1px gap + const r = makeRect('boxbg', box.x, box.y, w, ROW); + if (!diff) r.style.background = makeColor(src.Color); + addElem(srcIndex, r); + if (!src.Inlined) { + r.classList.add('not-inlined'); + } + + // Positive/negative indicator for diff mode. + if (diff) { + const delta = box.sumpos - box.sumneg; + const partWidth = xscale * Math.abs(delta); + if (partWidth >= MIN_WIDTH) { + r.appendChild(makeRect((delta < 0 ? 'negative' : 'positive'), + 0, 0, partWidth, ROW-1)); + } + } + + // Label + if (box.width >= MIN_TEXT_WIDTH) { + const t = document.createElement('div'); + t.classList.add('boxtext'); + fitText(t, box.width-2*TEXT_MARGIN, src.Display); + r.appendChild(t); + } + + r.addEventListener('click', () => { switchPivots(src.RE); }); + r.addEventListener('mouseenter', () => { handleEnter(box, r); }); + r.addEventListener('mouseleave', () => { handleLeave(box); }); + r.addEventListener('contextmenu', (e) => { showActionMenu(e, box); }); + return r; + } + + function drawSep(y, posTotal, negTotal) { + const m = document.createElement('div'); + m.innerText = summary(posTotal, negTotal); + m.style.top = (y-ROW) + 'px'; + m.style.left = PADDING + 'px'; + m.style.width = (chart.clientWidth - PADDING*2) + 'px'; + m.classList.add('separator'); + return m; + } + + // addElem registers an element that belongs to the specified src. 
+ function addElem(src, elem) { + let list = elems.get(src); + if (!list) { + list = []; + elems.set(src, list); + } + list.push(elem); + elem.classList.toggle('hilite', matches.has(src)); + } + + // Adds or removes cl from classList of all elements for the specified source. + function toggleClass(src, cl, value) { + const list = elems.get(src); + if (list) { + for (const elem of list) { + elem.classList.toggle(cl, value); + } + } + } + + // fitText sets text and font-size clipped to the specified width w. + function fitText(t, avail, textList) { + // Find first entry in textList that fits. + let width = avail; + textContext.font = FONT_SIZE + 'pt Arial'; + for (let i = 0; i < textList.length; i++) { + let text = textList[i]; + width = textContext.measureText(text).width; + if (width <= avail) { + t.innerText = text; + return; + } + } + + // Try to fit by dropping font size. + let text = textList[textList.length-1]; + const fs = Math.max(MIN_FONT_SIZE, FONT_SIZE * (avail / width)); + t.style.fontSize = fs + 'pt'; + t.innerText = text; + } + + // totalValue returns the positive and negative sums of the Values of stacks + // listed in places. + function totalValue(places) { + const seen = new Set(); + let pos = 0; + let neg = 0; + for (const place of places) { + if (seen.has(place.Stack)) continue; // Do not double-count stacks + seen.add(place.Stack); + const stack = stacks.Stacks[place.Stack]; + if (stack.Value < 0) { + neg += -stack.Value; + } else { + pos += stack.Value; + } + } + return [pos, neg]; + } + + function summary(pos, neg) { + // Examples: + // 6s (10%) + // 12s (20%) 🠆 18s (30%) + return diff ? 
diffText(neg, pos) : percentText(pos); + } + + function details(box) { + // Examples: + // 6s (10%) + // 6s (10%) │ self 3s (5%) + // 6s (10%) │ 12s (20%) 🠆 18s (30%) + let result = percentText(box.sumpos - box.sumneg); + if (box.self != 0) { + result += " │ self " + unitText(box.self); + } + if (diff && box.sumpos > 0 && box.sumneg > 0) { + result += " │ " + diffText(box.sumneg, box.sumpos); + } + return result; + } + + // diffText returns text that displays from and to alongside their percentages. + // E.g., 9s (45%) 🠆 10s (50%) + function diffText(from, to) { + return percentText(from) + " 🠆 " + percentText(to); + } + + // percentText returns text that displays v in appropriate units alongside its + // percentange. + function percentText(v) { + function percent(v, total) { + return Number(((100.0 * v) / total).toFixed(1)) + '%'; + } + return unitText(v) + " (" + percent(v, stacks.Total) + ")"; + } + + // unitText returns a formatted string to display for value. + function unitText(value) { + const sign = (value < 0) ? "-" : ""; + let v = Math.abs(value) * stacks.Scale; + // Rescale to appropriate display unit. + let unit = stacks.Unit; + const list = UNITS.get(unit); + if (list) { + // Find first entry in list that is not too small. + for (const [name, scale] of list) { + if (v <= 100*scale) { + v /= scale; + unit = name; + break; + } + } + } + return sign + Number(v.toFixed(2)) + unit; + } + + function find(name) { + const elem = document.getElementById(name); + if (!elem) { + throw 'element not found: ' + name + } + return elem; + } + + function makeColor(index) { + // Rotate hue around a circle. Multiple by phi to spread things + // out better. Use 50% saturation to make subdued colors, and + // 80% lightness to have good contrast with black foreground text. 
+ const PHI = 1.618033988; + const hue = (index+1) * PHI * 2 * Math.PI; // +1 to avoid 0 + const hsl = `hsl(${hue}rad 50% 80%)`; + return hsl; + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/top.html b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/top.html new file mode 100644 index 0000000000000000000000000000000000000000..86d9fcbdb0d62f2cb97d280d26e7f65fde10dc02 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/top.html @@ -0,0 +1,114 @@ + + + + + {{.Title}} + {{template "css" .}} + + + + {{template "header" .}} +
+
WhenElapsedGoroutineEvents
{{$el.WhenString}}{{$el.Duration}} + Task {{$el.ID}} + (goroutine view) + ({{if .Complete}}complete{{else}}incomplete{{end}}) +
{{.WhenString}}{{elapsed .Elapsed}}{{.Goroutine}}{{.What}}
+ + + + + + + + + + + + +
FlatFlat%Sum%CumCum%NameInlined?
+ + {{template "script" .}} + + + diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/interactive.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/interactive.go new file mode 100644 index 0000000000000000000000000000000000000000..e6e865f385d7083503f012879e9dc4c4689bf202 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/interactive.go @@ -0,0 +1,419 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "fmt" + "io" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/internal/report" + "github.com/google/pprof/profile" +) + +var commentStart = "//:" // Sentinel for comments on options +var tailDigitsRE = regexp.MustCompile("[0-9]+$") + +// interactive starts a shell to read pprof commands. +func interactive(p *profile.Profile, o *plugin.Options) error { + // Enter command processing loop. + o.UI.SetAutoComplete(newCompleter(functionNames(p))) + configure("compact_labels", "true") + configHelp["sample_index"] += fmt.Sprintf("Or use sample_index=name, with name in %v.\n", sampleTypes(p)) + + // Do not wait for the visualizer to complete, to allow multiple + // graphs to be visualized simultaneously. 
+ interactiveMode = true + shortcuts := profileShortcuts(p) + + copier := makeProfileCopier(p) + greetings(p, o.UI) + for { + input, err := o.UI.ReadLine("(pprof) ") + if err != nil { + if err != io.EOF { + return err + } + if input == "" { + return nil + } + } + + for _, input := range shortcuts.expand(input) { + // Process assignments of the form variable=value + if s := strings.SplitN(input, "=", 2); len(s) > 0 { + name := strings.TrimSpace(s[0]) + var value string + if len(s) == 2 { + value = s[1] + if comment := strings.LastIndex(value, commentStart); comment != -1 { + value = value[:comment] + } + value = strings.TrimSpace(value) + } + if isConfigurable(name) { + // All non-bool options require inputs + if len(s) == 1 && !isBoolConfig(name) { + o.UI.PrintErr(fmt.Errorf("please specify a value, e.g. %s=", name)) + continue + } + if name == "sample_index" { + // Error check sample_index=xxx to ensure xxx is a valid sample type. + index, err := p.SampleIndexByName(value) + if err != nil { + o.UI.PrintErr(err) + continue + } + if index < 0 || index >= len(p.SampleType) { + o.UI.PrintErr(fmt.Errorf("invalid sample_index %q", value)) + continue + } + value = p.SampleType[index].Type + } + if err := configure(name, value); err != nil { + o.UI.PrintErr(err) + } + continue + } + } + + tokens := strings.Fields(input) + if len(tokens) == 0 { + continue + } + + switch tokens[0] { + case "o", "options": + printCurrentOptions(p, o.UI) + continue + case "exit", "quit", "q": + return nil + case "help": + commandHelp(strings.Join(tokens[1:], " "), o.UI) + continue + } + + args, cfg, err := parseCommandLine(tokens) + if err == nil { + err = generateReportWrapper(copier.newCopy(), args, cfg, o) + } + + if err != nil { + o.UI.PrintErr(err) + } + } + } +} + +var generateReportWrapper = generateReport // For testing purposes. + +// greetings prints a brief welcome and some overall profile +// information before accepting interactive commands. 
+func greetings(p *profile.Profile, ui plugin.UI) { + numLabelUnits := identifyNumLabelUnits(p, ui) + ropt, err := reportOptions(p, numLabelUnits, currentConfig()) + if err == nil { + rpt := report.New(p, ropt) + ui.Print(strings.Join(report.ProfileLabels(rpt), "\n")) + if rpt.Total() == 0 && len(p.SampleType) > 1 { + ui.Print(`No samples were found with the default sample value type.`) + ui.Print(`Try "sample_index" command to analyze different sample values.`, "\n") + } + } + ui.Print(`Entering interactive mode (type "help" for commands, "o" for options)`) +} + +// shortcuts represents composite commands that expand into a sequence +// of other commands. +type shortcuts map[string][]string + +func (a shortcuts) expand(input string) []string { + input = strings.TrimSpace(input) + if a != nil { + if r, ok := a[input]; ok { + return r + } + } + return []string{input} +} + +var pprofShortcuts = shortcuts{ + ":": []string{"focus=", "ignore=", "hide=", "tagfocus=", "tagignore="}, +} + +// profileShortcuts creates macros for convenience and backward compatibility. +func profileShortcuts(p *profile.Profile) shortcuts { + s := pprofShortcuts + // Add shortcuts for sample types + for _, st := range p.SampleType { + command := fmt.Sprintf("sample_index=%s", st.Type) + s[st.Type] = []string{command} + s["total_"+st.Type] = []string{"mean=0", command} + s["mean_"+st.Type] = []string{"mean=1", command} + } + return s +} + +func sampleTypes(p *profile.Profile) []string { + types := make([]string, len(p.SampleType)) + for i, t := range p.SampleType { + types[i] = t.Type + } + return types +} + +func printCurrentOptions(p *profile.Profile, ui plugin.UI) { + var args []string + current := currentConfig() + for _, f := range configFields { + n := f.name + v := current.get(f) + comment := "" + switch { + case len(f.choices) > 0: + values := append([]string{}, f.choices...) 
+ sort.Strings(values) + comment = "[" + strings.Join(values, " | ") + "]" + case n == "sample_index": + st := sampleTypes(p) + if v == "" { + // Apply default (last sample index). + v = st[len(st)-1] + } + // Add comments for all sample types in profile. + comment = "[" + strings.Join(st, " | ") + "]" + case n == "source_path": + continue + case n == "nodecount" && v == "-1": + comment = "default" + case v == "": + // Add quotes for empty values. + v = `""` + } + if comment != "" { + comment = commentStart + " " + comment + } + args = append(args, fmt.Sprintf(" %-25s = %-20s %s", n, v, comment)) + } + sort.Strings(args) + ui.Print(strings.Join(args, "\n")) +} + +// parseCommandLine parses a command and returns the pprof command to +// execute and the configuration to use for the report. +func parseCommandLine(input []string) ([]string, config, error) { + cmd, args := input[:1], input[1:] + name := cmd[0] + + c := pprofCommands[name] + if c == nil { + // Attempt splitting digits on abbreviated commands (eg top10) + if d := tailDigitsRE.FindString(name); d != "" && d != name { + name = name[:len(name)-len(d)] + cmd[0], args = name, append([]string{d}, args...) + c = pprofCommands[name] + } + } + if c == nil { + if _, ok := configHelp[name]; ok { + value := "" + if len(args) > 0 { + value = args[0] + } + return nil, config{}, fmt.Errorf("did you mean: %s=%s", name, value) + } + return nil, config{}, fmt.Errorf("unrecognized command: %q", name) + } + + if c.hasParam { + if len(args) == 0 { + return nil, config{}, fmt.Errorf("command %s requires an argument", name) + } + cmd = append(cmd, args[0]) + args = args[1:] + } + + // Copy config since options set in the command line should not persist. 
+ vcopy := currentConfig() + + var focus, ignore string + for i := 0; i < len(args); i++ { + t := args[i] + if n, err := strconv.ParseInt(t, 10, 32); err == nil { + vcopy.NodeCount = int(n) + continue + } + switch t[0] { + case '>': + outputFile := t[1:] + if outputFile == "" { + i++ + if i >= len(args) { + return nil, config{}, fmt.Errorf("unexpected end of line after >") + } + outputFile = args[i] + } + vcopy.Output = outputFile + case '-': + if t == "--cum" || t == "-cum" { + vcopy.Sort = "cum" + continue + } + ignore = catRegex(ignore, t[1:]) + default: + focus = catRegex(focus, t) + } + } + + if name == "tags" { + if focus != "" { + vcopy.TagFocus = focus + } + if ignore != "" { + vcopy.TagIgnore = ignore + } + } else { + if focus != "" { + vcopy.Focus = focus + } + if ignore != "" { + vcopy.Ignore = ignore + } + } + if vcopy.NodeCount == -1 && (name == "text" || name == "top") { + vcopy.NodeCount = 10 + } + + return cmd, vcopy, nil +} + +func catRegex(a, b string) string { + if a != "" && b != "" { + return a + "|" + b + } + return a + b +} + +// commandHelp displays help and usage information for all Commands +// and Variables or a specific Command or Variable. +func commandHelp(args string, ui plugin.UI) { + if args == "" { + help := usage(false) + help = help + ` + : Clear focus/ignore/hide/tagfocus/tagignore + + type "help " for more information +` + + ui.Print(help) + return + } + + if c := pprofCommands[args]; c != nil { + ui.Print(c.help(args)) + return + } + + if help, ok := configHelp[args]; ok { + ui.Print(help + "\n") + return + } + + ui.PrintErr("Unknown command: " + args) +} + +// newCompleter creates an autocompletion function for a set of commands. 
+func newCompleter(fns []string) func(string) string { + return func(line string) string { + switch tokens := strings.Fields(line); len(tokens) { + case 0: + // Nothing to complete + case 1: + // Single token -- complete command name + if match := matchVariableOrCommand(tokens[0]); match != "" { + return match + } + case 2: + if tokens[0] == "help" { + if match := matchVariableOrCommand(tokens[1]); match != "" { + return tokens[0] + " " + match + } + return line + } + fallthrough + default: + // Multiple tokens -- complete using functions, except for tags + if cmd := pprofCommands[tokens[0]]; cmd != nil && tokens[0] != "tags" { + lastTokenIdx := len(tokens) - 1 + lastToken := tokens[lastTokenIdx] + if strings.HasPrefix(lastToken, "-") { + lastToken = "-" + functionCompleter(lastToken[1:], fns) + } else { + lastToken = functionCompleter(lastToken, fns) + } + return strings.Join(append(tokens[:lastTokenIdx], lastToken), " ") + } + } + return line + } +} + +// matchVariableOrCommand attempts to match a string token to the prefix of a Command. +func matchVariableOrCommand(token string) string { + token = strings.ToLower(token) + var matches []string + for cmd := range pprofCommands { + if strings.HasPrefix(cmd, token) { + matches = append(matches, cmd) + } + } + matches = append(matches, completeConfig(token)...) + if len(matches) == 1 { + return matches[0] + } + return "" +} + +// functionCompleter replaces provided substring with a function +// name retrieved from a profile if a single match exists. Otherwise, +// it returns unchanged substring. It defaults to no-op if the profile +// is not specified. 
+func functionCompleter(substring string, fns []string) string { + found := "" + for _, fName := range fns { + if strings.Contains(fName, substring) { + if found != "" { + return substring + } + found = fName + } + } + if found != "" { + return found + } + return substring +} + +func functionNames(p *profile.Profile) []string { + var fns []string + for _, fn := range p.Function { + fns = append(fns, fn.Name) + } + return fns +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/options.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/options.go new file mode 100644 index 0000000000000000000000000000000000000000..6e8f9fca254937932a288f5f136384919595bf5b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/options.go @@ -0,0 +1,100 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" + + "github.com/google/pprof/internal/binutils" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/internal/symbolizer" + "github.com/google/pprof/internal/transport" +) + +// setDefaults returns a new plugin.Options with zero fields sets to +// sensible defaults. 
+func setDefaults(o *plugin.Options) *plugin.Options { + d := &plugin.Options{} + if o != nil { + *d = *o + } + if d.Writer == nil { + d.Writer = oswriter{} + } + if d.Flagset == nil { + d.Flagset = &GoFlags{} + } + if d.Obj == nil { + d.Obj = &binutils.Binutils{} + } + if d.UI == nil { + d.UI = &stdUI{r: bufio.NewReader(os.Stdin)} + } + if d.HTTPTransport == nil { + d.HTTPTransport = transport.New(d.Flagset) + } + if d.Sym == nil { + d.Sym = &symbolizer.Symbolizer{Obj: d.Obj, UI: d.UI, Transport: d.HTTPTransport} + } + return d +} + +type stdUI struct { + r *bufio.Reader +} + +func (ui *stdUI) ReadLine(prompt string) (string, error) { + os.Stdout.WriteString(prompt) + return ui.r.ReadString('\n') +} + +func (ui *stdUI) Print(args ...interface{}) { + ui.fprint(os.Stderr, args) +} + +func (ui *stdUI) PrintErr(args ...interface{}) { + ui.fprint(os.Stderr, args) +} + +func (ui *stdUI) IsTerminal() bool { + return false +} + +func (ui *stdUI) WantBrowser() bool { + return true +} + +func (ui *stdUI) SetAutoComplete(func(string) string) { +} + +func (ui *stdUI) fprint(f *os.File, args []interface{}) { + text := fmt.Sprint(args...) + if !strings.HasSuffix(text, "\n") { + text += "\n" + } + f.WriteString(text) +} + +// oswriter implements the Writer interface using a regular file. 
+type oswriter struct{} + +func (oswriter) Open(name string) (io.WriteCloser, error) { + f, err := os.Create(name) + return f, err +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/settings.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/settings.go new file mode 100644 index 0000000000000000000000000000000000000000..b784618aca9b33523baaf7a4c660c14199c53f3b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/settings.go @@ -0,0 +1,158 @@ +package driver + +import ( + "encoding/json" + "fmt" + "net/url" + "os" + "path/filepath" +) + +// settings holds pprof settings. +type settings struct { + // Configs holds a list of named UI configurations. + Configs []namedConfig `json:"configs"` +} + +// namedConfig associates a name with a config. +type namedConfig struct { + Name string `json:"name"` + config +} + +// settingsFileName returns the name of the file where settings should be saved. +func settingsFileName() (string, error) { + // Return "pprof/settings.json" under os.UserConfigDir(). + dir, err := os.UserConfigDir() + if err != nil { + return "", err + } + return filepath.Join(dir, "pprof", "settings.json"), nil +} + +// readSettings reads settings from fname. +func readSettings(fname string) (*settings, error) { + data, err := os.ReadFile(fname) + if err != nil { + if os.IsNotExist(err) { + return &settings{}, nil + } + return nil, fmt.Errorf("could not read settings: %w", err) + } + settings := &settings{} + if err := json.Unmarshal(data, settings); err != nil { + return nil, fmt.Errorf("could not parse settings: %w", err) + } + for i := range settings.Configs { + settings.Configs[i].resetTransient() + } + return settings, nil +} + +// writeSettings saves settings to fname. 
+func writeSettings(fname string, settings *settings) error { + data, err := json.MarshalIndent(settings, "", " ") + if err != nil { + return fmt.Errorf("could not encode settings: %w", err) + } + + // create the settings directory if it does not exist + // XDG specifies permissions 0700 when creating settings dirs: + // https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html + if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { + return fmt.Errorf("failed to create settings directory: %w", err) + } + + if err := os.WriteFile(fname, data, 0644); err != nil { + return fmt.Errorf("failed to write settings: %w", err) + } + return nil +} + +// configMenuEntry holds information for a single config menu entry. +type configMenuEntry struct { + Name string + URL string + Current bool // Is this the currently selected config? + UserConfig bool // Is this a user-provided config? +} + +// configMenu returns a list of items to add to a menu in the web UI. +func configMenu(fname string, u url.URL) []configMenuEntry { + // Start with system configs. + configs := []namedConfig{{Name: "Default", config: defaultConfig()}} + if settings, err := readSettings(fname); err == nil { + // Add user configs. + configs = append(configs, settings.Configs...) + } + + // Convert to menu entries. + result := make([]configMenuEntry, len(configs)) + lastMatch := -1 + for i, cfg := range configs { + dst, changed := cfg.config.makeURL(u) + if !changed { + lastMatch = i + } + // Use a relative URL to work in presence of stripping/redirects in webui.go. + rel := &url.URL{RawQuery: dst.RawQuery, ForceQuery: true} + result[i] = configMenuEntry{ + Name: cfg.Name, + URL: rel.String(), + UserConfig: (i != 0), + } + } + // Mark the last matching config as currennt + if lastMatch >= 0 { + result[lastMatch].Current = true + } + return result +} + +// editSettings edits settings by applying fn to them. 
+func editSettings(fname string, fn func(s *settings) error) error { + settings, err := readSettings(fname) + if err != nil { + return err + } + if err := fn(settings); err != nil { + return err + } + return writeSettings(fname, settings) +} + +// setConfig saves the config specified in request to fname. +func setConfig(fname string, request url.URL) error { + q := request.Query() + name := q.Get("config") + if name == "" { + return fmt.Errorf("invalid config name") + } + cfg := currentConfig() + if err := cfg.applyURL(q); err != nil { + return err + } + return editSettings(fname, func(s *settings) error { + for i, c := range s.Configs { + if c.Name == name { + s.Configs[i].config = cfg + return nil + } + } + s.Configs = append(s.Configs, namedConfig{Name: name, config: cfg}) + return nil + }) +} + +// removeConfig removes config from fname. +func removeConfig(fname, config string) error { + return editSettings(fname, func(s *settings) error { + for i, c := range s.Configs { + if c.Name == config { + s.Configs = append(s.Configs[:i], s.Configs[i+1:]...) + return nil + } + } + return fmt.Errorf("config %s not found", config) + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/stacks.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/stacks.go new file mode 100644 index 0000000000000000000000000000000000000000..249dfe0742ecab18a893625dbe6164dda10389cf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/stacks.go @@ -0,0 +1,58 @@ +// Copyright 2022 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "encoding/json" + "html/template" + "net/http" + + "github.com/google/pprof/internal/report" +) + +// stackView generates the new flamegraph view. +func (ui *webInterface) stackView(w http.ResponseWriter, req *http.Request) { + // Get all data in a report. + rpt, errList := ui.makeReport(w, req, []string{"svg"}, func(cfg *config) { + cfg.CallTree = true + cfg.Trim = false + cfg.Granularity = "filefunctions" + }) + if rpt == nil { + return // error already reported + } + + // Make stack data and generate corresponding JSON. + stacks := rpt.Stacks() + b, err := json.Marshal(stacks) + if err != nil { + http.Error(w, "error serializing stacks for flame graph", + http.StatusInternalServerError) + ui.options.UI.PrintErr(err) + return + } + + nodes := make([]string, len(stacks.Sources)) + for i, src := range stacks.Sources { + nodes[i] = src.FullName + } + nodes[0] = "" // root is not a real node + + _, legend := report.TextItems(rpt) + ui.render(w, req, "stacks", rpt, errList, legend, webArgs{ + Stacks: template.JS(b), + Nodes: nodes, + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/svg.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/svg.go new file mode 100644 index 0000000000000000000000000000000000000000..62767e726d573c3342db1b1c16709be2eb2ea99d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/svg.go @@ -0,0 +1,80 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "regexp" + "strings" + + "github.com/google/pprof/third_party/svgpan" +) + +var ( + viewBox = regexp.MustCompile(``) +) + +// massageSVG enhances the SVG output from DOT to provide better +// panning inside a web browser. It uses the svgpan library, which is +// embedded into the svgpan.JSSource variable. +func massageSVG(svg string) string { + // Work around for dot bug which misses quoting some ampersands, + // resulting on unparsable SVG. + svg = strings.Replace(svg, "&;", "&;", -1) + + // Dot's SVG output is + // + // + // + // ... + // + // + // + // Change it to + // + // + + // ` + // + // + // ... 
+ // + // + // + + if loc := viewBox.FindStringIndex(svg); loc != nil { + svg = svg[:loc[0]] + + `` + string(svgpan.JSSource) + `` + + `` + + svg[loc[0]:] + } + + if loc := svgClose.FindStringIndex(svg); loc != nil { + svg = svg[:loc[0]] + + `` + + svg[loc[0]:] + } + + return svg +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/tagroot.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/tagroot.go new file mode 100644 index 0000000000000000000000000000000000000000..76a594d9f7ccf299670a7b619878bb249d0d575c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/tagroot.go @@ -0,0 +1,133 @@ +package driver + +import ( + "strings" + + "github.com/google/pprof/internal/measurement" + "github.com/google/pprof/profile" +) + +// addLabelNodes adds pseudo stack frames "label:value" to each Sample with +// labels matching the supplied keys. +// +// rootKeys adds frames at the root of the callgraph (first key becomes new root). +// leafKeys adds frames at the leaf of the callgraph (last key becomes new leaf). +// +// Returns whether there were matches found for the label keys. +func addLabelNodes(p *profile.Profile, rootKeys, leafKeys []string, outputUnit string) (rootm, leafm bool) { + // Find where to insert the new locations and functions at the end of + // their ID spaces. + var maxLocID uint64 + var maxFunctionID uint64 + for _, loc := range p.Location { + if loc.ID > maxLocID { + maxLocID = loc.ID + } + } + for _, f := range p.Function { + if f.ID > maxFunctionID { + maxFunctionID = f.ID + } + } + nextLocID := maxLocID + 1 + nextFuncID := maxFunctionID + 1 + + // Intern the new locations and functions we are generating. 
+ type locKey struct { + functionName, fileName string + } + locs := map[locKey]*profile.Location{} + + internLoc := func(locKey locKey) *profile.Location { + loc, found := locs[locKey] + if found { + return loc + } + + function := &profile.Function{ + ID: nextFuncID, + Name: locKey.functionName, + Filename: locKey.fileName, + } + nextFuncID++ + p.Function = append(p.Function, function) + + loc = &profile.Location{ + ID: nextLocID, + Line: []profile.Line{ + { + Function: function, + }, + }, + } + nextLocID++ + p.Location = append(p.Location, loc) + locs[locKey] = loc + return loc + } + + makeLabelLocs := func(s *profile.Sample, keys []string) ([]*profile.Location, bool) { + var locs []*profile.Location + var match bool + for i := range keys { + // Loop backwards, ensuring the first tag is closest to the root, + // and the last tag is closest to the leaves. + k := keys[len(keys)-1-i] + values := formatLabelValues(s, k, outputUnit) + if len(values) > 0 { + match = true + } + locKey := locKey{ + functionName: strings.Join(values, ","), + fileName: k, + } + loc := internLoc(locKey) + locs = append(locs, loc) + } + return locs, match + } + + for _, s := range p.Sample { + rootsToAdd, sampleMatchedRoot := makeLabelLocs(s, rootKeys) + if sampleMatchedRoot { + rootm = true + } + leavesToAdd, sampleMatchedLeaf := makeLabelLocs(s, leafKeys) + if sampleMatchedLeaf { + leafm = true + } + + if len(leavesToAdd)+len(rootsToAdd) == 0 { + continue + } + + var newLocs []*profile.Location + newLocs = append(newLocs, leavesToAdd...) + newLocs = append(newLocs, s.Location...) + newLocs = append(newLocs, rootsToAdd...) + s.Location = newLocs + } + return +} + +// formatLabelValues returns all the string and numeric labels in Sample, with +// the numeric labels formatted according to outputUnit. +func formatLabelValues(s *profile.Sample, k string, outputUnit string) []string { + var values []string + values = append(values, s.Label[k]...) 
+ numLabels := s.NumLabel[k] + numUnits := s.NumUnit[k] + if len(numLabels) != len(numUnits) && len(numUnits) != 0 { + return values + } + for i, numLabel := range numLabels { + var value string + if len(numUnits) != 0 { + value = measurement.ScaledLabel(numLabel, numUnits[i], outputUnit) + } else { + value = measurement.ScaledLabel(numLabel, "", "") + } + values = append(values, value) + } + return values +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/tempfile.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/tempfile.go new file mode 100644 index 0000000000000000000000000000000000000000..b6c8776ff83ab90b58a66ad2733f76f6c23a7028 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/tempfile.go @@ -0,0 +1,60 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "fmt" + "os" + "path/filepath" + "sync" +) + +// newTempFile returns a new output file in dir with the provided prefix and suffix. 
+func newTempFile(dir, prefix, suffix string) (*os.File, error) { + for index := 1; index < 10000; index++ { + switch f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%s%03d%s", prefix, index, suffix)), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666); { + case err == nil: + return f, nil + case !os.IsExist(err): + return nil, err + } + } + // Give up + return nil, fmt.Errorf("could not create file of the form %s%03d%s", prefix, 1, suffix) +} + +var tempFiles []string +var tempFilesMu = sync.Mutex{} + +// deferDeleteTempFile marks a file to be deleted by next call to Cleanup() +func deferDeleteTempFile(path string) { + tempFilesMu.Lock() + tempFiles = append(tempFiles, path) + tempFilesMu.Unlock() +} + +// cleanupTempFiles removes any temporary files selected for deferred cleaning. +func cleanupTempFiles() error { + tempFilesMu.Lock() + defer tempFilesMu.Unlock() + var lastErr error + for _, f := range tempFiles { + if err := os.Remove(f); err != nil { + lastErr = err + } + } + tempFiles = nil + return lastErr +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go new file mode 100644 index 0000000000000000000000000000000000000000..55973ffb9f390af184488371104205cc9ee626c7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go @@ -0,0 +1,71 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "embed" + "fmt" + "html/template" + "os" + + "github.com/google/pprof/third_party/d3flamegraph" +) + +//go:embed html +var embeddedFiles embed.FS + +// addTemplates adds a set of template definitions to templates. +func addTemplates(templates *template.Template) { + // Load specified file. + loadFile := func(fname string) string { + data, err := embeddedFiles.ReadFile(fname) + if err != nil { + fmt.Fprintf(os.Stderr, "internal/driver: embedded file %q not found\n", + fname) + os.Exit(1) + } + return string(data) + } + loadCSS := func(fname string) string { + return `` + "\n" + } + loadJS := func(fname string) string { + return `` + "\n" + } + + // Define a named template with specified contents. + def := func(name, contents string) { + sub := template.New(name) + template.Must(sub.Parse(contents)) + template.Must(templates.AddParseTree(name, sub.Tree)) + } + + // Pre-packaged third-party files. + def("d3flamegraphscript", d3flamegraph.JSSource) + def("d3flamegraphcss", d3flamegraph.CSSSource) + + // Embeded files. 
+ def("css", loadCSS("html/common.css")) + def("header", loadFile("html/header.html")) + def("graph", loadFile("html/graph.html")) + def("script", loadJS("html/common.js")) + def("top", loadFile("html/top.html")) + def("sourcelisting", loadFile("html/source.html")) + def("plaintext", loadFile("html/plaintext.html")) + def("flamegraph", loadFile("html/flamegraph.html")) + def("stacks", loadFile("html/stacks.html")) + def("stacks_css", loadCSS("html/stacks.css")) + def("stacks_js", loadJS("html/stacks.js")) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go new file mode 100644 index 0000000000000000000000000000000000000000..41b30021f5c5651aadbc21477ef542e8a33943b3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go @@ -0,0 +1,473 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "bytes" + "fmt" + "html/template" + "net" + "net/http" + gourl "net/url" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/google/pprof/internal/graph" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/internal/report" + "github.com/google/pprof/profile" +) + +// webInterface holds the state needed for serving a browser based interface. 
+type webInterface struct { + prof *profile.Profile + copier profileCopier + options *plugin.Options + help map[string]string + templates *template.Template + settingsFile string +} + +func makeWebInterface(p *profile.Profile, copier profileCopier, opt *plugin.Options) (*webInterface, error) { + settingsFile, err := settingsFileName() + if err != nil { + return nil, err + } + templates := template.New("templategroup") + addTemplates(templates) + report.AddSourceTemplates(templates) + return &webInterface{ + prof: p, + copier: copier, + options: opt, + help: make(map[string]string), + templates: templates, + settingsFile: settingsFile, + }, nil +} + +// maxEntries is the maximum number of entries to print for text interfaces. +const maxEntries = 50 + +// errorCatcher is a UI that captures errors for reporting to the browser. +type errorCatcher struct { + plugin.UI + errors []string +} + +func (ec *errorCatcher) PrintErr(args ...interface{}) { + ec.errors = append(ec.errors, strings.TrimSuffix(fmt.Sprintln(args...), "\n")) + ec.UI.PrintErr(args...) +} + +// webArgs contains arguments passed to templates in webhtml.go. 
+type webArgs struct { + Title string + Errors []string + Total int64 + SampleTypes []string + Legend []string + Help map[string]string + Nodes []string + HTMLBody template.HTML + TextBody string + Top []report.TextItem + FlameGraph template.JS + Stacks template.JS + Configs []configMenuEntry +} + +func serveWebInterface(hostport string, p *profile.Profile, o *plugin.Options, disableBrowser bool) error { + host, port, err := getHostAndPort(hostport) + if err != nil { + return err + } + interactiveMode = true + copier := makeProfileCopier(p) + ui, err := makeWebInterface(p, copier, o) + if err != nil { + return err + } + for n, c := range pprofCommands { + ui.help[n] = c.description + } + for n, help := range configHelp { + ui.help[n] = help + } + ui.help["details"] = "Show information about the profile and this view" + ui.help["graph"] = "Display profile as a directed graph" + ui.help["flamegraph"] = "Display profile as a flame graph" + ui.help["flamegraphold"] = "Display profile as a flame graph (old version; slated for removal)" + ui.help["reset"] = "Show the entire profile" + ui.help["save_config"] = "Save current settings" + + server := o.HTTPServer + if server == nil { + server = defaultWebServer + } + args := &plugin.HTTPServerArgs{ + Hostport: net.JoinHostPort(host, strconv.Itoa(port)), + Host: host, + Port: port, + Handlers: map[string]http.Handler{ + "/": http.HandlerFunc(ui.dot), + "/top": http.HandlerFunc(ui.top), + "/disasm": http.HandlerFunc(ui.disasm), + "/source": http.HandlerFunc(ui.source), + "/peek": http.HandlerFunc(ui.peek), + "/flamegraphold": http.HandlerFunc(ui.flamegraph), + "/flamegraph": http.HandlerFunc(ui.stackView), + "/flamegraph2": http.HandlerFunc(ui.stackView), // Support older URL + "/saveconfig": http.HandlerFunc(ui.saveConfig), + "/deleteconfig": http.HandlerFunc(ui.deleteConfig), + "/download": http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", 
"application/vnd.google.protobuf+gzip") + w.Header().Set("Content-Disposition", "attachment;filename=profile.pb.gz") + p.Write(w) + }), + }, + } + + url := "http://" + args.Hostport + + o.UI.Print("Serving web UI on ", url) + + if o.UI.WantBrowser() && !disableBrowser { + go openBrowser(url, o) + } + return server(args) +} + +func getHostAndPort(hostport string) (string, int, error) { + host, portStr, err := net.SplitHostPort(hostport) + if err != nil { + return "", 0, fmt.Errorf("could not split http address: %v", err) + } + if host == "" { + host = "localhost" + } + var port int + if portStr == "" { + ln, err := net.Listen("tcp", net.JoinHostPort(host, "0")) + if err != nil { + return "", 0, fmt.Errorf("could not generate random port: %v", err) + } + port = ln.Addr().(*net.TCPAddr).Port + err = ln.Close() + if err != nil { + return "", 0, fmt.Errorf("could not generate random port: %v", err) + } + } else { + port, err = strconv.Atoi(portStr) + if err != nil { + return "", 0, fmt.Errorf("invalid port number: %v", err) + } + } + return host, port, nil +} +func defaultWebServer(args *plugin.HTTPServerArgs) error { + ln, err := net.Listen("tcp", args.Hostport) + if err != nil { + return err + } + isLocal := isLocalhost(args.Host) + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if isLocal { + // Only allow local clients + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil || !isLocalhost(host) { + http.Error(w, "permission denied", http.StatusForbidden) + return + } + } + h := args.Handlers[req.URL.Path] + if h == nil { + // Fall back to default behavior + h = http.DefaultServeMux + } + h.ServeHTTP(w, req) + }) + + // We serve the ui at /ui/ and redirect there from the root. This is done + // to surface any problems with serving the ui at a non-root early. 
See: + // + // https://github.com/google/pprof/pull/348 + mux := http.NewServeMux() + mux.Handle("/ui/", http.StripPrefix("/ui", handler)) + mux.Handle("/", redirectWithQuery("/ui")) + s := &http.Server{Handler: mux} + return s.Serve(ln) +} + +func redirectWithQuery(path string) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + pathWithQuery := &gourl.URL{Path: path, RawQuery: r.URL.RawQuery} + http.Redirect(w, r, pathWithQuery.String(), http.StatusTemporaryRedirect) + } +} + +func isLocalhost(host string) bool { + for _, v := range []string{"localhost", "127.0.0.1", "[::1]", "::1"} { + if host == v { + return true + } + } + return false +} + +func openBrowser(url string, o *plugin.Options) { + // Construct URL. + baseURL, _ := gourl.Parse(url) + current := currentConfig() + u, _ := current.makeURL(*baseURL) + + // Give server a little time to get ready. + time.Sleep(time.Millisecond * 500) + + for _, b := range browsers() { + args := strings.Split(b, " ") + if len(args) == 0 { + continue + } + viewer := exec.Command(args[0], append(args[1:], u.String())...) + viewer.Stderr = os.Stderr + if err := viewer.Start(); err == nil { + return + } + } + // No visualizer succeeded, so just print URL. + o.UI.PrintErr(u.String()) +} + +// makeReport generates a report for the specified command. +// If configEditor is not null, it is used to edit the config used for the report. 
+func (ui *webInterface) makeReport(w http.ResponseWriter, req *http.Request, + cmd []string, configEditor func(*config)) (*report.Report, []string) { + cfg := currentConfig() + if err := cfg.applyURL(req.URL.Query()); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return nil, nil + } + if configEditor != nil { + configEditor(&cfg) + } + catcher := &errorCatcher{UI: ui.options.UI} + options := *ui.options + options.UI = catcher + _, rpt, err := generateRawReport(ui.copier.newCopy(), cmd, cfg, &options) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return nil, nil + } + return rpt, catcher.errors +} + +// render generates html using the named template based on the contents of data. +func (ui *webInterface) render(w http.ResponseWriter, req *http.Request, tmpl string, + rpt *report.Report, errList, legend []string, data webArgs) { + file := getFromLegend(legend, "File: ", "unknown") + profile := getFromLegend(legend, "Type: ", "unknown") + data.Title = file + " " + profile + data.Errors = errList + data.Total = rpt.Total() + data.SampleTypes = sampleTypes(ui.prof) + data.Legend = legend + data.Help = ui.help + data.Configs = configMenu(ui.settingsFile, *req.URL) + + html := &bytes.Buffer{} + if err := ui.templates.ExecuteTemplate(html, tmpl, data); err != nil { + http.Error(w, "internal template error", http.StatusInternalServerError) + ui.options.UI.PrintErr(err) + return + } + w.Header().Set("Content-Type", "text/html") + w.Write(html.Bytes()) +} + +// dot generates a web page containing an svg diagram. +func (ui *webInterface) dot(w http.ResponseWriter, req *http.Request) { + rpt, errList := ui.makeReport(w, req, []string{"svg"}, nil) + if rpt == nil { + return // error already reported + } + + // Generate dot graph. 
+ g, config := report.GetDOT(rpt) + legend := config.Labels + config.Labels = nil + dot := &bytes.Buffer{} + graph.ComposeDot(dot, g, &graph.DotAttributes{}, config) + + // Convert to svg. + svg, err := dotToSvg(dot.Bytes()) + if err != nil { + http.Error(w, "Could not execute dot; may need to install graphviz.", + http.StatusNotImplemented) + ui.options.UI.PrintErr("Failed to execute dot. Is Graphviz installed?\n", err) + return + } + + // Get all node names into an array. + nodes := []string{""} // dot starts with node numbered 1 + for _, n := range g.Nodes { + nodes = append(nodes, n.Info.Name) + } + + ui.render(w, req, "graph", rpt, errList, legend, webArgs{ + HTMLBody: template.HTML(string(svg)), + Nodes: nodes, + }) +} + +func dotToSvg(dot []byte) ([]byte, error) { + cmd := exec.Command("dot", "-Tsvg") + out := &bytes.Buffer{} + cmd.Stdin, cmd.Stdout, cmd.Stderr = bytes.NewBuffer(dot), out, os.Stderr + if err := cmd.Run(); err != nil { + return nil, err + } + + // Fix dot bug related to unquoted ampersands. + svg := bytes.Replace(out.Bytes(), []byte("&;"), []byte("&;"), -1) + + // Cleanup for embedding by dropping stuff before the start. + if pos := bytes.Index(svg, []byte("= 0 { + svg = svg[pos:] + } + return svg, nil +} + +func (ui *webInterface) top(w http.ResponseWriter, req *http.Request) { + rpt, errList := ui.makeReport(w, req, []string{"top"}, func(cfg *config) { + cfg.NodeCount = 500 + }) + if rpt == nil { + return // error already reported + } + top, legend := report.TextItems(rpt) + var nodes []string + for _, item := range top { + nodes = append(nodes, item.Name) + } + + ui.render(w, req, "top", rpt, errList, legend, webArgs{ + Top: top, + Nodes: nodes, + }) +} + +// disasm generates a web page containing disassembly. 
+func (ui *webInterface) disasm(w http.ResponseWriter, req *http.Request) { + args := []string{"disasm", req.URL.Query().Get("f")} + rpt, errList := ui.makeReport(w, req, args, nil) + if rpt == nil { + return // error already reported + } + + out := &bytes.Buffer{} + if err := report.PrintAssembly(out, rpt, ui.options.Obj, maxEntries); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } + + legend := report.ProfileLabels(rpt) + ui.render(w, req, "plaintext", rpt, errList, legend, webArgs{ + TextBody: out.String(), + }) + +} + +// source generates a web page containing source code annotated with profile +// data. +func (ui *webInterface) source(w http.ResponseWriter, req *http.Request) { + args := []string{"weblist", req.URL.Query().Get("f")} + rpt, errList := ui.makeReport(w, req, args, nil) + if rpt == nil { + return // error already reported + } + + // Generate source listing. + var body bytes.Buffer + if err := report.PrintWebList(&body, rpt, ui.options.Obj, maxEntries); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } + + legend := report.ProfileLabels(rpt) + ui.render(w, req, "sourcelisting", rpt, errList, legend, webArgs{ + HTMLBody: template.HTML(body.String()), + }) +} + +// peek generates a web page listing callers/callers. 
+func (ui *webInterface) peek(w http.ResponseWriter, req *http.Request) { + args := []string{"peek", req.URL.Query().Get("f")} + rpt, errList := ui.makeReport(w, req, args, func(cfg *config) { + cfg.Granularity = "lines" + }) + if rpt == nil { + return // error already reported + } + + out := &bytes.Buffer{} + if err := report.Generate(out, rpt, ui.options.Obj); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } + + legend := report.ProfileLabels(rpt) + ui.render(w, req, "plaintext", rpt, errList, legend, webArgs{ + TextBody: out.String(), + }) +} + +// saveConfig saves URL configuration. +func (ui *webInterface) saveConfig(w http.ResponseWriter, req *http.Request) { + if err := setConfig(ui.settingsFile, *req.URL); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } +} + +// deleteConfig deletes a configuration. +func (ui *webInterface) deleteConfig(w http.ResponseWriter, req *http.Request) { + name := req.URL.Query().Get("config") + if err := removeConfig(ui.settingsFile, name); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } +} + +// getFromLegend returns the suffix of an entry in legend that starts +// with param. It returns def if no such entry is found. 
+func getFromLegend(legend []string, param, def string) string { + for _, s := range legend { + if strings.HasPrefix(s, param) { + return s[len(param):] + } + } + return def +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go new file mode 100644 index 0000000000000000000000000000000000000000..718481b078856f9fd373091876343548437439dd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go @@ -0,0 +1,383 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package elfexec provides utility routines to examine ELF binaries. +package elfexec + +import ( + "bufio" + "debug/elf" + "encoding/binary" + "fmt" + "io" +) + +const ( + maxNoteSize = 1 << 20 // in bytes + noteTypeGNUBuildID = 3 +) + +// elfNote is the payload of a Note Section in an ELF file. +type elfNote struct { + Name string // Contents of the "name" field, omitting the trailing zero byte. + Desc []byte // Contents of the "desc" field. + Type uint32 // Contents of the "type" field. +} + +// parseNotes returns the notes from a SHT_NOTE section or PT_NOTE segment. 
func parseNotes(reader io.Reader, alignment int, order binary.ByteOrder) ([]elfNote, error) {
	r := bufio.NewReader(reader)

	// padding returns the number of bytes required to pad the given size to an
	// alignment boundary.
	padding := func(size int) int {
		return ((size + (alignment - 1)) &^ (alignment - 1)) - size
	}

	var notes []elfNote
	for {
		// Each note starts with three 4-byte words in the file's byte order:
		// namesz, descsz, type. A clean EOF here means we have consumed all
		// notes; a short read mid-header is an error (io.ReadFull returns
		// ErrUnexpectedEOF, which falls into the second branch).
		noteHeader := make([]byte, 12) // 3 4-byte words
		if _, err := io.ReadFull(r, noteHeader); err == io.EOF {
			break
		} else if err != nil {
			return nil, err
		}
		namesz := order.Uint32(noteHeader[0:4])
		descsz := order.Uint32(noteHeader[4:8])
		typ := order.Uint32(noteHeader[8:12])

		// Cap the name size before allocating/reading, so corrupt or
		// malicious inputs cannot make us consume unbounded memory.
		if uint64(namesz) > uint64(maxNoteSize) {
			return nil, fmt.Errorf("note name too long (%d bytes)", namesz)
		}
		var name string
		if namesz > 0 {
			// Documentation differs as to whether namesz is meant to include the
			// trailing zero, but everyone agrees that name is null-terminated.
			// So we'll just determine the actual length after the fact.
			var err error
			name, err = r.ReadString('\x00')
			if err == io.EOF {
				return nil, fmt.Errorf("missing note name (want %d bytes)", namesz)
			} else if err != nil {
				return nil, err
			}
			// Recompute namesz from what was actually read (including the
			// NUL), then strip the terminating NUL from the stored name.
			namesz = uint32(len(name))
			name = name[:len(name)-1]
		}

		// Drop padding bytes until the desc field.
		for n := padding(len(noteHeader) + int(namesz)); n > 0; n-- {
			if _, err := r.ReadByte(); err == io.EOF {
				return nil, fmt.Errorf(
					"missing %d bytes of padding after note name", n)
			} else if err != nil {
				return nil, err
			}
		}

		// Same size cap for the desc payload as for the name above.
		if uint64(descsz) > uint64(maxNoteSize) {
			return nil, fmt.Errorf("note desc too long (%d bytes)", descsz)
		}
		desc := make([]byte, int(descsz))
		if _, err := io.ReadFull(r, desc); err == io.EOF {
			return nil, fmt.Errorf("missing desc (want %d bytes)", len(desc))
		} else if err != nil {
			return nil, err
		}

		notes = append(notes, elfNote{Name: name, Desc: desc, Type: typ})

		// Drop padding bytes until the next note or the end of the section,
		// whichever comes first.
		for n := padding(len(desc)); n > 0; n-- {
			if _, err := r.ReadByte(); err == io.EOF {
				// We hit the end of the section before an alignment boundary.
				// This can happen if this section is at the end of the file or the next
				// section has a smaller alignment requirement.
				break
			} else if err != nil {
				return nil, err
			}
		}
	}
	return notes, nil
}

// GetBuildID returns the GNU build-ID for an ELF binary.
//
// If no build-ID was found but the binary was read without error, it returns
// (nil, nil).
func GetBuildID(binary io.ReaderAt) ([]byte, error) {
	f, err := elf.NewFile(binary)
	if err != nil {
		return nil, err
	}

	// findBuildID scans a parsed note list for the GNU build-ID note; it is an
	// error for more than one such note to be present.
	findBuildID := func(notes []elfNote) ([]byte, error) {
		var buildID []byte
		for _, note := range notes {
			if note.Name == "GNU" && note.Type == noteTypeGNUBuildID {
				if buildID == nil {
					buildID = note.Desc
				} else {
					return nil, fmt.Errorf("multiple build ids found, don't know which to use")
				}
			}
		}
		return buildID, nil
	}

	// PT_NOTE segments are searched before SHT_NOTE sections; the first
	// build-ID found wins.
	for _, p := range f.Progs {
		if p.Type != elf.PT_NOTE {
			continue
		}
		notes, err := parseNotes(p.Open(), int(p.Align), f.ByteOrder)
		if err != nil {
			return nil, err
		}
		if b, err := findBuildID(notes); b != nil || err != nil {
			return b, err
		}
	}
	for _, s := range f.Sections {
		if s.Type != elf.SHT_NOTE {
			continue
		}
		notes, err := parseNotes(s.Open(), int(s.Addralign), f.ByteOrder)
		if err != nil {
			return nil, err
		}
		if b, err := findBuildID(notes); b != nil || err != nil {
			return b, err
		}
	}
	return nil, nil
}

// kernelBase calculates the base for kernel mappings, which usually require
// special handling. For kernel mappings, tools (like perf) use the address of
// the kernel relocation symbol (_text or _stext) as the mmap start. Additionally,
// for obfuscation, ChromeOS profiles have the kernel image remapped to the 0-th page.
func kernelBase(loadSegment *elf.ProgHeader, stextOffset *uint64, start, limit, offset uint64) (uint64, bool) {
	const (
		// PAGE_OFFSET for PowerPC64, see arch/powerpc/Kconfig in the kernel sources.
		pageOffsetPpc64 = 0xc000000000000000
		pageSize        = 4096
	)

	if loadSegment.Vaddr == start-offset {
		return offset, true
	}
	if start == 0 && limit != 0 && stextOffset != nil {
		// ChromeOS remaps its kernel to 0. Nothing else should come
		// down this path. Empirical values:
		//       VADDR=0xffffffff80200000
		// stextOffset=0xffffffff80200198
		return start - *stextOffset, true
	}
	if start >= loadSegment.Vaddr && limit > start && (offset == 0 || offset == pageOffsetPpc64 || offset == start) {
		// Some kernels look like:
		//       VADDR=0xffffffff80200000
		// stextOffset=0xffffffff80200198
		//       Start=0xffffffff83200000
		//       Limit=0xffffffff84200000
		//      Offset=0 (0xc000000000000000 for PowerPC64) (== Start for ASLR kernel)
		// So the base should be:
		if stextOffset != nil && (start%pageSize) == (*stextOffset%pageSize) {
			// perf uses the address of _stext as start. Some tools may
			// adjust for this before calling GetBase, in which case the page
			// alignment should be different from that of stextOffset.
			return start - *stextOffset, true
		}

		return start - loadSegment.Vaddr, true
	}
	if start%pageSize != 0 && stextOffset != nil && *stextOffset%pageSize == start%pageSize {
		// ChromeOS remaps its kernel to 0 + start%pageSize. Nothing
		// else should come down this path. Empirical values:
		//       start=0x198 limit=0x2f9fffff offset=0
		//       VADDR=0xffffffff81000000
		// stextOffset=0xffffffff81000198
		return start - *stextOffset, true
	}
	return 0, false
}

// GetBase determines the base address to subtract from virtual
// address to get symbol table address. For an executable, the base
// is 0. Otherwise, it's a shared library, and the base is the
// address where the mapping starts. The kernel needs special handling.
func GetBase(fh *elf.FileHeader, loadSegment *elf.ProgHeader, stextOffset *uint64, start, limit, offset uint64) (uint64, error) {

	if start == 0 && offset == 0 && (limit == ^uint64(0) || limit == 0) {
		// Some tools may introduce a fake mapping that spans the entire
		// address space. Assume that the address has already been
		// adjusted, so no additional base adjustment is necessary.
		return 0, nil
	}

	switch fh.Type {
	case elf.ET_EXEC:
		if loadSegment == nil {
			// Assume fixed-address executable and so no adjustment.
			return 0, nil
		}
		if stextOffset == nil && start > 0 && start < 0x8000000000000000 {
			// A regular user-mode executable. Compute the base offset using same
			// arithmetics as in ET_DYN case below, see the explanation there.
			// Ideally, the condition would just be "stextOffset == nil" as that
			// represents the address of _stext symbol in the vmlinux image. Alas,
			// the caller may skip reading it from the binary (it's expensive to scan
			// all the symbols) and so it may be nil even for the kernel executable.
			// So additionally check that the start is within the user-mode half of
			// the 64-bit address space.
			return start - offset + loadSegment.Off - loadSegment.Vaddr, nil
		}
		// Various kernel heuristics and cases are handled separately.
		if base, match := kernelBase(loadSegment, stextOffset, start, limit, offset); match {
			return base, nil
		}
		// ChromeOS can remap its kernel to 0, and the caller might have not found
		// the _stext symbol. Split this case from kernelBase() above, since we don't
		// want to apply it to an ET_DYN user-mode executable.
		if start == 0 && limit != 0 && stextOffset == nil {
			return start - loadSegment.Vaddr, nil
		}

		return 0, fmt.Errorf("don't know how to handle EXEC segment: %v start=0x%x limit=0x%x offset=0x%x", *loadSegment, start, limit, offset)
	case elf.ET_REL:
		if offset != 0 {
			return 0, fmt.Errorf("don't know how to handle mapping.Offset")
		}
		return start, nil
	case elf.ET_DYN:
		// The process mapping information, start = start of virtual address range,
		// and offset = offset in the executable file of the start address, tells us
		// that a runtime virtual address x maps to a file offset
		// fx = x - start + offset.
		if loadSegment == nil {
			return start - offset, nil
		}
		// Kernels compiled as PIE can be ET_DYN as well. Use heuristic, similar to
		// the ET_EXEC case above.
		if base, match := kernelBase(loadSegment, stextOffset, start, limit, offset); match {
			return base, nil
		}
		// The program header, if not nil, indicates the offset in the file where
		// the executable segment is located (loadSegment.Off), and the base virtual
		// address where the first byte of the segment is loaded
		// (loadSegment.Vaddr). A file offset fx maps to a virtual (symbol) address
		// sx = fx - loadSegment.Off + loadSegment.Vaddr.
		//
		// Thus, a runtime virtual address x maps to a symbol address
		// sx = x - start + offset - loadSegment.Off + loadSegment.Vaddr.
		return start - offset + loadSegment.Off - loadSegment.Vaddr, nil
	}
	return 0, fmt.Errorf("don't know how to handle FileHeader.Type %v", fh.Type)
}

// FindTextProgHeader finds the program segment header containing the .text
// section or nil if the segment cannot be found.
func FindTextProgHeader(f *elf.File) *elf.ProgHeader {
	for _, s := range f.Sections {
		if s.Name == ".text" {
			// Find the LOAD segment containing the .text section.
			for _, p := range f.Progs {
				if p.Type == elf.PT_LOAD && p.Flags&elf.PF_X != 0 && s.Addr >= p.Vaddr && s.Addr < p.Vaddr+p.Memsz {
					return &p.ProgHeader
				}
			}
		}
	}
	return nil
}

// ProgramHeadersForMapping returns the program segment headers that overlap
// the runtime mapping with file offset mapOff and memory size mapSz. We skip
// over segments with zero file size because their file offset values are
// unreliable. Even if overlapping, a segment is not selected if its aligned
// file offset is greater than the mapping file offset, or if the mapping
// includes the last page of the segment, but not the full segment and the
// mapping includes additional pages after the segment end.
// The function returns a slice of pointers to the headers in the input
// slice, which are valid only while phdrs is not modified or discarded.
+func ProgramHeadersForMapping(phdrs []elf.ProgHeader, mapOff, mapSz uint64) []*elf.ProgHeader { + const ( + // pageSize defines the virtual memory page size used by the loader. This + // value is dependent on the memory management unit of the CPU. The page + // size is 4KB virtually on all the architectures that we care about, so we + // define this metric as a constant. If we encounter architectures where + // page sie is not 4KB, we must try to guess the page size on the system + // where the profile was collected, possibly using the architecture + // specified in the ELF file header. + pageSize = 4096 + pageOffsetMask = pageSize - 1 + ) + mapLimit := mapOff + mapSz + var headers []*elf.ProgHeader + for i := range phdrs { + p := &phdrs[i] + // Skip over segments with zero file size. Their file offsets can have + // arbitrary values, see b/195427553. + if p.Filesz == 0 { + continue + } + segLimit := p.Off + p.Memsz + // The segment must overlap the mapping. + if p.Type == elf.PT_LOAD && mapOff < segLimit && p.Off < mapLimit { + // If the mapping offset is strictly less than the page aligned segment + // offset, then this mapping comes from a different segment, fixes + // b/179920361. + alignedSegOffset := uint64(0) + if p.Off > (p.Vaddr & pageOffsetMask) { + alignedSegOffset = p.Off - (p.Vaddr & pageOffsetMask) + } + if mapOff < alignedSegOffset { + continue + } + // If the mapping starts in the middle of the segment, it covers less than + // one page of the segment, and it extends at least one page past the + // segment, then this mapping comes from a different segment. + if mapOff > p.Off && (segLimit < mapOff+pageSize) && (mapLimit >= segLimit+pageSize) { + continue + } + headers = append(headers, p) + } + } + return headers +} + +// HeaderForFileOffset attempts to identify a unique program header that +// includes the given file offset. It returns an error if it cannot identify a +// unique header. 
func HeaderForFileOffset(headers []*elf.ProgHeader, fileOffset uint64) (*elf.ProgHeader, error) {
	var ph *elf.ProgHeader
	for _, h := range headers {
		// Note: the range check uses Memsz rather than Filesz, so a segment's
		// trailing uninitialized data (.bss) is counted as part of it.
		if fileOffset >= h.Off && fileOffset < h.Off+h.Memsz {
			if ph != nil {
				// Assuming no other bugs, this can only happen if we have two or
				// more small program segments that fit on the same page, and a
				// segment other than the last one includes uninitialized data, or
				// if the debug binary used for symbolization is stripped of some
				// sections, so segment file sizes are smaller than memory sizes.
				return nil, fmt.Errorf("found second program header (%#v) that matches file offset %x, first program header is %#v. Is this a stripped binary, or does the first program segment contain uninitialized data?", *h, fileOffset, *ph)
			}
			ph = h
		}
	}
	if ph == nil {
		return nil, fmt.Errorf("no program header matches file offset %x", fileOffset)
	}
	return ph, nil
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go
new file mode 100644
index 0000000000000000000000000000000000000000..09d40fd2c9901773ece5de9201a4c7ab9236eb4b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go
@@ -0,0 +1,494 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package graph

import (
	"fmt"
	"io"
	"math"
	"path/filepath"
	"strings"

	"github.com/google/pprof/internal/measurement"
)

// DotAttributes contains details about the graph itself, giving
// insight into how its elements should be rendered.
type DotAttributes struct {
	Nodes map[*Node]*DotNodeAttributes // A map allowing each Node to have its own visualization option
}

// DotNodeAttributes contains Node specific visualization options.
type DotNodeAttributes struct {
	Shape       string                 // The optional shape of the node when rendered visually
	Bold        bool                   // If the node should be bold or not
	Peripheries int                    // An optional number of borders to place around a node
	URL         string                 // An optional url link to add to a node
	Formatter   func(*NodeInfo) string // An optional formatter for the node's label
}

// DotConfig contains attributes about how a graph should be
// constructed and how it should look.
type DotConfig struct {
	Title     string   // The title of the DOT graph
	LegendURL string   // The URL to link to from the legend.
	Labels    []string // The labels for the DOT's legend

	FormatValue func(int64) string // A formatting function for values
	Total       int64              // The total weight of the graph, used to compute percentages
}

const maxNodelets = 4 // Number of nodelets for labels (both numeric and non)

// ComposeDot creates and writes a graph in the DOT format to the writer, using
// the configurations given.
func ComposeDot(w io.Writer, g *Graph, a *DotAttributes, c *DotConfig) {
	builder := &builder{w, a, c}

	// Begin constructing DOT by adding a title and legend.
	builder.start()
	defer builder.finish()
	builder.addLegend()

	if len(g.Nodes) == 0 {
		return
	}

	// Preprocess graph to get id map and find max flat.
	// Node IDs start at 1; maxFlat is used to scale node font sizes.
	nodeIDMap := make(map[*Node]int)
	hasNodelets := make(map[*Node]bool)

	maxFlat := float64(abs64(g.Nodes[0].FlatValue()))
	for i, n := range g.Nodes {
		nodeIDMap[n] = i + 1
		if float64(abs64(n.FlatValue())) > maxFlat {
			maxFlat = float64(abs64(n.FlatValue()))
		}
	}

	edges := EdgeMap{}

	// Add nodes and nodelets to DOT builder.
	for _, n := range g.Nodes {
		builder.addNode(n, nodeIDMap[n], maxFlat)
		hasNodelets[n] = builder.addNodelets(n, nodeIDMap[n])

		// Collect all edges. Use a fake node to support multiple incoming edges.
		for _, e := range n.Out {
			edges[&Node{}] = e
		}
	}

	// Add edges to DOT builder. Sort edges by frequency as a hint to the graph layout engine.
	for _, e := range edges.Sort() {
		builder.addEdge(e, nodeIDMap[e.Src], nodeIDMap[e.Dest], hasNodelets[e.Src])
	}
}

// builder wraps an io.Writer and understands how to compose DOT formatted elements.
type builder struct {
	io.Writer
	attributes *DotAttributes
	config     *DotConfig
}

// start generates a title and initial node in DOT format.
func (b *builder) start() {
	graphname := "unnamed"
	if b.config.Title != "" {
		graphname = b.config.Title
	}
	fmt.Fprintln(b, `digraph "`+graphname+`" {`)
	fmt.Fprintln(b, `node [style=filled fillcolor="#f8f8f8"]`)
}

// finish closes the opening curly bracket in the constructed DOT buffer.
func (b *builder) finish() {
	fmt.Fprintln(b, "}")
}

// addLegend generates a legend in DOT format.
func (b *builder) addLegend() {
	labels := b.config.Labels
	if len(labels) == 0 {
		return
	}
	// The first label doubles as the legend box's own node name.
	title := labels[0]
	fmt.Fprintf(b, `subgraph cluster_L { "%s" [shape=box fontsize=16`, escapeForDot(title))
	fmt.Fprintf(b, ` label="%s\l"`, strings.Join(escapeAllForDot(labels), `\l`))
	if b.config.LegendURL != "" {
		fmt.Fprintf(b, ` URL="%s" target="_blank"`, b.config.LegendURL)
	}
	if b.config.Title != "" {
		fmt.Fprintf(b, ` tooltip="%s"`, b.config.Title)
	}
	fmt.Fprintf(b, "] }\n")
}

// addNode generates a graph node in DOT format.
func (b *builder) addNode(node *Node, nodeID int, maxFlat float64) {
	flat, cum := node.FlatValue(), node.CumValue()
	attrs := b.attributes.Nodes[node]

	// Populate label for node.
	var label string
	if attrs != nil && attrs.Formatter != nil {
		label = attrs.Formatter(&node.Info)
	} else {
		label = multilinePrintableName(&node.Info)
	}

	flatValue := b.config.FormatValue(flat)
	if flat != 0 {
		label = label + fmt.Sprintf(`%s (%s)`,
			flatValue,
			strings.TrimSpace(measurement.Percentage(flat, b.config.Total)))
	} else {
		label = label + "0"
	}
	cumValue := flatValue
	if cum != flat {
		if flat != 0 {
			label = label + `\n`
		} else {
			label = label + " "
		}
		cumValue = b.config.FormatValue(cum)
		label = label + fmt.Sprintf(`of %s (%s)`,
			cumValue,
			strings.TrimSpace(measurement.Percentage(cum, b.config.Total)))
	}

	// Scale font sizes from 8 to 24 based on percentage of flat frequency.
	// Use non linear growth to emphasize the size difference.
	baseFontSize, maxFontGrowth := 8, 16.0
	fontSize := baseFontSize
	if maxFlat != 0 && flat != 0 && float64(abs64(flat)) <= maxFlat {
		fontSize += int(math.Ceil(maxFontGrowth * math.Sqrt(float64(abs64(flat))/maxFlat)))
	}

	// Determine node shape.
	shape := "box"
	if attrs != nil && attrs.Shape != "" {
		shape = attrs.Shape
	}

	// Create DOT attribute for node.
	attr := fmt.Sprintf(`label="%s" id="node%d" fontsize=%d shape=%s tooltip="%s (%s)" color="%s" fillcolor="%s"`,
		label, nodeID, fontSize, shape, escapeForDot(node.Info.PrintableName()), cumValue,
		dotColor(float64(node.CumValue())/float64(abs64(b.config.Total)), false),
		dotColor(float64(node.CumValue())/float64(abs64(b.config.Total)), true))

	// Add on extra attributes if provided.
	if attrs != nil {
		// Make bold if specified.
		if attrs.Bold {
			attr += ` style="bold,filled"`
		}

		// Add peripheries if specified.
		if attrs.Peripheries != 0 {
			attr += fmt.Sprintf(` peripheries=%d`, attrs.Peripheries)
		}

		// Add URL if specified. target="_blank" forces the link to open in a new tab.
		if attrs.URL != "" {
			attr += fmt.Sprintf(` URL="%s" target="_blank"`, attrs.URL)
		}
	}

	fmt.Fprintf(b, "N%d [%s]\n", nodeID, attr)
}

// addNodelets generates the DOT boxes for the node tags if they exist.
// It reports whether any nodelet was emitted for the node.
func (b *builder) addNodelets(node *Node, nodeID int) bool {
	var nodelets string

	// Populate two Tag slices, one for LabelTags and one for NumericTags.
	var ts []*Tag
	lnts := make(map[string][]*Tag)
	for _, t := range node.LabelTags {
		ts = append(ts, t)
	}
	for l, tm := range node.NumericTags {
		for _, t := range tm {
			lnts[l] = append(lnts[l], t)
		}
	}

	// For leaf nodes, print cumulative tags (includes weight from
	// children that have been deleted).
	// For internal nodes, print only flat tags.
	flatTags := len(node.Out) > 0

	// Select the top maxNodelets alphanumeric labels by weight.
	SortTags(ts, flatTags)
	if len(ts) > maxNodelets {
		ts = ts[:maxNodelets]
	}
	for i, t := range ts {
		w := t.CumValue()
		if flatTags {
			w = t.FlatValue()
		}
		if w == 0 {
			continue
		}
		weight := b.config.FormatValue(w)
		nodelets += fmt.Sprintf(`N%d_%d [label = "%s" id="N%d_%d" fontsize=8 shape=box3d tooltip="%s"]`+"\n", nodeID, i, t.Name, nodeID, i, weight)
		nodelets += fmt.Sprintf(`N%d -> N%d_%d [label=" %s" weight=100 tooltip="%s" labeltooltip="%s"]`+"\n", nodeID, nodeID, i, weight, weight, weight)
		// Numeric tags associated with this label tag hang off its nodelet.
		if nts := lnts[t.Name]; nts != nil {
			nodelets += b.numericNodelets(nts, maxNodelets, flatTags, fmt.Sprintf(`N%d_%d`, nodeID, i))
		}
	}

	// Numeric tags with no associated label tag hang directly off the node.
	if nts := lnts[""]; nts != nil {
		nodelets += b.numericNodelets(nts, maxNodelets, flatTags, fmt.Sprintf(`N%d`, nodeID))
	}

	fmt.Fprint(b, nodelets)
	return nodelets != ""
}

func (b *builder) numericNodelets(nts []*Tag, maxNumNodelets int, flatTags bool, source string) string {
	nodelets := ""

	// Collapse numeric labels into maxNumNodelets buckets, of the form:
	// 1MB..2MB, 3MB..5MB, ...
	for j, t := range b.collapsedTags(nts, maxNumNodelets, flatTags) {
		w, attr := t.CumValue(), ` style="dotted"`
		if flatTags || t.FlatValue() == t.CumValue() {
			w, attr = t.FlatValue(), ""
		}
		if w != 0 {
			weight := b.config.FormatValue(w)
			nodelets += fmt.Sprintf(`N%s_%d [label = "%s" id="N%s_%d" fontsize=8 shape=box3d tooltip="%s"]`+"\n", source, j, t.Name, source, j, weight)
			nodelets += fmt.Sprintf(`%s -> N%s_%d [label=" %s" weight=100 tooltip="%s" labeltooltip="%s"%s]`+"\n", source, source, j, weight, weight, weight, attr)
		}
	}
	return nodelets
}

// addEdge generates a graph edge in DOT format.
func (b *builder) addEdge(edge *Edge, from, to int, hasNodelets bool) {
	var inline string
	if edge.Inline {
		inline = `\n (inline)`
	}
	w := b.config.FormatValue(edge.WeightValue())
	attr := fmt.Sprintf(`label=" %s%s"`, w, inline)
	if b.config.Total != 0 {
		// Note: edge.weight > b.config.Total is possible for profile diffs.
		if weight := 1 + int(min64(abs64(edge.WeightValue()*100/b.config.Total), 100)); weight > 1 {
			attr = fmt.Sprintf(`%s weight=%d`, attr, weight)
		}
		if width := 1 + int(min64(abs64(edge.WeightValue()*5/b.config.Total), 5)); width > 1 {
			attr = fmt.Sprintf(`%s penwidth=%d`, attr, width)
		}
		attr = fmt.Sprintf(`%s color="%s"`, attr,
			dotColor(float64(edge.WeightValue())/float64(abs64(b.config.Total)), false))
	}
	// Residual edges are drawn dotted and described with "..." in the tooltip.
	arrow := "->"
	if edge.Residual {
		arrow = "..."
	}
	tooltip := fmt.Sprintf(`"%s %s %s (%s)"`,
		escapeForDot(edge.Src.Info.PrintableName()), arrow,
		escapeForDot(edge.Dest.Info.PrintableName()), w)
	attr = fmt.Sprintf(`%s tooltip=%s labeltooltip=%s`, attr, tooltip, tooltip)

	if edge.Residual {
		attr = attr + ` style="dotted"`
	}

	if hasNodelets {
		// Separate children further if source has tags.
		attr = attr + " minlen=2"
	}

	fmt.Fprintf(b, "N%d -> N%d [%s]\n", from, to, attr)
}

// dotColor returns a color for the given score (between -1.0 and
// 1.0), with -1.0 colored green, 0.0 colored grey, and 1.0 colored
// red. If isBackground is true, then a light (low-saturation)
// color is returned (suitable for use as a background color);
// otherwise, a darker color is returned (suitable for use as a
// foreground color).
func dotColor(score float64, isBackground bool) string {
	// A float between 0.0 and 1.0, indicating the extent to which
	// colors should be shifted away from grey (to make positive and
	// negative values easier to distinguish, and to make more use of
	// the color range.)
	const shift = 0.7

	// Saturation and value (in hsv colorspace) for background colors.
	const bgSaturation = 0.1
	const bgValue = 0.93

	// Saturation and value (in hsv colorspace) for foreground colors.
	const fgSaturation = 1.0
	const fgValue = 0.7

	// Choose saturation and value based on isBackground.
	var saturation float64
	var value float64
	if isBackground {
		saturation = bgSaturation
		value = bgValue
	} else {
		saturation = fgSaturation
		value = fgValue
	}

	// Limit the score values to the range [-1.0, 1.0].
	score = math.Max(-1.0, math.Min(1.0, score))

	// Reduce saturation near score=0 (so it is colored grey, rather than yellow).
	if math.Abs(score) < 0.2 {
		saturation *= math.Abs(score) / 0.2
	}

	// Apply 'shift' to move scores away from 0.0 (grey).
	if score > 0.0 {
		score = math.Pow(score, (1.0 - shift))
	}
	if score < 0.0 {
		score = -math.Pow(-score, (1.0 - shift))
	}

	var r, g, b float64 // red, green, blue
	if score < 0.0 {
		g = value
		r = value * (1 + saturation*score)
	} else {
		r = value
		g = value * (1 - saturation*score)
	}
	b = value * (1 - saturation)
	return fmt.Sprintf("#%02x%02x%02x", uint8(r*255.0), uint8(g*255.0), uint8(b*255.0))
}

// multilinePrintableName formats a node's name for display inside a DOT box,
// splitting C++/Go name components onto separate lines.
func multilinePrintableName(info *NodeInfo) string {
	infoCopy := *info
	infoCopy.Name = escapeForDot(ShortenFunctionName(infoCopy.Name))
	infoCopy.Name = strings.Replace(infoCopy.Name, "::", `\n`, -1)
	// Go type parameters are reported as "[...]" by Go pprof profiles.
	// Keep this ellipsis rather than replacing with newlines below.
	infoCopy.Name = strings.Replace(infoCopy.Name, "[...]", "[…]", -1)
	infoCopy.Name = strings.Replace(infoCopy.Name, ".", `\n`, -1)
	if infoCopy.File != "" {
		infoCopy.File = filepath.Base(infoCopy.File)
	}
	return strings.Join(infoCopy.NameComponents(), `\n`) + `\n`
}

// collapsedTags trims and sorts a slice of tags.
func (b *builder) collapsedTags(ts []*Tag, count int, flatTags bool) []*Tag {
	ts = SortTags(ts, flatTags)
	if len(ts) <= count {
		return ts
	}

	// Seed one group per heaviest tag, then greedily assign each remaining tag
	// to the group whose seed value is numerically closest.
	tagGroups := make([][]*Tag, count)
	for i, t := range (ts)[:count] {
		tagGroups[i] = []*Tag{t}
	}
	for _, t := range (ts)[count:] {
		g, d := 0, tagDistance(t, tagGroups[0][0])
		for i := 1; i < count; i++ {
			if nd := tagDistance(t, tagGroups[i][0]); nd < d {
				g, d = i, nd
			}
		}
		tagGroups[g] = append(tagGroups[g], t)
	}

	var nts []*Tag
	for _, g := range tagGroups {
		l, w, c := b.tagGroupLabel(g)
		nts = append(nts, &Tag{
			Name: l,
			Flat: w,
			Cum:  c,
		})
	}
	return SortTags(nts, flatTags)
}

// tagDistance returns the absolute difference between the two tags' values,
// after scaling u's value into t's unit.
func tagDistance(t, u *Tag) float64 {
	v, _ := measurement.Scale(u.Value, u.Unit, t.Unit)
	if v < float64(t.Value) {
		return float64(t.Value) - v
	}
	return v - float64(t.Value)
}

// tagGroupLabel returns a "min..max" range label for the group along with the
// summed flat and cumulative weights (averaged when divisors are present).
func (b *builder) tagGroupLabel(g []*Tag) (label string, flat, cum int64) {
	if len(g) == 1 {
		t := g[0]
		return measurement.Label(t.Value, t.Unit), t.FlatValue(), t.CumValue()
	}
	min := g[0]
	max := g[0]
	df, f := min.FlatDiv, min.Flat
	dc, c := min.CumDiv, min.Cum
	for _, t := range g[1:] {
		if v, _ := measurement.Scale(t.Value, t.Unit, min.Unit); int64(v) < min.Value {
			min = t
		}
		if v, _ := measurement.Scale(t.Value, t.Unit, max.Unit); int64(v) > max.Value {
			max = t
		}
		f += t.Flat
		df += t.FlatDiv
		c += t.Cum
		dc += t.CumDiv
	}
	// Non-zero divisors mean the accumulated weights are sums to be averaged.
	if df != 0 {
		f = f / df
	}
	if dc != 0 {
		c = c / dc
	}

	// Tags are not scaled with the selected output unit because tags are often
	// much smaller than other values which appear, so the range of tag sizes
	// sometimes would appear to be "0..0" when scaled to the selected output unit.
	return measurement.Label(min.Value, min.Unit) + ".." + measurement.Label(max.Value, max.Unit), f, c
}

// min64 returns the smaller of two int64 values.
func min64(a, b int64) int64 {
	if a < b {
		return a
	}
	return b
}

// escapeAllForDot applies escapeForDot to all strings in the given slice.
+func escapeAllForDot(in []string) []string { + var out = make([]string, len(in)) + for i := range in { + out[i] = escapeForDot(in[i]) + } + return out +} + +// escapeForDot escapes double quotes and backslashes, and replaces Graphviz's +// "center" character (\n) with a left-justified character. +// See https://graphviz.org/docs/attr-types/escString/ for more info. +func escapeForDot(str string) string { + return strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(str, `\`, `\\`), `"`, `\"`), "\n", `\l`) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go new file mode 100644 index 0000000000000000000000000000000000000000..b64ef27991a4ca1d364faa036d77e77c197fc929 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go @@ -0,0 +1,1170 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package graph collects a set of samples into a directed graph. +package graph + +import ( + "fmt" + "math" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/google/pprof/profile" +) + +var ( + // Removes package name and method arguments for Java method names. + // See tests for examples. 
+ javaRegExp = regexp.MustCompile(`^(?:[a-z]\w*\.)*([A-Z][\w\$]*\.(?:|[a-z][\w\$]*(?:\$\d+)?))(?:(?:\()|$)`) + // Removes package name and method arguments for Go function names. + // See tests for examples. + goRegExp = regexp.MustCompile(`^(?:[\w\-\.]+\/)+([^.]+\..+)`) + // Removes potential module versions in a package path. + goVerRegExp = regexp.MustCompile(`^(.*?)/v(?:[2-9]|[1-9][0-9]+)([./].*)$`) + // Strips C++ namespace prefix from a C++ function / method name. + // NOTE: Make sure to keep the template parameters in the name. Normally, + // template parameters are stripped from the C++ names but when + // -symbolize=demangle=templates flag is used, they will not be. + // See tests for examples. + cppRegExp = regexp.MustCompile(`^(?:[_a-zA-Z]\w*::)+(_*[A-Z]\w*::~?[_a-zA-Z]\w*(?:<.*>)?)`) + cppAnonymousPrefixRegExp = regexp.MustCompile(`^\(anonymous namespace\)::`) +) + +// Graph summarizes a performance profile into a format that is +// suitable for visualization. +type Graph struct { + Nodes Nodes +} + +// Options encodes the options for constructing a graph +type Options struct { + SampleValue func(s []int64) int64 // Function to compute the value of a sample + SampleMeanDivisor func(s []int64) int64 // Function to compute the divisor for mean graphs, or nil + FormatTag func(int64, string) string // Function to format a sample tag value into a string + ObjNames bool // Always preserve obj filename + OrigFnNames bool // Preserve original (eg mangled) function names + + CallTree bool // Build a tree instead of a graph + DropNegative bool // Drop nodes with overall negative values + + KeptNodes NodeSet // If non-nil, only use nodes in this set +} + +// Nodes is an ordered collection of graph nodes. +type Nodes []*Node + +// Node is an entry on a profiling report. It represents a unique +// program location. +type Node struct { + // Info describes the source location associated to this node. 
	Info NodeInfo

	// Function represents the function that this node belongs to. On
	// graphs with sub-function resolution (eg line number or
	// addresses), two nodes in a NodeMap that are part of the same
	// function have the same value of Node.Function. If the Node
	// represents the whole function, it points back to itself.
	Function *Node

	// Values associated to this node. Flat is exclusive to this node,
	// Cum includes all descendents.
	Flat, FlatDiv, Cum, CumDiv int64

	// In and Out contain the nodes immediately reaching or reached by
	// this node.
	In, Out EdgeMap

	// LabelTags provide additional information about subsets of a sample.
	LabelTags TagMap

	// NumericTags provide additional values for subsets of a sample.
	// Numeric tags are optionally associated to a label tag. The key
	// for NumericTags is the name of the LabelTag they are associated
	// to, or "" for numeric tags not associated to a label tag.
	NumericTags map[string]TagMap
}

// FlatValue returns the exclusive value for this node, computing the
// mean if a divisor is available.
func (n *Node) FlatValue() int64 {
	if n.FlatDiv == 0 {
		return n.Flat
	}
	return n.Flat / n.FlatDiv
}

// CumValue returns the inclusive value for this node, computing the
// mean if a divisor is available.
func (n *Node) CumValue() int64 {
	if n.CumDiv == 0 {
		return n.Cum
	}
	return n.Cum / n.CumDiv
}

// AddToEdge increases the weight of an edge between two nodes. If
// there isn't such an edge one is created.
func (n *Node) AddToEdge(to *Node, v int64, residual, inline bool) {
	n.AddToEdgeDiv(to, 0, v, residual, inline)
}

// AddToEdgeDiv increases the weight of an edge between two nodes. If
// there isn't such an edge one is created.
func (n *Node) AddToEdgeDiv(to *Node, dv, v int64, residual, inline bool) {
	// Both endpoints must agree on the shared *Edge; anything else means the
	// graph has been corrupted.
	if n.Out[to] != to.In[n] {
		panic(fmt.Errorf("asymmetric edges %v %v", *n, *to))
	}

	if e := n.Out[to]; e != nil {
		e.WeightDiv += dv
		e.Weight += v
		// Residual is sticky once set; Inline survives only if every merged
		// contribution was inlined.
		if residual {
			e.Residual = true
		}
		if !inline {
			e.Inline = false
		}
		return
	}

	info := &Edge{Src: n, Dest: to, WeightDiv: dv, Weight: v, Residual: residual, Inline: inline}
	n.Out[to] = info
	to.In[n] = info
}

// NodeInfo contains the attributes for a node.
type NodeInfo struct {
	Name              string
	OrigName          string
	Address           uint64
	File              string
	StartLine, Lineno int
	Objfile           string
}

// PrintableName calls the Node's Formatter function with a single space separator.
func (i *NodeInfo) PrintableName() string {
	return strings.Join(i.NameComponents(), " ")
}

// NameComponents returns the components of the printable name to be used for a node.
func (i *NodeInfo) NameComponents() []string {
	var name []string
	if i.Address != 0 {
		name = append(name, fmt.Sprintf("%016x", i.Address))
	}
	if fun := i.Name; fun != "" {
		name = append(name, fun)
	}

	switch {
	case i.Lineno != 0:
		// User requested line numbers, provide what we have.
		name = append(name, fmt.Sprintf("%s:%d", i.File, i.Lineno))
	case i.File != "":
		// User requested file name, provide it.
		name = append(name, i.File)
	case i.Name != "":
		// User requested function name. It was already included.
	case i.Objfile != "":
		// Only binary name is available
		name = append(name, "["+filepath.Base(i.Objfile)+"]")
	default:
		// Do not leave it empty if there is no information at all.
		name = append(name, "")
	}
	return name
}

// NodeMap maps from a node info struct to a node. It is used to merge
// report entries with the same info.
type NodeMap map[NodeInfo]*Node

// NodeSet is a collection of node info structs.
type NodeSet map[NodeInfo]bool

// NodePtrSet is a collection of nodes.
// Trimming a graph or tree requires a set
// of objects which uniquely identify the nodes to keep. In a graph, NodeInfo
// works as a unique identifier; however, in a tree multiple nodes may share
// identical NodeInfos. A *Node does uniquely identify a node so we can use that
// instead. Though a *Node also uniquely identifies a node in a graph,
// currently, during trimming, graphs are rebuilt from scratch using only the
// NodeSet, so there would not be the required context of the initial graph to
// allow for the use of *Node.
type NodePtrSet map[*Node]bool

// FindOrInsertNode takes the info for a node and either returns a matching node
// from the node map if one exists, or adds one to the map if one does not.
// If kept is non-nil, nodes are only added if they can be located on it.
func (nm NodeMap) FindOrInsertNode(info NodeInfo, kept NodeSet) *Node {
	if kept != nil {
		if _, ok := kept[info]; !ok {
			return nil
		}
	}

	if n, ok := nm[info]; ok {
		return n
	}

	n := &Node{
		Info:        info,
		In:          make(EdgeMap),
		Out:         make(EdgeMap),
		LabelTags:   make(TagMap),
		NumericTags: make(map[string]TagMap),
	}
	nm[info] = n
	if info.Address == 0 && info.Lineno == 0 {
		// This node represents the whole function, so point Function
		// back to itself.
		n.Function = n
		return n
	}
	// Find a node that represents the whole function.
	// Recurses at most once: with Address and Lineno zeroed, the
	// recursive call takes the whole-function branch above.
	info.Address = 0
	info.Lineno = 0
	n.Function = nm.FindOrInsertNode(info, nil)
	return n
}

// EdgeMap is used to represent the incoming/outgoing edges from a node.
type EdgeMap map[*Node]*Edge

// Edge contains any attributes to be represented about edges in a graph.
type Edge struct {
	Src, Dest *Node
	// The summary weight of the edge
	Weight, WeightDiv int64

	// residual edges connect nodes that were connected through a
	// separate node, which has been removed from the report.
	Residual bool
	// An inline edge represents a call that was inlined into the caller.
	Inline bool
}

// WeightValue returns the weight value for this edge, normalizing if a
// divisor is available.
func (e *Edge) WeightValue() int64 {
	if e.WeightDiv == 0 {
		return e.Weight
	}
	return e.Weight / e.WeightDiv
}

// Tag represent sample annotations
type Tag struct {
	Name          string
	Unit          string // Describe the value, "" for non-numeric tags
	Value         int64
	Flat, FlatDiv int64
	Cum, CumDiv   int64
}

// FlatValue returns the exclusive value for this tag, computing the
// mean if a divisor is available.
func (t *Tag) FlatValue() int64 {
	if t.FlatDiv == 0 {
		return t.Flat
	}
	return t.Flat / t.FlatDiv
}

// CumValue returns the inclusive value for this tag, computing the
// mean if a divisor is available.
func (t *Tag) CumValue() int64 {
	if t.CumDiv == 0 {
		return t.Cum
	}
	return t.Cum / t.CumDiv
}

// TagMap is a collection of tags, classified by their name.
type TagMap map[string]*Tag

// SortTags sorts a slice of tags based on their weight.
func SortTags(t []*Tag, flat bool) []*Tag {
	ts := tags{t, flat}
	sort.Sort(ts)
	return ts.t
}

// New summarizes performance data from a profile into a graph.
func New(prof *profile.Profile, o *Options) *Graph {
	if o.CallTree {
		return newTree(prof, o)
	}
	g, _ := newGraph(prof, o)
	return g
}

// newGraph computes a graph from a profile. It returns the graph, and
// a map from the profile location indices to the corresponding graph
// nodes.
+func newGraph(prof *profile.Profile, o *Options) (*Graph, map[uint64]Nodes) { + nodes, locationMap := CreateNodes(prof, o) + seenNode := make(map[*Node]bool) + seenEdge := make(map[nodePair]bool) + for _, sample := range prof.Sample { + var w, dw int64 + w = o.SampleValue(sample.Value) + if o.SampleMeanDivisor != nil { + dw = o.SampleMeanDivisor(sample.Value) + } + if dw == 0 && w == 0 { + continue + } + for k := range seenNode { + delete(seenNode, k) + } + for k := range seenEdge { + delete(seenEdge, k) + } + var parent *Node + // A residual edge goes over one or more nodes that were not kept. + residual := false + + labels := joinLabels(sample) + // Group the sample frames, based on a global map. + for i := len(sample.Location) - 1; i >= 0; i-- { + l := sample.Location[i] + locNodes := locationMap[l.ID] + for ni := len(locNodes) - 1; ni >= 0; ni-- { + n := locNodes[ni] + if n == nil { + residual = true + continue + } + // Add cum weight to all nodes in stack, avoiding double counting. + if _, ok := seenNode[n]; !ok { + seenNode[n] = true + n.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, false) + } + // Update edge weights for all edges in stack, avoiding double counting. + if _, ok := seenEdge[nodePair{n, parent}]; !ok && parent != nil && n != parent { + seenEdge[nodePair{n, parent}] = true + parent.AddToEdgeDiv(n, dw, w, residual, ni != len(locNodes)-1) + } + parent = n + residual = false + } + } + if parent != nil && !residual { + // Add flat weight to leaf node. + parent.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, true) + } + } + + return selectNodesForGraph(nodes, o.DropNegative), locationMap +} + +func selectNodesForGraph(nodes Nodes, dropNegative bool) *Graph { + // Collect nodes into a graph. 
+ gNodes := make(Nodes, 0, len(nodes)) + for _, n := range nodes { + if n == nil { + continue + } + if n.Cum == 0 && n.Flat == 0 { + continue + } + if dropNegative && isNegative(n) { + continue + } + gNodes = append(gNodes, n) + } + return &Graph{gNodes} +} + +type nodePair struct { + src, dest *Node +} + +func newTree(prof *profile.Profile, o *Options) (g *Graph) { + parentNodeMap := make(map[*Node]NodeMap, len(prof.Sample)) + for _, sample := range prof.Sample { + var w, dw int64 + w = o.SampleValue(sample.Value) + if o.SampleMeanDivisor != nil { + dw = o.SampleMeanDivisor(sample.Value) + } + if dw == 0 && w == 0 { + continue + } + var parent *Node + labels := joinLabels(sample) + // Group the sample frames, based on a per-node map. + for i := len(sample.Location) - 1; i >= 0; i-- { + l := sample.Location[i] + lines := l.Line + if len(lines) == 0 { + lines = []profile.Line{{}} // Create empty line to include location info. + } + for lidx := len(lines) - 1; lidx >= 0; lidx-- { + nodeMap := parentNodeMap[parent] + if nodeMap == nil { + nodeMap = make(NodeMap) + parentNodeMap[parent] = nodeMap + } + n := nodeMap.findOrInsertLine(l, lines[lidx], o) + if n == nil { + continue + } + n.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, false) + if parent != nil { + parent.AddToEdgeDiv(n, dw, w, false, lidx != len(lines)-1) + } + parent = n + } + } + if parent != nil { + parent.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, true) + } + } + + nodes := make(Nodes, len(prof.Location)) + for _, nm := range parentNodeMap { + nodes = append(nodes, nm.nodes()...) + } + return selectNodesForGraph(nodes, o.DropNegative) +} + +// ShortenFunctionName returns a shortened version of a function's name. 
func ShortenFunctionName(f string) string {
	f = cppAnonymousPrefixRegExp.ReplaceAllString(f, "")
	f = goVerRegExp.ReplaceAllString(f, `${1}${2}`)
	for _, re := range []*regexp.Regexp{goRegExp, javaRegExp, cppRegExp} {
		if matches := re.FindStringSubmatch(f); len(matches) >= 2 {
			return strings.Join(matches[1:], "")
		}
	}
	return f
}

// TrimTree trims a Graph in forest form, keeping only the nodes in kept. This
// will not work correctly if even a single node has multiple parents.
func (g *Graph) TrimTree(kept NodePtrSet) {
	// Creates a new list of nodes
	oldNodes := g.Nodes
	g.Nodes = make(Nodes, 0, len(kept))

	for _, cur := range oldNodes {
		// A node may not have multiple parents
		if len(cur.In) > 1 {
			panic("TrimTree only works on trees")
		}

		// If a node should be kept, add it to the new list of nodes
		if _, ok := kept[cur]; ok {
			g.Nodes = append(g.Nodes, cur)
			continue
		}

		// If a node has no parents, then delete all of the in edges of its
		// children to make them each roots of their own trees.
		if len(cur.In) == 0 {
			for _, outEdge := range cur.Out {
				delete(outEdge.Dest.In, cur)
			}
			continue
		}

		// Get the parent. This works since at this point cur.In must contain only
		// one element.
		if len(cur.In) != 1 {
			panic("Get parent assertion failed. cur.In expected to be of length 1.")
		}
		var parent *Node
		for _, edge := range cur.In {
			parent = edge.Src
		}

		parentEdgeInline := parent.Out[cur].Inline

		// Remove the edge from the parent to this node
		delete(parent.Out, cur)

		// Reconfigure every edge from the current node to now begin at the parent.
		for _, outEdge := range cur.Out {
			child := outEdge.Dest

			delete(child.In, cur)
			child.In[parent] = outEdge
			parent.Out[child] = outEdge

			outEdge.Src = parent
			outEdge.Residual = true
			// If the edge from the parent to the current node and the edge from the
			// current node to the child are both inline, then this resulting residual
			// edge should also be inline
			outEdge.Inline = parentEdgeInline && outEdge.Inline
		}
	}
	g.RemoveRedundantEdges()
}

// joinLabels returns a deterministic string encoding of a sample's
// labels: sorted key:value pairs joined by the backquoted, literal
// two-character sequence `\n` (not an actual newline).
func joinLabels(s *profile.Sample) string {
	if len(s.Label) == 0 {
		return ""
	}

	var labels []string
	for key, vals := range s.Label {
		for _, v := range vals {
			labels = append(labels, key+":"+v)
		}
	}
	// Sort for a canonical key regardless of map iteration order.
	sort.Strings(labels)
	return strings.Join(labels, `\n`)
}

// isNegative returns true if the node is considered as "negative" for the
// purposes of drop_negative.
func isNegative(n *Node) bool {
	switch {
	case n.Flat < 0:
		return true
	case n.Flat == 0 && n.Cum < 0:
		return true
	default:
		return false
	}
}

// CreateNodes creates graph nodes for all locations in a profile. It
// returns set of all nodes, plus a mapping of each location to the
// set of corresponding nodes (one per location.Line).
func CreateNodes(prof *profile.Profile, o *Options) (Nodes, map[uint64]Nodes) {
	locations := make(map[uint64]Nodes, len(prof.Location))
	nm := make(NodeMap, len(prof.Location))
	for _, l := range prof.Location {
		lines := l.Line
		if len(lines) == 0 {
			lines = []profile.Line{{}} // Create empty line to include location info.
		}
		nodes := make(Nodes, len(lines))
		for ln := range lines {
			nodes[ln] = nm.findOrInsertLine(l, lines[ln], o)
		}
		locations[l.ID] = nodes
	}
	return nm.nodes(), locations
}

// nodes flattens the map values into a slice (map-iteration order).
func (nm NodeMap) nodes() Nodes {
	nodes := make(Nodes, 0, len(nm))
	for _, n := range nm {
		nodes = append(nodes, n)
	}
	return nodes
}

// findOrInsertLine maps a location/line pair to a Node, honoring
// o.KeptNodes. Returns nil when the node is filtered out.
func (nm NodeMap) findOrInsertLine(l *profile.Location, li profile.Line, o *Options) *Node {
	var objfile string
	if m := l.Mapping; m != nil && m.File != "" {
		objfile = m.File
	}

	if ni := nodeInfo(l, li, objfile, o); ni != nil {
		return nm.FindOrInsertNode(*ni, o.KeptNodes)
	}
	return nil
}

// nodeInfo builds the NodeInfo key for a location/line pair, falling
// back to address+objfile when no function information is present.
func nodeInfo(l *profile.Location, line profile.Line, objfile string, o *Options) *NodeInfo {
	if line.Function == nil {
		return &NodeInfo{Address: l.Address, Objfile: objfile}
	}
	ni := &NodeInfo{
		Address: l.Address,
		Lineno:  int(line.Line),
		Name:    line.Function.Name,
	}
	if fname := line.Function.Filename; fname != "" {
		ni.File = filepath.Clean(fname)
	}
	if o.OrigFnNames {
		ni.OrigName = line.Function.SystemName
	}
	if o.ObjNames || (ni.Name == "" && ni.OrigName == "") {
		ni.Objfile = objfile
		ni.StartLine = int(line.Function.StartLine)
	}
	return ni
}

// tags implements sort.Interface over a tag slice: by cum weight (when
// flat is false) then flat weight, heaviest absolute value first, with
// name as the final tie-breaker.
type tags struct {
	t    []*Tag
	flat bool
}

func (t tags) Len() int      { return len(t.t) }
func (t tags) Swap(i, j int) { t.t[i], t.t[j] = t.t[j], t.t[i] }
func (t tags) Less(i, j int) bool {
	if !t.flat {
		if t.t[i].Cum != t.t[j].Cum {
			return abs64(t.t[i].Cum) > abs64(t.t[j].Cum)
		}
	}
	if t.t[i].Flat != t.t[j].Flat {
		return abs64(t.t[i].Flat) > abs64(t.t[j].Flat)
	}
	return t.t[i].Name < t.t[j].Name
}

// Sum adds the flat and cum values of a set of nodes.
+func (ns Nodes) Sum() (flat int64, cum int64) { + for _, n := range ns { + flat += n.Flat + cum += n.Cum + } + return +} + +func (n *Node) addSample(dw, w int64, labels string, numLabel map[string][]int64, numUnit map[string][]string, format func(int64, string) string, flat bool) { + // Update sample value + if flat { + n.FlatDiv += dw + n.Flat += w + } else { + n.CumDiv += dw + n.Cum += w + } + + // Add string tags + if labels != "" { + t := n.LabelTags.findOrAddTag(labels, "", 0) + if flat { + t.FlatDiv += dw + t.Flat += w + } else { + t.CumDiv += dw + t.Cum += w + } + } + + numericTags := n.NumericTags[labels] + if numericTags == nil { + numericTags = TagMap{} + n.NumericTags[labels] = numericTags + } + // Add numeric tags + if format == nil { + format = defaultLabelFormat + } + for k, nvals := range numLabel { + units := numUnit[k] + for i, v := range nvals { + var t *Tag + if len(units) > 0 { + t = numericTags.findOrAddTag(format(v, units[i]), units[i], v) + } else { + t = numericTags.findOrAddTag(format(v, k), k, v) + } + if flat { + t.FlatDiv += dw + t.Flat += w + } else { + t.CumDiv += dw + t.Cum += w + } + } + } +} + +func defaultLabelFormat(v int64, key string) string { + return strconv.FormatInt(v, 10) +} + +func (m TagMap) findOrAddTag(label, unit string, value int64) *Tag { + l := m[label] + if l == nil { + l = &Tag{ + Name: label, + Unit: unit, + Value: value, + } + m[label] = l + } + return l +} + +// String returns a text representation of a graph, for debugging purposes. 
func (g *Graph) String() string {
	var s []string

	// Assign 1-based indices so edges can be printed as index lists.
	nodeIndex := make(map[*Node]int, len(g.Nodes))

	for i, n := range g.Nodes {
		nodeIndex[n] = i + 1
	}

	for i, n := range g.Nodes {
		name := n.Info.PrintableName()
		var in, out []int

		for _, from := range n.In {
			in = append(in, nodeIndex[from.Src])
		}
		for _, to := range n.Out {
			out = append(out, nodeIndex[to.Dest])
		}
		s = append(s, fmt.Sprintf("%d: %s[flat=%d cum=%d] %x -> %v ", i+1, name, n.Flat, n.Cum, in, out))
	}
	return strings.Join(s, "\n")
}

// DiscardLowFrequencyNodes returns a set of the nodes at or over a
// specific cum value cutoff.
func (g *Graph) DiscardLowFrequencyNodes(nodeCutoff int64) NodeSet {
	return makeNodeSet(g.Nodes, nodeCutoff)
}

// DiscardLowFrequencyNodePtrs returns a NodePtrSet of nodes at or over a
// specific cum value cutoff.
func (g *Graph) DiscardLowFrequencyNodePtrs(nodeCutoff int64) NodePtrSet {
	cutNodes := getNodesAboveCumCutoff(g.Nodes, nodeCutoff)
	kept := make(NodePtrSet, len(cutNodes))
	for _, n := range cutNodes {
		kept[n] = true
	}
	return kept
}

// makeNodeSet returns the NodeInfo keys of the nodes at or over cutoff.
func makeNodeSet(nodes Nodes, nodeCutoff int64) NodeSet {
	cutNodes := getNodesAboveCumCutoff(nodes, nodeCutoff)
	kept := make(NodeSet, len(cutNodes))
	for _, n := range cutNodes {
		kept[n.Info] = true
	}
	return kept
}

// getNodesAboveCumCutoff returns all the nodes which have a Cum value greater
// than or equal to cutoff.
func getNodesAboveCumCutoff(nodes Nodes, nodeCutoff int64) Nodes {
	cutoffNodes := make(Nodes, 0, len(nodes))
	for _, n := range nodes {
		if abs64(n.Cum) < nodeCutoff {
			continue
		}
		cutoffNodes = append(cutoffNodes, n)
	}
	return cutoffNodes
}

// TrimLowFrequencyTags removes tags that have less than
// the specified weight.
func (g *Graph) TrimLowFrequencyTags(tagCutoff int64) {
	// Remove nodes with value <= total*nodeFraction
	for _, n := range g.Nodes {
		n.LabelTags = trimLowFreqTags(n.LabelTags, tagCutoff)
		for s, nt := range n.NumericTags {
			n.NumericTags[s] = trimLowFreqTags(nt, tagCutoff)
		}
	}
}

// trimLowFreqTags keeps only tags whose absolute flat or cum weight
// reaches minValue.
func trimLowFreqTags(tags TagMap, minValue int64) TagMap {
	kept := TagMap{}
	for s, t := range tags {
		if abs64(t.Flat) >= minValue || abs64(t.Cum) >= minValue {
			kept[s] = t
		}
	}
	return kept
}

// TrimLowFrequencyEdges removes edges that have less than
// the specified weight. Returns the number of edges removed
func (g *Graph) TrimLowFrequencyEdges(edgeCutoff int64) int {
	var droppedEdges int
	for _, n := range g.Nodes {
		for src, e := range n.In {
			if abs64(e.Weight) < edgeCutoff {
				// Deleting entries while ranging a map is safe in Go.
				delete(n.In, src)
				delete(src.Out, n)
				droppedEdges++
			}
		}
	}
	return droppedEdges
}

// SortNodes sorts the nodes in a graph based on a specific heuristic.
func (g *Graph) SortNodes(cum bool, visualMode bool) {
	// Sort nodes based on requested mode
	switch {
	case visualMode:
		// Specialized sort to produce a more visually-interesting graph
		g.Nodes.Sort(EntropyOrder)
	case cum:
		g.Nodes.Sort(CumNameOrder)
	default:
		g.Nodes.Sort(FlatNameOrder)
	}
}

// SelectTopNodePtrs returns a set of the top maxNodes *Node in a graph.
func (g *Graph) SelectTopNodePtrs(maxNodes int, visualMode bool) NodePtrSet {
	set := make(NodePtrSet)
	for _, node := range g.selectTopNodes(maxNodes, visualMode) {
		set[node] = true
	}
	return set
}

// SelectTopNodes returns a set of the top maxNodes nodes in a graph.
func (g *Graph) SelectTopNodes(maxNodes int, visualMode bool) NodeSet {
	return makeNodeSet(g.selectTopNodes(maxNodes, visualMode), 0)
}

// selectTopNodes returns a slice of the top maxNodes nodes in a graph.
// The nodes are assumed to be already sorted (see SortNodes).
func (g *Graph) selectTopNodes(maxNodes int, visualMode bool) Nodes {
	if maxNodes > 0 {
		if visualMode {
			var count int
			// If generating a visual graph, count tags as nodes. Update
			// maxNodes to account for them.
			for i, n := range g.Nodes {
				tags := countTags(n)
				if tags > maxNodelets {
					tags = maxNodelets
				}
				if count += tags + 1; count >= maxNodes {
					maxNodes = i + 1
					break
				}
			}
		}
	}
	if maxNodes > len(g.Nodes) {
		maxNodes = len(g.Nodes)
	}
	return g.Nodes[:maxNodes]
}

// countTags counts the tags with flat count. This underestimates the
// number of tags being displayed, but in practice is close enough.
func countTags(n *Node) int {
	count := 0
	for _, e := range n.LabelTags {
		if e.Flat != 0 {
			count++
		}
	}
	for _, t := range n.NumericTags {
		for _, e := range t {
			if e.Flat != 0 {
				count++
			}
		}
	}
	return count
}

// RemoveRedundantEdges removes residual edges if the destination can
// be reached through another path. This is done to simplify the graph
// while preserving connectivity.
func (g *Graph) RemoveRedundantEdges() {
	// Walk the nodes and outgoing edges in reverse order to prefer
	// removing edges with the lowest weight.
	for i := len(g.Nodes); i > 0; i-- {
		n := g.Nodes[i-1]
		in := n.In.Sort()
		for j := len(in); j > 0; j-- {
			e := in[j-1]
			if !e.Residual {
				// Do not remove edges heavier than a non-residual edge, to
				// avoid potential confusion.
				break
			}
			if isRedundantEdge(e) {
				delete(e.Src.Out, e.Dest)
				delete(e.Dest.In, e.Src)
			}
		}
	}
}

// isRedundantEdge determines if there is a path that allows e.Src
// to reach e.Dest after removing e.
func isRedundantEdge(e *Edge) bool {
	// Breadth-first search backwards from e.Dest over In edges,
	// skipping e itself, looking for an alternate path from e.Src.
	src, n := e.Src, e.Dest
	seen := map[*Node]bool{n: true}
	queue := Nodes{n}
	for len(queue) > 0 {
		n := queue[0]
		queue = queue[1:]
		for _, ie := range n.In {
			if e == ie || seen[ie.Src] {
				continue
			}
			if ie.Src == src {
				return true
			}
			seen[ie.Src] = true
			queue = append(queue, ie.Src)
		}
	}
	return false
}

// nodeSorter is a mechanism used to allow a report to be sorted
// in different ways.
type nodeSorter struct {
	rs   Nodes
	less func(l, r *Node) bool
}

func (s nodeSorter) Len() int           { return len(s.rs) }
func (s nodeSorter) Swap(i, j int)      { s.rs[i], s.rs[j] = s.rs[j], s.rs[i] }
func (s nodeSorter) Less(i, j int) bool { return s.less(s.rs[i], s.rs[j]) }

// Sort reorders a slice of nodes based on the specified ordering
// criteria. The result is sorted in decreasing order for (absolute)
// numeric quantities, alphabetically for text, and increasing for
// addresses.
func (ns Nodes) Sort(o NodeOrder) error {
	var s nodeSorter

	switch o {
	case FlatNameOrder:
		s = nodeSorter{ns,
			func(l, r *Node) bool {
				if iv, jv := abs64(l.Flat), abs64(r.Flat); iv != jv {
					return iv > jv
				}
				if iv, jv := l.Info.PrintableName(), r.Info.PrintableName(); iv != jv {
					return iv < jv
				}
				if iv, jv := abs64(l.Cum), abs64(r.Cum); iv != jv {
					return iv > jv
				}
				return compareNodes(l, r)
			},
		}
	case FlatCumNameOrder:
		s = nodeSorter{ns,
			func(l, r *Node) bool {
				if iv, jv := abs64(l.Flat), abs64(r.Flat); iv != jv {
					return iv > jv
				}
				if iv, jv := abs64(l.Cum), abs64(r.Cum); iv != jv {
					return iv > jv
				}
				if iv, jv := l.Info.PrintableName(), r.Info.PrintableName(); iv != jv {
					return iv < jv
				}
				return compareNodes(l, r)
			},
		}
	case NameOrder:
		s = nodeSorter{ns,
			func(l, r *Node) bool {
				if iv, jv := l.Info.Name, r.Info.Name; iv != jv {
					return iv < jv
				}
				return compareNodes(l, r)
			},
		}
	case FileOrder:
		s = nodeSorter{ns,
			func(l, r *Node) bool {
				if iv, jv := l.Info.File, r.Info.File; iv != jv {
					return iv < jv
				}
				if iv, jv := l.Info.StartLine, r.Info.StartLine; iv != jv {
					return iv < jv
				}
				return compareNodes(l, r)
			},
		}
	case AddressOrder:
		s = nodeSorter{ns,
			func(l, r *Node) bool {
				if iv, jv := l.Info.Address, r.Info.Address; iv != jv {
					return iv < jv
				}
				return compareNodes(l, r)
			},
		}
	case CumNameOrder, EntropyOrder:
		// Hold scoring for score-based ordering
		var score map[*Node]int64
		scoreOrder := func(l, r *Node) bool {
			if iv, jv := abs64(score[l]), abs64(score[r]); iv != jv {
				return iv > jv
			}
			if iv, jv := l.Info.PrintableName(), r.Info.PrintableName(); iv != jv {
				return iv < jv
			}
			if iv, jv := abs64(l.Flat), abs64(r.Flat); iv != jv {
				return iv > jv
			}
			return compareNodes(l, r)
		}

		switch o {
		case CumNameOrder:
			score = make(map[*Node]int64, len(ns))
			for _, n := range ns {
				score[n] = n.Cum
			}
			s = nodeSorter{ns, scoreOrder}
		case EntropyOrder:
			score = make(map[*Node]int64, len(ns))
			for _, n := range ns {
				score[n] = entropyScore(n)
			}
			s = nodeSorter{ns, scoreOrder}
		}
	default:
		return fmt.Errorf("report: unrecognized sort ordering: %d", o)
	}
	sort.Sort(s)
	return nil
}

// compareNodes compares two nodes to provide a deterministic ordering
// between them. Two nodes cannot have the same Node.Info value.
func compareNodes(l, r *Node) bool {
	return fmt.Sprint(l.Info) < fmt.Sprint(r.Info)
}

// entropyScore computes a score for a node representing how important
// it is to include this node on a graph visualization. It is used to
// sort the nodes and select which ones to display if we have more
// nodes than desired in the graph. This number is computed by looking
// at the flat and cum weights of the node and the incoming/outgoing
// edges. The fundamental idea is to penalize nodes that have a simple
// fallthrough from their incoming to the outgoing edge.
func entropyScore(n *Node) int64 {
	score := float64(0)

	if len(n.In) == 0 {
		score++ // Favor entry nodes
	} else {
		score += edgeEntropyScore(n, n.In, 0)
	}

	if len(n.Out) == 0 {
		score++ // Favor leaf nodes
	} else {
		score += edgeEntropyScore(n, n.Out, n.Flat)
	}

	return int64(score*float64(n.Cum)) + n.Flat
}

// edgeEntropyScore computes the entropy value for a set of edges
// coming in or out of a node. Entropy (as defined in information
// theory) refers to the amount of information encoded by the set of
// edges. A set of edges that have a more interesting distribution of
// samples gets a higher score.
func edgeEntropyScore(n *Node, edges EdgeMap, self int64) float64 {
	score := float64(0)
	total := self
	for _, e := range edges {
		if e.Weight > 0 {
			total += abs64(e.Weight)
		}
	}
	if total != 0 {
		for _, e := range edges {
			frac := float64(abs64(e.Weight)) / float64(total)
			score += -frac * math.Log2(frac)
		}
		if self > 0 {
			frac := float64(abs64(self)) / float64(total)
			score += -frac * math.Log2(frac)
		}
	}
	return score
}

// NodeOrder sets the ordering for a Sort operation
type NodeOrder int

// Sorting options for node sort.
const (
	FlatNameOrder NodeOrder = iota
	FlatCumNameOrder
	CumNameOrder
	NameOrder
	FileOrder
	AddressOrder
	EntropyOrder
)

// Sort returns a slice of the edges in the map, in a consistent
// order. The sort order is first based on the edge weight
// (higher-to-lower) and then by the node names to avoid flakiness.
func (e EdgeMap) Sort() []*Edge {
	el := make(edgeList, 0, len(e))
	for _, w := range e {
		el = append(el, w)
	}

	sort.Sort(el)
	return el
}

// Sum returns the total weight for a set of nodes.
+func (e EdgeMap) Sum() int64 { + var ret int64 + for _, edge := range e { + ret += edge.Weight + } + return ret +} + +type edgeList []*Edge + +func (el edgeList) Len() int { + return len(el) +} + +func (el edgeList) Less(i, j int) bool { + if el[i].Weight != el[j].Weight { + return abs64(el[i].Weight) > abs64(el[j].Weight) + } + + from1 := el[i].Src.Info.PrintableName() + from2 := el[j].Src.Info.PrintableName() + if from1 != from2 { + return from1 < from2 + } + + to1 := el[i].Dest.Info.PrintableName() + to2 := el[j].Dest.Info.PrintableName() + + return to1 < to2 +} + +func (el edgeList) Swap(i, j int) { + el[i], el[j] = el[j], el[i] +} + +func abs64(i int64) int64 { + if i < 0 { + return -i + } + return i +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go new file mode 100644 index 0000000000000000000000000000000000000000..d9644f9326c49d5d962b40c5163c04d3e9789105 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go @@ -0,0 +1,293 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package measurement export utility functions to manipulate/format performance profile sample values. 
+package measurement + +import ( + "fmt" + "math" + "strings" + "time" + + "github.com/google/pprof/profile" +) + +// ScaleProfiles updates the units in a set of profiles to make them +// compatible. It scales the profiles to the smallest unit to preserve +// data. +func ScaleProfiles(profiles []*profile.Profile) error { + if len(profiles) == 0 { + return nil + } + periodTypes := make([]*profile.ValueType, 0, len(profiles)) + for _, p := range profiles { + if p.PeriodType != nil { + periodTypes = append(periodTypes, p.PeriodType) + } + } + periodType, err := CommonValueType(periodTypes) + if err != nil { + return fmt.Errorf("period type: %v", err) + } + + // Identify common sample types + numSampleTypes := len(profiles[0].SampleType) + for _, p := range profiles[1:] { + if numSampleTypes != len(p.SampleType) { + return fmt.Errorf("inconsistent samples type count: %d != %d", numSampleTypes, len(p.SampleType)) + } + } + sampleType := make([]*profile.ValueType, numSampleTypes) + for i := 0; i < numSampleTypes; i++ { + sampleTypes := make([]*profile.ValueType, len(profiles)) + for j, p := range profiles { + sampleTypes[j] = p.SampleType[i] + } + sampleType[i], err = CommonValueType(sampleTypes) + if err != nil { + return fmt.Errorf("sample types: %v", err) + } + } + + for _, p := range profiles { + if p.PeriodType != nil && periodType != nil { + period, _ := Scale(p.Period, p.PeriodType.Unit, periodType.Unit) + p.Period, p.PeriodType.Unit = int64(period), periodType.Unit + } + ratios := make([]float64, len(p.SampleType)) + for i, st := range p.SampleType { + if sampleType[i] == nil { + ratios[i] = 1 + continue + } + ratios[i], _ = Scale(1, st.Unit, sampleType[i].Unit) + p.SampleType[i].Unit = sampleType[i].Unit + } + if err := p.ScaleN(ratios); err != nil { + return fmt.Errorf("scale: %v", err) + } + } + return nil +} + +// CommonValueType returns the finest type from a set of compatible +// types. 
+func CommonValueType(ts []*profile.ValueType) (*profile.ValueType, error) { + if len(ts) <= 1 { + return nil, nil + } + minType := ts[0] + for _, t := range ts[1:] { + if !compatibleValueTypes(minType, t) { + return nil, fmt.Errorf("incompatible types: %v %v", *minType, *t) + } + if ratio, _ := Scale(1, t.Unit, minType.Unit); ratio < 1 { + minType = t + } + } + rcopy := *minType + return &rcopy, nil +} + +func compatibleValueTypes(v1, v2 *profile.ValueType) bool { + if v1 == nil || v2 == nil { + return true // No grounds to disqualify. + } + // Remove trailing 's' to permit minor mismatches. + if t1, t2 := strings.TrimSuffix(v1.Type, "s"), strings.TrimSuffix(v2.Type, "s"); t1 != t2 { + return false + } + + if v1.Unit == v2.Unit { + return true + } + for _, ut := range unitTypes { + if ut.sniffUnit(v1.Unit) != nil && ut.sniffUnit(v2.Unit) != nil { + return true + } + } + return false +} + +// Scale a measurement from a unit to a different unit and returns +// the scaled value and the target unit. The returned target unit +// will be empty if uninteresting (could be skipped). +func Scale(value int64, fromUnit, toUnit string) (float64, string) { + // Avoid infinite recursion on overflow. + if value < 0 && -value > 0 { + v, u := Scale(-value, fromUnit, toUnit) + return -v, u + } + for _, ut := range unitTypes { + if v, u, ok := ut.convertUnit(value, fromUnit, toUnit); ok { + return v, u + } + } + // Skip non-interesting units. + switch toUnit { + case "count", "sample", "unit", "minimum", "auto": + return float64(value), "" + default: + return float64(value), toUnit + } +} + +// Label returns the label used to describe a certain measurement. +func Label(value int64, unit string) string { + return ScaledLabel(value, unit, "auto") +} + +// ScaledLabel scales the passed-in measurement (if necessary) and +// returns the label used to describe a float measurement. 
+func ScaledLabel(value int64, fromUnit, toUnit string) string {
+	v, u := Scale(value, fromUnit, toUnit)
+	sv := strings.TrimSuffix(fmt.Sprintf("%.2f", v), ".00")
+	if sv == "0" || sv == "-0" {
+		return "0"
+	}
+	return sv + u
+}
+
+// Percentage computes the percentage of total of a value, and encodes
+// it as a string. At least two digits of precision are printed.
+func Percentage(value, total int64) string {
+	var ratio float64
+	if total != 0 {
+		ratio = math.Abs(float64(value)/float64(total)) * 100
+	}
+	switch {
+	case math.Abs(ratio) >= 99.95 && math.Abs(ratio) <= 100.05:
+		return "  100%"
+	case math.Abs(ratio) >= 1.0:
+		return fmt.Sprintf("%5.2f%%", ratio)
+	default:
+		return fmt.Sprintf("%5.2g%%", ratio)
+	}
+}
+
+// unit includes a list of aliases representing a specific unit and a factor
+// which one can multiply a value in the specified unit by to get the value
+// in terms of the base unit.
+type unit struct {
+	canonicalName string
+	aliases       []string
+	factor        float64
+}
+
+// unitType includes a list of units that are within the same category (i.e.
+// memory or time units) and a default unit to use for this type of unit.
+type unitType struct {
+	defaultUnit unit
+	units       []unit
+}
+
+// findByAlias returns the unit associated with the specified alias. It returns
+// nil if the unit with such alias is not found.
+func (ut unitType) findByAlias(alias string) *unit {
+	for _, u := range ut.units {
+		for _, a := range u.aliases {
+			if alias == a {
+				return &u
+			}
+		}
+	}
+	return nil
+}
+
+// sniffUnit simplifies the input alias and returns the unit associated with the
+// specified alias. It returns nil if the unit with such alias is not found.
+func (ut unitType) sniffUnit(unit string) *unit { + unit = strings.ToLower(unit) + if len(unit) > 2 { + unit = strings.TrimSuffix(unit, "s") + } + return ut.findByAlias(unit) +} + +// autoScale takes in the value with units of the base unit and returns +// that value scaled to a reasonable unit if a reasonable unit is +// found. +func (ut unitType) autoScale(value float64) (float64, string, bool) { + var f float64 + var unit string + for _, u := range ut.units { + if u.factor >= f && (value/u.factor) >= 1.0 { + f = u.factor + unit = u.canonicalName + } + } + if f == 0 { + return 0, "", false + } + return value / f, unit, true +} + +// convertUnit converts a value from the fromUnit to the toUnit, autoscaling +// the value if the toUnit is "minimum" or "auto". If the fromUnit is not +// included in the unitType, then a false boolean will be returned. If the +// toUnit is not in the unitType, the value will be returned in terms of the +// default unitType. +func (ut unitType) convertUnit(value int64, fromUnitStr, toUnitStr string) (float64, string, bool) { + fromUnit := ut.sniffUnit(fromUnitStr) + if fromUnit == nil { + return 0, "", false + } + v := float64(value) * fromUnit.factor + if toUnitStr == "minimum" || toUnitStr == "auto" { + if v, u, ok := ut.autoScale(v); ok { + return v, u, true + } + return v / ut.defaultUnit.factor, ut.defaultUnit.canonicalName, true + } + toUnit := ut.sniffUnit(toUnitStr) + if toUnit == nil { + return v / ut.defaultUnit.factor, ut.defaultUnit.canonicalName, true + } + return v / toUnit.factor, toUnit.canonicalName, true +} + +var unitTypes = []unitType{{ + units: []unit{ + {"B", []string{"b", "byte"}, 1}, + {"kB", []string{"kb", "kbyte", "kilobyte"}, float64(1 << 10)}, + {"MB", []string{"mb", "mbyte", "megabyte"}, float64(1 << 20)}, + {"GB", []string{"gb", "gbyte", "gigabyte"}, float64(1 << 30)}, + {"TB", []string{"tb", "tbyte", "terabyte"}, float64(1 << 40)}, + {"PB", []string{"pb", "pbyte", "petabyte"}, float64(1 << 50)}, + }, + 
defaultUnit: unit{"B", []string{"b", "byte"}, 1}, +}, { + units: []unit{ + {"ns", []string{"ns", "nanosecond"}, float64(time.Nanosecond)}, + {"us", []string{"μs", "us", "microsecond"}, float64(time.Microsecond)}, + {"ms", []string{"ms", "millisecond"}, float64(time.Millisecond)}, + {"s", []string{"s", "sec", "second"}, float64(time.Second)}, + {"hrs", []string{"hour", "hr"}, float64(time.Hour)}, + }, + defaultUnit: unit{"s", []string{}, float64(time.Second)}, +}, { + units: []unit{ + {"n*GCU", []string{"nanogcu"}, 1e-9}, + {"u*GCU", []string{"microgcu"}, 1e-6}, + {"m*GCU", []string{"milligcu"}, 1e-3}, + {"GCU", []string{"gcu"}, 1}, + {"k*GCU", []string{"kilogcu"}, 1e3}, + {"M*GCU", []string{"megagcu"}, 1e6}, + {"G*GCU", []string{"gigagcu"}, 1e9}, + {"T*GCU", []string{"teragcu"}, 1e12}, + {"P*GCU", []string{"petagcu"}, 1e15}, + }, + defaultUnit: unit{"GCU", []string{}, 1.0}, +}} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..98eb1dd8179bb01bc87658e647f83aec0a9417ed --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go @@ -0,0 +1,216 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package plugin defines the plugin implementations that the main pprof driver requires. +package plugin + +import ( + "io" + "net/http" + "regexp" + "time" + + "github.com/google/pprof/profile" +) + +// Options groups all the optional plugins into pprof. +type Options struct { + Writer Writer + Flagset FlagSet + Fetch Fetcher + Sym Symbolizer + Obj ObjTool + UI UI + + // HTTPServer is a function that should block serving http requests, + // including the handlers specified in args. If non-nil, pprof will + // invoke this function if necessary to provide a web interface. + // + // If HTTPServer is nil, pprof will use its own internal HTTP server. + // + // A common use for a custom HTTPServer is to provide custom + // authentication checks. + HTTPServer func(args *HTTPServerArgs) error + HTTPTransport http.RoundTripper +} + +// Writer provides a mechanism to write data under a certain name, +// typically a filename. +type Writer interface { + Open(name string) (io.WriteCloser, error) +} + +// A FlagSet creates and parses command-line flags. +// It is similar to the standard flag.FlagSet. +type FlagSet interface { + // Bool, Int, Float64, and String define new flags, + // like the functions of the same name in package flag. + Bool(name string, def bool, usage string) *bool + Int(name string, def int, usage string) *int + Float64(name string, def float64, usage string) *float64 + String(name string, def string, usage string) *string + + // StringList is similar to String but allows multiple values for a + // single flag + StringList(name string, def string, usage string) *[]*string + + // ExtraUsage returns any additional text that should be printed after the + // standard usage message. The extra usage message returned includes all text + // added with AddExtraUsage(). + // The typical use of ExtraUsage is to show any custom flags defined by the + // specific pprof plugins being used. 
+ ExtraUsage() string + + // AddExtraUsage appends additional text to the end of the extra usage message. + AddExtraUsage(eu string) + + // Parse initializes the flags with their values for this run + // and returns the non-flag command line arguments. + // If an unknown flag is encountered or there are no arguments, + // Parse should call usage and return nil. + Parse(usage func()) []string +} + +// A Fetcher reads and returns the profile named by src. src can be a +// local file path or a URL. duration and timeout are units specified +// by the end user, or 0 by default. duration refers to the length of +// the profile collection, if applicable, and timeout is the amount of +// time to wait for a profile before returning an error. Returns the +// fetched profile, the URL of the actual source of the profile, or an +// error. +type Fetcher interface { + Fetch(src string, duration, timeout time.Duration) (*profile.Profile, string, error) +} + +// A Symbolizer introduces symbol information into a profile. +type Symbolizer interface { + Symbolize(mode string, srcs MappingSources, prof *profile.Profile) error +} + +// MappingSources map each profile.Mapping to the source of the profile. +// The key is either Mapping.File or Mapping.BuildId. +type MappingSources map[string][]struct { + Source string // URL of the source the mapping was collected from + Start uint64 // delta applied to addresses from this source (to represent Merge adjustments) +} + +// An ObjTool inspects shared libraries and executable files. +type ObjTool interface { + // Open opens the named object file. If the object is a shared + // library, start/limit/offset are the addresses where it is mapped + // into memory in the address space being inspected. If the object + // is a linux kernel, relocationSymbol is the name of the symbol + // corresponding to the start address. 
+	Open(file string, start, limit, offset uint64, relocationSymbol string) (ObjFile, error)
+
+	// Disasm disassembles the named object file, starting at
+	// the start address and stopping at (before) the end address.
+	Disasm(file string, start, end uint64, intelSyntax bool) ([]Inst, error)
+}
+
+// An Inst is a single instruction in an assembly listing.
+type Inst struct {
+	Addr     uint64 // virtual address of instruction
+	Text     string // instruction text
+	Function string // function name
+	File     string // source file
+	Line     int    // source line
+}
+
+// An ObjFile is a single object file: a shared library or executable.
+type ObjFile interface {
+	// Name returns the underlying file name, if available
+	Name() string
+
+	// ObjAddr returns the objdump (linker) address corresponding to a runtime
+	// address, and an error.
+	ObjAddr(addr uint64) (uint64, error)
+
+	// BuildID returns the GNU build ID of the file, or an empty string.
+	BuildID() string
+
+	// SourceLine reports the source line information for a given
+	// address in the file. Due to inlining, the source line information
+	// is in general a list of positions representing a call stack,
+	// with the leaf function first.
+	SourceLine(addr uint64) ([]Frame, error)
+
+	// Symbols returns a list of symbols in the object file.
+	// If r is not nil, Symbols restricts the list to symbols
+	// with names matching the regular expression.
+	// If addr is not zero, Symbols restricts the list to symbols
+	// containing that address.
+	Symbols(r *regexp.Regexp, addr uint64) ([]*Sym, error)
+
+	// Close closes the file, releasing associated resources.
+	Close() error
+}
+
+// A Frame describes a single line in a source file.
+type Frame struct {
+	Func string // name of function
+	File string // source file name
+	Line int    // line in file
+}
+
+// A Sym describes a single symbol in an object file.
+type Sym struct { + Name []string // names of symbol (many if symbol was dedup'ed) + File string // object file containing symbol + Start uint64 // start virtual address + End uint64 // virtual address of last byte in sym (Start+size-1) +} + +// A UI manages user interactions. +type UI interface { + // Read returns a line of text (a command) read from the user. + // prompt is printed before reading the command. + ReadLine(prompt string) (string, error) + + // Print shows a message to the user. + // It formats the text as fmt.Print would and adds a final \n if not already present. + // For line-based UI, Print writes to standard error. + // (Standard output is reserved for report data.) + Print(...interface{}) + + // PrintErr shows an error message to the user. + // It formats the text as fmt.Print would and adds a final \n if not already present. + // For line-based UI, PrintErr writes to standard error. + PrintErr(...interface{}) + + // IsTerminal returns whether the UI is known to be tied to an + // interactive terminal (as opposed to being redirected to a file). + IsTerminal() bool + + // WantBrowser indicates whether a browser should be opened with the -http option. + WantBrowser() bool + + // SetAutoComplete instructs the UI to call complete(cmd) to obtain + // the auto-completion of cmd, if the UI supports auto-completion at all. + SetAutoComplete(complete func(string) string) +} + +// HTTPServerArgs contains arguments needed by an HTTP server that +// is exporting a pprof web interface. +type HTTPServerArgs struct { + // Hostport contains the http server address (derived from flags). + Hostport string + + Host string // Host portion of Hostport + Port int // Port portion of Hostport + + // Handlers maps from URL paths to the handler to invoke to + // serve that path. 
+	Handlers map[string]http.Handler
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/package.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/package.go
new file mode 100644
index 0000000000000000000000000000000000000000..0f6dcf53508910fe41c2e3844a5f70c5dced777f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/package.go
@@ -0,0 +1,17 @@
+package report
+
+import "regexp"
+
+// pkgRE extracts package name. It looks for the first "." or "::" that occurs
+// after the last "/". (Searching after the last / allows us to correctly handle
+// names that look like "some.url.com/foo.bar".)
+var pkgRE = regexp.MustCompile(`^((.*/)?[\w\d_]+)(\.|::)([^/]*)$`)
+
+// packageName returns the package name of the named symbol, or "" if not found.
+func packageName(name string) string {
+	m := pkgRE.FindStringSubmatch(name)
+	if m == nil {
+		return ""
+	}
+	return m[1]
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/report.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/report.go
new file mode 100644
index 0000000000000000000000000000000000000000..f73e49a176c5c7d495c30ff742a97ff52d75f92f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/report.go
@@ -0,0 +1,1334 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package report summarizes a performance profile into a +// human-readable report. +package report + +import ( + "fmt" + "io" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "text/tabwriter" + "time" + + "github.com/google/pprof/internal/graph" + "github.com/google/pprof/internal/measurement" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/profile" +) + +// Output formats. +const ( + Callgrind = iota + Comments + Dis + Dot + List + Proto + Raw + Tags + Text + TopProto + Traces + Tree + WebList +) + +// Options are the formatting and filtering options used to generate a +// profile. +type Options struct { + OutputFormat int + + CumSort bool + CallTree bool + DropNegative bool + CompactLabels bool + Ratio float64 + Title string + ProfileLabels []string + ActiveFilters []string + NumLabelUnits map[string]string + + NodeCount int + NodeFraction float64 + EdgeFraction float64 + + SampleValue func(s []int64) int64 + SampleMeanDivisor func(s []int64) int64 + SampleType string + SampleUnit string // Unit for the sample data from the profile. + + OutputUnit string // Units for data formatting in report. + + Symbol *regexp.Regexp // Symbols to include on disassembly report. + SourcePath string // Search path for source files. + TrimPath string // Paths to trim from source file paths. + + IntelSyntax bool // Whether or not to print assembly in Intel syntax. +} + +// Generate generates a report as directed by the Report. 
+func Generate(w io.Writer, rpt *Report, obj plugin.ObjTool) error { + o := rpt.options + + switch o.OutputFormat { + case Comments: + return printComments(w, rpt) + case Dot: + return printDOT(w, rpt) + case Tree: + return printTree(w, rpt) + case Text: + return printText(w, rpt) + case Traces: + return printTraces(w, rpt) + case Raw: + fmt.Fprint(w, rpt.prof.String()) + return nil + case Tags: + return printTags(w, rpt) + case Proto: + return printProto(w, rpt) + case TopProto: + return printTopProto(w, rpt) + case Dis: + return printAssembly(w, rpt, obj) + case List: + return printSource(w, rpt) + case WebList: + return printWebSource(w, rpt, obj) + case Callgrind: + return printCallgrind(w, rpt) + } + return fmt.Errorf("unexpected output format") +} + +// newTrimmedGraph creates a graph for this report, trimmed according +// to the report options. +func (rpt *Report) newTrimmedGraph() (g *graph.Graph, origCount, droppedNodes, droppedEdges int) { + o := rpt.options + + // Build a graph and refine it. On each refinement step we must rebuild the graph from the samples, + // as the graph itself doesn't contain enough information to preserve full precision. + visualMode := o.OutputFormat == Dot + cumSort := o.CumSort + + // The call_tree option is only honored when generating visual representations of the callgraph. + callTree := o.CallTree && (o.OutputFormat == Dot || o.OutputFormat == Callgrind) + + // First step: Build complete graph to identify low frequency nodes, based on their cum weight. + g = rpt.newGraph(nil) + totalValue, _ := g.Nodes.Sum() + nodeCutoff := abs64(int64(float64(totalValue) * o.NodeFraction)) + edgeCutoff := abs64(int64(float64(totalValue) * o.EdgeFraction)) + + // Filter out nodes with cum value below nodeCutoff. 
+ if nodeCutoff > 0 { + if callTree { + if nodesKept := g.DiscardLowFrequencyNodePtrs(nodeCutoff); len(g.Nodes) != len(nodesKept) { + droppedNodes = len(g.Nodes) - len(nodesKept) + g.TrimTree(nodesKept) + } + } else { + if nodesKept := g.DiscardLowFrequencyNodes(nodeCutoff); len(g.Nodes) != len(nodesKept) { + droppedNodes = len(g.Nodes) - len(nodesKept) + g = rpt.newGraph(nodesKept) + } + } + } + origCount = len(g.Nodes) + + // Second step: Limit the total number of nodes. Apply specialized heuristics to improve + // visualization when generating dot output. + g.SortNodes(cumSort, visualMode) + if nodeCount := o.NodeCount; nodeCount > 0 { + // Remove low frequency tags and edges as they affect selection. + g.TrimLowFrequencyTags(nodeCutoff) + g.TrimLowFrequencyEdges(edgeCutoff) + if callTree { + if nodesKept := g.SelectTopNodePtrs(nodeCount, visualMode); len(g.Nodes) != len(nodesKept) { + g.TrimTree(nodesKept) + g.SortNodes(cumSort, visualMode) + } + } else { + if nodesKept := g.SelectTopNodes(nodeCount, visualMode); len(g.Nodes) != len(nodesKept) { + g = rpt.newGraph(nodesKept) + g.SortNodes(cumSort, visualMode) + } + } + } + + // Final step: Filter out low frequency tags and edges, and remove redundant edges that clutter + // the graph. + g.TrimLowFrequencyTags(nodeCutoff) + droppedEdges = g.TrimLowFrequencyEdges(edgeCutoff) + if visualMode { + g.RemoveRedundantEdges() + } + return +} + +func (rpt *Report) selectOutputUnit(g *graph.Graph) { + o := rpt.options + + // Select best unit for profile output. 
+ // Find the appropriate units for the smallest non-zero sample + if o.OutputUnit != "minimum" || len(g.Nodes) == 0 { + return + } + var minValue int64 + + for _, n := range g.Nodes { + nodeMin := abs64(n.FlatValue()) + if nodeMin == 0 { + nodeMin = abs64(n.CumValue()) + } + if nodeMin > 0 && (minValue == 0 || nodeMin < minValue) { + minValue = nodeMin + } + } + maxValue := rpt.total + if minValue == 0 { + minValue = maxValue + } + + if r := o.Ratio; r > 0 && r != 1 { + minValue = int64(float64(minValue) * r) + maxValue = int64(float64(maxValue) * r) + } + + _, minUnit := measurement.Scale(minValue, o.SampleUnit, "minimum") + _, maxUnit := measurement.Scale(maxValue, o.SampleUnit, "minimum") + + unit := minUnit + if minUnit != maxUnit && minValue*100 < maxValue && o.OutputFormat != Callgrind { + // Minimum and maximum values have different units. Scale + // minimum by 100 to use larger units, allowing minimum value to + // be scaled down to 0.01, except for callgrind reports since + // they can only represent integer values. + _, unit = measurement.Scale(100*minValue, o.SampleUnit, "minimum") + } + + if unit != "" { + o.OutputUnit = unit + } else { + o.OutputUnit = o.SampleUnit + } +} + +// newGraph creates a new graph for this report. If nodes is non-nil, +// only nodes whose info matches are included. Otherwise, all nodes +// are included, without trimming. +func (rpt *Report) newGraph(nodes graph.NodeSet) *graph.Graph { + o := rpt.options + + // Clean up file paths using heuristics. + prof := rpt.prof + for _, f := range prof.Function { + f.Filename = trimPath(f.Filename, o.TrimPath, o.SourcePath) + } + // Removes all numeric tags except for the bytes tag prior + // to making graph. 
+	// TODO: modify to select first numeric tag if no bytes tag
+	for _, s := range prof.Sample {
+		numLabels := make(map[string][]int64, len(s.NumLabel))
+		numUnits := make(map[string][]string, len(s.NumLabel))
+		for k, vs := range s.NumLabel {
+			if k == "bytes" {
+				unit := o.NumLabelUnits[k]
+				numValues := make([]int64, len(vs))
+				numUnit := make([]string, len(vs))
+				for i, v := range vs {
+					numValues[i] = v
+					numUnit[i] = unit
+				}
+				numLabels[k] = append(numLabels[k], numValues...)
+				numUnits[k] = append(numUnits[k], numUnit...)
+			}
+		}
+		s.NumLabel = numLabels
+		s.NumUnit = numUnits
+	}
+
+	// Remove label marking samples from the base profiles, so it does not appear
+	// as a nodelet in the graph view.
+	prof.RemoveLabel("pprof::base")
+
+	formatTag := func(v int64, key string) string {
+		return measurement.ScaledLabel(v, key, o.OutputUnit)
+	}
+
+	gopt := &graph.Options{
+		SampleValue:       o.SampleValue,
+		SampleMeanDivisor: o.SampleMeanDivisor,
+		FormatTag:         formatTag,
+		CallTree:          o.CallTree && (o.OutputFormat == Dot || o.OutputFormat == Callgrind),
+		DropNegative:      o.DropNegative,
+		KeptNodes:         nodes,
+	}
+
+	// Only keep binary names for disassembly-based reports, otherwise
+	// remove it to allow merging of functions across binaries.
+	switch o.OutputFormat {
+	case Raw, List, WebList, Dis, Callgrind:
+		gopt.ObjNames = true
+	}
+
+	return graph.New(rpt.prof, gopt)
+}
+
+// printProto writes the incoming proto via the writer w.
+// If the divide_by option has been specified, samples are scaled appropriately.
+func printProto(w io.Writer, rpt *Report) error {
+	p, o := rpt.prof, rpt.options
+
+	// Apply the sample ratio to all samples before saving the profile.
+	if r := o.Ratio; r > 0 && r != 1 {
+		for _, sample := range p.Sample {
+			for i, v := range sample.Value {
+				sample.Value[i] = int64(float64(v) * r)
+			}
+		}
+	}
+	return p.Write(w)
+}
+
+// printTopProto writes a list of the hottest routines in a profile as a profile.proto.
+func printTopProto(w io.Writer, rpt *Report) error { + p := rpt.prof + o := rpt.options + g, _, _, _ := rpt.newTrimmedGraph() + rpt.selectOutputUnit(g) + + out := profile.Profile{ + SampleType: []*profile.ValueType{ + {Type: "cum", Unit: o.OutputUnit}, + {Type: "flat", Unit: o.OutputUnit}, + }, + TimeNanos: p.TimeNanos, + DurationNanos: p.DurationNanos, + PeriodType: p.PeriodType, + Period: p.Period, + } + functionMap := make(functionMap) + for i, n := range g.Nodes { + f, added := functionMap.findOrAdd(n.Info) + if added { + out.Function = append(out.Function, f) + } + flat, cum := n.FlatValue(), n.CumValue() + l := &profile.Location{ + ID: uint64(i + 1), + Address: n.Info.Address, + Line: []profile.Line{ + { + Line: int64(n.Info.Lineno), + Function: f, + }, + }, + } + + fv, _ := measurement.Scale(flat, o.SampleUnit, o.OutputUnit) + cv, _ := measurement.Scale(cum, o.SampleUnit, o.OutputUnit) + s := &profile.Sample{ + Location: []*profile.Location{l}, + Value: []int64{int64(cv), int64(fv)}, + } + out.Location = append(out.Location, l) + out.Sample = append(out.Sample, s) + } + + return out.Write(w) +} + +type functionMap map[string]*profile.Function + +// findOrAdd takes a node representing a function, adds the function +// represented by the node to the map if the function is not already present, +// and returns the function the node represents. This also returns a boolean, +// which is true if the function was added and false otherwise. +func (fm functionMap) findOrAdd(ni graph.NodeInfo) (*profile.Function, bool) { + fName := fmt.Sprintf("%q%q%q%d", ni.Name, ni.OrigName, ni.File, ni.StartLine) + + if f := fm[fName]; f != nil { + return f, false + } + + f := &profile.Function{ + ID: uint64(len(fm) + 1), + Name: ni.Name, + SystemName: ni.OrigName, + Filename: ni.File, + StartLine: int64(ni.StartLine), + } + fm[fName] = f + return f, true +} + +// printAssembly prints an annotated assembly listing. 
+func printAssembly(w io.Writer, rpt *Report, obj plugin.ObjTool) error { + return PrintAssembly(w, rpt, obj, -1) +} + +// PrintAssembly prints annotated disassembly of rpt to w. +func PrintAssembly(w io.Writer, rpt *Report, obj plugin.ObjTool, maxFuncs int) error { + o := rpt.options + prof := rpt.prof + + g := rpt.newGraph(nil) + + // If the regexp source can be parsed as an address, also match + // functions that land on that address. + var address *uint64 + if hex, err := strconv.ParseUint(o.Symbol.String(), 0, 64); err == nil { + address = &hex + } + + fmt.Fprintln(w, "Total:", rpt.formatValue(rpt.total)) + symbols := symbolsFromBinaries(prof, g, o.Symbol, address, obj) + symNodes := nodesPerSymbol(g.Nodes, symbols) + + // Sort for printing. + var syms []*objSymbol + for s := range symNodes { + syms = append(syms, s) + } + byName := func(a, b *objSymbol) bool { + if na, nb := a.sym.Name[0], b.sym.Name[0]; na != nb { + return na < nb + } + return a.sym.Start < b.sym.Start + } + if maxFuncs < 0 { + sort.Sort(orderSyms{syms, byName}) + } else { + byFlatSum := func(a, b *objSymbol) bool { + suma, _ := symNodes[a].Sum() + sumb, _ := symNodes[b].Sum() + if suma != sumb { + return suma > sumb + } + return byName(a, b) + } + sort.Sort(orderSyms{syms, byFlatSum}) + if len(syms) > maxFuncs { + syms = syms[:maxFuncs] + } + } + + if len(syms) == 0 { + // The symbol regexp case + if address == nil { + return fmt.Errorf("no matches found for regexp %s", o.Symbol) + } + + // The address case + if len(symbols) == 0 { + return fmt.Errorf("no matches found for address 0x%x", *address) + } + return fmt.Errorf("address 0x%x found in binary, but the corresponding symbols do not have samples in the profile", *address) + } + + // Correlate the symbols from the binary with the profile samples. + for _, s := range syms { + sns := symNodes[s] + + // Gather samples for this symbol. + flatSum, cumSum := sns.Sum() + + // Get the function assembly. 
+ insts, err := obj.Disasm(s.sym.File, s.sym.Start, s.sym.End, o.IntelSyntax) + if err != nil { + return err + } + + ns := annotateAssembly(insts, sns, s.file) + + fmt.Fprintf(w, "ROUTINE ======================== %s\n", s.sym.Name[0]) + for _, name := range s.sym.Name[1:] { + fmt.Fprintf(w, " AKA ======================== %s\n", name) + } + fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n", + rpt.formatValue(flatSum), rpt.formatValue(cumSum), + measurement.Percentage(cumSum, rpt.total)) + + function, file, line := "", "", 0 + for _, n := range ns { + locStr := "" + // Skip loc information if it hasn't changed from previous instruction. + if n.function != function || n.file != file || n.line != line { + function, file, line = n.function, n.file, n.line + if n.function != "" { + locStr = n.function + " " + } + if n.file != "" { + locStr += n.file + if n.line != 0 { + locStr += fmt.Sprintf(":%d", n.line) + } + } + } + switch { + case locStr == "": + // No location info, just print the instruction. + fmt.Fprintf(w, "%10s %10s %10x: %s\n", + valueOrDot(n.flatValue(), rpt), + valueOrDot(n.cumValue(), rpt), + n.address, n.instruction, + ) + case len(n.instruction) < 40: + // Short instruction, print loc on the same line. + fmt.Fprintf(w, "%10s %10s %10x: %-40s;%s\n", + valueOrDot(n.flatValue(), rpt), + valueOrDot(n.cumValue(), rpt), + n.address, n.instruction, + locStr, + ) + default: + // Long instruction, print loc on a separate line. + fmt.Fprintf(w, "%74s;%s\n", "", locStr) + fmt.Fprintf(w, "%10s %10s %10x: %s\n", + valueOrDot(n.flatValue(), rpt), + valueOrDot(n.cumValue(), rpt), + n.address, n.instruction, + ) + } + } + } + return nil +} + +// symbolsFromBinaries examines the binaries listed on the profile that have +// associated samples, and returns the identified symbols matching rx. 
+func symbolsFromBinaries(prof *profile.Profile, g *graph.Graph, rx *regexp.Regexp, address *uint64, obj plugin.ObjTool) []*objSymbol { + // fileHasSamplesAndMatched is for optimization to speed up pprof: when later + // walking through the profile mappings, it will only examine the ones that have + // samples and are matched to the regexp. + fileHasSamplesAndMatched := make(map[string]bool) + for _, n := range g.Nodes { + if name := n.Info.PrintableName(); rx.MatchString(name) && n.Info.Objfile != "" { + fileHasSamplesAndMatched[n.Info.Objfile] = true + } + } + + // Walk all mappings looking for matching functions with samples. + var objSyms []*objSymbol + for _, m := range prof.Mapping { + // Skip the mapping if its file does not have samples or is not matched to + // the regexp (unless the regexp is an address and the mapping's range covers + // the address) + if !fileHasSamplesAndMatched[m.File] { + if address == nil || !(m.Start <= *address && *address <= m.Limit) { + continue + } + } + + f, err := obj.Open(m.File, m.Start, m.Limit, m.Offset, m.KernelRelocationSymbol) + if err != nil { + fmt.Printf("%v\n", err) + continue + } + + // Find symbols in this binary matching the user regexp. + var addr uint64 + if address != nil { + addr = *address + } + msyms, err := f.Symbols(rx, addr) + f.Close() + if err != nil { + continue + } + for _, ms := range msyms { + objSyms = append(objSyms, + &objSymbol{ + sym: ms, + file: f, + }, + ) + } + } + + return objSyms +} + +// objSym represents a symbol identified from a binary. It includes +// the SymbolInfo from the disasm package and the base that must be +// added to correspond to sample addresses +type objSymbol struct { + sym *plugin.Sym + file plugin.ObjFile +} + +// orderSyms is a wrapper type to sort []*objSymbol by a supplied comparator. 
+type orderSyms struct { + v []*objSymbol + less func(a, b *objSymbol) bool +} + +func (o orderSyms) Len() int { return len(o.v) } +func (o orderSyms) Less(i, j int) bool { return o.less(o.v[i], o.v[j]) } +func (o orderSyms) Swap(i, j int) { o.v[i], o.v[j] = o.v[j], o.v[i] } + +// nodesPerSymbol classifies nodes into a group of symbols. +func nodesPerSymbol(ns graph.Nodes, symbols []*objSymbol) map[*objSymbol]graph.Nodes { + symNodes := make(map[*objSymbol]graph.Nodes) + for _, s := range symbols { + // Gather samples for this symbol. + for _, n := range ns { + if address, err := s.file.ObjAddr(n.Info.Address); err == nil && address >= s.sym.Start && address < s.sym.End { + symNodes[s] = append(symNodes[s], n) + } + } + } + return symNodes +} + +type assemblyInstruction struct { + address uint64 + instruction string + function string + file string + line int + flat, cum int64 + flatDiv, cumDiv int64 + startsBlock bool + inlineCalls []callID +} + +type callID struct { + file string + line int +} + +func (a *assemblyInstruction) flatValue() int64 { + if a.flatDiv != 0 { + return a.flat / a.flatDiv + } + return a.flat +} + +func (a *assemblyInstruction) cumValue() int64 { + if a.cumDiv != 0 { + return a.cum / a.cumDiv + } + return a.cum +} + +// annotateAssembly annotates a set of assembly instructions with a +// set of samples. It returns a set of nodes to display. base is an +// offset to adjust the sample addresses. +func annotateAssembly(insts []plugin.Inst, samples graph.Nodes, file plugin.ObjFile) []assemblyInstruction { + // Add end marker to simplify printing loop. + insts = append(insts, plugin.Inst{ + Addr: ^uint64(0), + }) + + // Ensure samples are sorted by address. 
+	samples.Sort(graph.AddressOrder)
+
+	s := 0
+	asm := make([]assemblyInstruction, 0, len(insts))
+	for ix, in := range insts[:len(insts)-1] {
+		n := assemblyInstruction{
+			address:     in.Addr,
+			instruction: in.Text,
+			function:    in.Function,
+			line:        in.Line,
+		}
+		if in.File != "" {
+			n.file = filepath.Base(in.File)
+		}
+
+		// Sum all the samples until the next instruction (to account
+		// for samples attributed to the middle of an instruction).
+		for next := insts[ix+1].Addr; s < len(samples); s++ {
+			if addr, err := file.ObjAddr(samples[s].Info.Address); err != nil || addr >= next {
+				break
+			}
+			sample := samples[s]
+			n.flatDiv += sample.FlatDiv
+			n.flat += sample.Flat
+			n.cumDiv += sample.CumDiv
+			n.cum += sample.Cum
+			if f := sample.Info.File; f != "" && n.file == "" {
+				n.file = filepath.Base(f)
+			}
+			if ln := sample.Info.Lineno; ln != 0 && n.line == 0 {
+				n.line = ln
+			}
+			if f := sample.Info.Name; f != "" && n.function == "" {
+				n.function = f
+			}
+		}
+		asm = append(asm, n)
+	}
+
+	return asm
+}
+
+// valueOrDot formats a value according to a report, intercepting zero
+// values.
+func valueOrDot(value int64, rpt *Report) string {
+	if value == 0 {
+		return "."
+	}
+	return rpt.formatValue(value)
+}
+
+// printTags collects all tags referenced in the profile and prints
+// them in a sorted table.
+func printTags(w io.Writer, rpt *Report) error {
+	p := rpt.prof
+
+	o := rpt.options
+	formatTag := func(v int64, key string) string {
+		return measurement.ScaledLabel(v, key, o.OutputUnit)
+	}
+
+	// Hashtable to accumulate tags as key,value,count.
+ tagMap := make(map[string]map[string]int64) + for _, s := range p.Sample { + for key, vals := range s.Label { + for _, val := range vals { + valueMap, ok := tagMap[key] + if !ok { + valueMap = make(map[string]int64) + tagMap[key] = valueMap + } + valueMap[val] += o.SampleValue(s.Value) + } + } + for key, vals := range s.NumLabel { + unit := o.NumLabelUnits[key] + for _, nval := range vals { + val := formatTag(nval, unit) + valueMap, ok := tagMap[key] + if !ok { + valueMap = make(map[string]int64) + tagMap[key] = valueMap + } + valueMap[val] += o.SampleValue(s.Value) + } + } + } + + tagKeys := make([]*graph.Tag, 0, len(tagMap)) + for key := range tagMap { + tagKeys = append(tagKeys, &graph.Tag{Name: key}) + } + tabw := tabwriter.NewWriter(w, 0, 0, 1, ' ', tabwriter.AlignRight) + for _, tagKey := range graph.SortTags(tagKeys, true) { + var total int64 + key := tagKey.Name + tags := make([]*graph.Tag, 0, len(tagMap[key])) + for t, c := range tagMap[key] { + total += c + tags = append(tags, &graph.Tag{Name: t, Flat: c}) + } + + f, u := measurement.Scale(total, o.SampleUnit, o.OutputUnit) + fmt.Fprintf(tabw, "%s:\t Total %.1f%s\n", key, f, u) + for _, t := range graph.SortTags(tags, true) { + f, u := measurement.Scale(t.FlatValue(), o.SampleUnit, o.OutputUnit) + if total > 0 { + fmt.Fprintf(tabw, " \t%.1f%s (%s):\t %s\n", f, u, measurement.Percentage(t.FlatValue(), total), t.Name) + } else { + fmt.Fprintf(tabw, " \t%.1f%s:\t %s\n", f, u, t.Name) + } + } + fmt.Fprintln(tabw) + } + return tabw.Flush() +} + +// printComments prints all freeform comments in the profile. +func printComments(w io.Writer, rpt *Report) error { + p := rpt.prof + + for _, c := range p.Comments { + fmt.Fprintln(w, c) + } + return nil +} + +// TextItem holds a single text report entry. 
+type TextItem struct { + Name string + InlineLabel string // Not empty if inlined + Flat, Cum int64 // Raw values + FlatFormat, CumFormat string // Formatted values +} + +// TextItems returns a list of text items from the report and a list +// of labels that describe the report. +func TextItems(rpt *Report) ([]TextItem, []string) { + g, origCount, droppedNodes, _ := rpt.newTrimmedGraph() + rpt.selectOutputUnit(g) + labels := reportLabels(rpt, g, origCount, droppedNodes, 0, false) + + var items []TextItem + var flatSum int64 + for _, n := range g.Nodes { + name, flat, cum := n.Info.PrintableName(), n.FlatValue(), n.CumValue() + + var inline, noinline bool + for _, e := range n.In { + if e.Inline { + inline = true + } else { + noinline = true + } + } + + var inl string + if inline { + if noinline { + inl = "(partial-inline)" + } else { + inl = "(inline)" + } + } + + flatSum += flat + items = append(items, TextItem{ + Name: name, + InlineLabel: inl, + Flat: flat, + Cum: cum, + FlatFormat: rpt.formatValue(flat), + CumFormat: rpt.formatValue(cum), + }) + } + return items, labels +} + +// printText prints a flat text report for a profile. +func printText(w io.Writer, rpt *Report) error { + items, labels := TextItems(rpt) + fmt.Fprintln(w, strings.Join(labels, "\n")) + fmt.Fprintf(w, "%10s %5s%% %5s%% %10s %5s%%\n", + "flat", "flat", "sum", "cum", "cum") + var flatSum int64 + for _, item := range items { + inl := item.InlineLabel + if inl != "" { + inl = " " + inl + } + flatSum += item.Flat + fmt.Fprintf(w, "%10s %s %s %10s %s %s%s\n", + item.FlatFormat, measurement.Percentage(item.Flat, rpt.total), + measurement.Percentage(flatSum, rpt.total), + item.CumFormat, measurement.Percentage(item.Cum, rpt.total), + item.Name, inl) + } + return nil +} + +// printTraces prints all traces from a profile. 
+func printTraces(w io.Writer, rpt *Report) error { + fmt.Fprintln(w, strings.Join(ProfileLabels(rpt), "\n")) + + prof := rpt.prof + o := rpt.options + + const separator = "-----------+-------------------------------------------------------" + + _, locations := graph.CreateNodes(prof, &graph.Options{}) + for _, sample := range prof.Sample { + type stk struct { + *graph.NodeInfo + inline bool + } + var stack []stk + for _, loc := range sample.Location { + nodes := locations[loc.ID] + for i, n := range nodes { + // The inline flag may be inaccurate if 'show' or 'hide' filter is + // used. See https://github.com/google/pprof/issues/511. + inline := i != len(nodes)-1 + stack = append(stack, stk{&n.Info, inline}) + } + } + + if len(stack) == 0 { + continue + } + + fmt.Fprintln(w, separator) + // Print any text labels for the sample. + var labels []string + for s, vs := range sample.Label { + labels = append(labels, fmt.Sprintf("%10s: %s\n", s, strings.Join(vs, " "))) + } + sort.Strings(labels) + fmt.Fprint(w, strings.Join(labels, "")) + + // Print any numeric labels for the sample + var numLabels []string + for key, vals := range sample.NumLabel { + unit := o.NumLabelUnits[key] + numValues := make([]string, len(vals)) + for i, vv := range vals { + numValues[i] = measurement.Label(vv, unit) + } + numLabels = append(numLabels, fmt.Sprintf("%10s: %s\n", key, strings.Join(numValues, " "))) + } + sort.Strings(numLabels) + fmt.Fprint(w, strings.Join(numLabels, "")) + + var d, v int64 + v = o.SampleValue(sample.Value) + if o.SampleMeanDivisor != nil { + d = o.SampleMeanDivisor(sample.Value) + } + // Print call stack. + if d != 0 { + v = v / d + } + for i, s := range stack { + var vs, inline string + if i == 0 { + vs = rpt.formatValue(v) + } + if s.inline { + inline = " (inline)" + } + fmt.Fprintf(w, "%10s %s%s\n", vs, s.PrintableName(), inline) + } + } + fmt.Fprintln(w, separator) + return nil +} + +// printCallgrind prints a graph for a profile on callgrind format. 
+func printCallgrind(w io.Writer, rpt *Report) error { + o := rpt.options + rpt.options.NodeFraction = 0 + rpt.options.EdgeFraction = 0 + rpt.options.NodeCount = 0 + + g, _, _, _ := rpt.newTrimmedGraph() + rpt.selectOutputUnit(g) + + nodeNames := getDisambiguatedNames(g) + + fmt.Fprintln(w, "positions: instr line") + fmt.Fprintln(w, "events:", o.SampleType+"("+o.OutputUnit+")") + + objfiles := make(map[string]int) + files := make(map[string]int) + names := make(map[string]int) + + // prevInfo points to the previous NodeInfo. + // It is used to group cost lines together as much as possible. + var prevInfo *graph.NodeInfo + for _, n := range g.Nodes { + if prevInfo == nil || n.Info.Objfile != prevInfo.Objfile || n.Info.File != prevInfo.File || n.Info.Name != prevInfo.Name { + fmt.Fprintln(w) + fmt.Fprintln(w, "ob="+callgrindName(objfiles, n.Info.Objfile)) + fmt.Fprintln(w, "fl="+callgrindName(files, n.Info.File)) + fmt.Fprintln(w, "fn="+callgrindName(names, n.Info.Name)) + } + + addr := callgrindAddress(prevInfo, n.Info.Address) + sv, _ := measurement.Scale(n.FlatValue(), o.SampleUnit, o.OutputUnit) + fmt.Fprintf(w, "%s %d %d\n", addr, n.Info.Lineno, int64(sv)) + + // Print outgoing edges. + for _, out := range n.Out.Sort() { + c, _ := measurement.Scale(out.Weight, o.SampleUnit, o.OutputUnit) + callee := out.Dest + fmt.Fprintln(w, "cfl="+callgrindName(files, callee.Info.File)) + fmt.Fprintln(w, "cfn="+callgrindName(names, nodeNames[callee])) + // pprof doesn't have a flat weight for a call, leave as 0. + fmt.Fprintf(w, "calls=0 %s %d\n", callgrindAddress(prevInfo, callee.Info.Address), callee.Info.Lineno) + // TODO: This address may be in the middle of a call + // instruction. It would be best to find the beginning + // of the instruction, but the tools seem to handle + // this OK. 
+ fmt.Fprintf(w, "* * %d\n", int64(c)) + } + + prevInfo = &n.Info + } + + return nil +} + +// getDisambiguatedNames returns a map from each node in the graph to +// the name to use in the callgrind output. Callgrind merges all +// functions with the same [file name, function name]. Add a [%d/n] +// suffix to disambiguate nodes with different values of +// node.Function, which we want to keep separate. In particular, this +// affects graphs created with --call_tree, where nodes from different +// contexts are associated to different Functions. +func getDisambiguatedNames(g *graph.Graph) map[*graph.Node]string { + nodeName := make(map[*graph.Node]string, len(g.Nodes)) + + type names struct { + file, function string + } + + // nameFunctionIndex maps the callgrind names (filename, function) + // to the node.Function values found for that name, and each + // node.Function value to a sequential index to be used on the + // disambiguated name. + nameFunctionIndex := make(map[names]map[*graph.Node]int) + for _, n := range g.Nodes { + nm := names{n.Info.File, n.Info.Name} + p, ok := nameFunctionIndex[nm] + if !ok { + p = make(map[*graph.Node]int) + nameFunctionIndex[nm] = p + } + if _, ok := p[n.Function]; !ok { + p[n.Function] = len(p) + } + } + + for _, n := range g.Nodes { + nm := names{n.Info.File, n.Info.Name} + nodeName[n] = n.Info.Name + if p := nameFunctionIndex[nm]; len(p) > 1 { + // If there is more than one function, add suffix to disambiguate. + nodeName[n] += fmt.Sprintf(" [%d/%d]", p[n.Function]+1, len(p)) + } + } + return nodeName +} + +// callgrindName implements the callgrind naming compression scheme. +// For names not previously seen returns "(N) name", where N is a +// unique index. For names previously seen returns "(N)" where N is +// the index returned the first time. 
+func callgrindName(names map[string]int, name string) string { + if name == "" { + return "" + } + if id, ok := names[name]; ok { + return fmt.Sprintf("(%d)", id) + } + id := len(names) + 1 + names[name] = id + return fmt.Sprintf("(%d) %s", id, name) +} + +// callgrindAddress implements the callgrind subposition compression scheme if +// possible. If prevInfo != nil, it contains the previous address. The current +// address can be given relative to the previous address, with an explicit +/- +// to indicate it is relative, or * for the same address. +func callgrindAddress(prevInfo *graph.NodeInfo, curr uint64) string { + abs := fmt.Sprintf("%#x", curr) + if prevInfo == nil { + return abs + } + + prev := prevInfo.Address + if prev == curr { + return "*" + } + + diff := int64(curr - prev) + relative := fmt.Sprintf("%+d", diff) + + // Only bother to use the relative address if it is actually shorter. + if len(relative) < len(abs) { + return relative + } + + return abs +} + +// printTree prints a tree-based report in text form. +func printTree(w io.Writer, rpt *Report) error { + const separator = "----------------------------------------------------------+-------------" + const legend = " flat flat% sum% cum cum% calls calls% + context " + + g, origCount, droppedNodes, _ := rpt.newTrimmedGraph() + rpt.selectOutputUnit(g) + + fmt.Fprintln(w, strings.Join(reportLabels(rpt, g, origCount, droppedNodes, 0, false), "\n")) + + fmt.Fprintln(w, separator) + fmt.Fprintln(w, legend) + var flatSum int64 + + rx := rpt.options.Symbol + matched := 0 + for _, n := range g.Nodes { + name, flat, cum := n.Info.PrintableName(), n.FlatValue(), n.CumValue() + + // Skip any entries that do not match the regexp (for the "peek" command). + if rx != nil && !rx.MatchString(name) { + continue + } + matched++ + + fmt.Fprintln(w, separator) + // Print incoming edges. 
+ inEdges := n.In.Sort() + for _, in := range inEdges { + var inline string + if in.Inline { + inline = " (inline)" + } + fmt.Fprintf(w, "%50s %s | %s%s\n", rpt.formatValue(in.Weight), + measurement.Percentage(in.Weight, cum), in.Src.Info.PrintableName(), inline) + } + + // Print current node. + flatSum += flat + fmt.Fprintf(w, "%10s %s %s %10s %s | %s\n", + rpt.formatValue(flat), + measurement.Percentage(flat, rpt.total), + measurement.Percentage(flatSum, rpt.total), + rpt.formatValue(cum), + measurement.Percentage(cum, rpt.total), + name) + + // Print outgoing edges. + outEdges := n.Out.Sort() + for _, out := range outEdges { + var inline string + if out.Inline { + inline = " (inline)" + } + fmt.Fprintf(w, "%50s %s | %s%s\n", rpt.formatValue(out.Weight), + measurement.Percentage(out.Weight, cum), out.Dest.Info.PrintableName(), inline) + } + } + if len(g.Nodes) > 0 { + fmt.Fprintln(w, separator) + } + if rx != nil && matched == 0 { + return fmt.Errorf("no matches found for regexp: %s", rx) + } + return nil +} + +// GetDOT returns a graph suitable for dot processing along with some +// configuration information. +func GetDOT(rpt *Report) (*graph.Graph, *graph.DotConfig) { + g, origCount, droppedNodes, droppedEdges := rpt.newTrimmedGraph() + rpt.selectOutputUnit(g) + labels := reportLabels(rpt, g, origCount, droppedNodes, droppedEdges, true) + + c := &graph.DotConfig{ + Title: rpt.options.Title, + Labels: labels, + FormatValue: rpt.formatValue, + Total: rpt.total, + } + return g, c +} + +// printDOT prints an annotated callgraph in DOT format. +func printDOT(w io.Writer, rpt *Report) error { + g, c := GetDOT(rpt) + graph.ComposeDot(w, g, &graph.DotAttributes{}, c) + return nil +} + +// ProfileLabels returns printable labels for a profile. 
+func ProfileLabels(rpt *Report) []string { + label := []string{} + prof := rpt.prof + o := rpt.options + if len(prof.Mapping) > 0 { + if prof.Mapping[0].File != "" { + label = append(label, "File: "+filepath.Base(prof.Mapping[0].File)) + } + if prof.Mapping[0].BuildID != "" { + label = append(label, "Build ID: "+prof.Mapping[0].BuildID) + } + } + // Only include comments that do not start with '#'. + for _, c := range prof.Comments { + if !strings.HasPrefix(c, "#") { + label = append(label, c) + } + } + if o.SampleType != "" { + label = append(label, "Type: "+o.SampleType) + } + if prof.TimeNanos != 0 { + const layout = "Jan 2, 2006 at 3:04pm (MST)" + label = append(label, "Time: "+time.Unix(0, prof.TimeNanos).Format(layout)) + } + if prof.DurationNanos != 0 { + duration := measurement.Label(prof.DurationNanos, "nanoseconds") + totalNanos, totalUnit := measurement.Scale(rpt.total, o.SampleUnit, "nanoseconds") + var ratio string + if totalUnit == "ns" && totalNanos != 0 { + ratio = "(" + measurement.Percentage(int64(totalNanos), prof.DurationNanos) + ")" + } + label = append(label, fmt.Sprintf("Duration: %s, Total samples = %s %s", duration, rpt.formatValue(rpt.total), ratio)) + } + return label +} + +// reportLabels returns printable labels for a report. Includes +// profileLabels. +func reportLabels(rpt *Report, g *graph.Graph, origCount, droppedNodes, droppedEdges int, fullHeaders bool) []string { + nodeFraction := rpt.options.NodeFraction + edgeFraction := rpt.options.EdgeFraction + nodeCount := len(g.Nodes) + + var label []string + if len(rpt.options.ProfileLabels) > 0 { + label = append(label, rpt.options.ProfileLabels...) + } else if fullHeaders || !rpt.options.CompactLabels { + label = ProfileLabels(rpt) + } + + var flatSum int64 + for _, n := range g.Nodes { + flatSum = flatSum + n.FlatValue() + } + + if len(rpt.options.ActiveFilters) > 0 { + activeFilters := legendActiveFilters(rpt.options.ActiveFilters) + label = append(label, activeFilters...) 
+ } + + label = append(label, fmt.Sprintf("Showing nodes accounting for %s, %s of %s total", rpt.formatValue(flatSum), strings.TrimSpace(measurement.Percentage(flatSum, rpt.total)), rpt.formatValue(rpt.total))) + + if rpt.total != 0 { + if droppedNodes > 0 { + label = append(label, genLabel(droppedNodes, "node", "cum", + rpt.formatValue(abs64(int64(float64(rpt.total)*nodeFraction))))) + } + if droppedEdges > 0 { + label = append(label, genLabel(droppedEdges, "edge", "freq", + rpt.formatValue(abs64(int64(float64(rpt.total)*edgeFraction))))) + } + if nodeCount > 0 && nodeCount < origCount { + label = append(label, fmt.Sprintf("Showing top %d nodes out of %d", + nodeCount, origCount)) + } + } + + // Help new users understand the graph. + // A new line is intentionally added here to better show this message. + if fullHeaders { + label = append(label, "\nSee https://git.io/JfYMW for how to read the graph") + } + + return label +} + +func legendActiveFilters(activeFilters []string) []string { + legendActiveFilters := make([]string, len(activeFilters)+1) + legendActiveFilters[0] = "Active filters:" + for i, s := range activeFilters { + if len(s) > 80 { + s = s[:80] + "…" + } + legendActiveFilters[i+1] = " " + s + } + return legendActiveFilters +} + +func genLabel(d int, n, l, f string) string { + if d > 1 { + n = n + "s" + } + return fmt.Sprintf("Dropped %d %s (%s <= %s)", d, n, l, f) +} + +// New builds a new report indexing the sample values interpreting the +// samples with the provided function. +func New(prof *profile.Profile, o *Options) *Report { + format := func(v int64) string { + if r := o.Ratio; r > 0 && r != 1 { + fv := float64(v) * r + v = int64(fv) + } + return measurement.ScaledLabel(v, o.SampleUnit, o.OutputUnit) + } + return &Report{prof, computeTotal(prof, o.SampleValue, o.SampleMeanDivisor), + o, format} +} + +// NewDefault builds a new report indexing the last sample value +// available. 
+func NewDefault(prof *profile.Profile, options Options) *Report { + index := len(prof.SampleType) - 1 + o := &options + if o.Title == "" && len(prof.Mapping) > 0 && prof.Mapping[0].File != "" { + o.Title = filepath.Base(prof.Mapping[0].File) + } + o.SampleType = prof.SampleType[index].Type + o.SampleUnit = strings.ToLower(prof.SampleType[index].Unit) + o.SampleValue = func(v []int64) int64 { + return v[index] + } + return New(prof, o) +} + +// computeTotal computes the sum of the absolute value of all sample values. +// If any samples have label indicating they belong to the diff base, then the +// total will only include samples with that label. +func computeTotal(prof *profile.Profile, value, meanDiv func(v []int64) int64) int64 { + var div, total, diffDiv, diffTotal int64 + for _, sample := range prof.Sample { + var d, v int64 + v = value(sample.Value) + if meanDiv != nil { + d = meanDiv(sample.Value) + } + if v < 0 { + v = -v + } + total += v + div += d + if sample.DiffBaseSample() { + diffTotal += v + diffDiv += d + } + } + if diffTotal > 0 { + total = diffTotal + div = diffDiv + } + if div != 0 { + return total / div + } + return total +} + +// Report contains the data and associated routines to extract a +// report from a profile. +type Report struct { + prof *profile.Profile + total int64 + options *Options + formatValue func(int64) string +} + +// Total returns the total number of samples in a report. 
+func (rpt *Report) Total() int64 { return rpt.total } + +func abs64(i int64) int64 { + if i < 0 { + return -i + } + return i +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/shortnames.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/shortnames.go new file mode 100644 index 0000000000000000000000000000000000000000..3d9f3f48ea17f3e00539cfced3faaaf4281ad9a3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/shortnames.go @@ -0,0 +1,39 @@ +// Copyright 2022 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package report + +import ( + "regexp" + + "github.com/google/pprof/internal/graph" +) + +var sepRE = regexp.MustCompile(`::|\.`) + +// shortNameList returns a non-empty sequence of shortened names +// (in decreasing preference) that can be used to represent name. 
+func shortNameList(name string) []string { + name = graph.ShortenFunctionName(name) + seps := sepRE.FindAllStringIndex(name, -1) + result := make([]string, 0, len(seps)+1) + result = append(result, name) + for _, sep := range seps { + // Suffix starting just after sep + if sep[1] < len(name) { + result = append(result, name[sep[1]:]) + } + } + return result +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/source.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/source.go new file mode 100644 index 0000000000000000000000000000000000000000..d8b4395265634525d919cf7a000e5b1d74e7908f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/source.go @@ -0,0 +1,1114 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package report + +// This file contains routines related to the generation of annotated +// source listings. + +import ( + "bufio" + "fmt" + "html/template" + "io" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/google/pprof/internal/graph" + "github.com/google/pprof/internal/measurement" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/profile" +) + +// printSource prints an annotated source listing, include all +// functions with samples that match the regexp rpt.options.symbol. 
+// The sources are sorted by function name and then by filename to +// eliminate potential nondeterminism. +func printSource(w io.Writer, rpt *Report) error { + o := rpt.options + g := rpt.newGraph(nil) + + // Identify all the functions that match the regexp provided. + // Group nodes for each matching function. + var functions graph.Nodes + functionNodes := make(map[string]graph.Nodes) + for _, n := range g.Nodes { + if !o.Symbol.MatchString(n.Info.Name) { + continue + } + if functionNodes[n.Info.Name] == nil { + functions = append(functions, n) + } + functionNodes[n.Info.Name] = append(functionNodes[n.Info.Name], n) + } + functions.Sort(graph.NameOrder) + + if len(functionNodes) == 0 { + return fmt.Errorf("no matches found for regexp: %s", o.Symbol) + } + + sourcePath := o.SourcePath + if sourcePath == "" { + wd, err := os.Getwd() + if err != nil { + return fmt.Errorf("could not stat current dir: %v", err) + } + sourcePath = wd + } + reader := newSourceReader(sourcePath, o.TrimPath) + + fmt.Fprintf(w, "Total: %s\n", rpt.formatValue(rpt.total)) + for _, fn := range functions { + name := fn.Info.Name + + // Identify all the source files associated to this function. + // Group nodes for each source file. + var sourceFiles graph.Nodes + fileNodes := make(map[string]graph.Nodes) + for _, n := range functionNodes[name] { + if n.Info.File == "" { + continue + } + if fileNodes[n.Info.File] == nil { + sourceFiles = append(sourceFiles, n) + } + fileNodes[n.Info.File] = append(fileNodes[n.Info.File], n) + } + + if len(sourceFiles) == 0 { + fmt.Fprintf(w, "No source information for %s\n", name) + continue + } + + sourceFiles.Sort(graph.FileOrder) + + // Print each file associated with this function. 
+ for _, fl := range sourceFiles { + filename := fl.Info.File + fns := fileNodes[filename] + flatSum, cumSum := fns.Sum() + + fnodes, _, err := getSourceFromFile(filename, reader, fns, 0, 0) + fmt.Fprintf(w, "ROUTINE ======================== %s in %s\n", name, filename) + fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n", + rpt.formatValue(flatSum), rpt.formatValue(cumSum), + measurement.Percentage(cumSum, rpt.total)) + + if err != nil { + fmt.Fprintf(w, " Error: %v\n", err) + continue + } + + for _, fn := range fnodes { + fmt.Fprintf(w, "%10s %10s %6d:%s\n", valueOrDot(fn.Flat, rpt), valueOrDot(fn.Cum, rpt), fn.Info.Lineno, fn.Info.Name) + } + } + } + return nil +} + +// printWebSource prints an annotated source listing, include all +// functions with samples that match the regexp rpt.options.symbol. +func printWebSource(w io.Writer, rpt *Report, obj plugin.ObjTool) error { + printHeader(w, rpt) + if err := PrintWebList(w, rpt, obj, -1); err != nil { + return err + } + printPageClosing(w) + return nil +} + +// sourcePrinter holds state needed for generating source+asm HTML listing. +type sourcePrinter struct { + reader *sourceReader + synth *synthCode + objectTool plugin.ObjTool + objects map[string]plugin.ObjFile // Opened object files + sym *regexp.Regexp // May be nil + files map[string]*sourceFile // Set of files to print. + insts map[uint64]instructionInfo // Instructions of interest (keyed by address). + + // Set of function names that we are interested in (because they had + // a sample and match sym). + interest map[string]bool + + // Mapping from system function names to printable names. + prettyNames map[string]string +} + +// addrInfo holds information for an address we are interested in. +type addrInfo struct { + loc *profile.Location // Always non-nil + obj plugin.ObjFile // May be nil +} + +// instructionInfo holds collected information for an instruction. 
+type instructionInfo struct { + objAddr uint64 // Address in object file (with base subtracted out) + length int // Instruction length in bytes + disasm string // Disassembly of instruction + file string // For top-level function in which instruction occurs + line int // For top-level function in which instruction occurs + flat, cum int64 // Samples to report (divisor already applied) +} + +// sourceFile contains collected information for files we will print. +type sourceFile struct { + fname string + cum int64 + flat int64 + lines map[int][]sourceInst // Instructions to show per line + funcName map[int]string // Function name per line +} + +// sourceInst holds information for an instruction to be displayed. +type sourceInst struct { + addr uint64 + stack []callID // Inlined call-stack +} + +// sourceFunction contains information for a contiguous range of lines per function we +// will print. +type sourceFunction struct { + name string + begin, end int // Line numbers (end is not included in the range) + flat, cum int64 +} + +// addressRange is a range of addresses plus the object file that contains it. +type addressRange struct { + begin, end uint64 + obj plugin.ObjFile + mapping *profile.Mapping + score int64 // Used to order ranges for processing +} + +// PrintWebList prints annotated source listing of rpt to w. +// rpt.prof should contain inlined call info. 
+func PrintWebList(w io.Writer, rpt *Report, obj plugin.ObjTool, maxFiles int) error { + sourcePath := rpt.options.SourcePath + if sourcePath == "" { + wd, err := os.Getwd() + if err != nil { + return fmt.Errorf("could not stat current dir: %v", err) + } + sourcePath = wd + } + sp := newSourcePrinter(rpt, obj, sourcePath) + if len(sp.interest) == 0 { + return fmt.Errorf("no matches found for regexp: %s", rpt.options.Symbol) + } + sp.print(w, maxFiles, rpt) + sp.close() + return nil +} + +func newSourcePrinter(rpt *Report, obj plugin.ObjTool, sourcePath string) *sourcePrinter { + sp := &sourcePrinter{ + reader: newSourceReader(sourcePath, rpt.options.TrimPath), + synth: newSynthCode(rpt.prof.Mapping), + objectTool: obj, + objects: map[string]plugin.ObjFile{}, + sym: rpt.options.Symbol, + files: map[string]*sourceFile{}, + insts: map[uint64]instructionInfo{}, + prettyNames: map[string]string{}, + interest: map[string]bool{}, + } + + // If the regexp source can be parsed as an address, also match + // functions that land on that address. + var address *uint64 + if sp.sym != nil { + if hex, err := strconv.ParseUint(sp.sym.String(), 0, 64); err == nil { + address = &hex + } + } + + addrs := map[uint64]addrInfo{} + flat := map[uint64]int64{} + cum := map[uint64]int64{} + + // Record an interest in the function corresponding to lines[index]. + markInterest := func(addr uint64, loc *profile.Location, index int) { + fn := loc.Line[index] + if fn.Function == nil { + return + } + sp.interest[fn.Function.Name] = true + sp.interest[fn.Function.SystemName] = true + if _, ok := addrs[addr]; !ok { + addrs[addr] = addrInfo{loc, sp.objectFile(loc.Mapping)} + } + } + + // See if sp.sym matches line. 
+ matches := func(line profile.Line) bool { + if line.Function == nil { + return false + } + return sp.sym.MatchString(line.Function.Name) || + sp.sym.MatchString(line.Function.SystemName) || + sp.sym.MatchString(line.Function.Filename) + } + + // Extract sample counts and compute set of interesting functions. + for _, sample := range rpt.prof.Sample { + value := rpt.options.SampleValue(sample.Value) + if rpt.options.SampleMeanDivisor != nil { + div := rpt.options.SampleMeanDivisor(sample.Value) + if div != 0 { + value /= div + } + } + + // Find call-sites matching sym. + for i := len(sample.Location) - 1; i >= 0; i-- { + loc := sample.Location[i] + for _, line := range loc.Line { + if line.Function == nil { + continue + } + sp.prettyNames[line.Function.SystemName] = line.Function.Name + } + + addr := loc.Address + if addr == 0 { + // Some profiles are missing valid addresses. + addr = sp.synth.address(loc) + } + + cum[addr] += value + if i == 0 { + flat[addr] += value + } + + if sp.sym == nil || (address != nil && addr == *address) { + // Interested in top-level entry of stack. + if len(loc.Line) > 0 { + markInterest(addr, loc, len(loc.Line)-1) + } + continue + } + + // Search in inlined stack for a match. + matchFile := (loc.Mapping != nil && sp.sym.MatchString(loc.Mapping.File)) + for j, line := range loc.Line { + if (j == 0 && matchFile) || matches(line) { + markInterest(addr, loc, j) + } + } + } + } + + sp.expandAddresses(rpt, addrs, flat) + sp.initSamples(flat, cum) + return sp +} + +func (sp *sourcePrinter) close() { + for _, objFile := range sp.objects { + if objFile != nil { + objFile.Close() + } + } +} + +func (sp *sourcePrinter) expandAddresses(rpt *Report, addrs map[uint64]addrInfo, flat map[uint64]int64) { + // We found interesting addresses (ones with non-zero samples) above. + // Get covering address ranges and disassemble the ranges. 
+	ranges, unprocessed := sp.splitIntoRanges(rpt.prof, addrs, flat)
+	sp.handleUnprocessed(addrs, unprocessed)
+
+	// Trim ranges if there are too many.
+	const maxRanges = 25
+	sort.Slice(ranges, func(i, j int) bool {
+		return ranges[i].score > ranges[j].score
+	})
+	if len(ranges) > maxRanges {
+		ranges = ranges[:maxRanges]
+	}
+
+	for _, r := range ranges {
+		objBegin, err := r.obj.ObjAddr(r.begin)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to compute objdump address for range start %x: %v\n", r.begin, err)
+			continue
+		}
+		objEnd, err := r.obj.ObjAddr(r.end)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to compute objdump address for range end %x: %v\n", r.end, err)
+			continue
+		}
+		base := r.begin - objBegin
+		insts, err := sp.objectTool.Disasm(r.mapping.File, objBegin, objEnd, rpt.options.IntelSyntax)
+		if err != nil {
+			// TODO(sanjay): Report that the covered addresses are missing.
+			continue
+		}
+
+		var lastFrames []plugin.Frame
+		var lastAddr, maxAddr uint64
+		for i, inst := range insts {
+			addr := inst.Addr + base
+
+			// Guard against duplicate output from Disasm.
+			if addr <= maxAddr {
+				continue
+			}
+			maxAddr = addr
+
+			length := 1
+			if i+1 < len(insts) && insts[i+1].Addr > inst.Addr {
+				// Extend to next instruction.
+				length = int(insts[i+1].Addr - inst.Addr)
+			}
+
+			// Get inlined-call-stack for address.
+			frames, err := r.obj.SourceLine(addr)
+			if err != nil {
+				// Construct a frame from disassembler output.
+				frames = []plugin.Frame{{Func: inst.Function, File: inst.File, Line: inst.Line}}
+			}
+
+			x := instructionInfo{objAddr: inst.Addr, length: length, disasm: inst.Text}
+			if len(frames) > 0 {
+				// We could consider using the outer-most caller's source
+				// location so we give some hint as to where the
+				// inlining happened that led to this instruction. So for
+				// example, suppose we have the following (inlined) call
+				// chains for this instruction:
+				//   F1->G->H
+				//   F2->G->H
+				// We could tag the instructions from the first call with
+				// F1 and instructions from the second call with F2. But
+				// that leads to a somewhat confusing display. So for now,
+				// we stick with just the inner-most location (i.e., H).
+				// In the future we will consider changing the display to
+				// make caller info more visible.
+				index := 0 // Inner-most frame
+				x.file = frames[index].File
+				x.line = frames[index].Line
+			}
+			sp.insts[addr] = x
+
+			// We sometimes get instructions with a zero reported line number.
+			// Make such instructions have the same line info as the preceding
+			// instruction, if an earlier instruction is found close enough.
+			const neighborhood = 32
+			if len(frames) > 0 && frames[0].Line != 0 {
+				lastFrames = frames
+				lastAddr = addr
+			} else if (addr-lastAddr <= neighborhood) && lastFrames != nil {
+				frames = lastFrames
+			}
+
+			sp.addStack(addr, frames)
+		}
+	}
+}
+
+func (sp *sourcePrinter) addStack(addr uint64, frames []plugin.Frame) {
+	// See if the stack contains a function we are interested in.
+	for i, f := range frames {
+		if !sp.interest[f.Func] {
+			continue
+		}
+
+		// Record sub-stack under frame's file/line.
+		fname := canonicalizeFileName(f.File)
+		file := sp.files[fname]
+		if file == nil {
+			file = &sourceFile{
+				fname:    fname,
+				lines:    map[int][]sourceInst{},
+				funcName: map[int]string{},
+			}
+			sp.files[fname] = file
+		}
+		callees := frames[:i]
+		stack := make([]callID, 0, len(callees))
+		for j := len(callees) - 1; j >= 0; j-- { // Reverse so caller is first
+			stack = append(stack, callID{
+				file: callees[j].File,
+				line: callees[j].Line,
+			})
+		}
+		file.lines[f.Line] = append(file.lines[f.Line], sourceInst{addr, stack})
+
+		// Remember the first function name encountered per source line
+		// and assume that that line belongs to that function.
+ if _, ok := file.funcName[f.Line]; !ok { + file.funcName[f.Line] = f.Func + } + } +} + +// synthAsm is the special disassembler value used for instructions without an object file. +const synthAsm = "" + +// handleUnprocessed handles addresses that were skipped by splitIntoRanges because they +// did not belong to a known object file. +func (sp *sourcePrinter) handleUnprocessed(addrs map[uint64]addrInfo, unprocessed []uint64) { + // makeFrames synthesizes a []plugin.Frame list for the specified address. + // The result will typically have length 1, but may be longer if address corresponds + // to inlined calls. + makeFrames := func(addr uint64) []plugin.Frame { + loc := addrs[addr].loc + stack := make([]plugin.Frame, 0, len(loc.Line)) + for _, line := range loc.Line { + fn := line.Function + if fn == nil { + continue + } + stack = append(stack, plugin.Frame{ + Func: fn.Name, + File: fn.Filename, + Line: int(line.Line), + }) + } + return stack + } + + for _, addr := range unprocessed { + frames := makeFrames(addr) + x := instructionInfo{ + objAddr: addr, + length: 1, + disasm: synthAsm, + } + if len(frames) > 0 { + x.file = frames[0].File + x.line = frames[0].Line + } + sp.insts[addr] = x + + sp.addStack(addr, frames) + } +} + +// splitIntoRanges converts the set of addresses we are interested in into a set of address +// ranges to disassemble. It also returns the set of addresses found that did not have an +// associated object file and were therefore not added to an address range. +func (sp *sourcePrinter) splitIntoRanges(prof *profile.Profile, addrMap map[uint64]addrInfo, flat map[uint64]int64) ([]addressRange, []uint64) { + // Partition addresses into two sets: ones with a known object file, and ones without. 
+ var addrs, unprocessed []uint64 + for addr, info := range addrMap { + if info.obj != nil { + addrs = append(addrs, addr) + } else { + unprocessed = append(unprocessed, addr) + } + } + sort.Slice(addrs, func(i, j int) bool { return addrs[i] < addrs[j] }) + + const expand = 500 // How much to expand range to pick up nearby addresses. + var result []addressRange + for i, n := 0, len(addrs); i < n; { + begin, end := addrs[i], addrs[i] + sum := flat[begin] + i++ + + info := addrMap[begin] + m := info.loc.Mapping + obj := info.obj // Non-nil because of the partitioning done above. + + // Find following addresses that are close enough to addrs[i]. + for i < n && addrs[i] <= end+2*expand && addrs[i] < m.Limit { + // When we expand ranges by "expand" on either side, the ranges + // for addrs[i] and addrs[i-1] will merge. + end = addrs[i] + sum += flat[end] + i++ + } + if m.Start-begin >= expand { + begin -= expand + } else { + begin = m.Start + } + if m.Limit-end >= expand { + end += expand + } else { + end = m.Limit + } + + result = append(result, addressRange{begin, end, obj, m, sum}) + } + return result, unprocessed +} + +func (sp *sourcePrinter) initSamples(flat, cum map[uint64]int64) { + for addr, inst := range sp.insts { + // Move all samples that were assigned to the middle of an instruction to the + // beginning of that instruction. This takes care of samples that were recorded + // against pc+1. + instEnd := addr + uint64(inst.length) + for p := addr; p < instEnd; p++ { + inst.flat += flat[p] + inst.cum += cum[p] + } + sp.insts[addr] = inst + } +} + +func (sp *sourcePrinter) print(w io.Writer, maxFiles int, rpt *Report) { + // Finalize per-file counts. + for _, file := range sp.files { + seen := map[uint64]bool{} + for _, line := range file.lines { + for _, x := range line { + if seen[x.addr] { + // Same address can be displayed multiple times in a file + // (e.g., if we show multiple inlined functions). + // Avoid double-counting samples in this case. 
+ continue + } + seen[x.addr] = true + inst := sp.insts[x.addr] + file.cum += inst.cum + file.flat += inst.flat + } + } + } + + // Get sorted list of files to print. + var files []*sourceFile + for _, f := range sp.files { + files = append(files, f) + } + order := func(i, j int) bool { return files[i].flat > files[j].flat } + if maxFiles < 0 { + // Order by name for compatibility with old code. + order = func(i, j int) bool { return files[i].fname < files[j].fname } + maxFiles = len(files) + } + sort.Slice(files, order) + for i, f := range files { + if i < maxFiles { + sp.printFile(w, f, rpt) + } + } +} + +func (sp *sourcePrinter) printFile(w io.Writer, f *sourceFile, rpt *Report) { + for _, fn := range sp.functions(f) { + if fn.cum == 0 { + continue + } + printFunctionHeader(w, fn.name, f.fname, fn.flat, fn.cum, rpt) + var asm []assemblyInstruction + for l := fn.begin; l < fn.end; l++ { + lineContents, ok := sp.reader.line(f.fname, l) + if !ok { + if len(f.lines[l]) == 0 { + // Outside of range of valid lines and nothing to print. + continue + } + if l == 0 { + // Line number 0 shows up if line number is not known. + lineContents = "" + } else { + // Past end of file, but have data to print. + lineContents = "???" + } + } + + // Make list of assembly instructions. 
+ asm = asm[:0] + var flatSum, cumSum int64 + var lastAddr uint64 + for _, inst := range f.lines[l] { + addr := inst.addr + x := sp.insts[addr] + flatSum += x.flat + cumSum += x.cum + startsBlock := (addr != lastAddr+uint64(sp.insts[lastAddr].length)) + lastAddr = addr + + // divisors already applied, so leave flatDiv,cumDiv as 0 + asm = append(asm, assemblyInstruction{ + address: x.objAddr, + instruction: x.disasm, + function: fn.name, + file: x.file, + line: x.line, + flat: x.flat, + cum: x.cum, + startsBlock: startsBlock, + inlineCalls: inst.stack, + }) + } + + printFunctionSourceLine(w, l, flatSum, cumSum, lineContents, asm, sp.reader, rpt) + } + printFunctionClosing(w) + } +} + +// functions splits apart the lines to show in a file into a list of per-function ranges. +func (sp *sourcePrinter) functions(f *sourceFile) []sourceFunction { + var funcs []sourceFunction + + // Get interesting lines in sorted order. + lines := make([]int, 0, len(f.lines)) + for l := range f.lines { + lines = append(lines, l) + } + sort.Ints(lines) + + // Merge adjacent lines that are in same function and not too far apart. + const mergeLimit = 20 + for _, l := range lines { + name := f.funcName[l] + if pretty, ok := sp.prettyNames[name]; ok { + // Use demangled name if available. + name = pretty + } + + fn := sourceFunction{name: name, begin: l, end: l + 1} + for _, x := range f.lines[l] { + inst := sp.insts[x.addr] + fn.flat += inst.flat + fn.cum += inst.cum + } + + // See if we should merge into preceding function. + if len(funcs) > 0 { + last := funcs[len(funcs)-1] + if l-last.end < mergeLimit && last.name == name { + last.end = l + 1 + last.flat += fn.flat + last.cum += fn.cum + funcs[len(funcs)-1] = last + continue + } + } + + // Add new function. + funcs = append(funcs, fn) + } + + // Expand function boundaries to show neighborhood. 
+ const expand = 5 + for i, f := range funcs { + if i == 0 { + // Extend backwards, stopping at line number 1, but do not disturb 0 + // since that is a special line number that can show up when addr2line + // cannot determine the real line number. + if f.begin > expand { + f.begin -= expand + } else if f.begin > 1 { + f.begin = 1 + } + } else { + // Find gap from predecessor and divide between predecessor and f. + halfGap := (f.begin - funcs[i-1].end) / 2 + if halfGap > expand { + halfGap = expand + } + funcs[i-1].end += halfGap + f.begin -= halfGap + } + funcs[i] = f + } + + // Also extend the ending point of the last function. + if len(funcs) > 0 { + funcs[len(funcs)-1].end += expand + } + + return funcs +} + +// objectFile return the object for the specified mapping, opening it if necessary. +// It returns nil on error. +func (sp *sourcePrinter) objectFile(m *profile.Mapping) plugin.ObjFile { + if m == nil { + return nil + } + if object, ok := sp.objects[m.File]; ok { + return object // May be nil if we detected an error earlier. + } + object, err := sp.objectTool.Open(m.File, m.Start, m.Limit, m.Offset, m.KernelRelocationSymbol) + if err != nil { + object = nil + } + sp.objects[m.File] = object // Cache even on error. + return object +} + +// printHeader prints the page header for a weblist report. +func printHeader(w io.Writer, rpt *Report) { + fmt.Fprintln(w, ` + + + + +Pprof listing`) + fmt.Fprintln(w, weblistPageCSS) + fmt.Fprintln(w, weblistPageScript) + fmt.Fprint(w, "\n\n\n") + + var labels []string + for _, l := range ProfileLabels(rpt) { + labels = append(labels, template.HTMLEscapeString(l)) + } + + fmt.Fprintf(w, `
%s
Total: %s
`, + strings.Join(labels, "
\n"), + rpt.formatValue(rpt.total), + ) +} + +// printFunctionHeader prints a function header for a weblist report. +func printFunctionHeader(w io.Writer, name, path string, flatSum, cumSum int64, rpt *Report) { + fmt.Fprintf(w, `

%s

%s

+
+  Total:  %10s %10s (flat, cum) %s
+`,
+		template.HTMLEscapeString(name), template.HTMLEscapeString(path),
+		rpt.formatValue(flatSum), rpt.formatValue(cumSum),
+		measurement.Percentage(cumSum, rpt.total))
+}
+
+// printFunctionSourceLine prints a single source line of a weblist report,
+// annotated with its flat/cum sample values, followed (when available) by the
+// assembly instructions attributed to that line.
+//
+// lineNo is the 1-based source line number; flat and cum are the sample sums
+// for the line; lineContents is the raw source text (HTML-escaped before
+// printing); assembly holds the instructions mapped to this line; reader is
+// used by printNested to show inlined-call source context; rpt supplies value
+// formatting.
+//
+// NOTE(review): the HTML markup that originally surrounded these format
+// strings appears to have been stripped from this copy (compare upstream
+// pprof source.go). In particular the second Fprintf below passes six
+// arguments for five verbs — `cl` presumably fed a `<span class=%s>` verb in
+// the original. Confirm against upstream before relying on the exact output.
+func printFunctionSourceLine(w io.Writer, lineNo int, flat, cum int64, lineContents string,
+	assembly []assemblyInstruction, reader *sourceReader, rpt *Report) {
+	if len(assembly) == 0 {
+		// No assembly for this line: emit just the annotated source line.
+		fmt.Fprintf(w,
+			" %6d   %10s %10s %8s  %s \n",
+			lineNo,
+			valueOrDot(flat, rpt), valueOrDot(cum, rpt),
+			"", template.HTMLEscapeString(lineContents))
+		return
+	}
+
+	// Decide whether any instruction carries real content (actual disassembly
+	// or inlined-call info). synthAsm marks synthesized entries with no
+	// disassembler text. cl selects the CSS class: "livesrc" lines get the
+	// expandable nested view, "deadsrc" lines do not.
+	nestedInfo := false
+	cl := "deadsrc"
+	for _, an := range assembly {
+		if len(an.inlineCalls) > 0 || an.instruction != synthAsm {
+			nestedInfo = true
+			cl = "livesrc"
+		}
+	}
+
+	// Emit the source line itself (see NOTE above about the verb/argument
+	// mismatch in this format string).
+	fmt.Fprintf(w,
+		" %6d   %10s %10s %8s  %s ",
+		lineNo, cl,
+		valueOrDot(flat, rpt), valueOrDot(cum, rpt),
+		"", template.HTMLEscapeString(lineContents))
+	if nestedInfo {
+		// Indent the nested assembly to line up under the source text.
+		srcIndent := indentation(lineContents)
+		printNested(w, srcIndent, assembly, reader, rpt)
+	}
+	fmt.Fprintln(w)
+}
+
+// printNested prints the per-instruction detail shown beneath a source line in
+// a weblist report: a run of assembly instructions, each optionally preceded by
+// the source lines of the inlined calls that led to it.
+//
+// srcIndent is the indentation (in columns) of the owning source line; nested
+// output is indented srcIndent+4 plus 4 more per inlining level. reader
+// fetches source text for inlined-call locations; rpt supplies value
+// formatting.
+//
+// NOTE(review): the empty Fprint(w, "") calls at the top and bottom appear to
+// be the remnants of stripped HTML tags (likely <asm>...</asm> in upstream
+// pprof); confirm against upstream before relying on the exact output.
+func printNested(w io.Writer, srcIndent int, assembly []assemblyInstruction, reader *sourceReader, rpt *Report) {
+	fmt.Fprint(w, "")
+	// curCalls tracks the inlined-call stack printed for the previous
+	// instruction, so identical leading frames are not re-printed.
+	var curCalls []callID
+	for i, an := range assembly {
+		if an.startsBlock && i != 0 {
+			// Insert a separator between discontiguous blocks.
+			fmt.Fprintf(w, " %8s %28s\n", "", "⋮")
+		}
+
+		// Pre-format "file:line" for the instruction; empty when unknown.
+		var fileline string
+		if an.file != "" {
+			fileline = fmt.Sprintf("%s:%d", template.HTMLEscapeString(filepath.Base(an.file)), an.line)
+		}
+		flat, cum := an.flat, an.cum
+
+		// Print inlined call context.
+		for j, c := range an.inlineCalls {
+			if j < len(curCalls) && curCalls[j] == c {
+				// Skip if same as previous instruction.
+				continue
+			}
+			// Once one frame differs, every deeper frame must be printed
+			// too, so stop suppressing from here on.
+			curCalls = nil
+			fline, ok := reader.line(c.file, c.line)
+			if !ok {
+				fline = ""
+			}
+			// Indent one extra level (4 columns) per inlining depth j.
+			text := strings.Repeat(" ", srcIndent+4+4*j) + strings.TrimSpace(fline)
+			fmt.Fprintf(w, " %8s %10s %10s %8s  %s %s:%d\n",
+				"", "", "", "",
+				template.HTMLEscapeString(rightPad(text, 80)),
+				template.HTMLEscapeString(filepath.Base(c.file)), c.line)
+		}
+		curCalls = an.inlineCalls
+		if an.instruction == synthAsm {
+			// Synthesized entry with no disassembly text; nothing to print.
+			continue
+		}
+		// Print the instruction, indented past the deepest inlined frame.
+		text := strings.Repeat(" ", srcIndent+4+4*len(curCalls)) + an.instruction
+		fmt.Fprintf(w, " %8s %10s %10s %8x: %s %s\n",
+			"", valueOrDot(flat, rpt), valueOrDot(cum, rpt), an.address,
+			template.HTMLEscapeString(rightPad(text, 80)),
+			// fileline should not be escaped since it was formed by appending
+			// line number (just digits) to an escaped file name. Escaping here
+			// would cause double-escaping of file name.
+			fileline)
+	}
+	fmt.Fprint(w, "")
+}
+
+// printFunctionClosing prints the end of a function in a weblist report.
+func printFunctionClosing(w io.Writer) {
+	fmt.Fprintln(w, "
") +} + +// printPageClosing prints the end of the page in a weblist report. +func printPageClosing(w io.Writer) { + fmt.Fprintln(w, weblistPageClosing) +} + +// getSourceFromFile collects the sources of a function from a source +// file and annotates it with the samples in fns. Returns the sources +// as nodes, using the info.name field to hold the source code. +func getSourceFromFile(file string, reader *sourceReader, fns graph.Nodes, start, end int) (graph.Nodes, string, error) { + lineNodes := make(map[int]graph.Nodes) + + // Collect source coordinates from profile. + const margin = 5 // Lines before first/after last sample. + if start == 0 { + if fns[0].Info.StartLine != 0 { + start = fns[0].Info.StartLine + } else { + start = fns[0].Info.Lineno - margin + } + } else { + start -= margin + } + if end == 0 { + end = fns[0].Info.Lineno + } + end += margin + for _, n := range fns { + lineno := n.Info.Lineno + nodeStart := n.Info.StartLine + if nodeStart == 0 { + nodeStart = lineno - margin + } + nodeEnd := lineno + margin + if nodeStart < start { + start = nodeStart + } else if nodeEnd > end { + end = nodeEnd + } + lineNodes[lineno] = append(lineNodes[lineno], n) + } + if start < 1 { + start = 1 + } + + var src graph.Nodes + for lineno := start; lineno <= end; lineno++ { + line, ok := reader.line(file, lineno) + if !ok { + break + } + flat, cum := lineNodes[lineno].Sum() + src = append(src, &graph.Node{ + Info: graph.NodeInfo{ + Name: strings.TrimRight(line, "\n"), + Lineno: lineno, + }, + Flat: flat, + Cum: cum, + }) + } + if err := reader.fileError(file); err != nil { + return nil, file, err + } + return src, file, nil +} + +// sourceReader provides access to source code with caching of file contents. +type sourceReader struct { + // searchPath is a filepath.ListSeparator-separated list of directories where + // source files should be searched. + searchPath string + + // trimPath is a filepath.ListSeparator-separated list of paths to trim. 
+ trimPath string + + // files maps from path name to a list of lines. + // files[*][0] is unused since line numbering starts at 1. + files map[string][]string + + // errors collects errors encountered per file. These errors are + // consulted before returning out of these module. + errors map[string]error +} + +func newSourceReader(searchPath, trimPath string) *sourceReader { + return &sourceReader{ + searchPath, + trimPath, + make(map[string][]string), + make(map[string]error), + } +} + +func (reader *sourceReader) fileError(path string) error { + return reader.errors[path] +} + +// line returns the line numbered "lineno" in path, or _,false if lineno is out of range. +func (reader *sourceReader) line(path string, lineno int) (string, bool) { + lines, ok := reader.files[path] + if !ok { + // Read and cache file contents. + lines = []string{""} // Skip 0th line + f, err := openSourceFile(path, reader.searchPath, reader.trimPath) + if err != nil { + reader.errors[path] = err + } else { + s := bufio.NewScanner(f) + for s.Scan() { + lines = append(lines, s.Text()) + } + f.Close() + if s.Err() != nil { + reader.errors[path] = err + } + } + reader.files[path] = lines + } + if lineno <= 0 || lineno >= len(lines) { + return "", false + } + return lines[lineno], true +} + +// openSourceFile opens a source file from a name encoded in a profile. File +// names in a profile after can be relative paths, so search them in each of +// the paths in searchPath and their parents. In case the profile contains +// absolute paths, additional paths may be configured to trim from the source +// paths in the profile. This effectively turns the path into a relative path +// searching it using searchPath as usual). +func openSourceFile(path, searchPath, trim string) (*os.File, error) { + path = trimPath(path, trim, searchPath) + // If file is still absolute, require file to exist. 
+ if filepath.IsAbs(path) { + f, err := os.Open(path) + return f, err + } + // Scan each component of the path. + for _, dir := range filepath.SplitList(searchPath) { + // Search up for every parent of each possible path. + for { + filename := filepath.Join(dir, path) + if f, err := os.Open(filename); err == nil { + return f, nil + } + parent := filepath.Dir(dir) + if parent == dir { + break + } + dir = parent + } + } + + return nil, fmt.Errorf("could not find file %s on path %s", path, searchPath) +} + +// trimPath cleans up a path by removing prefixes that are commonly +// found on profiles plus configured prefixes. +// TODO(aalexand): Consider optimizing out the redundant work done in this +// function if it proves to matter. +func trimPath(path, trimPath, searchPath string) string { + // Keep path variable intact as it's used below to form the return value. + sPath, searchPath := filepath.ToSlash(path), filepath.ToSlash(searchPath) + if trimPath == "" { + // If the trim path is not configured, try to guess it heuristically: + // search for basename of each search path in the original path and, if + // found, strip everything up to and including the basename. So, for + // example, given original path "/some/remote/path/my-project/foo/bar.c" + // and search path "/my/local/path/my-project" the heuristic will return + // "/my/local/path/my-project/foo/bar.c". + for _, dir := range filepath.SplitList(searchPath) { + want := "/" + filepath.Base(dir) + "/" + if found := strings.Index(sPath, want); found != -1 { + return path[found+len(want):] + } + } + } + // Trim configured trim prefixes. 
+ trimPaths := append(filepath.SplitList(filepath.ToSlash(trimPath)), "/proc/self/cwd/./", "/proc/self/cwd/") + for _, trimPath := range trimPaths { + if !strings.HasSuffix(trimPath, "/") { + trimPath += "/" + } + if strings.HasPrefix(sPath, trimPath) { + return path[len(trimPath):] + } + } + return path +} + +func indentation(line string) int { + column := 0 + for _, c := range line { + if c == ' ' { + column++ + } else if c == '\t' { + column++ + for column%8 != 0 { + column++ + } + } else { + break + } + } + return column +} + +// rightPad pads the input with spaces on the right-hand-side to make it have +// at least width n. It treats tabs as enough spaces that lead to the next +// 8-aligned tab-stop. +func rightPad(s string, n int) string { + var str strings.Builder + + // Convert tabs to spaces as we go so padding works regardless of what prefix + // is placed before the result. + column := 0 + for _, c := range s { + column++ + if c == '\t' { + str.WriteRune(' ') + for column%8 != 0 { + column++ + str.WriteRune(' ') + } + } else { + str.WriteRune(c) + } + } + for column < n { + column++ + str.WriteRune(' ') + } + return str.String() +} + +func canonicalizeFileName(fname string) string { + fname = strings.TrimPrefix(fname, "/proc/self/cwd/") + fname = strings.TrimPrefix(fname, "./") + return filepath.Clean(fname) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go new file mode 100644 index 0000000000000000000000000000000000000000..851693f1d0e6d717357d4c567b7108d7069f8ec0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go @@ -0,0 +1,75 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package report + +import ( + "html/template" +) + +// AddSourceTemplates adds templates used by PrintWebList to t. +func AddSourceTemplates(t *template.Template) { + template.Must(t.Parse(`{{define "weblistcss"}}` + weblistPageCSS + `{{end}}`)) + template.Must(t.Parse(`{{define "weblistjs"}}` + weblistPageScript + `{{end}}`)) +} + +const weblistPageCSS = `` + +const weblistPageScript = `` + +const weblistPageClosing = ` + +` diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/stacks.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/stacks.go new file mode 100644 index 0000000000000000000000000000000000000000..7db51bc01c76b12fccbbb1395476c72f92f76915 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/stacks.go @@ -0,0 +1,194 @@ +// Copyright 2022 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package report + +import ( + "crypto/sha256" + "encoding/binary" + "fmt" + "regexp" + + "github.com/google/pprof/internal/measurement" + "github.com/google/pprof/profile" +) + +// StackSet holds a set of stacks corresponding to a profile. +// +// Slices in StackSet and the types it contains are always non-nil, +// which makes Javascript code that uses the JSON encoding less error-prone. +type StackSet struct { + Total int64 // Total value of the profile. + Scale float64 // Multiplier to generate displayed value + Type string // Profile type. E.g., "cpu". + Unit string // One of "B", "s", "GCU", or "" (if unknown) + Stacks []Stack // List of stored stacks + Sources []StackSource // Mapping from source index to info +} + +// Stack holds a single stack instance. +type Stack struct { + Value int64 // Total value for all samples of this stack. + Sources []int // Indices in StackSet.Sources (callers before callees). +} + +// StackSource holds function/location info for a stack entry. +type StackSource struct { + FullName string + FileName string + UniqueName string // Disambiguates functions with same names + Inlined bool // If true this source was inlined into its caller + + // Alternative names to display (with decreasing lengths) to make text fit. + // Guaranteed to be non-empty. + Display []string + + // Regular expression (anchored) that matches exactly FullName. + RE string + + // Places holds the list of stack slots where this source occurs. + // In particular, if [a,b] is an element in Places, + // StackSet.Stacks[a].Sources[b] points to this source. + // + // No stack will be referenced twice in the Places slice for a given + // StackSource. In case of recursion, Places will contain the outer-most + // entry in the recursive stack. E.g., if stack S has source X at positions + // 4,6,9,10, the Places entry for X will contain [S,4]. + Places []StackSlot + + // Combined count of stacks where this source is the leaf. 
+ Self int64 + + // Color number to use for this source. + // Colors with high numbers than supported may be treated as zero. + Color int +} + +// StackSlot identifies a particular StackSlot. +type StackSlot struct { + Stack int // Index in StackSet.Stacks + Pos int // Index in Stack.Sources +} + +// Stacks returns a StackSet for the profile in rpt. +func (rpt *Report) Stacks() StackSet { + // Get scale for converting to default unit of the right type. + scale, unit := measurement.Scale(1, rpt.options.SampleUnit, "default") + if unit == "default" { + unit = "" + } + if rpt.options.Ratio > 0 { + scale *= rpt.options.Ratio + } + s := &StackSet{ + Total: rpt.total, + Scale: scale, + Type: rpt.options.SampleType, + Unit: unit, + Stacks: []Stack{}, // Ensure non-nil + Sources: []StackSource{}, // Ensure non-nil + } + s.makeInitialStacks(rpt) + s.fillPlaces() + s.assignColors() + return *s +} + +func (s *StackSet) makeInitialStacks(rpt *Report) { + type key struct { + line profile.Line + inlined bool + } + srcs := map[key]int{} // Sources identified so far. + seenFunctions := map[string]bool{} + unknownIndex := 1 + getSrc := func(line profile.Line, inlined bool) int { + k := key{line, inlined} + if i, ok := srcs[k]; ok { + return i + } + x := StackSource{Places: []StackSlot{}} // Ensure Places is non-nil + if fn := line.Function; fn != nil { + x.FullName = fn.Name + x.FileName = fn.Filename + if !seenFunctions[fn.Name] { + x.UniqueName = fn.Name + seenFunctions[fn.Name] = true + } else { + // Assign a different name so pivoting picks this function. 
+ x.UniqueName = fmt.Sprint(fn.Name, "#", fn.ID) + } + } else { + x.FullName = fmt.Sprintf("?%d?", unknownIndex) + x.UniqueName = x.FullName + unknownIndex++ + } + x.Inlined = inlined + x.RE = "^" + regexp.QuoteMeta(x.UniqueName) + "$" + x.Display = shortNameList(x.FullName) + s.Sources = append(s.Sources, x) + srcs[k] = len(s.Sources) - 1 + return len(s.Sources) - 1 + } + + // Synthesized root location that will be placed at the beginning of each stack. + s.Sources = []StackSource{{ + FullName: "root", + Display: []string{"root"}, + Places: []StackSlot{}, + }} + + for _, sample := range rpt.prof.Sample { + value := rpt.options.SampleValue(sample.Value) + stack := Stack{Value: value, Sources: []int{0}} // Start with the root + + // Note: we need to reverse the order in the produced stack. + for i := len(sample.Location) - 1; i >= 0; i-- { + loc := sample.Location[i] + for j := len(loc.Line) - 1; j >= 0; j-- { + line := loc.Line[j] + inlined := (j != len(loc.Line)-1) + stack.Sources = append(stack.Sources, getSrc(line, inlined)) + } + } + + leaf := stack.Sources[len(stack.Sources)-1] + s.Sources[leaf].Self += value + s.Stacks = append(s.Stacks, stack) + } +} + +func (s *StackSet) fillPlaces() { + for i, stack := range s.Stacks { + seenSrcs := map[int]bool{} + for j, src := range stack.Sources { + if seenSrcs[src] { + continue + } + seenSrcs[src] = true + s.Sources[src].Places = append(s.Sources[src].Places, StackSlot{i, j}) + } + } +} + +func (s *StackSet) assignColors() { + // Assign different color indices to different packages. 
+ const numColors = 1048576 + for i, src := range s.Sources { + pkg := packageName(src.FullName) + h := sha256.Sum256([]byte(pkg)) + index := binary.LittleEndian.Uint32(h[:]) + s.Sources[i].Color = int(index % numColors) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/synth.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/synth.go new file mode 100644 index 0000000000000000000000000000000000000000..7a35bbcda8f0263e3cf8f9bc5fbad24e39f45317 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/report/synth.go @@ -0,0 +1,39 @@ +package report + +import ( + "github.com/google/pprof/profile" +) + +// synthCode assigns addresses to locations without an address. +type synthCode struct { + next uint64 + addr map[*profile.Location]uint64 // Synthesized address assigned to a location +} + +func newSynthCode(mappings []*profile.Mapping) *synthCode { + // Find a larger address than any mapping. + s := &synthCode{next: 1} + for _, m := range mappings { + if s.next < m.Limit { + s.next = m.Limit + } + } + return s +} + +// address returns the synthetic address for loc, creating one if needed. 
+func (s *synthCode) address(loc *profile.Location) uint64 { + if loc.Address != 0 { + panic("can only synthesize addresses for locations without an address") + } + if addr, ok := s.addr[loc]; ok { + return addr + } + if s.addr == nil { + s.addr = map[*profile.Location]uint64{} + } + addr := s.next + s.next++ + s.addr[loc] = addr + return addr +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go new file mode 100644 index 0000000000000000000000000000000000000000..c3f6cc62815599027bf2b65b5a608a3a61066b87 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go @@ -0,0 +1,379 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package symbolizer provides a routine to populate a profile with +// symbol, file and line number information. It relies on the +// addr2liner and demangle packages to do the actual work. 
+package symbolizer + +import ( + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "github.com/google/pprof/internal/binutils" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/internal/symbolz" + "github.com/google/pprof/profile" + "github.com/ianlancetaylor/demangle" +) + +// Symbolizer implements the plugin.Symbolize interface. +type Symbolizer struct { + Obj plugin.ObjTool + UI plugin.UI + Transport http.RoundTripper +} + +// test taps for dependency injection +var symbolzSymbolize = symbolz.Symbolize +var localSymbolize = doLocalSymbolize +var demangleFunction = Demangle + +// Symbolize attempts to symbolize profile p. First uses binutils on +// local binaries; if the source is a URL it attempts to get any +// missed entries using symbolz. +func (s *Symbolizer) Symbolize(mode string, sources plugin.MappingSources, p *profile.Profile) error { + remote, local, fast, force, demanglerMode := true, true, false, false, "" + for _, o := range strings.Split(strings.ToLower(mode), ":") { + switch o { + case "": + continue + case "none", "no": + return nil + case "local": + remote, local = false, true + case "fastlocal": + remote, local, fast = false, true, true + case "remote": + remote, local = true, false + case "force": + force = true + default: + switch d := strings.TrimPrefix(o, "demangle="); d { + case "full", "none", "templates": + demanglerMode = d + force = true + continue + case "default": + continue + } + s.UI.PrintErr("ignoring unrecognized symbolization option: " + mode) + s.UI.PrintErr("expecting -symbolize=[local|fastlocal|remote|none][:force][:demangle=[none|full|templates|default]") + } + } + + var err error + if local { + // Symbolize locally using binutils. 
+ if err = localSymbolize(p, fast, force, s.Obj, s.UI); err != nil { + s.UI.PrintErr("local symbolization: " + err.Error()) + } + } + if remote { + post := func(source, post string) ([]byte, error) { + return postURL(source, post, s.Transport) + } + if err = symbolzSymbolize(p, force, sources, post, s.UI); err != nil { + return err // Ran out of options. + } + } + + demangleFunction(p, force, demanglerMode) + return nil +} + +// postURL issues a POST to a URL over HTTP. +func postURL(source, post string, tr http.RoundTripper) ([]byte, error) { + client := &http.Client{ + Transport: tr, + } + resp, err := client.Post(source, "application/octet-stream", strings.NewReader(post)) + if err != nil { + return nil, fmt.Errorf("http post %s: %v", source, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("http post %s: %v", source, statusCodeError(resp)) + } + return io.ReadAll(resp.Body) +} + +func statusCodeError(resp *http.Response) error { + if resp.Header.Get("X-Go-Pprof") != "" && strings.Contains(resp.Header.Get("Content-Type"), "text/plain") { + // error is from pprof endpoint + if body, err := io.ReadAll(resp.Body); err == nil { + return fmt.Errorf("server response: %s - %s", resp.Status, body) + } + } + return fmt.Errorf("server response: %s", resp.Status) +} + +// doLocalSymbolize adds symbol and line number information to all locations +// in a profile. mode enables some options to control +// symbolization. +func doLocalSymbolize(prof *profile.Profile, fast, force bool, obj plugin.ObjTool, ui plugin.UI) error { + if fast { + if bu, ok := obj.(*binutils.Binutils); ok { + bu.SetFastSymbolization(true) + } + } + + mt, err := newMapping(prof, obj, ui, force) + if err != nil { + return err + } + defer mt.close() + + functions := make(map[profile.Function]*profile.Function) + for _, l := range mt.prof.Location { + m := l.Mapping + segment := mt.segments[m] + if segment == nil { + // Nothing to do. 
+ continue + } + + stack, err := segment.SourceLine(l.Address) + if err != nil || len(stack) == 0 { + // No answers from addr2line. + continue + } + + l.Line = make([]profile.Line, len(stack)) + l.IsFolded = false + for i, frame := range stack { + if frame.Func != "" { + m.HasFunctions = true + } + if frame.File != "" { + m.HasFilenames = true + } + if frame.Line != 0 { + m.HasLineNumbers = true + } + f := &profile.Function{ + Name: frame.Func, + SystemName: frame.Func, + Filename: frame.File, + } + if fp := functions[*f]; fp != nil { + f = fp + } else { + functions[*f] = f + f.ID = uint64(len(mt.prof.Function)) + 1 + mt.prof.Function = append(mt.prof.Function, f) + } + l.Line[i] = profile.Line{ + Function: f, + Line: int64(frame.Line), + } + } + + if len(stack) > 0 { + m.HasInlineFrames = true + } + } + + return nil +} + +// Demangle updates the function names in a profile with demangled C++ +// names, simplified according to demanglerMode. If force is set, +// overwrite any names that appear already demangled. 
func Demangle(prof *profile.Profile, force bool, demanglerMode string) {
	if force {
		// Remove the current demangled names to force demangling
		for _, f := range prof.Function {
			if f.Name != "" && f.SystemName != "" {
				f.Name = f.SystemName
			}
		}
	}

	options := demanglerModeToOptions(demanglerMode)
	for _, fn := range prof.Function {
		demangleSingleFunction(fn, options)
	}
}

// demanglerModeToOptions maps a demangle mode string ("", "templates",
// "full", "none") to demangle package options. Panics on any other value;
// mode strings are validated during symbolization-flag parsing upstream.
func demanglerModeToOptions(demanglerMode string) []demangle.Option {
	switch demanglerMode {
	case "": // demangled, simplified: no parameters, no templates, no return type
		return []demangle.Option{demangle.NoParams, demangle.NoEnclosingParams, demangle.NoTemplateParams}
	case "templates": // demangled, simplified: no parameters, no return type
		return []demangle.Option{demangle.NoParams, demangle.NoEnclosingParams}
	case "full":
		return []demangle.Option{demangle.NoClones}
	case "none": // no demangling
		return []demangle.Option{}
	}

	panic(fmt.Sprintf("unknown demanglerMode %s", demanglerMode))
}

// demangleSingleFunction sets fn.Name from fn.SystemName: first by real
// demangling via demangle.Filter, and if that is a no-op, by textual
// simplification of names that already look demangled.
func demangleSingleFunction(fn *profile.Function, options []demangle.Option) {
	if fn.Name != "" && fn.SystemName != fn.Name {
		return // Already demangled.
	}
	// Copy the options because they may be updated by the call.
	o := make([]demangle.Option, len(options))
	copy(o, options)
	if demangled := demangle.Filter(fn.SystemName, o...); demangled != fn.SystemName {
		fn.Name = demangled
		return
	}
	// Could not demangle. Apply heuristics in case the name is
	// already demangled.
	name := fn.SystemName
	if looksLikeDemangledCPlusPlus(name) {
		for _, o := range options {
			switch o {
			case demangle.NoParams:
				name = removeMatching(name, '(', ')')
			case demangle.NoTemplateParams:
				name = removeMatching(name, '<', '>')
			}
		}
	}
	fn.Name = name
}

// looksLikeDemangledCPlusPlus is a heuristic to decide if a name is
// the result of demangling C++. If so, further heuristics will be
// applied to simplify the name.
func looksLikeDemangledCPlusPlus(demangled string) bool {
	// Skip java names of the form "class.<init>".
	if strings.Contains(demangled, ".<") {
		return false
	}
	// Skip Go names of the form "foo.(*Bar[...]).Method".
	if strings.Contains(demangled, "]).") {
		return false
	}
	return strings.ContainsAny(demangled, "<>[]") || strings.Contains(demangled, "::")
}

// removeMatching removes nested instances of start..end from name.
func removeMatching(name string, start, end byte) string {
	s := string(start) + string(end)
	var nesting, first, current int
	// Scan delimiter by delimiter; when a balanced outermost pair closes,
	// splice it out and resume scanning just before the splice point.
	for index := strings.IndexAny(name[current:], s); index != -1; index = strings.IndexAny(name[current:], s) {
		switch current += index; name[current] {
		case start:
			nesting++
			if nesting == 1 {
				first = current
			}
		case end:
			nesting--
			switch {
			case nesting < 0:
				return name // Mismatch, abort
			case nesting == 0:
				name = name[:first] + name[current+1:]
				current = first - 1
			}
		}
		current++
	}
	return name
}

// newMapping creates a mappingTable for a profile, opening an object file
// for every used mapping that is eligible for (re-)symbolization.
func newMapping(prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI, force bool) (*mappingTable, error) {
	mt := &mappingTable{
		prof:     prof,
		segments: make(map[*profile.Mapping]plugin.ObjFile),
	}

	// Identify used mappings
	mappings := make(map[*profile.Mapping]bool)
	for _, l := range prof.Location {
		mappings[l.Mapping] = true
	}

	missingBinaries := false
	for midx, m := range prof.Mapping {
		if !mappings[m] {
			continue
		}

		// Do not attempt to re-symbolize a mapping that has already been symbolized.
		if !force && (m.HasFunctions || m.HasFilenames || m.HasLineNumbers) {
			continue
		}

		if m.File == "" {
			// Index 0 is conventionally the main binary.
			if midx == 0 {
				ui.PrintErr("Main binary filename not available.")
				continue
			}
			missingBinaries = true
			continue
		}

		// Skip well-known system mappings
		if m.Unsymbolizable() {
			continue
		}

		// Skip mappings pointing to a source URL
		if m.BuildID == "" {
			if u, err := url.Parse(m.File); err == nil && u.IsAbs() && strings.Contains(strings.ToLower(u.Scheme), "http") {
				continue
			}
		}

		name := filepath.Base(m.File)
		if m.BuildID != "" {
			name += fmt.Sprintf(" (build ID %s)", m.BuildID)
		}
		f, err := obj.Open(m.File, m.Start, m.Limit, m.Offset, m.KernelRelocationSymbol)
		if err != nil {
			ui.PrintErr("Local symbolization failed for ", name, ": ", err)
			missingBinaries = true
			continue
		}
		// Reject a local file whose build ID does not match the profile's.
		if fid := f.BuildID(); m.BuildID != "" && fid != "" && fid != m.BuildID {
			ui.PrintErr("Local symbolization failed for ", name, ": build ID mismatch")
			f.Close()
			continue
		}

		mt.segments[m] = f
	}
	if missingBinaries {
		ui.PrintErr("Some binary filenames not available. Symbolization may be incomplete.\n" +
			"Try setting PPROF_BINARY_PATH to the search path for local binaries.")
	}
	return mt, nil
}

// mappingTable contains the mechanisms for symbolization of a
// profile.
type mappingTable struct {
	prof     *profile.Profile
	segments map[*profile.Mapping]plugin.ObjFile
}

// close releases any external processes being used for the mapping.
func (mt *mappingTable) close() {
	for _, segment := range mt.segments {
		segment.Close()
	}
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz.go
new file mode 100644
index 0000000000000000000000000000000000000000..7be304866fdc57535c4b4d3a53ee33d4e2574675
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz.go
@@ -0,0 +1,200 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package symbolz symbolizes a profile using the output from the symbolz
// service.
package symbolz

import (
	"bytes"
	"fmt"
	"io"
	"net/url"
	"path"
	"regexp"
	"strconv"
	"strings"

	"github.com/google/pprof/internal/plugin"
	"github.com/google/pprof/profile"
)

var (
	// symbolzRE matches one symbolz response line: "<hex address> <symbol>".
	symbolzRE = regexp.MustCompile(`(0x[[:xdigit:]]+)\s+(.*)`)
)

// Symbolize symbolizes profile p by parsing data returned by a symbolz
// handler. syms receives the symbolz query (hex addresses separated by '+')
// and returns the symbolz output in a string. If force is false, it will only
// symbolize locations from mappings not already marked as HasFunctions. Never
// attempts symbolization of addresses from unsymbolizable system
// mappings as those may look negative - e.g. "[vsyscall]".
func Symbolize(p *profile.Profile, force bool, sources plugin.MappingSources, syms func(string, string) ([]byte, error), ui plugin.UI) error {
	for _, m := range p.Mapping {
		if !force && m.HasFunctions {
			// Only check for HasFunctions as symbolz only populates function names.
			continue
		}
		// Skip well-known system mappings.
		if m.Unsymbolizable() {
			continue
		}
		// Sources can be keyed by file name or by build ID; try both.
		mappingSources := sources[m.File]
		if m.BuildID != "" {
			mappingSources = append(mappingSources, sources[m.BuildID]...)
		}
		for _, source := range mappingSources {
			if symz := symbolz(source.Source); symz != "" {
				if err := symbolizeMapping(symz, int64(source.Start)-int64(m.Start), syms, m, p); err != nil {
					return err
				}
				// First source that yields a symbolz URL wins for this mapping.
				m.HasFunctions = true
				break
			}
		}
	}

	return nil
}

// hasGperftoolsSuffix checks whether path ends with one of the suffixes listed in
// pprof_remote_servers.html from the gperftools distribution
func hasGperftoolsSuffix(path string) bool {
	suffixes := []string{
		"/pprof/heap",
		"/pprof/growth",
		"/pprof/profile",
		"/pprof/pmuprofile",
		"/pprof/contention",
	}
	for _, s := range suffixes {
		if strings.HasSuffix(path, s) {
			return true
		}
	}
	return false
}

// symbolz returns the corresponding symbolz source for a profile URL.
// It returns "" when source is not an absolute URL with a host.
func symbolz(source string) string {
	if url, err := url.Parse(source); err == nil && url.Host != "" {
		// All paths in the net/http/pprof Go package contain /debug/pprof/
		if strings.Contains(url.Path, "/debug/pprof/") || hasGperftoolsSuffix(url.Path) {
			url.Path = path.Clean(url.Path + "/../symbol")
		} else {
			url.Path = "/symbolz"
		}
		url.RawQuery = ""
		return url.String()
	}

	return ""
}

// symbolizeMapping symbolizes locations belonging to a Mapping by querying
// a symbolz handler. An offset is applied to all addresses to take care of
// normalization occurred for merged Mappings.
func symbolizeMapping(source string, offset int64, syms func(string, string) ([]byte, error), m *profile.Mapping, p *profile.Profile) error {
	// Construct query of addresses to symbolize.
	var a []string
	for _, l := range p.Location {
		// Only query addresses in this mapping that have no line info yet.
		if l.Mapping == m && l.Address != 0 && len(l.Line) == 0 {
			// Compensate for normalization.
			addr, overflow := adjust(l.Address, offset)
			if overflow {
				return fmt.Errorf("cannot adjust address %d by %d, it would overflow (mapping %v)", l.Address, offset, l.Mapping)
			}
			a = append(a, fmt.Sprintf("%#x", addr))
		}
	}

	if len(a) == 0 {
		// No addresses to symbolize.
		return nil
	}

	lines := make(map[uint64]profile.Line)
	functions := make(map[string]*profile.Function)

	b, err := syms(source, strings.Join(a, "+"))
	if err != nil {
		return err
	}

	buf := bytes.NewBuffer(b)
	for {
		l, err := buf.ReadString('\n')

		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}

		if symbol := symbolzRE.FindStringSubmatch(l); len(symbol) == 3 {
			origAddr, err := strconv.ParseUint(symbol[1], 0, 64)
			if err != nil {
				return fmt.Errorf("unexpected parse failure %s: %v", symbol[1], err)
			}
			// Reapply offset expected by the profile.
			addr, overflow := adjust(origAddr, -offset)
			if overflow {
				return fmt.Errorf("cannot adjust symbolz address %d by %d, it would overflow", origAddr, -offset)
			}

			name := symbol[2]
			fn := functions[name]
			if fn == nil {
				fn = &profile.Function{
					ID:         uint64(len(p.Function) + 1),
					Name:       name,
					SystemName: name,
				}
				functions[name] = fn
				p.Function = append(p.Function, fn)
			}

			lines[addr] = profile.Line{Function: fn}
		}
	}

	// Attach the single resolved line to each matching location.
	for _, l := range p.Location {
		if l.Mapping != m {
			continue
		}
		if line, ok := lines[l.Address]; ok {
			l.Line = []profile.Line{line}
		}
	}

	return nil
}

// adjust shifts the specified address by the signed offset. It returns the
// adjusted address. It signals that the address cannot be adjusted without an
// overflow by returning true in the second return value.
func adjust(addr uint64, offset int64) (uint64, bool) {
	adj := uint64(int64(addr) + offset)
	if offset < 0 {
		// A negative offset must strictly decrease the address.
		if adj >= addr {
			return 0, true
		}
	} else {
		// A zero/positive offset must not decrease the address
		// (offset == 0 leaves adj == addr, which passes).
		if adj < addr {
			return 0, true
		}
	}
	return adj, false
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/transport/transport.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/transport/transport.go
new file mode 100644
index 0000000000000000000000000000000000000000..6c3bd0dfd0111f3aa66345a394f62e2c48c64f6d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/internal/transport/transport.go
@@ -0,0 +1,131 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package transport provides a mechanism to send requests with https cert,
// key, and CA.
package transport

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
	"sync"

	"github.com/google/pprof/internal/plugin"
)

// transport is an http.RoundTripper that loads TLS credentials from the
// -tls_* flags lazily, on the first request (see RoundTrip/initialize).
type transport struct {
	cert       *string // value of -tls_cert; nil when flags were not registered
	key        *string // value of -tls_key
	ca         *string // value of -tls_ca
	caCertPool *x509.CertPool
	certs      []tls.Certificate
	initOnce   sync.Once
	initErr    error
}

const extraUsage = ` -tls_cert TLS client certificate file for fetching profile and symbols
 -tls_key TLS private key file for fetching profile and symbols
 -tls_ca TLS CA certs file for fetching profile and symbols`

// New returns a round tripper for making requests with the
// specified cert, key, and ca. The flags tls_cert, tls_key, and tls_ca are
// added to the flagset to allow a user to specify the cert, key, and ca. If
// the flagset is nil, no flags will be added, and users will not be able to
// use these flags.
func New(flagset plugin.FlagSet) http.RoundTripper {
	if flagset == nil {
		return &transport{}
	}
	flagset.AddExtraUsage(extraUsage)
	return &transport{
		cert: flagset.String("tls_cert", "", "TLS client certificate file for fetching profile and symbols"),
		key:  flagset.String("tls_key", "", "TLS private key file for fetching profile and symbols"),
		ca:   flagset.String("tls_ca", "", "TLS CA certs file for fetching profile and symbols"),
	}
}

// initialize uses the cert, key, and ca to initialize the certs
// to use these when making requests.
+func (tr *transport) initialize() error { + var cert, key, ca string + if tr.cert != nil { + cert = *tr.cert + } + if tr.key != nil { + key = *tr.key + } + if tr.ca != nil { + ca = *tr.ca + } + + if cert != "" && key != "" { + tlsCert, err := tls.LoadX509KeyPair(cert, key) + if err != nil { + return fmt.Errorf("could not load certificate/key pair specified by -tls_cert and -tls_key: %v", err) + } + tr.certs = []tls.Certificate{tlsCert} + } else if cert == "" && key != "" { + return fmt.Errorf("-tls_key is specified, so -tls_cert must also be specified") + } else if cert != "" && key == "" { + return fmt.Errorf("-tls_cert is specified, so -tls_key must also be specified") + } + + if ca != "" { + caCertPool := x509.NewCertPool() + caCert, err := os.ReadFile(ca) + if err != nil { + return fmt.Errorf("could not load CA specified by -tls_ca: %v", err) + } + caCertPool.AppendCertsFromPEM(caCert) + tr.caCertPool = caCertPool + } + + return nil +} + +// RoundTrip executes a single HTTP transaction, returning +// a Response for the provided Request. +func (tr *transport) RoundTrip(req *http.Request) (*http.Response, error) { + tr.initOnce.Do(func() { + tr.initErr = tr.initialize() + }) + if tr.initErr != nil { + return nil, tr.initErr + } + + tlsConfig := &tls.Config{ + RootCAs: tr.caCertPool, + Certificates: tr.certs, + } + + if req.URL.Scheme == "https+insecure" { + // Make shallow copy of request, and req.URL, so the request's URL can be + // modified. 
+ r := *req + *r.URL = *req.URL + req = &r + tlsConfig.InsecureSkipVerify = true + req.URL.Scheme = "https" + } + + transport := http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: tlsConfig, + } + + return transport.RoundTrip(req) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/encode.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/encode.go new file mode 100644 index 0000000000000000000000000000000000000000..182c926b9089842a6f1c4e2cc84d8b371e3ebde3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/encode.go @@ -0,0 +1,588 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "errors" + "sort" + "strings" +) + +func (p *Profile) decoder() []decoder { + return profileDecoder +} + +// preEncode populates the unexported fields to be used by encode +// (with suffix X) from the corresponding exported fields. The +// exported fields are cleared up to facilitate testing. 
func (p *Profile) preEncode() {
	// strings maps each distinct string to its index in the string table;
	// index 0 is reserved for the empty string, per the proto format.
	strings := make(map[string]int)
	addString(strings, "")

	for _, st := range p.SampleType {
		st.typeX = addString(strings, st.Type)
		st.unitX = addString(strings, st.Unit)
	}

	for _, s := range p.Sample {
		s.labelX = nil
		// Sort label keys for deterministic encoding.
		var keys []string
		for k := range s.Label {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			vs := s.Label[k]
			for _, v := range vs {
				s.labelX = append(s.labelX,
					label{
						keyX: addString(strings, k),
						strX: addString(strings, v),
					},
				)
			}
		}
		var numKeys []string
		for k := range s.NumLabel {
			numKeys = append(numKeys, k)
		}
		sort.Strings(numKeys)
		for _, k := range numKeys {
			keyX := addString(strings, k)
			vs := s.NumLabel[k]
			units := s.NumUnit[k]
			for i, v := range vs {
				var unitX int64
				if len(units) != 0 {
					unitX = addString(strings, units[i])
				}
				s.labelX = append(s.labelX,
					label{
						keyX:  keyX,
						numX:  v,
						unitX: unitX,
					},
				)
			}
		}
		s.locationIDX = make([]uint64, len(s.Location))
		for i, loc := range s.Location {
			s.locationIDX[i] = loc.ID
		}
	}

	for _, m := range p.Mapping {
		m.fileX = addString(strings, m.File)
		m.buildIDX = addString(strings, m.BuildID)
	}

	for _, l := range p.Location {
		for i, ln := range l.Line {
			if ln.Function != nil {
				l.Line[i].functionIDX = ln.Function.ID
			} else {
				l.Line[i].functionIDX = 0
			}
		}
		if l.Mapping != nil {
			l.mappingIDX = l.Mapping.ID
		} else {
			l.mappingIDX = 0
		}
	}
	for _, f := range p.Function {
		f.nameX = addString(strings, f.Name)
		f.systemNameX = addString(strings, f.SystemName)
		f.filenameX = addString(strings, f.Filename)
	}

	p.dropFramesX = addString(strings, p.DropFrames)
	p.keepFramesX = addString(strings, p.KeepFrames)

	if pt := p.PeriodType; pt != nil {
		pt.typeX = addString(strings, pt.Type)
		pt.unitX = addString(strings, pt.Unit)
	}

	p.commentX = nil
	for _, c := range p.Comments {
		p.commentX = append(p.commentX, addString(strings, c))
	}

	p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)

	// Materialize the string table in index order.
	p.stringTable = make([]string, len(strings))
	for s, i := range strings {
		p.stringTable[i] = s
	}
}

// encode writes p to b using the profile.proto field numbers.
func (p *Profile) encode(b *buffer) {
	for _, x := range p.SampleType {
		encodeMessage(b, 1, x)
	}
	for _, x := range p.Sample {
		encodeMessage(b, 2, x)
	}
	for _, x := range p.Mapping {
		encodeMessage(b, 3, x)
	}
	for _, x := range p.Location {
		encodeMessage(b, 4, x)
	}
	for _, x := range p.Function {
		encodeMessage(b, 5, x)
	}
	encodeStrings(b, 6, p.stringTable)
	encodeInt64Opt(b, 7, p.dropFramesX)
	encodeInt64Opt(b, 8, p.keepFramesX)
	encodeInt64Opt(b, 9, p.TimeNanos)
	encodeInt64Opt(b, 10, p.DurationNanos)
	if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
		encodeMessage(b, 11, p.PeriodType)
	}
	encodeInt64Opt(b, 12, p.Period)
	encodeInt64s(b, 13, p.commentX)
	encodeInt64(b, 14, p.defaultSampleTypeX)
}

// profileDecoder is indexed by proto field number; entry 0 is unused.
var profileDecoder = []decoder{
	nil, // 0
	// repeated ValueType sample_type = 1
	func(b *buffer, m message) error {
		x := new(ValueType)
		pp := m.(*Profile)
		pp.SampleType = append(pp.SampleType, x)
		return decodeMessage(b, x)
	},
	// repeated Sample sample = 2
	func(b *buffer, m message) error {
		x := new(Sample)
		pp := m.(*Profile)
		pp.Sample = append(pp.Sample, x)
		return decodeMessage(b, x)
	},
	// repeated Mapping mapping = 3
	func(b *buffer, m message) error {
		x := new(Mapping)
		pp := m.(*Profile)
		pp.Mapping = append(pp.Mapping, x)
		return decodeMessage(b, x)
	},
	// repeated Location location = 4
	func(b *buffer, m message) error {
		x := new(Location)
		x.Line = b.tmpLines[:0] // Use shared space temporarily
		pp := m.(*Profile)
		pp.Location = append(pp.Location, x)
		err := decodeMessage(b, x)
		b.tmpLines = x.Line[:0]
		// Copy to shrink size and detach from shared space.
		x.Line = append([]Line(nil), x.Line...)
		return err
	},
	// repeated Function function = 5
	func(b *buffer, m message) error {
		x := new(Function)
		pp := m.(*Profile)
		pp.Function = append(pp.Function, x)
		return decodeMessage(b, x)
	},
	// repeated string string_table = 6
	func(b *buffer, m message) error {
		err := decodeStrings(b, &m.(*Profile).stringTable)
		if err != nil {
			return err
		}
		if m.(*Profile).stringTable[0] != "" {
			return errors.New("string_table[0] must be ''")
		}
		return nil
	},
	// int64 drop_frames = 7
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
	// int64 keep_frames = 8
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
	// int64 time_nanos = 9
	func(b *buffer, m message) error {
		// A second time_nanos means two profiles were concatenated.
		if m.(*Profile).TimeNanos != 0 {
			return errConcatProfile
		}
		return decodeInt64(b, &m.(*Profile).TimeNanos)
	},
	// int64 duration_nanos = 10
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
	// ValueType period_type = 11
	func(b *buffer, m message) error {
		x := new(ValueType)
		pp := m.(*Profile)
		pp.PeriodType = x
		return decodeMessage(b, x)
	},
	// int64 period = 12
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
	// repeated int64 comment = 13
	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
	// int64 defaultSampleType = 14
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
}

// postDecode takes the unexported fields populated by decode (with
// suffix X) and populates the corresponding exported fields.
// The unexported fields are cleared up to facilitate testing.
func (p *Profile) postDecode() error {
	var err error
	// Small IDs are resolved through a dense slice (fast path);
	// out-of-range IDs fall back to a map.
	mappings := make(map[uint64]*Mapping, len(p.Mapping))
	mappingIds := make([]*Mapping, len(p.Mapping)+1)
	for _, m := range p.Mapping {
		m.File, err = getString(p.stringTable, &m.fileX, err)
		m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
		if m.ID < uint64(len(mappingIds)) {
			mappingIds[m.ID] = m
		} else {
			mappings[m.ID] = m
		}

		// If this a main linux kernel mapping with a relocation symbol suffix
		// ("[kernel.kallsyms]_text"), extract said suffix.
		// It is fairly hacky to handle at this level, but the alternatives appear even worse.
		const prefix = "[kernel.kallsyms]"
		if strings.HasPrefix(m.File, prefix) {
			m.KernelRelocationSymbol = m.File[len(prefix):]
		}
	}

	functions := make(map[uint64]*Function, len(p.Function))
	functionIds := make([]*Function, len(p.Function)+1)
	for _, f := range p.Function {
		f.Name, err = getString(p.stringTable, &f.nameX, err)
		f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
		f.Filename, err = getString(p.stringTable, &f.filenameX, err)
		if f.ID < uint64(len(functionIds)) {
			functionIds[f.ID] = f
		} else {
			functions[f.ID] = f
		}
	}

	locations := make(map[uint64]*Location, len(p.Location))
	locationIds := make([]*Location, len(p.Location)+1)
	for _, l := range p.Location {
		if id := l.mappingIDX; id < uint64(len(mappingIds)) {
			l.Mapping = mappingIds[id]
		} else {
			l.Mapping = mappings[id]
		}
		l.mappingIDX = 0
		for i, ln := range l.Line {
			if id := ln.functionIDX; id != 0 {
				l.Line[i].functionIDX = 0
				if id < uint64(len(functionIds)) {
					l.Line[i].Function = functionIds[id]
				} else {
					l.Line[i].Function = functions[id]
				}
			}
		}
		if l.ID < uint64(len(locationIds)) {
			locationIds[l.ID] = l
		} else {
			locations[l.ID] = l
		}
	}

	for _, st := range p.SampleType {
		st.Type, err = getString(p.stringTable, &st.typeX, err)
		st.Unit, err = getString(p.stringTable, &st.unitX, err)
	}

	// Pre-allocate space for all locations.
	numLocations := 0
	for _, s := range p.Sample {
		numLocations += len(s.locationIDX)
	}
	locBuffer := make([]*Location, numLocations)

	for _, s := range p.Sample {
		if len(s.labelX) > 0 {
			labels := make(map[string][]string, len(s.labelX))
			numLabels := make(map[string][]int64, len(s.labelX))
			numUnits := make(map[string][]string, len(s.labelX))
			for _, l := range s.labelX {
				var key, value string
				key, err = getString(p.stringTable, &l.keyX, err)
				if l.strX != 0 {
					// String-valued label.
					value, err = getString(p.stringTable, &l.strX, err)
					labels[key] = append(labels[key], value)
				} else if l.numX != 0 || l.unitX != 0 {
					// Numeric label; keep units aligned with values.
					numValues := numLabels[key]
					units := numUnits[key]
					if l.unitX != 0 {
						var unit string
						unit, err = getString(p.stringTable, &l.unitX, err)
						units = padStringArray(units, len(numValues))
						numUnits[key] = append(units, unit)
					}
					numLabels[key] = append(numLabels[key], l.numX)
				}
			}
			if len(labels) > 0 {
				s.Label = labels
			}
			if len(numLabels) > 0 {
				s.NumLabel = numLabels
				for key, units := range numUnits {
					if len(units) > 0 {
						numUnits[key] = padStringArray(units, len(numLabels[key]))
					}
				}
				s.NumUnit = numUnits
			}
		}

		// Carve this sample's locations out of the shared buffer.
		s.Location = locBuffer[:len(s.locationIDX)]
		locBuffer = locBuffer[len(s.locationIDX):]
		for i, lid := range s.locationIDX {
			if lid < uint64(len(locationIds)) {
				s.Location[i] = locationIds[lid]
			} else {
				s.Location[i] = locations[lid]
			}
		}
		s.locationIDX = nil
	}

	p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
	p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)

	if pt := p.PeriodType; pt == nil {
		p.PeriodType = &ValueType{}
	}

	if pt := p.PeriodType; pt != nil {
		pt.Type, err = getString(p.stringTable, &pt.typeX, err)
		pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
	}

	for _, i := range p.commentX {
		var c string
		c, err = getString(p.stringTable, &i, err)
		p.Comments = append(p.Comments, c)
	}

	p.commentX = nil
	p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
	p.stringTable = nil
	return err
}

// padStringArray pads arr with enough empty strings to make arr
// length l when arr's length is less than l.
func padStringArray(arr []string, l int) []string {
	if l <= len(arr) {
		return arr
	}
	return append(arr, make([]string, l-len(arr))...)
}

func (p *ValueType) decoder() []decoder {
	return valueTypeDecoder
}

func (p *ValueType) encode(b *buffer) {
	encodeInt64Opt(b, 1, p.typeX)
	encodeInt64Opt(b, 2, p.unitX)
}

var valueTypeDecoder = []decoder{
	nil, // 0
	// optional int64 type = 1
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
	// optional int64 unit = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
}

func (p *Sample) decoder() []decoder {
	return sampleDecoder
}

func (p *Sample) encode(b *buffer) {
	encodeUint64s(b, 1, p.locationIDX)
	encodeInt64s(b, 2, p.Value)
	for _, x := range p.labelX {
		encodeMessage(b, 3, x)
	}
}

var sampleDecoder = []decoder{
	nil, // 0
	// repeated uint64 location = 1
	func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
	// repeated int64 value = 2
	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
	// repeated Label label = 3
	func(b *buffer, m message) error {
		s := m.(*Sample)
		n := len(s.labelX)
		s.labelX = append(s.labelX, label{})
		return decodeMessage(b, &s.labelX[n])
	},
}

func (p label) decoder() []decoder {
	return labelDecoder
}

func (p label) encode(b *buffer) {
	encodeInt64Opt(b, 1, p.keyX)
	encodeInt64Opt(b, 2, p.strX)
	encodeInt64Opt(b, 3, p.numX)
	encodeInt64Opt(b, 4, p.unitX)
}

var labelDecoder = []decoder{
	nil, // 0
	// optional int64 key = 1
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
	// optional int64 str = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
	// optional int64 num = 3
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
	// optional int64 num = 4
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
}

func (p *Mapping) decoder() []decoder {
	return mappingDecoder
}

func (p *Mapping) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeUint64Opt(b, 2, p.Start)
	encodeUint64Opt(b, 3, p.Limit)
	encodeUint64Opt(b, 4, p.Offset)
	encodeInt64Opt(b, 5, p.fileX)
	encodeInt64Opt(b, 6, p.buildIDX)
	encodeBoolOpt(b, 7, p.HasFunctions)
	encodeBoolOpt(b, 8, p.HasFilenames)
	encodeBoolOpt(b, 9, p.HasLineNumbers)
	encodeBoolOpt(b, 10, p.HasInlineFrames)
}

var mappingDecoder = []decoder{
	nil, // 0
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) },            // optional uint64 id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) },         // optional uint64 memory_offset = 2
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) },         // optional uint64 memory_limit = 3
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) },        // optional uint64 file_offset = 4
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) },          // optional int64 filename = 5
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) },       // optional int64 build_id = 6
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) },    // optional bool has_functions = 7
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) },    // optional bool has_filenames = 8
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) },  // optional bool has_line_numbers = 9
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
}

func (p *Location) decoder() []decoder {
	return locationDecoder
}

func (p *Location) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeUint64Opt(b, 2, p.mappingIDX)
	encodeUint64Opt(b, 3, p.Address)
	for i := range p.Line {
		encodeMessage(b, 4, &p.Line[i])
	}
	encodeBoolOpt(b, 5, p.IsFolded)
}

var locationDecoder = []decoder{
	nil, // 0
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) },         // optional uint64 id = 1;
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) },    // optional uint64 address = 3;
	func(b *buffer, m message) error { // repeated Line line = 4
		pp := m.(*Location)
		n := len(pp.Line)
		pp.Line = append(pp.Line, Line{})
		return decodeMessage(b, &pp.Line[n])
	},
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
}

func (p *Line) decoder() []decoder {
	return lineDecoder
}

func (p *Line) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.functionIDX)
	encodeInt64Opt(b, 2, p.Line)
}

var lineDecoder = []decoder{
	nil, // 0
	// optional uint64 function_id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
	// optional int64 line = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
}

func (p *Function) decoder() []decoder {
	return functionDecoder
}

func (p *Function) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeInt64Opt(b, 2, p.nameX)
	encodeInt64Opt(b, 3, p.systemNameX)
	encodeInt64Opt(b, 4, p.filenameX)
	encodeInt64Opt(b, 5, p.StartLine)
}

var functionDecoder = []decoder{
	nil, // 0
	// optional uint64 id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
	// optional int64 function_name = 2
	func(b 
*buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) }, + // optional int64 function_system_name = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) }, + // repeated int64 filename = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) }, + // optional int64 start_line = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) }, +} + +func addString(strings map[string]int, s string) int64 { + i, ok := strings[s] + if !ok { + i = len(strings) + strings[s] = i + } + return int64(i) +} + +func getString(strings []string, strng *int64, err error) (string, error) { + if err != nil { + return "", err + } + s := int(*strng) + if s < 0 || s >= len(strings) { + return "", errMalformed + } + *strng = 0 + return strings[s], nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/filter.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/filter.go new file mode 100644 index 0000000000000000000000000000000000000000..c794b939067cfd119e0427a5ea7af5ff5ba2cdb7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/filter.go @@ -0,0 +1,274 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +// Implements methods to filter samples from profiles. 
+ +import "regexp" + +// FilterSamplesByName filters the samples in a profile and only keeps +// samples where at least one frame matches focus but none match ignore. +// Returns true if the corresponding regexp matched at least one sample. +func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { + if focus == nil && ignore == nil && hide == nil && show == nil { + fm = true // Missing focus implies a match + return + } + focusOrIgnore := make(map[uint64]bool) + hidden := make(map[uint64]bool) + for _, l := range p.Location { + if ignore != nil && l.matchesName(ignore) { + im = true + focusOrIgnore[l.ID] = false + } else if focus == nil || l.matchesName(focus) { + fm = true + focusOrIgnore[l.ID] = true + } + + if hide != nil && l.matchesName(hide) { + hm = true + l.Line = l.unmatchedLines(hide) + if len(l.Line) == 0 { + hidden[l.ID] = true + } + } + if show != nil { + l.Line = l.matchedLines(show) + if len(l.Line) == 0 { + hidden[l.ID] = true + } else { + hnm = true + } + } + } + + s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + if focusedAndNotIgnored(sample.Location, focusOrIgnore) { + if len(hidden) > 0 { + var locs []*Location + for _, loc := range sample.Location { + if !hidden[loc.ID] { + locs = append(locs, loc) + } + } + if len(locs) == 0 { + // Remove sample with no locations (by not adding it to s). + continue + } + sample.Location = locs + } + s = append(s, sample) + } + } + p.Sample = s + + return +} + +// ShowFrom drops all stack frames above the highest matching frame and returns +// whether a match was found. If showFrom is nil it returns false and does not +// modify the profile. +// +// Example: consider a sample with frames [A, B, C, B], where A is the root. +// ShowFrom(nil) returns false and has frames [A, B, C, B]. +// ShowFrom(A) returns true and has frames [A, B, C, B]. +// ShowFrom(B) returns true and has frames [B, C, B].
+// ShowFrom(C) returns true and has frames [C, B]. +// ShowFrom(D) returns false and drops the sample because no frames remain. +func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) { + if showFrom == nil { + return false + } + // showFromLocs stores location IDs that matched ShowFrom. + showFromLocs := make(map[uint64]bool) + // Apply to locations. + for _, loc := range p.Location { + if filterShowFromLocation(loc, showFrom) { + showFromLocs[loc.ID] = true + matched = true + } + } + // For all samples, strip locations after the highest matching one. + s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + for i := len(sample.Location) - 1; i >= 0; i-- { + if showFromLocs[sample.Location[i].ID] { + sample.Location = sample.Location[:i+1] + s = append(s, sample) + break + } + } + } + p.Sample = s + return matched +} + +// filterShowFromLocation tests a showFrom regex against a location, removes +// lines after the last match and returns whether a match was found. If the +// mapping is matched, then all lines are kept. +func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool { + if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) { + return true + } + if i := loc.lastMatchedLineIndex(showFrom); i >= 0 { + loc.Line = loc.Line[:i+1] + return true + } + return false +} + +// lastMatchedLineIndex returns the index of the last line that matches a regex, +// or -1 if no match is found. +func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int { + for i := len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return i + } + } + } + return -1 +} + +// FilterTagsByName filters the tags in a profile and only keeps +// tags that match show and not hide.
+func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) { + matchRemove := func(name string) bool { + matchShow := show == nil || show.MatchString(name) + matchHide := hide != nil && hide.MatchString(name) + + if matchShow { + sm = true + } + if matchHide { + hm = true + } + return !matchShow || matchHide + } + for _, s := range p.Sample { + for lab := range s.Label { + if matchRemove(lab) { + delete(s.Label, lab) + } + } + for lab := range s.NumLabel { + if matchRemove(lab) { + delete(s.NumLabel, lab) + } + } + } + return +} + +// matchesName returns whether the location matches the regular +// expression. It checks any available function names, file names, and +// mapping object filename. +func (loc *Location) matchesName(re *regexp.Regexp) bool { + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return true + } + } + } + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return true + } + return false +} + +// unmatchedLines returns the lines in the location that do not match +// the regular expression. If the location's mapping file matches, nil is returned. +func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return nil + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// matchedLines returns the lines in the location that match +// the regular expression.
+func (loc *Location) matchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return loc.Line + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// focusedAndNotIgnored looks up a slice of ids against a map of +// focused/ignored locations. The map only contains locations that are +// explicitly focused or ignored. Returns whether there is at least +// one focused location but no ignored locations. +func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { + var f bool + for _, loc := range locs { + if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { + if focus { + // Found focused location. Must keep searching in case there + // is an ignored one as well. + f = true + } else { + // Found ignored location. Can return false right away. + return false + } + } + } + return f +} + +// TagMatch is a predicate over a sample, used to select samples by their tags. +type TagMatch func(s *Sample) bool + +// FilterSamplesByTag removes all samples from the profile, except +// those that match the focus matcher and do not match the ignore +// matcher.
+func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) { + samples := make([]*Sample, 0, len(p.Sample)) + for _, s := range p.Sample { + focused, ignored := true, false + if focus != nil { + focused = focus(s) + } + if ignore != nil { + ignored = ignore(s) + } + fm = fm || focused + im = im || ignored + if focused && !ignored { + samples = append(samples, s) + } + } + p.Sample = samples + return +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/index.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/index.go new file mode 100644 index 0000000000000000000000000000000000000000..bef1d60467c0b38f49dfab4ebf057435bd32ebdb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/index.go @@ -0,0 +1,64 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "strconv" + "strings" +) + +// SampleIndexByName returns the appropriate index for a value of sample index. +// If numeric, it returns the number, otherwise it looks up the text in the +// profile sample types. An empty string selects the profile's default sample type, falling back to the last sample type.
+func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) { + if sampleIndex == "" { + if dst := p.DefaultSampleType; dst != "" { + for i, t := range sampleTypes(p) { + if t == dst { + return i, nil + } + } + } + // By default, select the last sample value. + return len(p.SampleType) - 1, nil + } + if i, err := strconv.Atoi(sampleIndex); err == nil { + if i < 0 || i >= len(p.SampleType) { + return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1) + } + return i, nil + } + + // Remove the inuse_ prefix to support legacy pprof options + // "inuse_space" and "inuse_objects" for profiles containing types + // "space" and "objects". + noInuse := strings.TrimPrefix(sampleIndex, "inuse_") + for i, t := range p.SampleType { + if t.Type == sampleIndex || t.Type == noInuse { + return i, nil + } + } + + return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p)) +} + +// sampleTypes returns the type names of the profile's sample values. +func sampleTypes(p *Profile) []string { + types := make([]string, len(p.SampleType)) + for i, t := range p.SampleType { + types[i] = t.Type + } + return types +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go new file mode 100644 index 0000000000000000000000000000000000000000..91f45e53c6c269d30a9426ca7dc66c3f57060d95 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -0,0 +1,315 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert java legacy profiles into +// the profile.proto format. + +package profile + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var ( + attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`) + javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`) + javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`) + javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`) + javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`) +) + +// javaCPUProfile returns a new Profile from profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}}, + } + var err error + var locs map[uint64]*Location + if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil { + return nil, err + } + + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge.
+ if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaProfile returns a new profile from heapz or contentionz +// data. b is the profile bytes after the header. +func parseJavaProfile(b []byte) (*Profile, error) { + h := bytes.SplitAfterN(b, []byte("\n"), 2) + if len(h) < 2 { + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{}, + } + header := string(bytes.TrimSpace(h[0])) + + var err error + var pType string + switch header { + case "--- heapz 1 ---": + pType = "heap" + case "--- contentionz 1 ---": + pType = "contention" + default: + return nil, errUnrecognized + } + + if b, err = parseJavaHeader(pType, h[1], p); err != nil { + return nil, err + } + var locs map[uint64]*Location + if b, locs, err = parseJavaSamples(pType, b, p); err != nil { + return nil, err + } + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaHeader parses the attribute section on a java profile and +// populates a profile. Returns the remainder of the buffer after all +// attributes. +func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + h := attributeRx.FindStringSubmatch(line) + if h == nil { + // Not a valid attribute, exit.
+ return b, nil + } + + attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2]) + var err error + switch pType + "/" + attribute { + case "heap/format", "cpu/format", "contention/format": + if value != "java" { + return nil, errUnrecognized + } + case "heap/resolution": + p.SampleType = []*ValueType{ + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: value}, + } + case "contention/resolution": + p.SampleType = []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: value}, + } + case "contention/sampling period": + p.PeriodType = &ValueType{ + Type: "contentions", Unit: "count", + } + if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + case "contention/ms since reset": + millis, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + p.DurationNanos = millis * 1000 * 1000 + default: + return nil, errUnrecognized + } + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, nil +} + +// parseJavaSamples parses the samples from a java profile and +// populates the Samples in a profile. Returns the remainder of the +// buffer after the samples. +func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + locs := make(map[uint64]*Location) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + sample := javaSampleRx.FindStringSubmatch(line) + if sample == nil { + // Not a valid sample, exit. + return b, locs, nil + } + + // Java profiles have data/fields inverted compared to other + // profile types.
+ var err error + value1, value2, value3 := sample[2], sample[1], sample[3] + addrs, err := parseHexAddresses(value3) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + var sloc []*Location + for _, addr := range addrs { + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + s := &Sample{ + Value: make([]int64, 2), + Location: sloc, + } + + if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + + switch pType { + case "heap": + const javaHeapzSamplingRate = 524288 // 512K + if s.Value[0] == 0 { + return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line) + } + s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}} + s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate) + case "contention": + if period := p.Period; period != 0 { + s.Value[0] = s.Value[0] * p.Period + s.Value[1] = s.Value[1] * p.Period + } + } + p.Sample = append(p.Sample, s) + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, locs, nil +} + +// parseJavaLocations parses the location information in a java +// profile and populates the Locations in a profile. It uses the +// location addresses from the profile as the ID of each +// location.
+func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error { + r := bytes.NewBuffer(b) + fns := make(map[string]*Function) + for { + line, err := r.ReadString('\n') + if err != nil { + if err != io.EOF { + return err + } + if line == "" { + break + } + } + + if line = strings.TrimSpace(line); line == "" { + continue + } + + jloc := javaLocationRx.FindStringSubmatch(line) + if len(jloc) != 3 { + continue + } + addr, err := strconv.ParseUint(jloc[1], 16, 64) + if err != nil { + return fmt.Errorf("parsing sample %s: %v", line, err) + } + loc := locs[addr] + if loc == nil { + // Unused/unseen address: no sample referenced it, so skip. + continue + } + var lineFunc, lineFile string + var lineNo int64 + + if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 { + // Found a line of the form: "function (file:line)" + lineFunc, lineFile = fileLine[1], fileLine[2] + if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 { + lineNo = n + } + } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 { + // If there's not a file:line, it's a shared library path. + // The path isn't interesting, so just give the .so. + lineFunc, lineFile = filePath[1], filepath.Base(filePath[2]) + } else if strings.Contains(jloc[2], "generated stub/JIT") { + lineFunc = "STUB" + } else { + // Treat whole line as the function name. This is used by the + // java agent for internal states such as "GC" or "VM".
+ lineFunc = jloc[2] + } + fn := fns[lineFunc] + + if fn == nil { + fn = &Function{ + Name: lineFunc, + SystemName: lineFunc, + Filename: lineFile, + } + fns[lineFunc] = fn + p.Function = append(p.Function, fn) + } + loc.Line = []Line{ + { + Function: fn, + Line: lineNo, + }, + } + // The address has been resolved to a symbolized line; clear it. + loc.Address = 0 + } + + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/legacy_profile.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/legacy_profile.go new file mode 100644 index 0000000000000000000000000000000000000000..8d07fd6c27c2af3052c3fd36562307560a058623 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -0,0 +1,1228 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert legacy profiles into the +// profile.proto format.
+ +package profile + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "regexp" + "strconv" + "strings" +) + +var ( + countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`) + countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`) + + heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) + heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) + + contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`) + + hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`) + + growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`) + + fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`) + + threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`) + threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) + + // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools. + // Recommended format: + // Start End object file name offset(optional) linker build id + // 0x40000-0x80000 /path/to/binary (@FF00) abc123456 + spaceDigits = `\s+[[:digit:]]+` + hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+` + oSpace = `\s*` + // Capturing expressions. + cHex = `(?:0x)?([[:xdigit:]]+)` + cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?` + cSpaceString = `(?:\s+(\S+))?` + cSpaceHex = `(?:\s+([[:xdigit:]]+))?` + cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?` + cPerm = `(?:\s+([-rwxp]+))?` + + procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString) + briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex) + + // Regular expression to parse log data, of the form: + // ... file:line] msg...
+ logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`) +) + +// isSpaceOrComment reports whether line is blank or a '#' comment line. +func isSpaceOrComment(line string) bool { + trimmed := strings.TrimSpace(line) + return len(trimmed) == 0 || trimmed[0] == '#' +} + +// parseGoCount parses a Go count profile (e.g., threadcreate or +// goroutine) and returns a new Profile. +func parseGoCount(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip comments at the beginning of the file. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + if err := s.Err(); err != nil { + return nil, err + } + m := countStartRE.FindStringSubmatch(s.Text()) + if m == nil { + return nil, errUnrecognized + } + profileType := m[1] + p := &Profile{ + PeriodType: &ValueType{Type: profileType, Unit: "count"}, + Period: 1, + SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, + } + locations := make(map[uint64]*Location) + for s.Scan() { + line := s.Text() + if isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + m := countRE.FindStringSubmatch(line) + if m == nil { + return nil, errMalformed + } + n, err := strconv.ParseInt(m[1], 0, 64) + if err != nil { + return nil, errMalformed + } + fields := strings.Fields(m[2]) + locs := make([]*Location, 0, len(fields)) + for _, stk := range fields { + addr, err := strconv.ParseUint(stk, 0, 64) + if err != nil { + return nil, errMalformed + } + // Adjust all frames by -1 to land on top of the call instruction.
+ addr-- + loc := locations[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locations[addr] = loc + p.Location = append(p.Location, loc) + } + locs = append(locs, loc) + } + p.Sample = append(p.Sample, &Sample{ + Location: locs, + Value: []int64{n}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +// remapLocationIDs ensures there is a location for each address +// referenced by a sample, and remaps the samples to point to the new +// location ids. +func (p *Profile) remapLocationIDs() { + seen := make(map[*Location]bool, len(p.Location)) + var locs []*Location + + for _, s := range p.Sample { + for _, l := range s.Location { + if seen[l] { + continue + } + l.ID = uint64(len(locs) + 1) + locs = append(locs, l) + seen[l] = true + } + } + p.Location = locs +} + +// remapFunctionIDs drops functions not referenced by any location's +// lines and reassigns sequential IDs to the rest. +func (p *Profile) remapFunctionIDs() { + seen := make(map[*Function]bool, len(p.Function)) + var fns []*Function + + for _, l := range p.Location { + for _, ln := range l.Line { + fn := ln.Function + if fn == nil || seen[fn] { + continue + } + fn.ID = uint64(len(fns) + 1) + fns = append(fns, fn) + seen[fn] = true + } + } + p.Function = fns +} + +// remapMappingIDs matches location addresses with existing mappings +// and updates them appropriately. This is O(N*M), if this ever shows +// up as a bottleneck, evaluate sorting the mappings and doing a +// binary search, which would make it O(N*log(M)). +func (p *Profile) remapMappingIDs() { + // Some profile handlers will incorrectly set regions for the main + // executable if its section is remapped. Fix them through heuristics. + + if len(p.Mapping) > 0 { + // Remove the initial mapping if named '/anon_hugepage' and has a + // consecutive adjacent mapping.
+ if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") { + if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start { + p.Mapping = p.Mapping[1:] + } + } + } + + // Subtract the offset from the start of the main mapping if it + // ends up at a recognizable start address. + if len(p.Mapping) > 0 { + const expectedStart = 0x400000 + if m := p.Mapping[0]; m.Start-m.Offset == expectedStart { + m.Start = expectedStart + m.Offset = 0 + } + } + + // Associate each location with an address to the corresponding + // mapping. Create fake mapping if a suitable one isn't found. + var fake *Mapping +nextLocation: + for _, l := range p.Location { + a := l.Address + if l.Mapping != nil || a == 0 { + continue + } + for _, m := range p.Mapping { + if m.Start <= a && a < m.Limit { + l.Mapping = m + continue nextLocation + } + } + // Work around legacy handlers failing to encode the first + // part of mappings split into adjacent ranges. + for _, m := range p.Mapping { + if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start { + m.Start -= m.Offset + m.Offset = 0 + l.Mapping = m + continue nextLocation + } + } + // If there is still no mapping, create a fake one. + // This is important for the Go legacy handler, which produced + // no mappings. + if fake == nil { + fake = &Mapping{ + ID: 1, + Limit: ^uint64(0), + } + p.Mapping = append(p.Mapping, fake) + } + l.Mapping = fake + } + + // Reset all mapping IDs.
+ for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +// cpuInts lists the candidate word decoders (32/64-bit, little/big-endian) tried when sniffing a profilez header. +var cpuInts = []func([]byte) (uint64, []byte){ + get32l, + get32b, + get64l, + get64b, +} + +func get32l(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] +} + +func get32b(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] +} + +func get64l(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] +} + +func get64b(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] +} + +// parseCPU parses a profilez legacy profile and returns a newly +// populated Profile. +// +// The general format for profilez samples is a sequence of words in +// binary format. The first words are a header with the following data: +// +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding.
+func parseCPU(b []byte) (*Profile, error) { + var parse func([]byte) (uint64, []byte) + var n1, n2, n3, n4, n5 uint64 + for _, parse = range cpuInts { + var tmp []byte + n1, tmp = parse(b) + n2, tmp = parse(tmp) + n3, tmp = parse(tmp) + n4, tmp = parse(tmp) + n5, tmp = parse(tmp) + + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { + b = tmp + return cpuProfile(b, int64(n4), parse) + } + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 { + b = tmp + return javaCPUProfile(b, int64(n4), parse) + } + } + return nil, errUnrecognized +} + +// cpuProfile returns a new Profile from C++ profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "nanoseconds"}, + }, + } + var err error + if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { + return nil, err + } + + // If *most* samples have the same second-to-the-bottom frame, it + // strongly suggests that it is an uninteresting artifact of + // measurement -- a stack frame pushed by the signal handler. The + // bottom frame is always correct as it is picked up from the signal + // structure, not the stack. Check if this is the case and if so, + // remove. + + // Remove up to two frames. + maxiter := 2 + // Allow one different sample for this many samples with the same + // second-to-last frame. 
+ similarSamples := 32 + margin := len(p.Sample) / similarSamples + + for iter := 0; iter < maxiter; iter++ { + addr1 := make(map[uint64]int) + for _, s := range p.Sample { + if len(s.Location) > 1 { + a := s.Location[1].Address + addr1[a] = addr1[a] + 1 + } + } + + for id1, count := range addr1 { + if count >= len(p.Sample)-margin { + // Found uninteresting frame, strip it out from all samples + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[1].Address == id1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } + break + } + } + } + + if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +func cleanupDuplicateLocations(p *Profile) { + // The profile handler may duplicate the leaf frame, because it gets + // its address both from stack unwinding and from the signal + // context. Detect this and delete the duplicate, which has been + // adjusted by -1. The leaf address should not be adjusted as it is + // not a call. + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } +} + +// parseCPUSamples parses a collection of profilez samples from a +// profile. +// +// profilez samples are a repeated sequence of stack frames of the +// form: +// +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// +// The last stack trace is of the form: +// +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 +// +// Addresses from stack traces may point to the next instruction after +// each call. Optionally adjust by -1 to land somewhere on the actual +// call (except for the leaf, which is not a call). 
+func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { + locs := make(map[uint64]*Location) + for len(b) > 0 { + var count, nstk uint64 + count, b = parse(b) + nstk, b = parse(b) + if b == nil || nstk > uint64(len(b)/4) { + return nil, nil, errUnrecognized + } + var sloc []*Location + addrs := make([]uint64, nstk) + for i := 0; i < int(nstk); i++ { + addrs[i], b = parse(b) + } + + if count == 0 && nstk == 1 && addrs[0] == 0 { + // End of data marker + break + } + for i, addr := range addrs { + if adjust && i > 0 { + addr-- + } + loc := locs[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locs[addr] = loc + p.Location = append(p.Location, loc) + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, + &Sample{ + Value: []int64{int64(count), int64(count) * p.Period}, + Location: sloc, + }) + } + // Reached the end without finding the EOD marker. + return b, locs, nil +} + +// parseHeap parses a heapz legacy or a growthz profile and +// returns a newly populated Profile. +func parseHeap(b []byte) (p *Profile, err error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + p = &Profile{} + + sampling := "" + hasAlloc := false + + line := s.Text() + p.PeriodType = &ValueType{Type: "space", Unit: "bytes"} + if header := heapHeaderRE.FindStringSubmatch(line); header != nil { + sampling, p.Period, hasAlloc, err = parseHeapHeader(line) + if err != nil { + return nil, err + } + } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else { + return nil, errUnrecognized + } + + if hasAlloc { + // Put alloc before inuse so that default pprof selection + // will prefer inuse_space. 
+ p.SampleType = []*ValueType{ + {Type: "alloc_objects", Unit: "count"}, + {Type: "alloc_space", Unit: "bytes"}, + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: "bytes"}, + } + } else { + p.SampleType = []*ValueType{ + {Type: "objects", Unit: "count"}, + {Type: "space", Unit: "bytes"}, + } + } + + locs := make(map[uint64]*Location) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + + if isSpaceOrComment(line) { + continue + } + + if isMemoryMapSentinel(line) { + break + } + + value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc) + if err != nil { + return nil, err + } + + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + NumLabel: map[string][]int64{"bytes": {blocksize}}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) { + header := heapHeaderRE.FindStringSubmatch(line) + if header == nil { + return "", 0, false, errUnrecognized + } + + if len(header[6]) > 0 { + if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { + return "", 0, false, errUnrecognized + } + } + + if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") { + hasAlloc = true + } + + switch header[5] { + case "heapz_v2", "heap_v2": + return "v2", period, hasAlloc, nil + case "heapprofile": + return "", 1, hasAlloc, nil + case "heap": + return "v2", period / 2, hasAlloc, nil + default: + 
return "", 0, false, errUnrecognized + } +} + +// parseHeapSample parses a single row from a heap profile into a new Sample. +func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) { + sampleData := heapSampleRE.FindStringSubmatch(line) + if len(sampleData) != 6 { + return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) + } + + // This is a local-scoped helper function to avoid needing to pass + // around rate, sampling and many return parameters. + addValues := func(countString, sizeString string, label string) error { + count, err := strconv.ParseInt(countString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + size, err := strconv.ParseInt(sizeString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + if count == 0 && size != 0 { + return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size) + } + if count != 0 { + blocksize = size / count + if sampling == "v2" { + count, size = scaleHeapSample(count, size, rate) + } + } + value = append(value, count, size) + return nil + } + + if includeAlloc { + if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil { + return nil, 0, nil, err + } + } + + if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil { + return nil, 0, nil, err + } + + addrs, err = parseHexAddresses(sampleData[5]) + if err != nil { + return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, blocksize, addrs, nil +} + +// parseHexAddresses extracts hex numbers from a string, attempts to convert +// each to an unsigned 64-bit number and returns the resulting numbers as a +// slice, or an error if the string contains hex numbers which are too large to +// handle (which means a malformed profile). 
+func parseHexAddresses(s string) ([]uint64, error) { + hexStrings := hexNumberRE.FindAllString(s, -1) + var addrs []uint64 + for _, s := range hexStrings { + if addr, err := strconv.ParseUint(s, 0, 64); err == nil { + addrs = append(addrs, addr) + } else { + return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s) + } + } + return addrs, nil +} + +// scaleHeapSample adjusts the data from a heapz Sample to +// account for its probability of appearing in the collected +// data. heapz profiles are a sampling of the memory allocations +// requests in a program. We estimate the unsampled value by dividing +// each collected sample by its probability of appearing in the +// profile. heapz v2 profiles rely on a poisson process to determine +// which samples to collect, based on the desired average collection +// rate R. The probability of a sample of size S to appear in that +// profile is 1-exp(-S/R). +func scaleHeapSample(count, size, rate int64) (int64, int64) { + if count == 0 || size == 0 { + return 0, 0 + } + + if rate <= 1 { + // if rate==1 all samples were collected so no adjustment is needed. + // if rate<1 treat as unknown and skip scaling. + return count, size + } + + avgSize := float64(size) / float64(count) + scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) + + return int64(float64(count) * scale), int64(float64(size) * scale) +} + +// parseContention parses a mutex or contention profile. There are 2 cases: +// "--- contentionz " for legacy C++ profiles (and backwards compatibility) +// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. 
+func parseContention(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + + switch l := s.Text(); { + case strings.HasPrefix(l, "--- contentionz "): + case strings.HasPrefix(l, "--- mutex:"): + case strings.HasPrefix(l, "--- contention:"): + default: + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{Type: "contentions", Unit: "count"}, + Period: 1, + SampleType: []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: "nanoseconds"}, + }, + } + + var cpuHz int64 + // Parse text of the form "attribute = value" before the samples. + const delimiter = "=" + for s.Scan() { + line := s.Text() + if line = strings.TrimSpace(line); isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + attr := strings.SplitN(line, delimiter, 2) + if len(attr) != 2 { + break + } + key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]) + var err error + switch key { + case "cycles/second": + if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "sampling period": + if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "ms since reset": + ms, err := strconv.ParseInt(val, 0, 64) + if err != nil { + return nil, errUnrecognized + } + p.DurationNanos = ms * 1000 * 1000 + case "format": + // CPP contentionz profiles don't have format. + return nil, errUnrecognized + case "resolution": + // CPP contentionz profiles don't have resolution. 
+ return nil, errUnrecognized + case "discarded samples": + default: + return nil, errUnrecognized + } + } + if err := s.Err(); err != nil { + return nil, err + } + + locs := make(map[uint64]*Location) + for { + line := strings.TrimSpace(s.Text()) + if strings.HasPrefix(line, "---") { + break + } + if !isSpaceOrComment(line) { + value, addrs, err := parseContentionSample(line, p.Period, cpuHz) + if err != nil { + return nil, err + } + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + }) + } + if !s.Scan() { + break + } + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + return p, nil +} + +// parseContentionSample parses a single row from a contention profile +// into a new Sample. +func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) { + sampleData := contentionSampleRE.FindStringSubmatch(line) + if sampleData == nil { + return nil, nil, errUnrecognized + } + + v1, err := strconv.ParseInt(sampleData[1], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + v2, err := strconv.ParseInt(sampleData[2], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + // Unsample values if period and cpuHz are available. + // - Delays are scaled to cycles and then to nanoseconds. + // - Contentions are scaled to cycles. 
+ if period > 0 { + if cpuHz > 0 { + cpuGHz := float64(cpuHz) / 1e9 + v1 = int64(float64(v1) * float64(period) / cpuGHz) + } + v2 = v2 * period + } + + value = []int64{v2, v1} + addrs, err = parseHexAddresses(sampleData[3]) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, addrs, nil +} + +// parseThread parses a Threadz profile and returns a new Profile. +func parseThread(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip past comments and empty lines seeking a real header. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + + line := s.Text() + if m := threadzStartRE.FindStringSubmatch(line); m != nil { + // Advance over initial comments until first stack trace. + for s.Scan() { + if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") { + break + } + } + } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + p := &Profile{ + SampleType: []*ValueType{{Type: "thread", Unit: "count"}}, + PeriodType: &ValueType{Type: "thread", Unit: "count"}, + Period: 1, + } + + locs := make(map[uint64]*Location) + // Recognize each thread and populate profile samples. + for !isMemoryMapSentinel(line) { + if strings.HasPrefix(line, "---- no stack trace for") { + break + } + if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + var addrs []uint64 + var err error + line, addrs, err = parseThreadSample(s) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + // We got a --same as previous threads--. Bump counters. + if len(p.Sample) > 0 { + s := p.Sample[len(p.Sample)-1] + s.Value[0]++ + } + continue + } + + var sloc []*Location + for i, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call + // (except for the leaf, which is not a call). 
+ if i > 0 { + addr-- + } + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: []int64{1}, + Location: sloc, + }) + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +// parseThreadSample parses a symbolized or unsymbolized stack trace. +// Returns the first line after the traceback, the sample (or nil if +// it hits a 'same-as-previous' marker) and an error. +func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) { + var line string + sameAsPrevious := false + for s.Scan() { + line = strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + if strings.HasPrefix(line, "---") { + break + } + if strings.Contains(line, "same as previous thread") { + sameAsPrevious = true + continue + } + + curAddrs, err := parseHexAddresses(line) + if err != nil { + return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + addrs = append(addrs, curAddrs...) + } + if err := s.Err(); err != nil { + return "", nil, err + } + if sameAsPrevious { + return line, nil, nil + } + return line, addrs, nil +} + +// parseAdditionalSections parses any additional sections in the +// profile, ignoring any unrecognized sections. +func parseAdditionalSections(s *bufio.Scanner, p *Profile) error { + for !isMemoryMapSentinel(s.Text()) && s.Scan() { + } + if err := s.Err(); err != nil { + return err + } + return p.ParseMemoryMapFromScanner(s) +} + +// ParseProcMaps parses a memory map in the format of /proc/self/maps. +// ParseMemoryMap should be called after setting on a profile to +// associate locations to the corresponding mapping based on their +// address. 
+func ParseProcMaps(rd io.Reader) ([]*Mapping, error) { + s := bufio.NewScanner(rd) + return parseProcMapsFromScanner(s) +} + +func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) { + var mapping []*Mapping + + var attrs []string + const delimiter = "=" + r := strings.NewReplacer() + for s.Scan() { + line := r.Replace(removeLoggingInfo(s.Text())) + m, err := parseMappingEntry(line) + if err != nil { + if err == errUnrecognized { + // Recognize assignments of the form: attr=value, and replace + // $attr with value on subsequent mappings. + if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 { + attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])) + r = strings.NewReplacer(attrs...) + } + // Ignore any unrecognized entries + continue + } + return nil, err + } + if m == nil { + continue + } + mapping = append(mapping, m) + } + if err := s.Err(); err != nil { + return nil, err + } + return mapping, nil +} + +// removeLoggingInfo detects and removes log prefix entries generated +// by the glog package. If no logging prefix is detected, the string +// is returned unmodified. +func removeLoggingInfo(line string) string { + if match := logInfoRE.FindStringIndex(line); match != nil { + return line[match[1]:] + } + return line +} + +// ParseMemoryMap parses a memory map in the format of +// /proc/self/maps, and overrides the mappings in the current profile. +// It renumbers the samples and locations in the profile correspondingly. +func (p *Profile) ParseMemoryMap(rd io.Reader) error { + return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd)) +} + +// ParseMemoryMapFromScanner parses a memory map in the format of +// /proc/self/maps or a variety of legacy format, and overrides the +// mappings in the current profile. It renumbers the samples and +// locations in the profile correspondingly. 
+func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error { + mapping, err := parseProcMapsFromScanner(s) + if err != nil { + return err + } + p.Mapping = append(p.Mapping, mapping...) + p.massageMappings() + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + return nil +} + +func parseMappingEntry(l string) (*Mapping, error) { + var start, end, perm, file, offset, buildID string + if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 { + start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5] + } else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 { + start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6] + } else { + return nil, errUnrecognized + } + + var err error + mapping := &Mapping{ + File: file, + BuildID: buildID, + } + if perm != "" && !strings.Contains(perm, "x") { + // Skip non-executable entries. + return nil, nil + } + if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil { + return nil, errUnrecognized + } + if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil { + return nil, errUnrecognized + } + if offset != "" { + if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil { + return nil, errUnrecognized + } + } + return mapping, nil +} + +var memoryMapSentinels = []string{ + "--- Memory map: ---", + "MAPPED_LIBRARIES:", +} + +// isMemoryMapSentinel returns true if the string contains one of the +// known sentinels for memory map information. 
+func isMemoryMapSentinel(line string) bool { + for _, s := range memoryMapSentinels { + if strings.Contains(line, s) { + return true + } + } + return false +} + +func (p *Profile) addLegacyFrameInfo() { + switch { + case isProfileType(p, heapzSampleTypes): + p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr + case isProfileType(p, contentionzSampleTypes): + p.DropFrames, p.KeepFrames = lockRxStr, "" + default: + p.DropFrames, p.KeepFrames = cpuProfilerRxStr, "" + } +} + +var heapzSampleTypes = [][]string{ + {"allocations", "size"}, // early Go pprof profiles + {"objects", "space"}, + {"inuse_objects", "inuse_space"}, + {"alloc_objects", "alloc_space"}, + {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles +} +var contentionzSampleTypes = [][]string{ + {"contentions", "delay"}, +} + +func isProfileType(p *Profile, types [][]string) bool { + st := p.SampleType +nextType: + for _, t := range types { + if len(st) != len(t) { + continue + } + + for i := range st { + if st[i].Type != t[i] { + continue nextType + } + } + return true + } + return false +} + +var allocRxStr = strings.Join([]string{ + // POSIX entry points. + `calloc`, + `cfree`, + `malloc`, + `free`, + `memalign`, + `do_memalign`, + `(__)?posix_memalign`, + `pvalloc`, + `valloc`, + `realloc`, + + // TC malloc. + `tcmalloc::.*`, + `tc_calloc`, + `tc_cfree`, + `tc_malloc`, + `tc_free`, + `tc_memalign`, + `tc_posix_memalign`, + `tc_pvalloc`, + `tc_valloc`, + `tc_realloc`, + `tc_new`, + `tc_delete`, + `tc_newarray`, + `tc_deletearray`, + `tc_new_nothrow`, + `tc_newarray_nothrow`, + + // Memory-allocation routines on OS X. + `malloc_zone_malloc`, + `malloc_zone_calloc`, + `malloc_zone_valloc`, + `malloc_zone_realloc`, + `malloc_zone_memalign`, + `malloc_zone_free`, + + // Go runtime + `runtime\..*`, + + // Other misc. 
memory allocation routines + `BaseArena::.*`, + `(::)?do_malloc_no_errno`, + `(::)?do_malloc_pages`, + `(::)?do_malloc`, + `DoSampledAllocation`, + `MallocedMemBlock::MallocedMemBlock`, + `_M_allocate`, + `__builtin_(vec_)?delete`, + `__builtin_(vec_)?new`, + `__gnu_cxx::new_allocator::allocate`, + `__libc_malloc`, + `__malloc_alloc_template::allocate`, + `allocate`, + `cpp_alloc`, + `operator new(\[\])?`, + `simple_alloc::allocate`, +}, `|`) + +var allocSkipRxStr = strings.Join([]string{ + // Preserve Go runtime frames that appear in the middle/bottom of + // the stack. + `runtime\.panic`, + `runtime\.reflectcall`, + `runtime\.call[0-9]*`, +}, `|`) + +var cpuProfilerRxStr = strings.Join([]string{ + `ProfileData::Add`, + `ProfileData::prof_handler`, + `CpuProfiler::prof_handler`, + `__pthread_sighandler`, + `__restore`, +}, `|`) + +var lockRxStr = strings.Join([]string{ + `RecordLockProfileData`, + `(base::)?RecordLockProfileData.*`, + `(base::)?SubmitMutexProfileData.*`, + `(base::)?SubmitSpinLockProfileData.*`, + `(base::Mutex::)?AwaitCommon.*`, + `(base::Mutex::)?Unlock.*`, + `(base::Mutex::)?UnlockSlow.*`, + `(base::Mutex::)?ReaderUnlock.*`, + `(base::MutexLock::)?~MutexLock.*`, + `(Mutex::)?AwaitCommon.*`, + `(Mutex::)?Unlock.*`, + `(Mutex::)?UnlockSlow.*`, + `(Mutex::)?ReaderUnlock.*`, + `(MutexLock::)?~MutexLock.*`, + `(SpinLock::)?Unlock.*`, + `(SpinLock::)?SlowUnlock.*`, + `(SpinLockHolder::)?~SpinLockHolder.*`, +}, `|`) diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/merge.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/merge.go new file mode 100644 index 0000000000000000000000000000000000000000..4b66282cb8e03f28c531c707ec33857bb7220906 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/merge.go @@ -0,0 +1,667 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "encoding/binary" + "fmt" + "sort" + "strconv" + "strings" +) + +// Compact performs garbage collection on a profile to remove any +// unreferenced fields. This is useful to reduce the size of a profile +// after samples or locations have been removed. +func (p *Profile) Compact() *Profile { + p, _ = Merge([]*Profile{p}) + return p +} + +// Merge merges all the profiles in profs into a single Profile. +// Returns a new profile independent of the input profiles. The merged +// profile is compacted to eliminate unused samples, locations, +// functions and mappings. Profiles must have identical profile sample +// and period types or the merge will fail. profile.Period of the +// resulting profile will be the maximum of all profiles, and +// profile.TimeNanos will be the earliest nonzero one. Merges are +// associative with the caveat of the first profile having some +// specialization in how headers are combined. There may be other +// subtleties now or in the future regarding associativity. 
+func Merge(srcs []*Profile) (*Profile, error) { + if len(srcs) == 0 { + return nil, fmt.Errorf("no profiles to merge") + } + p, err := combineHeaders(srcs) + if err != nil { + return nil, err + } + + pm := &profileMerger{ + p: p, + samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)), + locations: make(map[locationKey]*Location, len(srcs[0].Location)), + functions: make(map[functionKey]*Function, len(srcs[0].Function)), + mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)), + } + + for _, src := range srcs { + // Clear the profile-specific hash tables + pm.locationsByID = makeLocationIDMap(len(src.Location)) + pm.functionsByID = make(map[uint64]*Function, len(src.Function)) + pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) + + if len(pm.mappings) == 0 && len(src.Mapping) > 0 { + // The Mapping list has the property that the first mapping + // represents the main binary. Take the first Mapping we see, + // otherwise the operations below will add mappings in an + // arbitrary order. + pm.mapMapping(src.Mapping[0]) + } + + for _, s := range src.Sample { + if !isZeroSample(s) { + pm.mapSample(s) + } + } + } + + for _, s := range p.Sample { + if isZeroSample(s) { + // If there are any zero samples, re-merge the profile to GC + // them. + return Merge([]*Profile{p}) + } + } + + return p, nil +} + +// Normalize normalizes the source profile by multiplying each value in profile by the +// ratio of the sum of the base profile's values of that sample type to the sum of the +// source profile's value of that sample type. 
+func (p *Profile) Normalize(pb *Profile) error { + + if err := p.compatible(pb); err != nil { + return err + } + + baseVals := make([]int64, len(p.SampleType)) + for _, s := range pb.Sample { + for i, v := range s.Value { + baseVals[i] += v + } + } + + srcVals := make([]int64, len(p.SampleType)) + for _, s := range p.Sample { + for i, v := range s.Value { + srcVals[i] += v + } + } + + normScale := make([]float64, len(baseVals)) + for i := range baseVals { + if srcVals[i] == 0 { + normScale[i] = 0.0 + } else { + normScale[i] = float64(baseVals[i]) / float64(srcVals[i]) + } + } + p.ScaleN(normScale) + return nil +} + +func isZeroSample(s *Sample) bool { + for _, v := range s.Value { + if v != 0 { + return false + } + } + return true +} + +type profileMerger struct { + p *Profile + + // Memoization tables within a profile. + locationsByID locationIDMap + functionsByID map[uint64]*Function + mappingsByID map[uint64]mapInfo + + // Memoization tables for profile entities. + samples map[sampleKey]*Sample + locations map[locationKey]*Location + functions map[functionKey]*Function + mappings map[mappingKey]*Mapping +} + +type mapInfo struct { + m *Mapping + offset int64 +} + +func (pm *profileMerger) mapSample(src *Sample) *Sample { + // Check memoization table + k := pm.sampleKey(src) + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + + // Make new sample. 
+ s := &Sample{ + Location: make([]*Location, len(src.Location)), + Value: make([]int64, len(src.Value)), + Label: make(map[string][]string, len(src.Label)), + NumLabel: make(map[string][]int64, len(src.NumLabel)), + NumUnit: make(map[string][]string, len(src.NumLabel)), + } + for i, l := range src.Location { + s.Location[i] = pm.mapLocation(l) + } + for k, v := range src.Label { + vv := make([]string, len(v)) + copy(vv, v) + s.Label[k] = vv + } + for k, v := range src.NumLabel { + u := src.NumUnit[k] + vv := make([]int64, len(v)) + uu := make([]string, len(u)) + copy(vv, v) + copy(uu, u) + s.NumLabel[k] = vv + s.NumUnit[k] = uu + } + copy(s.Value, src.Value) + pm.samples[k] = s + pm.p.Sample = append(pm.p.Sample, s) + return s +} + +func (pm *profileMerger) sampleKey(sample *Sample) sampleKey { + // Accumulate contents into a string. + var buf strings.Builder + buf.Grow(64) // Heuristic to avoid extra allocs + + // encode a number + putNumber := func(v uint64) { + var num [binary.MaxVarintLen64]byte + n := binary.PutUvarint(num[:], v) + buf.Write(num[:n]) + } + + // encode a string prefixed with its length. + putDelimitedString := func(s string) { + putNumber(uint64(len(s))) + buf.WriteString(s) + } + + for _, l := range sample.Location { + // Get the location in the merged profile, which may have a different ID. 
+ if loc := pm.mapLocation(l); loc != nil { + putNumber(loc.ID) + } + } + putNumber(0) // Delimiter + + for _, l := range sortedKeys1(sample.Label) { + putDelimitedString(l) + values := sample.Label[l] + putNumber(uint64(len(values))) + for _, v := range values { + putDelimitedString(v) + } + } + + for _, l := range sortedKeys2(sample.NumLabel) { + putDelimitedString(l) + values := sample.NumLabel[l] + putNumber(uint64(len(values))) + for _, v := range values { + putNumber(uint64(v)) + } + units := sample.NumUnit[l] + putNumber(uint64(len(units))) + for _, v := range units { + putDelimitedString(v) + } + } + + return sampleKey(buf.String()) +} + +type sampleKey string + +// sortedKeys1 returns the sorted keys found in a string->[]string map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys2 and made into a generic function. +func sortedKeys1(m map[string][]string) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// sortedKeys2 returns the sorted keys found in a string->[]int64 map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys1 and made into a generic function. 
+func sortedKeys2(m map[string][]int64) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func (pm *profileMerger) mapLocation(src *Location) *Location { + if src == nil { + return nil + } + + if l := pm.locationsByID.get(src.ID); l != nil { + return l + } + + mi := pm.mapMapping(src.Mapping) + l := &Location{ + ID: uint64(len(pm.p.Location) + 1), + Mapping: mi.m, + Address: uint64(int64(src.Address) + mi.offset), + Line: make([]Line, len(src.Line)), + IsFolded: src.IsFolded, + } + for i, ln := range src.Line { + l.Line[i] = pm.mapLine(ln) + } + // Check memoization table. Must be done on the remapped location to + // account for the remapped mapping ID. + k := l.key() + if ll, ok := pm.locations[k]; ok { + pm.locationsByID.set(src.ID, ll) + return ll + } + pm.locationsByID.set(src.ID, l) + pm.locations[k] = l + pm.p.Location = append(pm.p.Location, l) + return l +} + +// key generates locationKey to be used as a key for maps. +func (l *Location) key() locationKey { + key := locationKey{ + addr: l.Address, + isFolded: l.IsFolded, + } + if l.Mapping != nil { + // Normalizes address to handle address space randomization. + key.addr -= l.Mapping.Start + key.mappingID = l.Mapping.ID + } + lines := make([]string, len(l.Line)*2) + for i, line := range l.Line { + if line.Function != nil { + lines[i*2] = strconv.FormatUint(line.Function.ID, 16) + } + lines[i*2+1] = strconv.FormatInt(line.Line, 16) + } + key.lines = strings.Join(lines, "|") + return key +} + +type locationKey struct { + addr, mappingID uint64 + lines string + isFolded bool +} + +func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { + if src == nil { + return mapInfo{} + } + + if mi, ok := pm.mappingsByID[src.ID]; ok { + return mi + } + + // Check memoization tables. 
+ mk := src.key() + if m, ok := pm.mappings[mk]; ok { + mi := mapInfo{m, int64(m.Start) - int64(src.Start)} + pm.mappingsByID[src.ID] = mi + return mi + } + m := &Mapping{ + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + KernelRelocationSymbol: src.KernelRelocationSymbol, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, + } + pm.p.Mapping = append(pm.p.Mapping, m) + + // Update memoization tables. + pm.mappings[mk] = m + mi := mapInfo{m, 0} + pm.mappingsByID[src.ID] = mi + return mi +} + +// key generates encoded strings of Mapping to be used as a key for +// maps. +func (m *Mapping) key() mappingKey { + // Normalize addresses to handle address space randomization. + // Round up to next 4K boundary to avoid minor discrepancies. + const mapsizeRounding = 0x1000 + + size := m.Limit - m.Start + size = size + mapsizeRounding - 1 + size = size - (size % mapsizeRounding) + key := mappingKey{ + size: size, + offset: m.Offset, + } + + switch { + case m.BuildID != "": + key.buildIDOrFile = m.BuildID + case m.File != "": + key.buildIDOrFile = m.File + default: + // A mapping containing neither build ID nor file name is a fake mapping. A + // key with empty buildIDOrFile is used for fake mappings so that they are + // treated as the same mapping during merging. 
+ } + return key +} + +type mappingKey struct { + size, offset uint64 + buildIDOrFile string +} + +func (pm *profileMerger) mapLine(src Line) Line { + ln := Line{ + Function: pm.mapFunction(src.Function), + Line: src.Line, + } + return ln +} + +func (pm *profileMerger) mapFunction(src *Function) *Function { + if src == nil { + return nil + } + if f, ok := pm.functionsByID[src.ID]; ok { + return f + } + k := src.key() + if f, ok := pm.functions[k]; ok { + pm.functionsByID[src.ID] = f + return f + } + f := &Function{ + ID: uint64(len(pm.p.Function) + 1), + Name: src.Name, + SystemName: src.SystemName, + Filename: src.Filename, + StartLine: src.StartLine, + } + pm.functions[k] = f + pm.functionsByID[src.ID] = f + pm.p.Function = append(pm.p.Function, f) + return f +} + +// key generates a struct to be used as a key for maps. +func (f *Function) key() functionKey { + return functionKey{ + f.StartLine, + f.Name, + f.SystemName, + f.Filename, + } +} + +type functionKey struct { + startLine int64 + name, systemName, fileName string +} + +// combineHeaders checks that all profiles can be merged and returns +// their combined profile. 
+func combineHeaders(srcs []*Profile) (*Profile, error) { + for _, s := range srcs[1:] { + if err := srcs[0].compatible(s); err != nil { + return nil, err + } + } + + var timeNanos, durationNanos, period int64 + var comments []string + seenComments := map[string]bool{} + var defaultSampleType string + for _, s := range srcs { + if timeNanos == 0 || s.TimeNanos < timeNanos { + timeNanos = s.TimeNanos + } + durationNanos += s.DurationNanos + if period == 0 || period < s.Period { + period = s.Period + } + for _, c := range s.Comments { + if seen := seenComments[c]; !seen { + comments = append(comments, c) + seenComments[c] = true + } + } + if defaultSampleType == "" { + defaultSampleType = s.DefaultSampleType + } + } + + p := &Profile{ + SampleType: make([]*ValueType, len(srcs[0].SampleType)), + + DropFrames: srcs[0].DropFrames, + KeepFrames: srcs[0].KeepFrames, + + TimeNanos: timeNanos, + DurationNanos: durationNanos, + PeriodType: srcs[0].PeriodType, + Period: period, + + Comments: comments, + DefaultSampleType: defaultSampleType, + } + copy(p.SampleType, srcs[0].SampleType) + return p, nil +} + +// compatible determines if two profiles can be compared/merged. +// returns nil if the profiles are compatible; otherwise an error with +// details on the incompatibility. +func (p *Profile) compatible(pb *Profile) error { + if !equalValueType(p.PeriodType, pb.PeriodType) { + return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType) + } + + if len(p.SampleType) != len(pb.SampleType) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + + for i := range p.SampleType { + if !equalValueType(p.SampleType[i], pb.SampleType[i]) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + } + return nil +} + +// equalValueType returns true if the two value types are semantically +// equal. It ignores the internal fields used during encode/decode. 
+func equalValueType(st1, st2 *ValueType) bool { + return st1.Type == st2.Type && st1.Unit == st2.Unit +} + +// locationIDMap is like a map[uint64]*Location, but provides efficiency for +// ids that are densely numbered, which is often the case. +type locationIDMap struct { + dense []*Location // indexed by id for id < len(dense) + sparse map[uint64]*Location // indexed by id for id >= len(dense) +} + +func makeLocationIDMap(n int) locationIDMap { + return locationIDMap{ + dense: make([]*Location, n), + sparse: map[uint64]*Location{}, + } +} + +func (lm locationIDMap) get(id uint64) *Location { + if id < uint64(len(lm.dense)) { + return lm.dense[int(id)] + } + return lm.sparse[id] +} + +func (lm locationIDMap) set(id uint64, loc *Location) { + if id < uint64(len(lm.dense)) { + lm.dense[id] = loc + return + } + lm.sparse[id] = loc +} + +// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It +// keeps sample types that appear in all profiles only and drops/reorders the +// sample types as necessary. +// +// In the case of sample types order is not the same for given profiles the +// order is derived from the first profile. +// +// Profiles are modified in-place. +// +// It returns an error if the sample type's intersection is empty. +func CompatibilizeSampleTypes(ps []*Profile) error { + sTypes := commonSampleTypes(ps) + if len(sTypes) == 0 { + return fmt.Errorf("profiles have empty common sample type list") + } + for _, p := range ps { + if err := compatibilizeSampleTypes(p, sTypes); err != nil { + return err + } + } + return nil +} + +// commonSampleTypes returns sample types that appear in all profiles in the +// order how they ordered in the first profile. 
+func commonSampleTypes(ps []*Profile) []string { + if len(ps) == 0 { + return nil + } + sTypes := map[string]int{} + for _, p := range ps { + for _, st := range p.SampleType { + sTypes[st.Type]++ + } + } + var res []string + for _, st := range ps[0].SampleType { + if sTypes[st.Type] == len(ps) { + res = append(res, st.Type) + } + } + return res +} + +// compatibilizeSampleTypes drops sample types that are not present in sTypes +// list and reorder them if needed. +// +// It sets DefaultSampleType to sType[0] if it is not in sType list. +// +// It assumes that all sample types from the sTypes list are present in the +// given profile otherwise it returns an error. +func compatibilizeSampleTypes(p *Profile, sTypes []string) error { + if len(sTypes) == 0 { + return fmt.Errorf("sample type list is empty") + } + defaultSampleType := sTypes[0] + reMap, needToModify := make([]int, len(sTypes)), false + for i, st := range sTypes { + if st == p.DefaultSampleType { + defaultSampleType = p.DefaultSampleType + } + idx := searchValueType(p.SampleType, st) + if idx < 0 { + return fmt.Errorf("%q sample type is not found in profile", st) + } + reMap[i] = idx + if idx != i { + needToModify = true + } + } + if !needToModify && len(sTypes) == len(p.SampleType) { + return nil + } + p.DefaultSampleType = defaultSampleType + oldSampleTypes := p.SampleType + p.SampleType = make([]*ValueType, len(sTypes)) + for i, idx := range reMap { + p.SampleType[i] = oldSampleTypes[idx] + } + values := make([]int64, len(sTypes)) + for _, s := range p.Sample { + for i, idx := range reMap { + values[i] = s.Value[idx] + } + s.Value = s.Value[:len(values)] + copy(s.Value, values) + } + return nil +} + +func searchValueType(vts []*ValueType, s string) int { + for i, vt := range vts { + if vt.Type == s { + return i + } + } + return -1 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/profile.go 
b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/profile.go new file mode 100644 index 0000000000000000000000000000000000000000..60ef7e92687f56e20c9b0541cf555f0c3caadc15 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/profile.go @@ -0,0 +1,856 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package profile provides a representation of profile.proto and +// methods to encode/decode profiles in this format. +package profile + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "math" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "time" +) + +// Profile is an in-memory representation of profile.proto. +type Profile struct { + SampleType []*ValueType + DefaultSampleType string + Sample []*Sample + Mapping []*Mapping + Location []*Location + Function []*Function + Comments []string + + DropFrames string + KeepFrames string + + TimeNanos int64 + DurationNanos int64 + PeriodType *ValueType + Period int64 + + // The following fields are modified during encoding and copying, + // so are protected by a Mutex. 
+	encodeMu sync.Mutex
+
+	commentX           []int64
+	dropFramesX        int64
+	keepFramesX        int64
+	stringTable        []string
+	defaultSampleTypeX int64
+}
+
+// ValueType corresponds to Profile.ValueType
+type ValueType struct {
+	Type string // cpu, wall, inuse_space, etc
+	Unit string // seconds, nanoseconds, bytes, etc
+
+	typeX int64
+	unitX int64
+}
+
+// Sample corresponds to Profile.Sample
+type Sample struct {
+	Location []*Location
+	Value    []int64
+	// Label is a per-label-key map to values for string labels.
+	//
+	// In general, having multiple values for the given label key is strongly
+	// discouraged - see docs for the sample label field in profile.proto. The
+	// main reason this unlikely state is tracked here is to make the
+	// decoding->encoding roundtrip not lossy. But we expect that the value
+	// slices present in this map are always of length 1.
+	Label map[string][]string
+	// NumLabel is a per-label-key map to values for numeric labels. See a note
+	// above on handling multiple values for a label.
+	NumLabel map[string][]int64
+	// NumUnit is a per-label-key map to the unit names of corresponding numeric
+	// label values. The unit info may be missing even if the label is in
+	// NumLabel, see the docs in profile.proto for details. When the value
+	// slice is present and not nil, its length must be equal to the length of
+	// the corresponding value slice in NumLabel.
+ NumUnit map[string][]string + + locationIDX []uint64 + labelX []label +} + +// label corresponds to Profile.Label +type label struct { + keyX int64 + // Exactly one of the two following values must be set + strX int64 + numX int64 // Integer value for this label + // can be set if numX has value + unitX int64 +} + +// Mapping corresponds to Profile.Mapping +type Mapping struct { + ID uint64 + Start uint64 + Limit uint64 + Offset uint64 + File string + BuildID string + HasFunctions bool + HasFilenames bool + HasLineNumbers bool + HasInlineFrames bool + + fileX int64 + buildIDX int64 + + // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File. + // For linux kernel mappings generated by some tools, correct symbolization depends + // on knowing which of the two possible relocation symbols was used for `Start`. + // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext"). + // + // Note, this public field is not persisted in the proto. For the purposes of + // copying / merging / hashing profiles, it is considered subsumed by `File`. + KernelRelocationSymbol string +} + +// Location corresponds to Profile.Location +type Location struct { + ID uint64 + Mapping *Mapping + Address uint64 + Line []Line + IsFolded bool + + mappingIDX uint64 +} + +// Line corresponds to Profile.Line +type Line struct { + Function *Function + Line int64 + + functionIDX uint64 +} + +// Function corresponds to Profile.Function +type Function struct { + ID uint64 + Name string + SystemName string + Filename string + StartLine int64 + + nameX int64 + systemNameX int64 + filenameX int64 +} + +// Parse parses a profile and checks for its validity. The input +// may be a gzip-compressed encoded protobuf or one of many legacy +// profile formats which may be unsupported in the future. 
+func Parse(r io.Reader) (*Profile, error) { + data, err := io.ReadAll(r) + if err != nil { + return nil, err + } + return ParseData(data) +} + +// ParseData parses a profile from a buffer and checks for its +// validity. +func ParseData(data []byte) (*Profile, error) { + var p *Profile + var err error + if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err == nil { + data, err = io.ReadAll(gz) + } + if err != nil { + return nil, fmt.Errorf("decompressing profile: %v", err) + } + } + if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile { + p, err = parseLegacy(data) + } + + if err != nil { + return nil, fmt.Errorf("parsing profile: %v", err) + } + + if err := p.CheckValid(); err != nil { + return nil, fmt.Errorf("malformed profile: %v", err) + } + return p, nil +} + +var errUnrecognized = fmt.Errorf("unrecognized profile format") +var errMalformed = fmt.Errorf("malformed profile format") +var errNoData = fmt.Errorf("empty input file") +var errConcatProfile = fmt.Errorf("concatenated profiles detected") + +func parseLegacy(data []byte) (*Profile, error) { + parsers := []func([]byte) (*Profile, error){ + parseCPU, + parseHeap, + parseGoCount, // goroutine, threadcreate + parseThread, + parseContention, + parseJavaProfile, + } + + for _, parser := range parsers { + p, err := parser(data) + if err == nil { + p.addLegacyFrameInfo() + return p, nil + } + if err != errUnrecognized { + return nil, err + } + } + return nil, errUnrecognized +} + +// ParseUncompressed parses an uncompressed protobuf into a profile. 
+func ParseUncompressed(data []byte) (*Profile, error) { + if len(data) == 0 { + return nil, errNoData + } + p := &Profile{} + if err := unmarshal(data, p); err != nil { + return nil, err + } + + if err := p.postDecode(); err != nil { + return nil, err + } + + return p, nil +} + +var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) + +// massageMappings applies heuristic-based changes to the profile +// mappings to account for quirks of some environments. +func (p *Profile) massageMappings() { + // Merge adjacent regions with matching names, checking that the offsets match + if len(p.Mapping) > 1 { + mappings := []*Mapping{p.Mapping[0]} + for _, m := range p.Mapping[1:] { + lm := mappings[len(mappings)-1] + if adjacent(lm, m) { + lm.Limit = m.Limit + if m.File != "" { + lm.File = m.File + } + if m.BuildID != "" { + lm.BuildID = m.BuildID + } + p.updateLocationMapping(m, lm) + continue + } + mappings = append(mappings, m) + } + p.Mapping = mappings + } + + // Use heuristics to identify main binary and move it to the top of the list of mappings + for i, m := range p.Mapping { + file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1)) + if len(file) == 0 { + continue + } + if len(libRx.FindStringSubmatch(file)) > 0 { + continue + } + if file[0] == '[' { + continue + } + // Swap what we guess is main to position 0. + p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0] + break + } + + // Keep the mapping IDs neatly sorted + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +// adjacent returns whether two mapping entries represent the same +// mapping that has been split into two. Check that their addresses are adjacent, +// and if the offsets match, if they are available. 
+func adjacent(m1, m2 *Mapping) bool { + if m1.File != "" && m2.File != "" { + if m1.File != m2.File { + return false + } + } + if m1.BuildID != "" && m2.BuildID != "" { + if m1.BuildID != m2.BuildID { + return false + } + } + if m1.Limit != m2.Start { + return false + } + if m1.Offset != 0 && m2.Offset != 0 { + offset := m1.Offset + (m1.Limit - m1.Start) + if offset != m2.Offset { + return false + } + } + return true +} + +func (p *Profile) updateLocationMapping(from, to *Mapping) { + for _, l := range p.Location { + if l.Mapping == from { + l.Mapping = to + } + } +} + +func serialize(p *Profile) []byte { + p.encodeMu.Lock() + p.preEncode() + b := marshal(p) + p.encodeMu.Unlock() + return b +} + +// Write writes the profile as a gzip-compressed marshaled protobuf. +func (p *Profile) Write(w io.Writer) error { + zw := gzip.NewWriter(w) + defer zw.Close() + _, err := zw.Write(serialize(p)) + return err +} + +// WriteUncompressed writes the profile as a marshaled protobuf. +func (p *Profile) WriteUncompressed(w io.Writer) error { + _, err := w.Write(serialize(p)) + return err +} + +// CheckValid tests whether the profile is valid. Checks include, but are +// not limited to: +// - len(Profile.Sample[n].value) == len(Profile.value_unit) +// - Sample.id has a corresponding Profile.Location +func (p *Profile) CheckValid() error { + // Check that sample values are consistent + sampleLen := len(p.SampleType) + if sampleLen == 0 && len(p.Sample) != 0 { + return fmt.Errorf("missing sample type information") + } + for _, s := range p.Sample { + if s == nil { + return fmt.Errorf("profile has nil sample") + } + if len(s.Value) != sampleLen { + return fmt.Errorf("mismatch: sample has %d values vs. 
%d types", len(s.Value), len(p.SampleType)) + } + for _, l := range s.Location { + if l == nil { + return fmt.Errorf("sample has nil location") + } + } + } + + // Check that all mappings/locations/functions are in the tables + // Check that there are no duplicate ids + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + for _, m := range p.Mapping { + if m == nil { + return fmt.Errorf("profile has nil mapping") + } + if m.ID == 0 { + return fmt.Errorf("found mapping with reserved ID=0") + } + if mappings[m.ID] != nil { + return fmt.Errorf("multiple mappings with same id: %d", m.ID) + } + mappings[m.ID] = m + } + functions := make(map[uint64]*Function, len(p.Function)) + for _, f := range p.Function { + if f == nil { + return fmt.Errorf("profile has nil function") + } + if f.ID == 0 { + return fmt.Errorf("found function with reserved ID=0") + } + if functions[f.ID] != nil { + return fmt.Errorf("multiple functions with same id: %d", f.ID) + } + functions[f.ID] = f + } + locations := make(map[uint64]*Location, len(p.Location)) + for _, l := range p.Location { + if l == nil { + return fmt.Errorf("profile has nil location") + } + if l.ID == 0 { + return fmt.Errorf("found location with reserved id=0") + } + if locations[l.ID] != nil { + return fmt.Errorf("multiple locations with same id: %d", l.ID) + } + locations[l.ID] = l + if m := l.Mapping; m != nil { + if m.ID == 0 || mappings[m.ID] != m { + return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID) + } + } + for _, ln := range l.Line { + f := ln.Function + if f == nil { + return fmt.Errorf("location id: %d has a line with nil function", l.ID) + } + if f.ID == 0 || functions[f.ID] != f { + return fmt.Errorf("inconsistent function %p: %d", f, f.ID) + } + } + } + return nil +} + +// Aggregate merges the locations in the profile into equivalence +// classes preserving the request attributes. It also updates the +// samples to point to the merged locations. 
+func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { + for _, m := range p.Mapping { + m.HasInlineFrames = m.HasInlineFrames && inlineFrame + m.HasFunctions = m.HasFunctions && function + m.HasFilenames = m.HasFilenames && filename + m.HasLineNumbers = m.HasLineNumbers && linenumber + } + + // Aggregate functions + if !function || !filename { + for _, f := range p.Function { + if !function { + f.Name = "" + f.SystemName = "" + } + if !filename { + f.Filename = "" + } + } + } + + // Aggregate locations + if !inlineFrame || !address || !linenumber { + for _, l := range p.Location { + if !inlineFrame && len(l.Line) > 1 { + l.Line = l.Line[len(l.Line)-1:] + } + if !linenumber { + for i := range l.Line { + l.Line[i].Line = 0 + } + } + if !address { + l.Address = 0 + } + } + } + + return p.CheckValid() +} + +// NumLabelUnits returns a map of numeric label keys to the units +// associated with those keys and a map of those keys to any units +// that were encountered but not used. +// Unit for a given key is the first encountered unit for that key. If multiple +// units are encountered for values paired with a particular key, then the first +// unit encountered is used and all other units are returned in sorted order +// in map of ignored units. +// If no units are encountered for a particular key, the unit is then inferred +// based on the key. +func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { + numLabelUnits := map[string]string{} + ignoredUnits := map[string]map[string]bool{} + encounteredKeys := map[string]bool{} + + // Determine units based on numeric tags for each sample. 
+ for _, s := range p.Sample { + for k := range s.NumLabel { + encounteredKeys[k] = true + for _, unit := range s.NumUnit[k] { + if unit == "" { + continue + } + if wantUnit, ok := numLabelUnits[k]; !ok { + numLabelUnits[k] = unit + } else if wantUnit != unit { + if v, ok := ignoredUnits[k]; ok { + v[unit] = true + } else { + ignoredUnits[k] = map[string]bool{unit: true} + } + } + } + } + } + // Infer units for keys without any units associated with + // numeric tag values. + for key := range encounteredKeys { + unit := numLabelUnits[key] + if unit == "" { + switch key { + case "alignment", "request": + numLabelUnits[key] = "bytes" + default: + numLabelUnits[key] = key + } + } + } + + // Copy ignored units into more readable format + unitsIgnored := make(map[string][]string, len(ignoredUnits)) + for key, values := range ignoredUnits { + units := make([]string, len(values)) + i := 0 + for unit := range values { + units[i] = unit + i++ + } + sort.Strings(units) + unitsIgnored[key] = units + } + + return numLabelUnits, unitsIgnored +} + +// String dumps a text representation of a profile. Intended mainly +// for debugging purposes. 
+func (p *Profile) String() string { + ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location)) + for _, c := range p.Comments { + ss = append(ss, "Comment: "+c) + } + if pt := p.PeriodType; pt != nil { + ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) + } + ss = append(ss, fmt.Sprintf("Period: %d", p.Period)) + if p.TimeNanos != 0 { + ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos))) + } + if p.DurationNanos != 0 { + ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos))) + } + + ss = append(ss, "Samples:") + var sh1 string + for _, s := range p.SampleType { + dflt := "" + if s.Type == p.DefaultSampleType { + dflt = "[dflt]" + } + sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt) + } + ss = append(ss, strings.TrimSpace(sh1)) + for _, s := range p.Sample { + ss = append(ss, s.string()) + } + + ss = append(ss, "Locations") + for _, l := range p.Location { + ss = append(ss, l.string()) + } + + ss = append(ss, "Mappings") + for _, m := range p.Mapping { + ss = append(ss, m.string()) + } + + return strings.Join(ss, "\n") + "\n" +} + +// string dumps a text representation of a mapping. Intended mainly +// for debugging purposes. +func (m *Mapping) string() string { + bits := "" + if m.HasFunctions { + bits = bits + "[FN]" + } + if m.HasFilenames { + bits = bits + "[FL]" + } + if m.HasLineNumbers { + bits = bits + "[LN]" + } + if m.HasInlineFrames { + bits = bits + "[IN]" + } + return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", + m.ID, + m.Start, m.Limit, m.Offset, + m.File, + m.BuildID, + bits) +} + +// string dumps a text representation of a location. Intended mainly +// for debugging purposes. 
+func (l *Location) string() string { + ss := []string{} + locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) + if m := l.Mapping; m != nil { + locStr = locStr + fmt.Sprintf("M=%d ", m.ID) + } + if l.IsFolded { + locStr = locStr + "[F] " + } + if len(l.Line) == 0 { + ss = append(ss, locStr) + } + for li := range l.Line { + lnStr := "??" + if fn := l.Line[li].Function; fn != nil { + lnStr = fmt.Sprintf("%s %s:%d s=%d", + fn.Name, + fn.Filename, + l.Line[li].Line, + fn.StartLine) + if fn.Name != fn.SystemName { + lnStr = lnStr + "(" + fn.SystemName + ")" + } + } + ss = append(ss, locStr+lnStr) + // Do not print location details past the first line + locStr = " " + } + return strings.Join(ss, "\n") +} + +// string dumps a text representation of a sample. Intended mainly +// for debugging purposes. +func (s *Sample) string() string { + ss := []string{} + var sv string + for _, v := range s.Value { + sv = fmt.Sprintf("%s %10d", sv, v) + } + sv = sv + ": " + for _, l := range s.Location { + sv = sv + fmt.Sprintf("%d ", l.ID) + } + ss = append(ss, sv) + const labelHeader = " " + if len(s.Label) > 0 { + ss = append(ss, labelHeader+labelsToString(s.Label)) + } + if len(s.NumLabel) > 0 { + ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit)) + } + return strings.Join(ss, "\n") +} + +// labelsToString returns a string representation of a +// map representing labels. +func labelsToString(labels map[string][]string) string { + ls := []string{} + for k, v := range labels { + ls = append(ls, fmt.Sprintf("%s:%v", k, v)) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// numLabelsToString returns a string representation of a map +// representing numeric labels. 
+func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { + ls := []string{} + for k, v := range numLabels { + units := numUnits[k] + var labelString string + if len(units) == len(v) { + values := make([]string, len(v)) + for i, vv := range v { + values[i] = fmt.Sprintf("%d %s", vv, units[i]) + } + labelString = fmt.Sprintf("%s:%v", k, values) + } else { + labelString = fmt.Sprintf("%s:%v", k, v) + } + ls = append(ls, labelString) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// SetLabel sets the specified key to the specified value for all samples in the +// profile. +func (p *Profile) SetLabel(key string, value []string) { + for _, sample := range p.Sample { + if sample.Label == nil { + sample.Label = map[string][]string{key: value} + } else { + sample.Label[key] = value + } + } +} + +// RemoveLabel removes all labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveLabel(key string) { + for _, sample := range p.Sample { + delete(sample.Label, key) + } +} + +// HasLabel returns true if a sample has a label with indicated key and value. +func (s *Sample) HasLabel(key, value string) bool { + for _, v := range s.Label[key] { + if v == value { + return true + } + } + return false +} + +// SetNumLabel sets the specified key to the specified value for all samples in the +// profile. "unit" is a slice that describes the units that each corresponding member +// of "values" is measured in (e.g. bytes or seconds). If there is no relevant +// unit for a given value, that member of "unit" should be the empty string. +// "unit" must either have the same length as "value", or be nil. 
+func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { + for _, sample := range p.Sample { + if sample.NumLabel == nil { + sample.NumLabel = map[string][]int64{key: value} + } else { + sample.NumLabel[key] = value + } + if sample.NumUnit == nil { + sample.NumUnit = map[string][]string{key: unit} + } else { + sample.NumUnit[key] = unit + } + } +} + +// RemoveNumLabel removes all numerical labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveNumLabel(key string) { + for _, sample := range p.Sample { + delete(sample.NumLabel, key) + delete(sample.NumUnit, key) + } +} + +// DiffBaseSample returns true if a sample belongs to the diff base and false +// otherwise. +func (s *Sample) DiffBaseSample() bool { + return s.HasLabel("pprof::base", "true") +} + +// Scale multiplies all sample values in a profile by a constant and keeps +// only samples that have at least one non-zero value. +func (p *Profile) Scale(ratio float64) { + if ratio == 1 { + return + } + ratios := make([]float64, len(p.SampleType)) + for i := range p.SampleType { + ratios[i] = ratio + } + p.ScaleN(ratios) +} + +// ScaleN multiplies each sample values in a sample by a different amount +// and keeps only samples that have at least one non-zero value. 
+func (p *Profile) ScaleN(ratios []float64) error { + if len(p.SampleType) != len(ratios) { + return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType)) + } + allOnes := true + for _, r := range ratios { + if r != 1 { + allOnes = false + break + } + } + if allOnes { + return nil + } + fillIdx := 0 + for _, s := range p.Sample { + keepSample := false + for i, v := range s.Value { + if ratios[i] != 1 { + val := int64(math.Round(float64(v) * ratios[i])) + s.Value[i] = val + keepSample = keepSample || val != 0 + } + } + if keepSample { + p.Sample[fillIdx] = s + fillIdx++ + } + } + p.Sample = p.Sample[:fillIdx] + return nil +} + +// HasFunctions determines if all locations in this profile have +// symbolized function information. +func (p *Profile) HasFunctions() bool { + for _, l := range p.Location { + if l.Mapping != nil && !l.Mapping.HasFunctions { + return false + } + } + return true +} + +// HasFileLines determines if all locations in this profile have +// symbolized file and line number information. +func (p *Profile) HasFileLines() bool { + for _, l := range p.Location { + if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) { + return false + } + } + return true +} + +// Unsymbolizable returns true if a mapping points to a binary for which +// locations can't be symbolized in principle, at least now. Examples are +// "[vdso]", [vsyscall]" and some others, see the code. +func (m *Mapping) Unsymbolizable() bool { + name := filepath.Base(m.File) + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") +} + +// Copy makes a fully independent copy of a profile. 
+func (p *Profile) Copy() *Profile { + pp := &Profile{} + if err := unmarshal(serialize(p), pp); err != nil { + panic(err) + } + if err := pp.postDecode(); err != nil { + panic(err) + } + + return pp +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/proto.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/proto.go new file mode 100644 index 0000000000000000000000000000000000000000..a15696ba16f24448c3e81163c6d41b277eec104e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/proto.go @@ -0,0 +1,367 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is a simple protocol buffer encoder and decoder. +// The format is described at +// https://developers.google.com/protocol-buffers/docs/encoding +// +// A protocol message must implement the message interface: +// decoder() []decoder +// encode(*buffer) +// +// The decode method returns a slice indexed by field number that gives the +// function to decode that field. +// The encode method encodes its receiver into the given buffer. +// +// The two methods are simple enough to be implemented by hand rather than +// by using a protocol compiler. +// +// See profile.go for examples of messages implementing this interface. +// +// There is no support for groups, message sets, or "has" bits. 
+ +package profile + +import ( + "errors" + "fmt" +) + +type buffer struct { + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte + tmpLines []Line // temporary storage used while decoding "repeated Line". +} + +type decoder func(*buffer, message) error + +type message interface { + decoder() []decoder + encode(*buffer) +} + +func marshal(m message) []byte { + var b buffer + m.encode(&b) + return b.data +} + +func encodeVarint(b *buffer, x uint64) { + for x >= 128 { + b.data = append(b.data, byte(x)|0x80) + x >>= 7 + } + b.data = append(b.data, byte(x)) +} + +func encodeLength(b *buffer, tag int, len int) { + encodeVarint(b, uint64(tag)<<3|2) + encodeVarint(b, uint64(len)) +} + +func encodeUint64(b *buffer, tag int, x uint64) { + // append varint to b.data + encodeVarint(b, uint64(tag)<<3) + encodeVarint(b, x) +} + +func encodeUint64s(b *buffer, tag int, x []uint64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, u) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeUint64(b, tag, u) + } +} + +func encodeUint64Opt(b *buffer, tag int, x uint64) { + if x == 0 { + return + } + encodeUint64(b, tag, x) +} + +func encodeInt64(b *buffer, tag int, x int64) { + u := uint64(x) + encodeUint64(b, tag, u) +} + +func encodeInt64s(b *buffer, tag int, x []int64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, uint64(u)) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeInt64(b, tag, u) + } +} + +func encodeInt64Opt(b *buffer, tag int, x int64) { + if x == 0 
{ + return + } + encodeInt64(b, tag, x) +} + +func encodeString(b *buffer, tag int, x string) { + encodeLength(b, tag, len(x)) + b.data = append(b.data, x...) +} + +func encodeStrings(b *buffer, tag int, x []string) { + for _, s := range x { + encodeString(b, tag, s) + } +} + +func encodeBool(b *buffer, tag int, x bool) { + if x { + encodeUint64(b, tag, 1) + } else { + encodeUint64(b, tag, 0) + } +} + +func encodeBoolOpt(b *buffer, tag int, x bool) { + if x { + encodeBool(b, tag, x) + } +} + +func encodeMessage(b *buffer, tag int, m message) { + n1 := len(b.data) + m.encode(b) + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) +} + +func unmarshal(data []byte, m message) (err error) { + b := buffer{data: data, typ: 2} + return decodeMessage(&b, m) +} + +func le64(p []byte) uint64 { + return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 +} + +func le32(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func decodeVarint(data []byte) (uint64, []byte, error) { + var u uint64 + for i := 0; ; i++ { + if i >= 10 || i >= len(data) { + return 0, nil, errors.New("bad varint") + } + u |= uint64(data[i]&0x7F) << uint(7*i) + if data[i]&0x80 == 0 { + return u, data[i+1:], nil + } + } +} + +func decodeField(b *buffer, data []byte) ([]byte, error) { + x, data, err := decodeVarint(data) + if err != nil { + return nil, err + } + b.field = int(x >> 3) + b.typ = int(x & 7) + b.data = nil + b.u64 = 0 + switch b.typ { + case 0: + b.u64, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + case 1: + if len(data) < 8 { + return nil, errors.New("not enough data") + } + b.u64 = le64(data[:8]) + data = data[8:] + case 2: + var n uint64 + n, data, err = decodeVarint(data) + if err != nil { + 
return nil, err + } + if n > uint64(len(data)) { + return nil, errors.New("too much data") + } + b.data = data[:n] + data = data[n:] + case 5: + if len(data) < 4 { + return nil, errors.New("not enough data") + } + b.u64 = uint64(le32(data[:4])) + data = data[4:] + default: + return nil, fmt.Errorf("unknown wire type: %d", b.typ) + } + + return data, nil +} + +func checkType(b *buffer, typ int) error { + if b.typ != typ { + return errors.New("type mismatch") + } + return nil +} + +func decodeMessage(b *buffer, m message) error { + if err := checkType(b, 2); err != nil { + return err + } + dec := m.decoder() + data := b.data + for len(data) > 0 { + // pull varint field# + type + var err error + data, err = decodeField(b, data) + if err != nil { + return err + } + if b.field >= len(dec) || dec[b.field] == nil { + continue + } + if err := dec[b.field](b, m); err != nil { + return err + } + } + return nil +} + +func decodeInt64(b *buffer, x *int64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = int64(b.u64) + return nil +} + +func decodeInt64s(b *buffer, x *[]int64) error { + if b.typ == 2 { + // Packed encoding + data := b.data + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + *x = append(*x, int64(u)) + } + return nil + } + var i int64 + if err := decodeInt64(b, &i); err != nil { + return err + } + *x = append(*x, i) + return nil +} + +func decodeUint64(b *buffer, x *uint64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = b.u64 + return nil +} + +func decodeUint64s(b *buffer, x *[]uint64) error { + if b.typ == 2 { + data := b.data + // Packed encoding + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + *x = append(*x, u) + } + return nil + } + var u uint64 + if err := decodeUint64(b, &u); err != nil { + return err + } + *x = append(*x, u) + return nil +} + +func 
decodeString(b *buffer, x *string) error { + if err := checkType(b, 2); err != nil { + return err + } + *x = string(b.data) + return nil +} + +func decodeStrings(b *buffer, x *[]string) error { + var s string + if err := decodeString(b, &s); err != nil { + return err + } + *x = append(*x, s) + return nil +} + +func decodeBool(b *buffer, x *bool) error { + if err := checkType(b, 0); err != nil { + return err + } + if int64(b.u64) == 0 { + *x = false + } else { + *x = true + } + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/prune.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/prune.go new file mode 100644 index 0000000000000000000000000000000000000000..b2f9fd54660d9a55758dc2cd8d05fb27a36fcbc4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/profile/prune.go @@ -0,0 +1,194 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Implements methods to remove frames from profiles. 
+ +package profile + +import ( + "fmt" + "regexp" + "strings" +) + +var ( + reservedNames = []string{"(anonymous namespace)", "operator()"} + bracketRx = func() *regexp.Regexp { + var quotedNames []string + for _, name := range append(reservedNames, "(") { + quotedNames = append(quotedNames, regexp.QuoteMeta(name)) + } + return regexp.MustCompile(strings.Join(quotedNames, "|")) + }() +) + +// simplifyFunc does some primitive simplification of function names. +func simplifyFunc(f string) string { + // Account for leading '.' on the PPC ELF v1 ABI. + funcName := strings.TrimPrefix(f, ".") + // Account for unsimplified names -- try to remove the argument list by trimming + // starting from the first '(', but skipping reserved names that have '('. + for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { + foundReserved := false + for _, res := range reservedNames { + if funcName[ind[0]:ind[1]] == res { + foundReserved = true + break + } + } + if !foundReserved { + funcName = funcName[:ind[0]] + break + } + } + return funcName +} + +// Prune removes all nodes beneath a node matching dropRx, and not +// matching keepRx. If the root node of a Sample matches, the sample +// will have an empty stack. +func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { + prune := make(map[uint64]bool) + pruneBeneath := make(map[uint64]bool) + + // simplifyFunc can be expensive, so cache results. + // Note that the same function name can be encountered many times due + // different lines and addresses in the same function. 
+ pruneCache := map[string]bool{} // Map from function to whether or not to prune + pruneFromHere := func(s string) bool { + if r, ok := pruneCache[s]; ok { + return r + } + funcName := simplifyFunc(s) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + pruneCache[s] = true + return true + } + } + pruneCache[s] = false + return false + } + + for _, loc := range p.Location { + var i int + for i = len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + if pruneFromHere(fn.Name) { + break + } + } + } + + if i >= 0 { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + + // Remove the matching location. + if i == len(loc.Line)-1 { + // Matched the top entry: prune the whole location. + prune[loc.ID] = true + } else { + loc.Line = loc.Line[i+1:] + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the root to the leaves to find the prune location. + // Do not prune frames before the first user frame, to avoid + // pruning everything. + foundUser := false + for i := len(sample.Location) - 1; i >= 0; i-- { + id := sample.Location[i].ID + if !prune[id] && !pruneBeneath[id] { + foundUser = true + continue + } + if !foundUser { + continue + } + if prune[id] { + sample.Location = sample.Location[i+1:] + break + } + if pruneBeneath[id] { + sample.Location = sample.Location[i:] + break + } + } + } +} + +// RemoveUninteresting prunes and elides profiles using built-in +// tables of uninteresting function names. 
+func (p *Profile) RemoveUninteresting() error { + var keep, drop *regexp.Regexp + var err error + + if p.DropFrames != "" { + if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err) + } + if p.KeepFrames != "" { + if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err) + } + } + p.Prune(drop, keep) + } + return nil +} + +// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself. +// +// Please see the example below to understand this method as well as +// the difference from Prune method. +// +// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline. +// +// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A. +// Prune(A, nil) returns [B,C,B,D] by removing A itself. +// +// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom. +// Prune(B, nil) returns [D] because a matching node is found by scanning from the root. +func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + for i := 0; i < len(loc.Line); i++ { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + loc.Line = loc.Line[i:] + break + } + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the bottom leaf to the root to find the prune location. 
+ for i, loc := range sample.Location { + if pruneBeneath[loc.ID] { + sample.Location = sample.Location[i:] + break + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_FLAME_GRAPH_LICENSE b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_FLAME_GRAPH_LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..8dada3edaf50dbc082c9a125058f25def75e625a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_FLAME_GRAPH_LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_LICENSE b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..b0145150fd35f3e325d7ddec6ed3be68c457975e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_LICENSE @@ -0,0 +1,13 @@ +Copyright 2010-2021 Mike Bostock + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. 
diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/README.md b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..eb84b6800784ed3a9420f0aafdfe65924aff8422
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/README.md
@@ -0,0 +1,33 @@
+# Building a customized D3.js bundle
+
+The D3.js version distributed with pprof is customized to only include the
+modules required by pprof.
+
+## Dependencies
+
+- Install [npm](https://www.npmjs.com).
+
+## Building
+
+- Run `update.sh` to:
+  - Download npm package dependencies (declared in `package.json` and `package-lock.json`)
+  - Create a d3.js bundle containing the JavaScript of d3 and d3-flame-graph (by running `webpack`)
+
+This will generate `d3_flame_graph.go`, the minified custom D3.js bundle as Go source code.
+
+# References / Appendix
+
+## D3 Custom Bundle
+
+A demonstration of building a custom D3 4.0 bundle using ES2015 modules and Rollup.
+
+[bl.ocks.org/mbostock/bb09af4c39c79cffcde4](https://bl.ocks.org/mbostock/bb09af4c39c79cffcde4)
+
+## Old version of d3-pprof
+
+A previous version of d3-flame-graph bundled for pprof used Rollup instead of
+Webpack. This has now been migrated directly into this directory.
+ +The repository configuring Rollup was here: + +[github.com/spiermar/d3-pprof](https://github.com/spiermar/d3-pprof) diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/d3_flame_graph.go b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/d3_flame_graph.go new file mode 100644 index 0000000000000000000000000000000000000000..7e2794199510033a422162e534023e8e2e3f337e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/d3_flame_graph.go @@ -0,0 +1,65 @@ +// D3.js is a JavaScript library for manipulating documents based on data. +// https://github.com/d3/d3 +// See D3_LICENSE file for license details + +// d3-flame-graph is a D3.js plugin that produces flame graphs from hierarchical data. +// https://github.com/spiermar/d3-flame-graph +// See D3_FLAME_GRAPH_LICENSE file for license details + +package d3flamegraph + +// JSSource returns the d3 and d3-flame-graph JavaScript bundle +const JSSource = ` + +!function(t,n){if("object"==typeof exports&&"object"==typeof module)module.exports=n();else if("function"==typeof define&&define.amd)define([],n);else{var e=n();for(var r in e)("object"==typeof exports?exports:t)[r]=e[r]}}(self,(function(){return(()=>{"use strict";var t={d:(n,e)=>{for(var r in e)t.o(e,r)&&!t.o(n,r)&&Object.defineProperty(n,r,{enumerable:!0,get:e[r]})},o:(t,n)=>Object.prototype.hasOwnProperty.call(t,n),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},n={};function e(){}function r(t){return null==t?e:function(){return this.querySelector(t)}}function i(t){return null==t?[]:Array.isArray(t)?t:Array.from(t)}function o(){return[]}function u(t){return function(n){return n.matches(t)}}t.r(n),t.d(n,{flamegraph:()=>ji,select:()=>pt});var a=Array.prototype.find;function l(){return 
this.firstElementChild}var s=Array.prototype.filter;function c(){return Array.from(this.children)}function f(t){return new Array(t.length)}function h(t,n){this.ownerDocument=t.ownerDocument,this.namespaceURI=t.namespaceURI,this._next=null,this._parent=t,this.__data__=n}function p(t){return function(){return t}}function d(t,n,e,r,i,o){for(var u,a=0,l=n.length,s=o.length;an?1:t>=n?0:NaN}h.prototype={constructor:h,appendChild:function(t){return this._parent.insertBefore(t,this._next)},insertBefore:function(t,n){return this._parent.insertBefore(t,n)},querySelector:function(t){return this._parent.querySelector(t)},querySelectorAll:function(t){return this._parent.querySelectorAll(t)}};var _="http://www.w3.org/1999/xhtml";const w={svg:"http://www.w3.org/2000/svg",xhtml:_,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};function b(t){var n=t+="",e=n.indexOf(":");return e>=0&&"xmlns"!==(n=t.slice(0,e))&&(t=t.slice(e+1)),w.hasOwnProperty(n)?{space:w[n],local:t}:t}function x(t){return function(){this.removeAttribute(t)}}function M(t){return function(){this.removeAttributeNS(t.space,t.local)}}function A(t,n){return function(){this.setAttribute(t,n)}}function N(t,n){return function(){this.setAttributeNS(t.space,t.local,n)}}function E(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttribute(t):this.setAttribute(t,e)}}function k(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,e)}}function S(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}function C(t){return function(){this.style.removeProperty(t)}}function P(t,n,e){return function(){this.style.setProperty(t,n,e)}}function j(t,n,e){return function(){var r=n.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,e)}}function q(t,n){return 
t.style.getPropertyValue(n)||S(t).getComputedStyle(t,null).getPropertyValue(n)}function O(t){return function(){delete this[t]}}function L(t,n){return function(){this[t]=n}}function T(t,n){return function(){var e=n.apply(this,arguments);null==e?delete this[t]:this[t]=e}}function B(t){return t.trim().split(/^|\s+/)}function D(t){return t.classList||new H(t)}function H(t){this._node=t,this._names=B(t.getAttribute("class")||"")}function R(t,n){for(var e=D(t),r=-1,i=n.length;++r=0&&(n=t.slice(e+1),t=t.slice(0,e)),{type:t,name:n}}))}function ut(t){return function(){var n=this.__on;if(n){for(var e,r=0,i=-1,o=n.length;r=0&&(this._names.splice(n,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var ft=[null];function ht(t,n){this._groups=t,this._parents=n}function pt(t){return"string"==typeof t?new ht([[document.querySelector(t)]],[document.documentElement]):new ht([[t]],ft)}function dt(){}function gt(t){return null==t?dt:function(){return this.querySelector(t)}}function vt(t){return null==t?[]:Array.isArray(t)?t:Array.from(t)}function yt(){return[]}function mt(t){return null==t?yt:function(){return this.querySelectorAll(t)}}function _t(t){return function(){return this.matches(t)}}function wt(t){return function(n){return n.matches(t)}}ht.prototype=function(){return new ht([[document.documentElement]],ft)}.prototype={constructor:ht,select:function(t){"function"!=typeof t&&(t=r(t));for(var n=this._groups,e=n.length,i=new Array(e),o=0;o=E&&(E=N+1);!(A=b[E])&&++E<_;);M._next=A||null}}return(u=new ht(u,r))._enter=a,u._exit=l,u},enter:function(){return new ht(this._enter||this._groups.map(f),this._parents)},exit:function(){return new ht(this._exit||this._groups.map(f),this._parents)},join:function(t,n,e){var r=this.enter(),i=this,o=this.exit();return"function"==typeof 
t?(r=t(r))&&(r=r.selection()):r=r.append(t+""),null!=n&&(i=n(i))&&(i=i.selection()),null==e?o.remove():e(o),r&&i?r.merge(i).order():i},merge:function(t){for(var n=t.selection?t.selection():t,e=this._groups,r=n._groups,i=e.length,o=r.length,u=Math.min(i,o),a=new Array(i),l=0;l=0;)(r=i[o])&&(u&&4^r.compareDocumentPosition(u)&&u.parentNode.insertBefore(r,u),u=r);return this},sort:function(t){function n(n,e){return n&&e?t(n.__data__,e.__data__):!n-!e}t||(t=m);for(var e=this._groups,r=e.length,i=new Array(r),o=0;o1?this.each((null==n?C:"function"==typeof n?j:P)(t,n,null==e?"":e)):q(this.node(),t)},property:function(t,n){return arguments.length>1?this.each((null==n?O:"function"==typeof n?T:L)(t,n)):this.node()[t]},classed:function(t,n){var e=B(t+"");if(arguments.length<2){for(var r=D(this.node()),i=-1,o=e.length;++in?1:t>=n?0:NaN}Et.prototype={constructor:Et,appendChild:function(t){return this._parent.insertBefore(t,this._next)},insertBefore:function(t,n){return this._parent.insertBefore(t,n)},querySelector:function(t){return this._parent.querySelector(t)},querySelectorAll:function(t){return this._parent.querySelectorAll(t)}};var Ot="http://www.w3.org/1999/xhtml";const Lt={svg:"http://www.w3.org/2000/svg",xhtml:Ot,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};function Tt(t){var n=t+="",e=n.indexOf(":");return e>=0&&"xmlns"!==(n=t.slice(0,e))&&(t=t.slice(e+1)),Lt.hasOwnProperty(n)?{space:Lt[n],local:t}:t}function Bt(t){return function(){this.removeAttribute(t)}}function Dt(t){return function(){this.removeAttributeNS(t.space,t.local)}}function Ht(t,n){return function(){this.setAttribute(t,n)}}function Rt(t,n){return function(){this.setAttributeNS(t.space,t.local,n)}}function Vt(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttribute(t):this.setAttribute(t,e)}}function Xt(t,n){return function(){var 
e=n.apply(this,arguments);null==e?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,e)}}function zt(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}function It(t){return function(){this.style.removeProperty(t)}}function $t(t,n,e){return function(){this.style.setProperty(t,n,e)}}function Ut(t,n,e){return function(){var r=n.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,e)}}function Yt(t,n){return t.style.getPropertyValue(n)||zt(t).getComputedStyle(t,null).getPropertyValue(n)}function Ft(t){return function(){delete this[t]}}function Zt(t,n){return function(){this[t]=n}}function Gt(t,n){return function(){var e=n.apply(this,arguments);null==e?delete this[t]:this[t]=e}}function Jt(t){return t.trim().split(/^|\s+/)}function Kt(t){return t.classList||new Qt(t)}function Qt(t){this._node=t,this._names=Jt(t.getAttribute("class")||"")}function Wt(t,n){for(var e=Kt(t),r=-1,i=n.length;++r=0&&(n=t.slice(e+1),t=t.slice(0,e)),{type:t,name:n}}))}function bn(t){return function(){var n=this.__on;if(n){for(var e,r=0,i=-1,o=n.length;r=0&&(this._names.splice(n,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var En=[null];function kn(t,n){this._groups=t,this._parents=n}function Sn(){return new kn([[document.documentElement]],En)}kn.prototype=Sn.prototype={constructor:kn,select:function(t){"function"!=typeof t&&(t=gt(t));for(var n=this._groups,e=n.length,r=new Array(e),i=0;i=b&&(b=w+1);!(_=v[b])&&++b=0;)(r=i[o])&&(u&&4^r.compareDocumentPosition(u)&&u.parentNode.insertBefore(r,u),u=r);return this},sort:function(t){function n(n,e){return n&&e?t(n.__data__,e.__data__):!n-!e}t||(t=qt);for(var e=this._groups,r=e.length,i=new Array(r),o=0;o1?this.each((null==n?It:"function"==typeof n?Ut:$t)(t,n,null==e?"":e)):Yt(this.node(),t)},property:function(t,n){return arguments.length>1?this.each((null==n?Ft:"function"==typeof 
n?Gt:Zt)(t,n)):this.node()[t]},classed:function(t,n){var e=Jt(t+"");if(arguments.length<2){for(var r=Kt(this.node()),i=-1,o=e.length;++i1?r[0]+r.slice(2):r,+t.slice(e+1)]}function qn(t){return(t=jn(Math.abs(t)))?t[1]:NaN}var On,Ln=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Tn(t){if(!(n=Ln.exec(t)))throw new Error("invalid format: "+t);var n;return new Bn({fill:n[1],align:n[2],sign:n[3],symbol:n[4],zero:n[5],width:n[6],comma:n[7],precision:n[8]&&n[8].slice(1),trim:n[9],type:n[10]})}function Bn(t){this.fill=void 0===t.fill?" ":t.fill+"",this.align=void 0===t.align?">":t.align+"",this.sign=void 0===t.sign?"-":t.sign+"",this.symbol=void 0===t.symbol?"":t.symbol+"",this.zero=!!t.zero,this.width=void 0===t.width?void 0:+t.width,this.comma=!!t.comma,this.precision=void 0===t.precision?void 0:+t.precision,this.trim=!!t.trim,this.type=void 0===t.type?"":t.type+""}function Dn(t,n){var e=jn(t,n);if(!e)return t+"";var r=e[0],i=e[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")}Tn.prototype=Bn.prototype,Bn.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type};const Hn={"%":(t,n)=>(100*t).toFixed(n),b:t=>Math.round(t).toString(2),c:t=>t+"",d:function(t){return Math.abs(t=Math.round(t))>=1e21?t.toLocaleString("en").replace(/,/g,""):t.toString(10)},e:(t,n)=>t.toExponential(n),f:(t,n)=>t.toFixed(n),g:(t,n)=>t.toPrecision(n),o:t=>Math.round(t).toString(8),p:(t,n)=>Dn(100*t,n),r:Dn,s:function(t,n){var e=jn(t,n);if(!e)return t+"";var r=e[0],i=e[1],o=i-(On=3*Math.max(-8,Math.min(8,Math.floor(i/3))))+1,u=r.length;return o===u?r:o>u?r+new Array(o-u+1).join("0"):o>0?r.slice(0,o)+"."+r.slice(o):"0."+new 
Array(1-o).join("0")+jn(t,Math.max(0,n+o-1))[0]},X:t=>Math.round(t).toString(16).toUpperCase(),x:t=>Math.round(t).toString(16)};function Rn(t){return t}var Vn,Xn,zn,In=Array.prototype.map,$n=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"];function Un(t,n){return null==t||null==n?NaN:tn?1:t>=n?0:NaN}function Yn(t){t.x0=Math.round(t.x0),t.y0=Math.round(t.y0),t.x1=Math.round(t.x1),t.y1=Math.round(t.y1)}function Fn(t){var n=0,e=t.children,r=e&&e.length;if(r)for(;--r>=0;)n+=e[r].value;else n=1;t.value=n}function Zn(t,n){t instanceof Map?(t=[void 0,t],void 0===n&&(n=Jn)):void 0===n&&(n=Gn);for(var e,r,i,o,u,a=new Wn(t),l=[a];e=l.pop();)if((i=n(e.data))&&(u=(i=Array.from(i)).length))for(e.children=i,o=u-1;o>=0;--o)l.push(r=i[o]=new Wn(i[o])),r.parent=e,r.depth=e.depth+1;return a.eachBefore(Qn)}function Gn(t){return t.children}function Jn(t){return Array.isArray(t)?t[1]:null}function Kn(t){void 0!==t.data.value&&(t.value=t.data.value),t.data=t.data.data}function Qn(t){var n=0;do{t.height=n}while((t=t.parent)&&t.height<++n)}function Wn(t){this.data=t,this.depth=this.height=0,this.parent=null}Vn=function(t){var n,e,r=void 0===t.grouping||void 0===t.thousands?Rn:(n=In.call(t.grouping,Number),e=t.thousands+"",function(t,r){for(var i=t.length,o=[],u=0,a=n[0],l=0;i>0&&a>0&&(l+a+1>r&&(a=Math.max(1,r-l)),o.push(t.substring(i-=a,i+a)),!((l+=a+1)>r));)a=n[u=(u+1)%n.length];return o.reverse().join(e)}),i=void 0===t.currency?"":t.currency[0]+"",o=void 0===t.currency?"":t.currency[1]+"",u=void 0===t.decimal?".":t.decimal+"",a=void 0===t.numerals?Rn:function(t){return function(n){return n.replace(/[0-9]/g,(function(n){return t[+n]}))}}(In.call(t.numerals,String)),l=void 0===t.percent?"%":t.percent+"",s=void 0===t.minus?"−":t.minus+"",c=void 0===t.nan?"NaN":t.nan+"";function f(t){var n=(t=Tn(t)).fill,e=t.align,f=t.sign,h=t.symbol,p=t.zero,d=t.width,g=t.comma,v=t.precision,y=t.trim,m=t.type;"n"===m?(g=!0,m="g"):Hn[m]||(void 
0===v&&(v=12),y=!0,m="g"),(p||"0"===n&&"="===e)&&(p=!0,n="0",e="=");var _="$"===h?i:"#"===h&&/[boxX]/.test(m)?"0"+m.toLowerCase():"",w="$"===h?o:/[%p]/.test(m)?l:"",b=Hn[m],x=/[defgprs%]/.test(m);function M(t){var i,o,l,h=_,M=w;if("c"===m)M=b(t)+M,t="";else{var A=(t=+t)<0||1/t<0;if(t=isNaN(t)?c:b(Math.abs(t),v),y&&(t=function(t){t:for(var n,e=t.length,r=1,i=-1;r0&&(i=0)}return i>0?t.slice(0,i)+t.slice(n+1):t}(t)),A&&0==+t&&"+"!==f&&(A=!1),h=(A?"("===f?f:s:"-"===f||"("===f?"":f)+h,M=("s"===m?$n[8+On/3]:"")+M+(A&&"("===f?")":""),x)for(i=-1,o=t.length;++i(l=t.charCodeAt(i))||l>57){M=(46===l?u+t.slice(i+1):t.slice(i))+M,t=t.slice(0,i);break}}g&&!p&&(t=r(t,1/0));var N=h.length+t.length+M.length,E=N>1)+h+t+M+E.slice(N);break;default:t=E+h+t+M}return a(t)}return v=void 0===v?6:/[gprs]/.test(m)?Math.max(1,Math.min(21,v)):Math.max(0,Math.min(20,v)),M.toString=function(){return t+""},M}return{format:f,formatPrefix:function(t,n){var e=f(((t=Tn(t)).type="f",t)),r=3*Math.max(-8,Math.min(8,Math.floor(qn(n)/3))),i=Math.pow(10,-r),o=$n[8+r/3];return function(t){return e(i*t)+o}}}}({thousands:",",grouping:[3],currency:["$",""]}),Xn=Vn.format,zn=Vn.formatPrefix,Wn.prototype=Zn.prototype={constructor:Wn,count:function(){return this.eachAfter(Fn)},each:function(t,n){let e=-1;for(const r of this)t.call(n,r,++e,this);return this},eachAfter:function(t,n){for(var e,r,i,o=this,u=[o],a=[],l=-1;o=u.pop();)if(a.push(o),e=o.children)for(r=0,i=e.length;r=0;--r)o.push(e[r]);return this},find:function(t,n){let e=-1;for(const r of this)if(t.call(n,r,++e,this))return r},sum:function(t){return this.eachAfter((function(n){for(var e=+t(n.data)||0,r=n.children,i=r&&r.length;--i>=0;)e+=r[i].value;n.value=e}))},sort:function(t){return this.eachBefore((function(n){n.children&&n.children.sort(t)}))},path:function(t){for(var n=this,e=function(t,n){if(t===n)return t;var e=t.ancestors(),r=n.ancestors(),i=null;for(t=e.pop(),n=r.pop();t===n;)i=t,t=e.pop(),n=r.pop();return 
i}(n,t),r=[n];n!==e;)n=n.parent,r.push(n);for(var i=r.length;t!==e;)r.splice(i,0,t),t=t.parent;return r},ancestors:function(){for(var t=this,n=[t];t=t.parent;)n.push(t);return n},descendants:function(){return Array.from(this)},leaves:function(){var t=[];return this.eachBefore((function(n){n.children||t.push(n)})),t},links:function(){var t=this,n=[];return t.each((function(e){e!==t&&n.push({source:e.parent,target:e})})),n},copy:function(){return Zn(this).eachBefore(Kn)},[Symbol.iterator]:function*(){var t,n,e,r,i=this,o=[i];do{for(t=o.reverse(),o=[];i=t.pop();)if(yield i,n=i.children)for(e=0,r=n.length;e=0?(o>=te?10:o>=ne?5:o>=ee?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(o>=te?10:o>=ne?5:o>=ee?2:1)}function ie(t){let n=t,e=t,r=t;function i(t,n,i=0,o=t.length){if(i>>1;r(t[e],n)<0?i=e+1:o=e}while(it(n)-e,e=Un,r=(n,e)=>Un(t(n),e)),{left:i,center:function(t,e,r=0,o=t.length){const u=i(t,e,r,o-1);return u>r&&n(t[u-1],e)>-n(t[u],e)?u-1:u},right:function(t,n,i=0,o=t.length){if(i>>1;r(t[e],n)<=0?i=e+1:o=e}while(i>8&15|n>>4&240,n>>4&15|240&n,(15&n)<<4|15&n,1):8===e?Se(n>>24&255,n>>16&255,n>>8&255,(255&n)/255):4===e?Se(n>>12&15|n>>8&240,n>>8&15|n>>4&240,n>>4&15|240&n,((15&n)<<4|15&n)/255):null):(n=ye.exec(t))?new je(n[1],n[2],n[3],1):(n=me.exec(t))?new je(255*n[1]/100,255*n[2]/100,255*n[3]/100,1):(n=_e.exec(t))?Se(n[1],n[2],n[3],n[4]):(n=we.exec(t))?Se(255*n[1]/100,255*n[2]/100,255*n[3]/100,n[4]):(n=be.exec(t))?Te(n[1],n[2]/100,n[3]/100,1):(n=xe.exec(t))?Te(n[1],n[2]/100,n[3]/100,n[4]):Me.hasOwnProperty(t)?ke(Me[t]):"transparent"===t?new je(NaN,NaN,NaN,0):null}function ke(t){return new je(t>>16&255,t>>8&255,255&t,1)}function Se(t,n,e,r){return r<=0&&(t=n=e=NaN),new je(t,n,e,r)}function Ce(t){return t instanceof ce||(t=Ee(t)),t?new je((t=t.rgb()).r,t.g,t.b,t.opacity):new je}function Pe(t,n,e,r){return 1===arguments.length?Ce(t):new je(t,n,e,null==r?1:r)}function je(t,n,e,r){this.r=+t,this.g=+n,this.b=+e,this.opacity=+r}function 
qe(){return"#"+Le(this.r)+Le(this.g)+Le(this.b)}function Oe(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function Le(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function Te(t,n,e,r){return r<=0?t=n=e=NaN:e<=0||e>=1?t=n=NaN:n<=0&&(t=NaN),new De(t,n,e,r)}function Be(t){if(t instanceof De)return new De(t.h,t.s,t.l,t.opacity);if(t instanceof ce||(t=Ee(t)),!t)return new De;if(t instanceof De)return t;var n=(t=t.rgb()).r/255,e=t.g/255,r=t.b/255,i=Math.min(n,e,r),o=Math.max(n,e,r),u=NaN,a=o-i,l=(o+i)/2;return a?(u=n===o?(e-r)/a+6*(e0&&l<1?0:u,new De(u,a,l,t.opacity)}function De(t,n,e,r){this.h=+t,this.s=+n,this.l=+e,this.opacity=+r}function He(t,n,e){return 255*(t<60?n+(e-n)*t/60:t<180?e:t<240?n+(e-n)*(240-t)/60:n)}function Re(t,n,e,r,i){var o=t*t,u=o*t;return((1-3*t+3*o-u)*n+(4-6*o+3*u)*e+(1+3*t+3*o-3*u)*r+u*i)/6}function Ve(t){return function(){return t}}function Xe(t,n){var e=n-t;return e?function(t,n){return function(e){return t+e*n}}(t,e):Ve(isNaN(t)?n:t)}le(ce,Ee,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:Ae,formatHex:Ae,formatHsl:function(){return Be(this).formatHsl()},formatRgb:Ne,toString:Ne}),le(je,Pe,se(ce,{brighter:function(t){return t=null==t?he:Math.pow(he,t),new je(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?fe:Math.pow(fe,t),new je(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:qe,formatHex:qe,formatRgb:Oe,toString:Oe})),le(De,(function(t,n,e,r){return 1===arguments.length?Be(t):new 
De(t,n,e,null==r?1:r)}),se(ce,{brighter:function(t){return t=null==t?he:Math.pow(he,t),new De(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?fe:Math.pow(fe,t),new De(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),n=isNaN(t)||isNaN(this.s)?0:this.s,e=this.l,r=e+(e<.5?e:1-e)*n,i=2*e-r;return new je(He(t>=240?t-240:t+120,i,r),He(t,i,r),He(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));const ze=function t(n){var e=function(t){return 1==(t=+t)?Xe:function(n,e){return e-n?function(t,n,e){return t=Math.pow(t,e),n=Math.pow(n,e)-t,e=1/e,function(r){return Math.pow(t+r*n,e)}}(n,e,t):Ve(isNaN(n)?e:n)}}(n);function r(t,n){var r=e((t=Pe(t)).r,(n=Pe(n)).r),i=e(t.g,n.g),o=e(t.b,n.b),u=Xe(t.opacity,n.opacity);return function(n){return t.r=r(n),t.g=i(n),t.b=o(n),t.opacity=u(n),t+""}}return r.gamma=t,r}(1);function Ie(t){return function(n){var e,r,i=n.length,o=new Array(i),u=new Array(i),a=new Array(i);for(e=0;e=1?(e=1,n-1):Math.floor(e*n),i=t[r],o=t[r+1],u=r>0?t[r-1]:2*i-o,a=ro&&(i=n.slice(o,i),a[u]?a[u]+=i:a[++u]=i),(e=e[0])===(r=r[0])?a[u]?a[u]+=r:a[++u]=r:(a[++u]=null,l.push({i:u,x:Ye(e,r)})),o=Ge.lastIndex;return on&&(e=t,t=n,n=e),s=function(e){return Math.max(t,Math.min(n,e))}),r=l>2?or:ir,i=o=null,f}function f(n){return null==n||isNaN(n=+n)?e:(i||(i=r(u.map(t),a,l)))(t(s(n)))}return f.invert=function(e){return s(n((o||(o=r(a,u.map(t),Ye)))(e)))},f.domain=function(t){return arguments.length?(u=Array.from(t,tr),c()):u.slice()},f.range=function(t){return arguments.length?(a=Array.from(t),c()):a.slice()},f.rangeRound=function(t){return a=Array.from(t),l=We,c()},f.clamp=function(t){return 
arguments.length?(s=!!t||er,c()):s!==er},f.interpolate=function(t){return arguments.length?(l=t,c()):l},f.unknown=function(t){return arguments.length?(e=t,f):e},function(e,r){return t=e,n=r,c()}}()(er,er)}function lr(t,n){switch(arguments.length){case 0:break;case 1:this.range(t);break;default:this.range(n).domain(t)}return this}function sr(t){var n=t.domain;return t.ticks=function(t){var e=n();return function(t,n,e){var r,i,o,u,a=-1;if(e=+e,(t=+t)==(n=+n)&&e>0)return[t];if((r=n0){let e=Math.round(t/u),r=Math.round(n/u);for(e*un&&--r,o=new Array(i=r-e+1);++an&&--r,o=new Array(i=r-e+1);++a=te?i*=10:o>=ne?i*=5:o>=ee&&(i*=2),n0;){if((i=re(l,s,e))===r)return o[u]=l,o[a]=s,n(o);if(i>0)l=Math.floor(l/i)*i,s=Math.ceil(s/i)*i;else{if(!(i<0))break;l=Math.ceil(l*i)/i,s=Math.floor(s*i)/i}r=i}return t},t}function cr(){var t=ar();return t.copy=function(){return ur(t,cr())},lr.apply(t,arguments),sr(t)}function fr(t){return((t*=2)<=1?t*t*t:(t-=2)*t*t+2)/2}var hr={value:()=>{}};function pr(){for(var t,n=0,e=arguments.length,r={};n=0&&(e=t.slice(r+1),t=t.slice(0,r)),t&&!n.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:e}}))}function vr(t,n){for(var e,r=0,i=t.length;r0)for(var e,r,i=new Array(e),o=0;o=0&&n._call.call(null,t),n=n._next;--br}()}finally{br=0,function(){for(var t,n,e=_r,r=1/0;e;)e._call?(r>e._time&&(r=e._time),t=e,e=e._next):(n=e._next,e._next=null,e=t?t._next=n:_r=n);wr=t,Tr(r)}(),Nr=0}}function Lr(){var t=kr.now(),n=t-Ar;n>1e3&&(Er-=n,Ar=t)}function Tr(t){br||(xr&&(xr=clearTimeout(xr)),t-Nr>24?(t<1/0&&(xr=setTimeout(Or,t-kr.now()-Er)),Mr&&(Mr=clearInterval(Mr))):(Mr||(Ar=kr.now(),Mr=setInterval(Lr,1e3)),br=1,Sr(Or)))}function Br(t,n,e){var r=new jr;return n=null==n?0:+n,r.restart((function(e){r.stop(),t(e+n)}),n,e),r}jr.prototype=qr.prototype={constructor:jr,restart:function(t,n,e){if("function"!=typeof t)throw new TypeError("callback is not a 
function");e=(null==e?Cr():+e)+(null==n?0:+n),this._next||wr===this||(wr?wr._next=this:_r=this,wr=this),this._call=t,this._time=e,Tr()},stop:function(){this._call&&(this._call=null,this._time=1/0,Tr())}};var Dr=mr("start","end","cancel","interrupt"),Hr=[];function Rr(t,n,e,r,i,o){var u=t.__transition;if(u){if(e in u)return}else t.__transition={};!function(t,n,e){var r,i=t.__transition;function o(l){var s,c,f,h;if(1!==e.state)return a();for(s in i)if((h=i[s]).name===e.name){if(3===h.state)return Br(o);4===h.state?(h.state=6,h.timer.stop(),h.on.call("interrupt",t,t.__data__,h.index,h.group),delete i[s]):+s0)throw new Error("too late; already scheduled");return e}function Xr(t,n){var e=zr(t,n);if(e.state>3)throw new Error("too late; already running");return e}function zr(t,n){var e=t.__transition;if(!e||!(e=e[n]))throw new Error("transition not found");return e}var Ir,$r,Ur,Yr,Fr=180/Math.PI,Zr={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1};function Gr(t,n,e,r,i,o){var u,a,l;return(u=Math.sqrt(t*t+n*n))&&(t/=u,n/=u),(l=t*e+n*r)&&(e-=t*l,r-=n*l),(a=Math.sqrt(e*e+r*r))&&(e/=a,r/=a,l/=a),t*r180?n+=360:n-t>180&&(t+=360),o.push({i:e.push(i(e)+"rotate(",null,r)-2,x:Ye(t,n)})):n&&e.push(i(e)+"rotate("+n+r)}(o.rotate,u.rotate,a,l),function(t,n,e,o){t!==n?o.push({i:e.push(i(e)+"skewX(",null,r)-2,x:Ye(t,n)}):n&&e.push(i(e)+"skewX("+n+r)}(o.skewX,u.skewX,a,l),function(t,n,e,r,o,u){if(t!==e||n!==r){var a=o.push(i(o)+"scale(",null,",",null,")");u.push({i:a-4,x:Ye(t,e)},{i:a-2,x:Ye(n,r)})}else 1===e&&1===r||o.push(i(o)+"scale("+e+","+r+")")}(o.scaleX,o.scaleY,u.scaleX,u.scaleY,a,l),o=u=null,function(t){for(var n,e=-1,r=l.length;++e=0&&(t=t.slice(0,n)),!t||"start"===t}))}(n)?Vr:Xr;return function(){var u=o(this,t),a=u.on;a!==r&&(i=(r=a).copy()).on(n,e),u.on=i}}var _i=Cn.prototype.constructor;function wi(t){return function(){this.style.removeProperty(t)}}function bi(t,n,e){return function(r){this.style.setProperty(t,n.call(this,r),e)}}function xi(t,n,e){var 
r,i;function o(){var o=n.apply(this,arguments);return o!==i&&(r=(i=o)&&bi(t,o,e)),r}return o._value=n,o}function Mi(t){return function(n){this.textContent=t.call(this,n)}}function Ai(t){var n,e;function r(){var r=t.apply(this,arguments);return r!==e&&(n=(e=r)&&Mi(r)),n}return r._value=t,r}var Ni=0;function Ei(t,n,e,r){this._groups=t,this._parents=n,this._name=e,this._id=r}function ki(){return++Ni}var Si=Cn.prototype;Ei.prototype=function(t){return Cn().transition(t)}.prototype={constructor:Ei,select:function(t){var n=this._name,e=this._id;"function"!=typeof t&&(t=gt(t));for(var r=this._groups,i=r.length,o=new Array(i),u=0;u{p&&(p.textContent="search: "+n+" of "+e+" total samples ( "+Xn(".3f")(n/e*100,3)+"%)")},d()};const k=E;let S=(t,n,e=!1)=>{if(!n)return!1;let r=b(t);e&&(n=n.toLowerCase(),r=r.toLowerCase());const i=new RegExp(n);return void 0!==r&&r&&r.match(i)};const C=S;let P=function(t){p&&(t?p.textContent=t:"function"==typeof d?d():p.textContent="")};const j=P;let q=function(t){return b(t)+" ("+Xn(".3f")(100*(t.x1-t.x0),3)+"%, "+x(t)+" samples)"},O=function(t){return t.highlight?"#E600E6":function(t,n){let e=w||"warm";w||void 0===n||""===n||(e="red",void 0!==t&&t&&t.match(/::/)&&(e="yellow"),"kernel"===n?e="orange":"jit"===n?e="green":"inlined"===n&&(e="aqua"));const r=function(t){let n=0;if(t){const e=t.split("` + "`" + `");e.length>1&&(t=e[e.length-1]),n=function(t){let n=0,e=0,r=1;if(t){for(let i=0;i6);i++)n+=r*(t.charCodeAt(i)%10),e+=9*r,r*=.7;e>0&&(n/=e)}return n}(t=t.split("(")[0])}return n}(t);return function(t,n){let 
e,r,i;return"red"===t?(e=200+Math.round(55*n),r=50+Math.round(80*n),i=r):"orange"===t?(e=190+Math.round(65*n),r=90+Math.round(65*n),i=0):"yellow"===t?(e=175+Math.round(55*n),r=e,i=50+Math.round(20*n)):"green"===t?(e=50+Math.round(60*n),r=200+Math.round(55*n),i=e):"pastelgreen"===t?(e=163+Math.round(75*n),r=195+Math.round(49*n),i=72+Math.round(149*n)):"blue"===t?(e=91+Math.round(126*n),r=156+Math.round(76*n),i=221+Math.round(26*n)):"aqua"===t?(e=50+Math.round(60*n),r=165+Math.round(55*n),i=r):"cold"===t?(e=0+Math.round(55*(1-n)),r=0+Math.round(230*(1-n)),i=200+Math.round(55*n)):(e=200+Math.round(55*n),r=0+Math.round(230*(1-n)),i=0+Math.round(55*(1-n))),"rgb("+e+","+r+","+i+")"}(e,r)}(b(t),A(t))};const L=O;function T(t){t.data.fade=!1,t.data.hide=!1,t.children&&t.children.forEach(T)}function B(t){t.parent&&(t.parent.data.fade=!0,B(t.parent))}function D(t){if(i&&i.hide(),function(t){let n,e,r,i=t,o=i.parent;for(;o;){for(n=o.children,e=n.length;e--;)r=n[e],r!==i&&(r.data.hide=!0);i=o,o=i.parent}}(t),T(t),B(t),z(),y){const n=Pn(this).select("svg")._groups[0][0].parentNode.offsetTop,r=(window.innerHeight-n)/e,i=(t.height-r+10)*e;window.scrollTo({top:n+i,left:0,behavior:"smooth"})}"function"==typeof c&&c(t)}function H(t,n){if(t.id===n)return t;{const e=M(t);if(e)for(let t=0;t0){const r=t/(n.x1-n.x0);e=e.filter((function(t){return(t.x1-t.x0)*r>h}))}return e}(r),y=Pn(this).select("svg");y.attr("width",t);let _=y.selectAll("g").data(g,(function(t){return t.id}));if(!n||v){const t=Math.max.apply(null,g.map((function(t){return t.depth})));n=(t+3)*e,n{D(n)})),_.exit().remove(),_.on("mouseover",(function(t,n){i&&i.show(n,this),P(q(n)),"function"==typeof f&&f(n)})).on("mouseout",(function(){i&&i.hide(),P(null)}))}))}function I(t,n){n.forEach((function(n){const e=t.find((function(t){return t.name===n.name}));e?(e.value+=n.value,n.children&&(e.children||(e.children=[]),I(e.children,n.children))):t.push(n)}))}function $(t){let n,e,r,i,o,u,a,l;const s=[],c=[],f=[],h=!g;let 
p=t.data;for(p.hide?(t.value=0,e=t.children,e&&f.push(e)):(t.value=p.fade?0:x(p),s.push(t));n=s.pop();)if(e=n.children,e&&(o=e.length)){for(i=0;o--;)a=e[o],p=a.data,p.hide?(a.value=0,r=a.children,r&&f.push(r)):(p.fade?a.value=0:(l=x(p),a.value=l,i+=l),s.push(a));h&&n.value&&(n.value-=i),c.push(e)}for(o=c.length;o--;){for(e=c[o],i=0,u=e.length;u--;)i+=e[u].value;e[0].parent.value+=i}for(;f.length;)for(e=f.pop(),u=e.length;u--;)a=e[u],a.value=0,r=a.children,r&&f.push(r)}function U(){r.datum((t=>{if("Node"!==t.constructor.name){const n=Zn(t,M);return function(t){let n=0;!function(t,n){n(t);let e=t.children;if(e){const t=[e];let r,i,o;for(;t.length;)for(e=t.pop(),r=e.length;r--;)i=e[r],n(i),o=i.children,o&&t.push(o)}}(t,(function(t){t.id=n++}))}(n),$(n),n.originalValue=n.value,_&&n.eachAfter((t=>{let n=N(t);const e=t.children;let r=e&&e.length;for(;--r>=0;)n+=e[r].delta;t.delta=n})),n}}))}function Y(e){if(!arguments.length)return Y;r=e,U(),r.each((function(e){if(0===Pn(this).select("svg").size()){const e=Pn(this).append("svg:svg").attr("width",t).attr("class","partition d3-flame-graph");n&&(n(I([n.data],[t]),n.data))),U(),z(),Y):Y},Y.update=function(t){return r?(t&&(r.datum(t),U()),z(),Y):Y},Y.destroy=function(){return r?(i&&(i.hide(),"function"==typeof i.destroy&&i.destroy()),r.selectAll("svg").remove(),Y):Y},Y.setColorMapper=function(t){return arguments.length?(O=n=>{const e=L(n);return t(n,e)},Y):(O=L,Y)},Y.color=Y.setColorMapper,Y.setColorHue=function(t){return arguments.length?(w=t,Y):(w=null,Y)},Y.minFrameSize=function(t){return arguments.length?(h=t,Y):h},Y.setDetailsElement=function(t){return arguments.length?(p=t,Y):p},Y.details=Y.setDetailsElement,Y.selfValue=function(t){return arguments.length?(g=t,Y):g},Y.resetHeightOnZoom=function(t){return arguments.length?(v=t,Y):v},Y.scrollOnZoom=function(t){return arguments.length?(y=t,Y):y},Y.getName=function(t){return arguments.length?(b=t,Y):b},Y.getValue=function(t){return 
arguments.length?(x=t,Y):x},Y.getChildren=function(t){return arguments.length?(M=t,Y):M},Y.getLibtype=function(t){return arguments.length?(A=t,Y):A},Y.getDelta=function(t){return arguments.length?(N=t,Y):N},Y.setSearchHandler=function(t){return arguments.length?(E=t,Y):(E=k,Y)},Y.setDetailsHandler=function(t){return arguments.length?(P=t,Y):(P=j,Y)},Y.setSearchMatch=function(t){return arguments.length?(S=t,Y):(S=C,Y)},Y}return Cn.prototype.interrupt=function(t){return this.each((function(){!function(t,n){var e,r,i,o=t.__transition,u=!0;if(o){for(i in n=null==n?null:n+"",o)(e=o[i]).name===n?(r=e.state>2&&e.state<5,e.state=6,e.timer.stop(),e.on.call(r?"interrupt":"cancel",t,t.__data__,e.index,e.group),delete o[i]):u=!1;u&&delete t.__transition}}(this,t)}))},Cn.prototype.transition=function(t){var n,e;t instanceof Ei?(n=t._id,t=t._name):(n=ki(),(e=Ci).time=Cr(),t=null==t?null:t+"");for(var r=this._groups,i=r.length,o=0;o d3_flame_graph.go +// D3.js is a JavaScript library for manipulating documents based on data. +// https://github.com/d3/d3 +// See D3_LICENSE file for license details + +// d3-flame-graph is a D3.js plugin that produces flame graphs from hierarchical data. 
+// https://github.com/spiermar/d3-flame-graph +// See D3_FLAME_GRAPH_LICENSE file for license details + +package d3flamegraph + +// JSSource returns the d3 and d3-flame-graph JavaScript bundle +const JSSource = \` + +$d3_js +\` + +// CSSSource returns the $D3FLAMEGRAPH_CSS file +const CSSSource = \` +$d3_css +\` + +EOF + gofmt -w d3_flame_graph.go +} + +get_licenses() { + cp node_modules/d3-selection/LICENSE D3_LICENSE + cp node_modules/d3-flame-graph/LICENSE D3_FLAME_GRAPH_LICENSE +} + +get_licenses +generate_d3_flame_graph_go diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/webpack.config.js b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/webpack.config.js new file mode 100644 index 0000000000000000000000000000000000000000..71239d9e9684a84970d19719bc91f2f8c2aebb8c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/webpack.config.js @@ -0,0 +1,13 @@ +// Minimal webpack config to package a minified JS bundle (including +// dependencies) for execution in a