diff --git a/platform/dbops/binaries/go/go/src/net/ip_test.go b/platform/dbops/binaries/go/go/src/net/ip_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..acc2310be164e90510f71a936f63b8f4a0aba4a7
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/net/ip_test.go
@@ -0,0 +1,782 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+	"bytes"
+	"math/rand"
+	"reflect"
+	"runtime"
+	"testing"
+)
+
+var parseIPTests = []struct {
+	in  string
+	out IP
+}{
+	{"127.0.1.2", IPv4(127, 0, 1, 2)},
+	{"127.0.0.1", IPv4(127, 0, 0, 1)},
+	{"::ffff:127.1.2.3", IPv4(127, 1, 2, 3)},
+	{"::ffff:7f01:0203", IPv4(127, 1, 2, 3)},
+	{"0:0:0:0:0000:ffff:127.1.2.3", IPv4(127, 1, 2, 3)},
+	{"0:0:0:0:000000:ffff:127.1.2.3", nil},
+	{"0:0:0:0::ffff:127.1.2.3", IPv4(127, 1, 2, 3)},
+
+	{"2001:4860:0:2001::68", IP{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68}},
+	{"2001:4860:0000:2001:0000:0000:0000:0068", IP{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68}},
+
+	{"-0.0.0.0", nil},
+	{"0.-1.0.0", nil},
+	{"0.0.-2.0", nil},
+	{"0.0.0.-3", nil},
+	{"127.0.0.256", nil},
+	{"abc", nil},
+	{"123:", nil},
+	{"fe80::1%lo0", nil},
+	{"fe80::1%911", nil},
+	{"", nil},
+	{"a1:a2:a3:a4::b1:b2:b3:b4", nil}, // Issue 6628
+	{"127.001.002.003", nil},
+	{"::ffff:127.001.002.003", nil},
+	{"123.000.000.000", nil},
+	{"1.2..4", nil},
+	{"0123.0.0.1", nil},
+}
+
+func TestParseIP(t *testing.T) {
+	for _, tt := range parseIPTests {
+		if out := ParseIP(tt.in); !reflect.DeepEqual(out, tt.out) {
+			t.Errorf("ParseIP(%q) = %v, want %v", tt.in, out, tt.out)
+		}
+		if tt.in == "" {
+			// Tested in TestMarshalEmptyIP below.
+			continue
+		}
+		var out IP
+		if err := out.UnmarshalText([]byte(tt.in)); !reflect.DeepEqual(out, tt.out) || (tt.out == nil) != (err != nil) {
+			t.Errorf("IP.UnmarshalText(%q) = %v, %v, want %v", tt.in, out, err, tt.out)
+		}
+	}
+}
+
+func TestLookupWithIP(t *testing.T) {
+	_, err := LookupIP("")
+	if err == nil {
+		t.Errorf(`LookupIP("") succeeded, should fail`)
+	}
+	_, err = LookupHost("")
+	if err == nil {
+		t.Errorf(`LookupHost("") succeeded, should fail`)
+	}
+
+	// Test that LookupHost and LookupIP, which normally
+	// expect host names, work with IP addresses.
+	for _, tt := range parseIPTests {
+		if tt.out != nil {
+			addrs, err := LookupHost(tt.in)
+			if len(addrs) != 1 || addrs[0] != tt.in || err != nil {
+				t.Errorf("LookupHost(%q) = %v, %v, want %v, nil", tt.in, addrs, err, []string{tt.in})
+			}
+		} else if !testing.Short() {
+			// We can't control what the host resolver does; if it can resolve, say,
+			// 127.0.0.256 or fe80::1%911 or a host named 'abc', who are we to judge?
+			// Warn about these discrepancies but don't fail the test.
+			addrs, err := LookupHost(tt.in)
+			if err == nil {
+				t.Logf("warning: LookupHost(%q) = %v, want error", tt.in, addrs)
+			}
+		}
+
+		if tt.out != nil {
+			ips, err := LookupIP(tt.in)
+			if len(ips) != 1 || !reflect.DeepEqual(ips[0], tt.out) || err != nil {
+				t.Errorf("LookupIP(%q) = %v, %v, want %v, nil", tt.in, ips, err, []IP{tt.out})
+			}
+		} else if !testing.Short() {
+			ips, err := LookupIP(tt.in)
+			// We can't control what the host resolver does. See above.
+			if err == nil {
+				t.Logf("warning: LookupIP(%q) = %v, want error", tt.in, ips)
+			}
+		}
+	}
+}
+
+func BenchmarkParseIP(b *testing.B) {
+	testHookUninstaller.Do(uninstallTestHooks)
+
+	for i := 0; i < b.N; i++ {
+		for _, tt := range parseIPTests {
+			ParseIP(tt.in)
+		}
+	}
+}
+
+func BenchmarkParseIPValidIPv4(b *testing.B) {
+	testHookUninstaller.Do(uninstallTestHooks)
+
+	for i := 0; i < b.N; i++ {
+		ParseIP("192.0.2.1")
+	}
+}
+
+func BenchmarkParseIPValidIPv6(b *testing.B) {
+	testHookUninstaller.Do(uninstallTestHooks)
+
+	for i := 0; i < b.N; i++ {
+		ParseIP("2001:DB8::1")
+	}
+}
+
+// Issue 6339
+func TestMarshalEmptyIP(t *testing.T) {
+	for _, in := range [][]byte{nil, []byte("")} {
+		var out = IP{1, 2, 3, 4}
+		if err := out.UnmarshalText(in); err != nil || out != nil {
+			t.Errorf("UnmarshalText(%v) = %v, %v; want nil, nil", in, out, err)
+		}
+	}
+	var ip IP
+	got, err := ip.MarshalText()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(got, []byte("")) {
+		t.Errorf(`got %#v, want []byte("")`, got)
+	}
+}
+
+var ipStringTests = []*struct {
+	in  IP     // see RFC 791 and RFC 4291
+	str string // see RFC 791, RFC 4291 and RFC 5952
+	byt []byte
+	error
+}{
+	// IPv4 address
+	{
+		IP{192, 0, 2, 1},
+		"192.0.2.1",
+		[]byte("192.0.2.1"),
+		nil,
+	},
+	{
+		IP{0, 0, 0, 0},
+		"0.0.0.0",
+		[]byte("0.0.0.0"),
+		nil,
+	},
+
+	// IPv4-mapped IPv6 address
+	{
+		IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 0, 2, 1},
+		"192.0.2.1",
+		[]byte("192.0.2.1"),
+		nil,
+	},
+	{
+		IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0, 0, 0, 0},
+		"0.0.0.0",
+		[]byte("0.0.0.0"),
+		nil,
+	},
+
+	// IPv6 address
+	{
+		IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0, 0x1, 0x23, 0, 0x12, 0, 0x1},
+		"2001:db8::123:12:1",
+		[]byte("2001:db8::123:12:1"),
+		nil,
+	},
+	{
+		IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x1},
+		"2001:db8::1",
+		[]byte("2001:db8::1"),
+		nil,
+	},
+	{
+		IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0x1, 0, 0, 0, 0x1, 0, 0, 0, 0x1},
+		"2001:db8:0:1:0:1:0:1",
+		[]byte("2001:db8:0:1:0:1:0:1"),
+		nil,
+	},
+	{
+		IP{0x20, 0x1, 0xd, 0xb8, 0, 0x1, 0, 0, 0, 0x1, 0, 0, 0, 0x1, 0, 0},
+		"2001:db8:1:0:1:0:1:0",
+		[]byte("2001:db8:1:0:1:0:1:0"),
+		nil,
+	},
+	{
+		IP{0x20, 0x1, 0, 0, 0, 0, 0, 0, 0, 0x1, 0, 0, 0, 0, 0, 0x1},
+		"2001::1:0:0:1",
+		[]byte("2001::1:0:0:1"),
+		nil,
+	},
+	{
+		IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0x1, 0, 0, 0, 0, 0, 0},
+		"2001:db8:0:0:1::",
+		[]byte("2001:db8:0:0:1::"),
+		nil,
+	},
+	{
+		IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0x1, 0, 0, 0, 0, 0, 0x1},
+		"2001:db8::1:0:0:1",
+		[]byte("2001:db8::1:0:0:1"),
+		nil,
+	},
+	{
+		IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0xa, 0, 0xb, 0, 0xc, 0, 0xd},
+		"2001:db8::a:b:c:d",
+		[]byte("2001:db8::a:b:c:d"),
+		nil,
+	},
+	{
+		IPv6unspecified,
+		"::",
+		[]byte("::"),
+		nil,
+	},
+
+	// IP wildcard equivalent address in Dial/Listen API
+	{
+		nil,
+		"<nil>",
+		nil,
+		nil,
+	},
+
+	// Opaque byte sequence
+	{
+		IP{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef},
+		"?0123456789abcdef",
+		nil,
+		&AddrError{Err: "invalid IP address", Addr: "0123456789abcdef"},
+	},
+}
+
+func TestIPString(t *testing.T) {
+	for _, tt := range ipStringTests {
+		if out := tt.in.String(); out != tt.str {
+			t.Errorf("IP.String(%v) = %q, want %q", tt.in, out, tt.str)
+		}
+		if out, err := tt.in.MarshalText(); !bytes.Equal(out, tt.byt) || !reflect.DeepEqual(err, tt.error) {
+			t.Errorf("IP.MarshalText(%v) = %v, %v, want %v, %v", tt.in, out, err, tt.byt, tt.error)
+		}
+	}
+}
+
+var sink string
+
+func BenchmarkIPString(b *testing.B) {
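+	// Editor's note: the String results in the helper below are assigned to
+	// the package-level sink so the compiler cannot treat the calls as dead
+	// code and elide them, which would skew the benchmark.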
+	testHookUninstaller.Do(uninstallTestHooks)
+
+	b.Run("IPv4", func(b *testing.B) {
+		benchmarkIPString(b, IPv4len)
+	})
+
+	b.Run("IPv6", func(b *testing.B) {
+		benchmarkIPString(b, IPv6len)
+	})
+}
+
+func benchmarkIPString(b *testing.B, size int) {
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		for _, tt := range ipStringTests {
+			if tt.in != nil && len(tt.in) == size {
+				sink = tt.in.String()
+			}
+		}
+	}
+}
+
+var ipMaskTests = []struct {
+	in   IP
+	mask IPMask
+	out  IP
+}{
+	{IPv4(192, 168, 1, 127), IPv4Mask(255, 255, 255, 128), IPv4(192, 168, 1, 0)},
+	{IPv4(192, 168, 1, 127), IPMask(ParseIP("255.255.255.192")), IPv4(192, 168, 1, 64)},
+	{IPv4(192, 168, 1, 127), IPMask(ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffe0")), IPv4(192, 168, 1, 96)},
+	{IPv4(192, 168, 1, 127), IPv4Mask(255, 0, 255, 0), IPv4(192, 0, 1, 0)},
+	{ParseIP("2001:db8::1"), IPMask(ParseIP("ffff:ff80::")), ParseIP("2001:d80::")},
+	{ParseIP("2001:db8::1"), IPMask(ParseIP("f0f0:0f0f::")), ParseIP("2000:d08::")},
+}
+
+func TestIPMask(t *testing.T) {
+	for _, tt := range ipMaskTests {
+		if out := tt.in.Mask(tt.mask); out == nil || !tt.out.Equal(out) {
+			t.Errorf("IP(%v).Mask(%v) = %v, want %v", tt.in, tt.mask, out, tt.out)
+		}
+	}
+}
+
+var ipMaskStringTests = []struct {
+	in  IPMask
+	out string
+}{
+	{IPv4Mask(255, 255, 255, 240), "fffffff0"},
+	{IPv4Mask(255, 0, 128, 0), "ff008000"},
+	{IPMask(ParseIP("ffff:ff80::")), "ffffff80000000000000000000000000"},
+	{IPMask(ParseIP("ef00:ff80::cafe:0")), "ef00ff800000000000000000cafe0000"},
+	{nil, "<nil>"},
+}
+
+func TestIPMaskString(t *testing.T) {
+	for _, tt := range ipMaskStringTests {
+		if out := tt.in.String(); out != tt.out {
+			t.Errorf("IPMask.String(%v) = %q, want %q", tt.in, out, tt.out)
+		}
+	}
+}
+
+func BenchmarkIPMaskString(b *testing.B) {
+	testHookUninstaller.Do(uninstallTestHooks)
+
+	for i := 0; i < b.N; i++ {
+		for _, tt := range ipMaskStringTests {
+			sink = tt.in.String()
+		}
+	}
+}
+
+var parseCIDRTests = []struct {
+	in  string
+	ip  IP
+	net *IPNet
+	err error
+}{
+	{"135.104.0.0/32", IPv4(135, 104, 0, 0), &IPNet{IP: IPv4(135, 104, 0, 0), Mask: IPv4Mask(255, 255, 255, 255)}, nil},
+	{"0.0.0.0/24", IPv4(0, 0, 0, 0), &IPNet{IP: IPv4(0, 0, 0, 0), Mask: IPv4Mask(255, 255, 255, 0)}, nil},
+	{"135.104.0.0/24", IPv4(135, 104, 0, 0), &IPNet{IP: IPv4(135, 104, 0, 0), Mask: IPv4Mask(255, 255, 255, 0)}, nil},
+	{"135.104.0.1/32", IPv4(135, 104, 0, 1), &IPNet{IP: IPv4(135, 104, 0, 1), Mask: IPv4Mask(255, 255, 255, 255)}, nil},
+	{"135.104.0.1/24", IPv4(135, 104, 0, 1), &IPNet{IP: IPv4(135, 104, 0, 0), Mask: IPv4Mask(255, 255, 255, 0)}, nil},
+	{"::1/128", ParseIP("::1"), &IPNet{IP: ParseIP("::1"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"))}, nil},
+	{"abcd:2345::/127", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe"))}, nil},
+	{"abcd:2345::/65", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff:8000::"))}, nil},
+	{"abcd:2345::/64", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff::"))}, nil},
+	{"abcd:2345::/63", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:fffe::"))}, nil},
+	{"abcd:2345::/33", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:8000::"))}, nil},
+	{"abcd:2345::/32", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask:
IPMask(ParseIP("ffff:ffff::"))}, nil}, + {"abcd:2344::/31", ParseIP("abcd:2344::"), &IPNet{IP: ParseIP("abcd:2344::"), Mask: IPMask(ParseIP("ffff:fffe::"))}, nil}, + {"abcd:2300::/24", ParseIP("abcd:2300::"), &IPNet{IP: ParseIP("abcd:2300::"), Mask: IPMask(ParseIP("ffff:ff00::"))}, nil}, + {"abcd:2345::/24", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2300::"), Mask: IPMask(ParseIP("ffff:ff00::"))}, nil}, + {"2001:DB8::/48", ParseIP("2001:DB8::"), &IPNet{IP: ParseIP("2001:DB8::"), Mask: IPMask(ParseIP("ffff:ffff:ffff::"))}, nil}, + {"2001:DB8::1/48", ParseIP("2001:DB8::1"), &IPNet{IP: ParseIP("2001:DB8::"), Mask: IPMask(ParseIP("ffff:ffff:ffff::"))}, nil}, + {"192.168.1.1/255.255.255.0", nil, nil, &ParseError{Type: "CIDR address", Text: "192.168.1.1/255.255.255.0"}}, + {"192.168.1.1/35", nil, nil, &ParseError{Type: "CIDR address", Text: "192.168.1.1/35"}}, + {"2001:db8::1/-1", nil, nil, &ParseError{Type: "CIDR address", Text: "2001:db8::1/-1"}}, + {"2001:db8::1/-0", nil, nil, &ParseError{Type: "CIDR address", Text: "2001:db8::1/-0"}}, + {"-0.0.0.0/32", nil, nil, &ParseError{Type: "CIDR address", Text: "-0.0.0.0/32"}}, + {"0.-1.0.0/32", nil, nil, &ParseError{Type: "CIDR address", Text: "0.-1.0.0/32"}}, + {"0.0.-2.0/32", nil, nil, &ParseError{Type: "CIDR address", Text: "0.0.-2.0/32"}}, + {"0.0.0.-3/32", nil, nil, &ParseError{Type: "CIDR address", Text: "0.0.0.-3/32"}}, + {"0.0.0.0/-0", nil, nil, &ParseError{Type: "CIDR address", Text: "0.0.0.0/-0"}}, + {"127.000.000.001/32", nil, nil, &ParseError{Type: "CIDR address", Text: "127.000.000.001/32"}}, + {"", nil, nil, &ParseError{Type: "CIDR address", Text: ""}}, +} + +func TestParseCIDR(t *testing.T) { + for _, tt := range parseCIDRTests { + ip, net, err := ParseCIDR(tt.in) + if !reflect.DeepEqual(err, tt.err) { + t.Errorf("ParseCIDR(%q) = %v, %v; want %v, %v", tt.in, ip, net, tt.ip, tt.net) + } + if err == nil && (!tt.ip.Equal(ip) || !tt.net.IP.Equal(net.IP) || !reflect.DeepEqual(net.Mask, tt.net.Mask)) { + t.Errorf("ParseCIDR(%q) = %v, {%v, %v}; want %v, {%v, %v}", tt.in, ip, net.IP, net.Mask, tt.ip, tt.net.IP, tt.net.Mask) + } + } +} + +var ipNetContainsTests = []struct { + ip IP + net *IPNet + ok bool +}{ + {IPv4(172, 16, 1, 1), &IPNet{IP: IPv4(172, 16, 0, 0), Mask: CIDRMask(12, 32)}, true}, + {IPv4(172, 24, 0, 1), &IPNet{IP: IPv4(172, 16, 0, 0), Mask: CIDRMask(13, 32)}, false}, + {IPv4(192, 168, 0, 3), &IPNet{IP: IPv4(192, 168, 0, 0), Mask: IPv4Mask(0, 0, 255, 252)}, true}, + {IPv4(192, 168, 0, 4), &IPNet{IP: IPv4(192, 168, 0, 0), Mask: IPv4Mask(0, 255, 0, 252)}, false}, + {ParseIP("2001:db8:1:2::1"), &IPNet{IP: ParseIP("2001:db8:1::"), Mask: CIDRMask(47, 128)}, true}, + {ParseIP("2001:db8:1:2::1"), &IPNet{IP: ParseIP("2001:db8:2::"), Mask: CIDRMask(47, 128)}, false}, + {ParseIP("2001:db8:1:2::1"), &IPNet{IP: ParseIP("2001:db8:1::"), Mask: IPMask(ParseIP("ffff:0:ffff::"))}, true}, + {ParseIP("2001:db8:1:2::1"), &IPNet{IP: ParseIP("2001:db8:1::"), Mask: IPMask(ParseIP("0:0:0:ffff::"))}, false}, +} + +func TestIPNetContains(t *testing.T) { + for _, tt := range ipNetContainsTests { + if ok := tt.net.Contains(tt.ip); ok != tt.ok { + t.Errorf("IPNet(%v).Contains(%v) = %v, want %v", tt.net, tt.ip, ok, tt.ok) + } + } +} + +var ipNetStringTests = []struct { + in *IPNet + out string +}{ + {&IPNet{IP: IPv4(192, 168, 1, 0), Mask: CIDRMask(26, 32)}, "192.168.1.0/26"}, + {&IPNet{IP: IPv4(192, 168, 1, 0), Mask: IPv4Mask(255, 0, 255, 0)}, "192.168.1.0/ff00ff00"}, + {&IPNet{IP: ParseIP("2001:db8::"), Mask: CIDRMask(55, 128)}, "2001:db8::/55"}, + 
{&IPNet{IP: ParseIP("2001:db8::"), Mask: IPMask(ParseIP("8000:f123:0:cafe::"))}, "2001:db8::/8000f1230000cafe0000000000000000"}, + {nil, ""}, +} + +func TestIPNetString(t *testing.T) { + for _, tt := range ipNetStringTests { + if out := tt.in.String(); out != tt.out { + t.Errorf("IPNet.String(%v) = %q, want %q", tt.in, out, tt.out) + } + } +} + +var cidrMaskTests = []struct { + ones int + bits int + out IPMask +}{ + {0, 32, IPv4Mask(0, 0, 0, 0)}, + {12, 32, IPv4Mask(255, 240, 0, 0)}, + {24, 32, IPv4Mask(255, 255, 255, 0)}, + {32, 32, IPv4Mask(255, 255, 255, 255)}, + {0, 128, IPMask{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + {4, 128, IPMask{0xf0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + {48, 128, IPMask{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + {128, 128, IPMask{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, + {33, 32, nil}, + {32, 33, nil}, + {-1, 128, nil}, + {128, -1, nil}, +} + +func TestCIDRMask(t *testing.T) { + for _, tt := range cidrMaskTests { + if out := CIDRMask(tt.ones, tt.bits); !reflect.DeepEqual(out, tt.out) { + t.Errorf("CIDRMask(%v, %v) = %v, want %v", tt.ones, tt.bits, out, tt.out) + } + } +} + +var ( + v4addr = IP{192, 168, 0, 1} + v4mappedv6addr = IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 168, 0, 1} + v6addr = IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0, 0x1, 0x23, 0, 0x12, 0, 0x1} + v4mask = IPMask{255, 255, 255, 0} + v4mappedv6mask = IPMask{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 255, 255, 255, 0} + v6mask = IPMask{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0} + badaddr = IP{192, 168, 0} + badmask = IPMask{255, 255, 0} + v4maskzero = IPMask{0, 0, 0, 0} +) + +var networkNumberAndMaskTests = []struct { + in IPNet + out IPNet +}{ + {IPNet{IP: v4addr, Mask: v4mask}, IPNet{IP: v4addr, Mask: v4mask}}, + {IPNet{IP: v4addr, Mask: v4mappedv6mask}, IPNet{IP: v4addr, Mask: v4mask}}, + {IPNet{IP: v4mappedv6addr, Mask: v4mappedv6mask}, IPNet{IP: v4addr, Mask: v4mask}}, + {IPNet{IP: v4mappedv6addr, Mask: v6mask}, IPNet{IP: v4addr, Mask: v4maskzero}}, + {IPNet{IP: v4addr, Mask: v6mask}, IPNet{IP: v4addr, Mask: v4maskzero}}, + {IPNet{IP: v6addr, Mask: v6mask}, IPNet{IP: v6addr, Mask: v6mask}}, + {IPNet{IP: v6addr, Mask: v4mappedv6mask}, IPNet{IP: v6addr, Mask: v4mappedv6mask}}, + {in: IPNet{IP: v6addr, Mask: v4mask}}, + {in: IPNet{IP: v4addr, Mask: badmask}}, + {in: IPNet{IP: v4mappedv6addr, Mask: badmask}}, + {in: IPNet{IP: v6addr, Mask: badmask}}, + {in: IPNet{IP: badaddr, Mask: v4mask}}, + {in: IPNet{IP: badaddr, Mask: v4mappedv6mask}}, + {in: IPNet{IP: badaddr, Mask: v6mask}}, + {in: IPNet{IP: badaddr, Mask: badmask}}, +} + +func TestNetworkNumberAndMask(t *testing.T) { + for _, tt := range networkNumberAndMaskTests { + ip, m := networkNumberAndMask(&tt.in) + out := &IPNet{IP: ip, Mask: m} + if !reflect.DeepEqual(&tt.out, out) { + t.Errorf("networkNumberAndMask(%v) = %v, want %v", tt.in, out, &tt.out) + } + } +} + +func TestSplitHostPort(t *testing.T) { + for _, tt := range []struct { + hostPort string + host string + port string + }{ + // Host name + {"localhost:http", "localhost", "http"}, + {"localhost:80", "localhost", "80"}, + + // Go-specific host name with zone identifier + {"localhost%lo0:http", "localhost%lo0", "http"}, + {"localhost%lo0:80", "localhost%lo0", "80"}, + {"[localhost%lo0]:http", "localhost%lo0", "http"}, // Go 1 behavior + {"[localhost%lo0]:80", "localhost%lo0", "80"}, // Go 1 behavior + + // IP 
literal + {"127.0.0.1:http", "127.0.0.1", "http"}, + {"127.0.0.1:80", "127.0.0.1", "80"}, + {"[::1]:http", "::1", "http"}, + {"[::1]:80", "::1", "80"}, + + // IP literal with zone identifier + {"[::1%lo0]:http", "::1%lo0", "http"}, + {"[::1%lo0]:80", "::1%lo0", "80"}, + + // Go-specific wildcard for host name + {":http", "", "http"}, // Go 1 behavior + {":80", "", "80"}, // Go 1 behavior + + // Go-specific wildcard for service name or transport port number + {"golang.org:", "golang.org", ""}, // Go 1 behavior + {"127.0.0.1:", "127.0.0.1", ""}, // Go 1 behavior + {"[::1]:", "::1", ""}, // Go 1 behavior + + // Opaque service name + {"golang.org:https%foo", "golang.org", "https%foo"}, // Go 1 behavior + } { + if host, port, err := SplitHostPort(tt.hostPort); host != tt.host || port != tt.port || err != nil { + t.Errorf("SplitHostPort(%q) = %q, %q, %v; want %q, %q, nil", tt.hostPort, host, port, err, tt.host, tt.port) + } + } + + for _, tt := range []struct { + hostPort string + err string + }{ + {"golang.org", "missing port in address"}, + {"127.0.0.1", "missing port in address"}, + {"[::1]", "missing port in address"}, + {"[fe80::1%lo0]", "missing port in address"}, + {"[localhost%lo0]", "missing port in address"}, + {"localhost%lo0", "missing port in address"}, + + {"::1", "too many colons in address"}, + {"fe80::1%lo0", "too many colons in address"}, + {"fe80::1%lo0:80", "too many colons in address"}, + + // Test cases that didn't fail in Go 1 + + {"[foo:bar]", "missing port in address"}, + {"[foo:bar]baz", "missing port in address"}, + {"[foo]bar:baz", "missing port in address"}, + + {"[foo]:[bar]:baz", "too many colons in address"}, + + {"[foo]:[bar]baz", "unexpected '[' in address"}, + {"foo[bar]:baz", "unexpected '[' in address"}, + + {"foo]bar:baz", "unexpected ']' in address"}, + } { + if host, port, err := SplitHostPort(tt.hostPort); err == nil { + t.Errorf("SplitHostPort(%q) should have failed", tt.hostPort) + } else { + e := err.(*AddrError) + if e.Err != tt.err { + t.Errorf("SplitHostPort(%q) = _, _, %q; want %q", tt.hostPort, e.Err, tt.err) + } + if host != "" || port != "" { + t.Errorf("SplitHostPort(%q) = %q, %q, err; want %q, %q, err on failure", tt.hostPort, host, port, "", "") + } + } + } +} + +func TestJoinHostPort(t *testing.T) { + for _, tt := range []struct { + host string + port string + hostPort string + }{ + // Host name + {"localhost", "http", "localhost:http"}, + {"localhost", "80", "localhost:80"}, + + // Go-specific host name with zone identifier + {"localhost%lo0", "http", "localhost%lo0:http"}, + {"localhost%lo0", "80", "localhost%lo0:80"}, + + // IP literal + {"127.0.0.1", "http", "127.0.0.1:http"}, + {"127.0.0.1", "80", "127.0.0.1:80"}, + {"::1", "http", "[::1]:http"}, + {"::1", "80", "[::1]:80"}, + + // IP literal with zone identifier + {"::1%lo0", "http", "[::1%lo0]:http"}, + {"::1%lo0", "80", "[::1%lo0]:80"}, + + // Go-specific wildcard for host name + {"", "http", ":http"}, // Go 1 behavior + {"", "80", ":80"}, // Go 1 behavior + + // Go-specific wildcard for service name or transport port number + {"golang.org", "", "golang.org:"}, // Go 1 behavior + {"127.0.0.1", "", "127.0.0.1:"}, // Go 1 behavior + {"::1", "", "[::1]:"}, // Go 1 behavior + + // Opaque service name + {"golang.org", "https%foo", "golang.org:https%foo"}, // Go 1 behavior + } { + if hostPort := JoinHostPort(tt.host, tt.port); hostPort != tt.hostPort { + t.Errorf("JoinHostPort(%q, %q) = %q; want %q", tt.host, tt.port, hostPort, tt.hostPort) + } + } +} + +var ipAddrFamilyTests = []struct { 
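+	// af4 and af6 give the expected address family of in: whether it is an
+	// IPv4 or an IPv6 address (editor's note; see TestIPAddrFamily below).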
+ in IP + af4 bool + af6 bool +}{ + {IPv4bcast, true, false}, + {IPv4allsys, true, false}, + {IPv4allrouter, true, false}, + {IPv4zero, true, false}, + {IPv4(224, 0, 0, 1), true, false}, + {IPv4(127, 0, 0, 1), true, false}, + {IPv4(240, 0, 0, 1), true, false}, + {IPv6unspecified, false, true}, + {IPv6loopback, false, true}, + {IPv6interfacelocalallnodes, false, true}, + {IPv6linklocalallnodes, false, true}, + {IPv6linklocalallrouters, false, true}, + {ParseIP("ff05::a:b:c:d"), false, true}, + {ParseIP("fe80::1:2:3:4"), false, true}, + {ParseIP("2001:db8::123:12:1"), false, true}, +} + +func TestIPAddrFamily(t *testing.T) { + for _, tt := range ipAddrFamilyTests { + if af := tt.in.To4() != nil; af != tt.af4 { + t.Errorf("verifying IPv4 address family for %q = %v, want %v", tt.in, af, tt.af4) + } + if af := len(tt.in) == IPv6len && tt.in.To4() == nil; af != tt.af6 { + t.Errorf("verifying IPv6 address family for %q = %v, want %v", tt.in, af, tt.af6) + } + } +} + +var ipAddrScopeTests = []struct { + scope func(IP) bool + in IP + ok bool +}{ + {IP.IsUnspecified, IPv4zero, true}, + {IP.IsUnspecified, IPv4(127, 0, 0, 1), false}, + {IP.IsUnspecified, IPv6unspecified, true}, + {IP.IsUnspecified, IPv6interfacelocalallnodes, false}, + {IP.IsUnspecified, nil, false}, + {IP.IsLoopback, IPv4(127, 0, 0, 1), true}, + {IP.IsLoopback, IPv4(127, 255, 255, 254), true}, + {IP.IsLoopback, IPv4(128, 1, 2, 3), false}, + {IP.IsLoopback, IPv6loopback, true}, + {IP.IsLoopback, IPv6linklocalallrouters, false}, + {IP.IsLoopback, nil, false}, + {IP.IsMulticast, IPv4(224, 0, 0, 0), true}, + {IP.IsMulticast, IPv4(239, 0, 0, 0), true}, + {IP.IsMulticast, IPv4(240, 0, 0, 0), false}, + {IP.IsMulticast, IPv6linklocalallnodes, true}, + {IP.IsMulticast, IP{0xff, 0x05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, true}, + {IP.IsMulticast, IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false}, + {IP.IsMulticast, nil, false}, + {IP.IsInterfaceLocalMulticast, IPv4(224, 0, 0, 0), false}, + {IP.IsInterfaceLocalMulticast, IPv4(0xff, 0x01, 0, 0), false}, + {IP.IsInterfaceLocalMulticast, IPv6interfacelocalallnodes, true}, + {IP.IsInterfaceLocalMulticast, nil, false}, + {IP.IsLinkLocalMulticast, IPv4(224, 0, 0, 0), true}, + {IP.IsLinkLocalMulticast, IPv4(239, 0, 0, 0), false}, + {IP.IsLinkLocalMulticast, IPv4(0xff, 0x02, 0, 0), false}, + {IP.IsLinkLocalMulticast, IPv6linklocalallrouters, true}, + {IP.IsLinkLocalMulticast, IP{0xff, 0x05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false}, + {IP.IsLinkLocalMulticast, nil, false}, + {IP.IsLinkLocalUnicast, IPv4(169, 254, 0, 0), true}, + {IP.IsLinkLocalUnicast, IPv4(169, 255, 0, 0), false}, + {IP.IsLinkLocalUnicast, IPv4(0xfe, 0x80, 0, 0), false}, + {IP.IsLinkLocalUnicast, IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, true}, + {IP.IsLinkLocalUnicast, IP{0xfe, 0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false}, + {IP.IsLinkLocalUnicast, nil, false}, + {IP.IsGlobalUnicast, IPv4(240, 0, 0, 0), true}, + {IP.IsGlobalUnicast, IPv4(232, 0, 0, 0), false}, + {IP.IsGlobalUnicast, IPv4(169, 254, 0, 0), false}, + {IP.IsGlobalUnicast, IPv4bcast, false}, + {IP.IsGlobalUnicast, IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0, 0x1, 0x23, 0, 0x12, 0, 0x1}, true}, + {IP.IsGlobalUnicast, IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false}, + {IP.IsGlobalUnicast, IP{0xff, 0x05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false}, + {IP.IsGlobalUnicast, nil, false}, + {IP.IsPrivate, nil, false}, + {IP.IsPrivate, IPv4(1, 1, 1, 1), false}, + {IP.IsPrivate, IPv4(9, 255, 255, 255), 
false}, + {IP.IsPrivate, IPv4(10, 0, 0, 0), true}, + {IP.IsPrivate, IPv4(10, 255, 255, 255), true}, + {IP.IsPrivate, IPv4(11, 0, 0, 0), false}, + {IP.IsPrivate, IPv4(172, 15, 255, 255), false}, + {IP.IsPrivate, IPv4(172, 16, 0, 0), true}, + {IP.IsPrivate, IPv4(172, 16, 255, 255), true}, + {IP.IsPrivate, IPv4(172, 23, 18, 255), true}, + {IP.IsPrivate, IPv4(172, 31, 255, 255), true}, + {IP.IsPrivate, IPv4(172, 31, 0, 0), true}, + {IP.IsPrivate, IPv4(172, 32, 0, 0), false}, + {IP.IsPrivate, IPv4(192, 167, 255, 255), false}, + {IP.IsPrivate, IPv4(192, 168, 0, 0), true}, + {IP.IsPrivate, IPv4(192, 168, 255, 255), true}, + {IP.IsPrivate, IPv4(192, 169, 0, 0), false}, + {IP.IsPrivate, IP{0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, false}, + {IP.IsPrivate, IP{0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, true}, + {IP.IsPrivate, IP{0xfc, 0xff, 0x12, 0, 0, 0, 0, 0x44, 0, 0, 0, 0, 0, 0, 0, 0}, true}, + {IP.IsPrivate, IP{0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, true}, + {IP.IsPrivate, IP{0xfe, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false}, +} + +func name(f any) string { + return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name() +} + +func TestIPAddrScope(t *testing.T) { + for _, tt := range ipAddrScopeTests { + if ok := tt.scope(tt.in); ok != tt.ok { + t.Errorf("%s(%q) = %v, want %v", name(tt.scope), tt.in, ok, tt.ok) + } + ip := tt.in.To4() + if ip == nil { + continue + } + if ok := tt.scope(ip); ok != tt.ok { + t.Errorf("%s(%q) = %v, want %v", name(tt.scope), ip, ok, tt.ok) + } + } +} + +func BenchmarkIPEqual(b *testing.B) { + b.Run("IPv4", func(b *testing.B) { + benchmarkIPEqual(b, IPv4len) + }) + b.Run("IPv6", func(b *testing.B) { + benchmarkIPEqual(b, IPv6len) + }) +} + +func benchmarkIPEqual(b *testing.B, size int) { + ips := make([]IP, 1000) + for i := range ips { + ips[i] = make(IP, size) + rand.Read(ips[i]) + } + // Half of the N are equal. + for i := 0; i < b.N/2; i++ { + x := ips[i%len(ips)] + y := ips[i%len(ips)] + x.Equal(y) + } + // The other half are not equal. + for i := 0; i < b.N/2; i++ { + x := ips[i%len(ips)] + y := ips[(i+1)%len(ips)] + x.Equal(y) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/iprawsock.go b/platform/dbops/binaries/go/go/src/net/iprawsock.go new file mode 100644 index 0000000000000000000000000000000000000000..4c06b1b5aca4461840a3b076f00a1faf05ba83e2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/iprawsock.go @@ -0,0 +1,240 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "syscall" +) + +// BUG(mikio): On every POSIX platform, reads from the "ip4" network +// using the ReadFrom or ReadFromIP method might not return a complete +// IPv4 packet, including its header, even if there is space +// available. This can occur even in cases where Read or ReadMsgIP +// could return a complete packet. For this reason, it is recommended +// that you do not use these methods if it is important to receive a +// full packet. +// +// The Go 1 compatibility guidelines make it impossible for us to +// change the behavior of these methods; use Read or ReadMsgIP +// instead. + +// BUG(mikio): On JS and Plan 9, methods and functions related +// to IPConn are not implemented. + +// BUG(mikio): On Windows, the File method of IPConn is not +// implemented. 
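+
+// exampleReadFullIPv4Packet is an editor's sketch, not part of the
+// original file: per the BUG notes above, Read or ReadMsgIP should be
+// preferred over ReadFrom or ReadFromIP when the complete IPv4 packet,
+// header included, matters. It assumes sufficient privileges to open the
+// hypothetical raw "ip4:icmp" listener below.
+func exampleReadFullIPv4Packet() ([]byte, *IPAddr, error) {
+	c, err := ListenIP("ip4:icmp", &IPAddr{IP: IPv4zero})
+	if err != nil {
+		return nil, nil, err
+	}
+	defer c.Close()
+	b := make([]byte, 1500)
+	// ReadMsgIP, unlike ReadFrom, can return the complete packet
+	// (see the BUG notes above).
+	n, _, _, src, err := c.ReadMsgIP(b, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	return b[:n], src, nil
+}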
+ +// IPAddr represents the address of an IP end point. +type IPAddr struct { + IP IP + Zone string // IPv6 scoped addressing zone +} + +// Network returns the address's network name, "ip". +func (a *IPAddr) Network() string { return "ip" } + +func (a *IPAddr) String() string { + if a == nil { + return "" + } + ip := ipEmptyString(a.IP) + if a.Zone != "" { + return ip + "%" + a.Zone + } + return ip +} + +func (a *IPAddr) isWildcard() bool { + if a == nil || a.IP == nil { + return true + } + return a.IP.IsUnspecified() +} + +func (a *IPAddr) opAddr() Addr { + if a == nil { + return nil + } + return a +} + +// ResolveIPAddr returns an address of IP end point. +// +// The network must be an IP network name. +// +// If the host in the address parameter is not a literal IP address, +// ResolveIPAddr resolves the address to an address of IP end point. +// Otherwise, it parses the address as a literal IP address. +// The address parameter can use a host name, but this is not +// recommended, because it will return at most one of the host name's +// IP addresses. +// +// See func [Dial] for a description of the network and address +// parameters. +func ResolveIPAddr(network, address string) (*IPAddr, error) { + if network == "" { // a hint wildcard for Go 1.0 undocumented behavior + network = "ip" + } + afnet, _, err := parseNetwork(context.Background(), network, false) + if err != nil { + return nil, err + } + switch afnet { + case "ip", "ip4", "ip6": + default: + return nil, UnknownNetworkError(network) + } + addrs, err := DefaultResolver.internetAddrList(context.Background(), afnet, address) + if err != nil { + return nil, err + } + return addrs.forResolve(network, address).(*IPAddr), nil +} + +// IPConn is the implementation of the [Conn] and [PacketConn] interfaces +// for IP network connections. +type IPConn struct { + conn +} + +// SyscallConn returns a raw network connection. +// This implements the [syscall.Conn] interface. +func (c *IPConn) SyscallConn() (syscall.RawConn, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + return newRawConn(c.fd), nil +} + +// ReadFromIP acts like ReadFrom but returns an IPAddr. +func (c *IPConn) ReadFromIP(b []byte) (int, *IPAddr, error) { + if !c.ok() { + return 0, nil, syscall.EINVAL + } + n, addr, err := c.readFrom(b) + if err != nil { + err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return n, addr, err +} + +// ReadFrom implements the [PacketConn] ReadFrom method. +func (c *IPConn) ReadFrom(b []byte) (int, Addr, error) { + if !c.ok() { + return 0, nil, syscall.EINVAL + } + n, addr, err := c.readFrom(b) + if err != nil { + err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + if addr == nil { + return n, nil, err + } + return n, addr, err +} + +// ReadMsgIP reads a message from c, copying the payload into b and +// the associated out-of-band data into oob. It returns the number of +// bytes copied into b, the number of bytes copied into oob, the flags +// that were set on the message and the source address of the message. +// +// The packages golang.org/x/net/ipv4 and golang.org/x/net/ipv6 can be +// used to manipulate IP-level socket options in oob. 
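+//
+// A minimal sketch of the out-of-band side (editor's addition; the oob
+// buffer size of 1024 is an assumption, and parsing the result with
+// golang.org/x/net/ipv4's ControlMessage is optional):
+//
+//	b, oob := make([]byte, 1500), make([]byte, 1024)
+//	n, oobn, _, src, err := c.ReadMsgIP(b, oob)
+//	// On success, b[:n] holds the payload from src and oob[:oobn]
+//	// holds the raw socket control messages.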
+func (c *IPConn) ReadMsgIP(b, oob []byte) (n, oobn, flags int, addr *IPAddr, err error) { + if !c.ok() { + return 0, 0, 0, nil, syscall.EINVAL + } + n, oobn, flags, addr, err = c.readMsg(b, oob) + if err != nil { + err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return +} + +// WriteToIP acts like [IPConn.WriteTo] but takes an [IPAddr]. +func (c *IPConn) WriteToIP(b []byte, addr *IPAddr) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + n, err := c.writeTo(b, addr) + if err != nil { + err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr.opAddr(), Err: err} + } + return n, err +} + +// WriteTo implements the [PacketConn] WriteTo method. +func (c *IPConn) WriteTo(b []byte, addr Addr) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + a, ok := addr.(*IPAddr) + if !ok { + return 0, &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr, Err: syscall.EINVAL} + } + n, err := c.writeTo(b, a) + if err != nil { + err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: a.opAddr(), Err: err} + } + return n, err +} + +// WriteMsgIP writes a message to addr via c, copying the payload from +// b and the associated out-of-band data from oob. It returns the +// number of payload and out-of-band bytes written. +// +// The packages golang.org/x/net/ipv4 and golang.org/x/net/ipv6 can be +// used to manipulate IP-level socket options in oob. +func (c *IPConn) WriteMsgIP(b, oob []byte, addr *IPAddr) (n, oobn int, err error) { + if !c.ok() { + return 0, 0, syscall.EINVAL + } + n, oobn, err = c.writeMsg(b, oob, addr) + if err != nil { + err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr.opAddr(), Err: err} + } + return +} + +func newIPConn(fd *netFD) *IPConn { return &IPConn{conn{fd}} } + +// DialIP acts like [Dial] for IP networks. +// +// The network must be an IP network name; see func Dial for details. +// +// If laddr is nil, a local address is automatically chosen. +// If the IP field of raddr is nil or an unspecified IP address, the +// local system is assumed. +func DialIP(network string, laddr, raddr *IPAddr) (*IPConn, error) { + if raddr == nil { + return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress} + } + sd := &sysDialer{network: network, address: raddr.String()} + c, err := sd.dialIP(context.Background(), laddr, raddr) + if err != nil { + return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err} + } + return c, nil +} + +// ListenIP acts like [ListenPacket] for IP networks. +// +// The network must be an IP network name; see func Dial for details. +// +// If the IP field of laddr is nil or an unspecified IP address, +// ListenIP listens on all available IP addresses of the local system +// except multicast IP addresses. 
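+//
+// A minimal usage sketch (editor's addition; assumes sufficient
+// privileges to open a raw socket):
+//
+//	c, err := ListenIP("ip4:icmp", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer c.Close()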
+func ListenIP(network string, laddr *IPAddr) (*IPConn, error) { + if laddr == nil { + laddr = &IPAddr{} + } + sl := &sysListener{network: network, address: laddr.String()} + c, err := sl.listenIP(context.Background(), laddr) + if err != nil { + return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: err} + } + return c, nil +} diff --git a/platform/dbops/binaries/go/go/src/net/iprawsock_plan9.go b/platform/dbops/binaries/go/go/src/net/iprawsock_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..ebe58088642de4b2f0bfaccb0f478af95cda44bd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/iprawsock_plan9.go @@ -0,0 +1,34 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "syscall" +) + +func (c *IPConn) readFrom(b []byte) (int, *IPAddr, error) { + return 0, nil, syscall.EPLAN9 +} + +func (c *IPConn) readMsg(b, oob []byte) (n, oobn, flags int, addr *IPAddr, err error) { + return 0, 0, 0, nil, syscall.EPLAN9 +} + +func (c *IPConn) writeTo(b []byte, addr *IPAddr) (int, error) { + return 0, syscall.EPLAN9 +} + +func (c *IPConn) writeMsg(b, oob []byte, addr *IPAddr) (n, oobn int, err error) { + return 0, 0, syscall.EPLAN9 +} + +func (sd *sysDialer) dialIP(ctx context.Context, laddr, raddr *IPAddr) (*IPConn, error) { + return nil, syscall.EPLAN9 +} + +func (sl *sysListener) listenIP(ctx context.Context, laddr *IPAddr) (*IPConn, error) { + return nil, syscall.EPLAN9 +} diff --git a/platform/dbops/binaries/go/go/src/net/iprawsock_posix.go b/platform/dbops/binaries/go/go/src/net/iprawsock_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..73b41ab5226a93044a594a41b5cb7e7f75f762d6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/iprawsock_posix.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build unix || js || wasip1 || windows + +package net + +import ( + "context" + "syscall" +) + +func sockaddrToIP(sa syscall.Sockaddr) Addr { + switch sa := sa.(type) { + case *syscall.SockaddrInet4: + return &IPAddr{IP: sa.Addr[0:]} + case *syscall.SockaddrInet6: + return &IPAddr{IP: sa.Addr[0:], Zone: zoneCache.name(int(sa.ZoneId))} + } + return nil +} + +func (a *IPAddr) family() int { + if a == nil || len(a.IP) <= IPv4len { + return syscall.AF_INET + } + if a.IP.To4() != nil { + return syscall.AF_INET + } + return syscall.AF_INET6 +} + +func (a *IPAddr) sockaddr(family int) (syscall.Sockaddr, error) { + if a == nil { + return nil, nil + } + return ipToSockaddr(family, a.IP, 0, a.Zone) +} + +func (a *IPAddr) toLocal(net string) sockaddr { + return &IPAddr{loopbackIP(net), a.Zone} +} + +func (c *IPConn) readFrom(b []byte) (int, *IPAddr, error) { + // TODO(cw,rsc): consider using readv if we know the family + // type to avoid the header trim/copy + var addr *IPAddr + n, sa, err := c.fd.readFrom(b) + switch sa := sa.(type) { + case *syscall.SockaddrInet4: + addr = &IPAddr{IP: sa.Addr[0:]} + n = stripIPv4Header(n, b) + case *syscall.SockaddrInet6: + addr = &IPAddr{IP: sa.Addr[0:], Zone: zoneCache.name(int(sa.ZoneId))} + } + return n, addr, err +} + +func stripIPv4Header(n int, b []byte) int { + if len(b) < 20 { + return n + } + l := int(b[0]&0x0f) << 2 + if 20 > l || l > len(b) { + return n + } + if b[0]>>4 != 4 { + return n + } + copy(b, b[l:]) + return n - l +} + +func (c *IPConn) readMsg(b, oob []byte) (n, oobn, flags int, addr *IPAddr, err error) { + var sa syscall.Sockaddr + n, oobn, flags, sa, err = c.fd.readMsg(b, oob, 0) + switch sa := sa.(type) { + case *syscall.SockaddrInet4: + addr = &IPAddr{IP: sa.Addr[0:]} + case *syscall.SockaddrInet6: + addr = &IPAddr{IP: sa.Addr[0:], Zone: zoneCache.name(int(sa.ZoneId))} + } + return +} + +func (c *IPConn) writeTo(b []byte, addr *IPAddr) (int, error) { + if c.fd.isConnected { + return 0, ErrWriteToConnected + } + if addr == nil { + return 0, errMissingAddress + } + sa, err := addr.sockaddr(c.fd.family) + if err != nil { + return 0, err + } + return c.fd.writeTo(b, sa) +} + +func (c *IPConn) writeMsg(b, oob []byte, addr *IPAddr) (n, oobn int, err error) { + if c.fd.isConnected { + return 0, 0, ErrWriteToConnected + } + if addr == nil { + return 0, 0, errMissingAddress + } + sa, err := addr.sockaddr(c.fd.family) + if err != nil { + return 0, 0, err + } + return c.fd.writeMsg(b, oob, sa) +} + +func (sd *sysDialer) dialIP(ctx context.Context, laddr, raddr *IPAddr) (*IPConn, error) { + network, proto, err := parseNetwork(ctx, sd.network, true) + if err != nil { + return nil, err + } + switch network { + case "ip", "ip4", "ip6": + default: + return nil, UnknownNetworkError(sd.network) + } + ctrlCtxFn := sd.Dialer.ControlContext + if ctrlCtxFn == nil && sd.Dialer.Control != nil { + ctrlCtxFn = func(cxt context.Context, network, address string, c syscall.RawConn) error { + return sd.Dialer.Control(network, address, c) + } + } + fd, err := internetSocket(ctx, network, laddr, raddr, syscall.SOCK_RAW, proto, "dial", ctrlCtxFn) + if err != nil { + return nil, err + } + return newIPConn(fd), nil +} + +func (sl *sysListener) listenIP(ctx context.Context, laddr *IPAddr) (*IPConn, error) { + network, proto, err := parseNetwork(ctx, sl.network, true) + if err != nil { + return nil, err + } + switch network { + case "ip", "ip4", "ip6": + default: + return nil, UnknownNetworkError(sl.network) + } + var ctrlCtxFn func(cxt context.Context, network, 
address string, c syscall.RawConn) error + if sl.ListenConfig.Control != nil { + ctrlCtxFn = func(cxt context.Context, network, address string, c syscall.RawConn) error { + return sl.ListenConfig.Control(network, address, c) + } + } + fd, err := internetSocket(ctx, network, laddr, nil, syscall.SOCK_RAW, proto, "listen", ctrlCtxFn) + if err != nil { + return nil, err + } + return newIPConn(fd), nil +} diff --git a/platform/dbops/binaries/go/go/src/net/iprawsock_test.go b/platform/dbops/binaries/go/go/src/net/iprawsock_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7f1fc139abc82c4589213eb3a0beb70eb44d58d4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/iprawsock_test.go @@ -0,0 +1,200 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "internal/testenv" + "reflect" + "testing" +) + +// The full stack test cases for IPConn have been moved to the +// following: +// golang.org/x/net/ipv4 +// golang.org/x/net/ipv6 +// golang.org/x/net/icmp + +type resolveIPAddrTest struct { + network string + litAddrOrName string + addr *IPAddr + err error +} + +var resolveIPAddrTests = []resolveIPAddrTest{ + {"ip", "127.0.0.1", &IPAddr{IP: IPv4(127, 0, 0, 1)}, nil}, + {"ip4", "127.0.0.1", &IPAddr{IP: IPv4(127, 0, 0, 1)}, nil}, + {"ip4:icmp", "127.0.0.1", &IPAddr{IP: IPv4(127, 0, 0, 1)}, nil}, + + {"ip", "::1", &IPAddr{IP: ParseIP("::1")}, nil}, + {"ip6", "::1", &IPAddr{IP: ParseIP("::1")}, nil}, + {"ip6:ipv6-icmp", "::1", &IPAddr{IP: ParseIP("::1")}, nil}, + {"ip6:IPv6-ICMP", "::1", &IPAddr{IP: ParseIP("::1")}, nil}, + + {"ip", "::1%en0", &IPAddr{IP: ParseIP("::1"), Zone: "en0"}, nil}, + {"ip6", "::1%911", &IPAddr{IP: ParseIP("::1"), Zone: "911"}, nil}, + + {"", "127.0.0.1", &IPAddr{IP: IPv4(127, 0, 0, 1)}, nil}, // Go 1.0 behavior + {"", "::1", &IPAddr{IP: ParseIP("::1")}, nil}, // Go 1.0 behavior + + {"ip4:icmp", "", &IPAddr{}, nil}, + + {"l2tp", "127.0.0.1", nil, UnknownNetworkError("l2tp")}, + {"l2tp:gre", "127.0.0.1", nil, UnknownNetworkError("l2tp:gre")}, + {"tcp", "1.2.3.4:123", nil, UnknownNetworkError("tcp")}, + + {"ip4", "2001:db8::1", nil, &AddrError{Err: errNoSuitableAddress.Error(), Addr: "2001:db8::1"}}, + {"ip4:icmp", "2001:db8::1", nil, &AddrError{Err: errNoSuitableAddress.Error(), Addr: "2001:db8::1"}}, + {"ip6", "127.0.0.1", nil, &AddrError{Err: errNoSuitableAddress.Error(), Addr: "127.0.0.1"}}, + {"ip6", "::ffff:127.0.0.1", nil, &AddrError{Err: errNoSuitableAddress.Error(), Addr: "::ffff:127.0.0.1"}}, + {"ip6:ipv6-icmp", "127.0.0.1", nil, &AddrError{Err: errNoSuitableAddress.Error(), Addr: "127.0.0.1"}}, + {"ip6:ipv6-icmp", "::ffff:127.0.0.1", nil, &AddrError{Err: errNoSuitableAddress.Error(), Addr: "::ffff:127.0.0.1"}}, +} + +func TestResolveIPAddr(t *testing.T) { + if !testableNetwork("ip+nopriv") { + t.Skip("ip+nopriv test") + } + + origTestHookLookupIP := testHookLookupIP + defer func() { testHookLookupIP = origTestHookLookupIP }() + testHookLookupIP = lookupLocalhost + + for _, tt := range resolveIPAddrTests { + addr, err := ResolveIPAddr(tt.network, tt.litAddrOrName) + if !reflect.DeepEqual(addr, tt.addr) || !reflect.DeepEqual(err, tt.err) { + t.Errorf("ResolveIPAddr(%q, %q) = %#v, %v, want %#v, %v", tt.network, tt.litAddrOrName, addr, err, tt.addr, tt.err) + continue + } + if err == nil { + addr2, err := ResolveIPAddr(addr.Network(), addr.String()) + if !reflect.DeepEqual(addr2, tt.addr) || err != tt.err { 
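+				// Editor's note: re-resolving the String form over the same
+				// network must round-trip back to the original address.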
+ t.Errorf("(%q, %q): ResolveIPAddr(%q, %q) = %#v, %v, want %#v, %v", tt.network, tt.litAddrOrName, addr.Network(), addr.String(), addr2, err, tt.addr, tt.err) + } + } + } +} + +var ipConnLocalNameTests = []struct { + net string + laddr *IPAddr +}{ + {"ip4:icmp", &IPAddr{IP: IPv4(127, 0, 0, 1)}}, + {"ip4:icmp", &IPAddr{}}, + {"ip4:icmp", nil}, +} + +func TestIPConnLocalName(t *testing.T) { + for _, tt := range ipConnLocalNameTests { + if !testableNetwork(tt.net) { + t.Logf("skipping %s test", tt.net) + continue + } + c, err := ListenIP(tt.net, tt.laddr) + if testenv.SyscallIsNotSupported(err) { + // May be inside a container that disallows creating a socket. + t.Logf("skipping %s test: %v", tt.net, err) + continue + } else if err != nil { + t.Fatal(err) + } + defer c.Close() + if la := c.LocalAddr(); la == nil { + t.Fatal("should not fail") + } + } +} + +func TestIPConnRemoteName(t *testing.T) { + network := "ip:tcp" + if !testableNetwork(network) { + t.Skipf("skipping %s test", network) + } + + raddr := &IPAddr{IP: IPv4(127, 0, 0, 1).To4()} + c, err := DialIP(network, &IPAddr{IP: IPv4(127, 0, 0, 1)}, raddr) + if testenv.SyscallIsNotSupported(err) { + // May be inside a container that disallows creating a socket. + t.Skipf("skipping %s test: %v", network, err) + } else if err != nil { + t.Fatal(err) + } + defer c.Close() + if !reflect.DeepEqual(raddr, c.RemoteAddr()) { + t.Fatalf("got %#v; want %#v", c.RemoteAddr(), raddr) + } +} + +func TestDialListenIPArgs(t *testing.T) { + type test struct { + argLists [][2]string + shouldFail bool + } + tests := []test{ + { + argLists: [][2]string{ + {"ip", "127.0.0.1"}, + {"ip:", "127.0.0.1"}, + {"ip::", "127.0.0.1"}, + {"ip", "::1"}, + {"ip:", "::1"}, + {"ip::", "::1"}, + {"ip4", "127.0.0.1"}, + {"ip4:", "127.0.0.1"}, + {"ip4::", "127.0.0.1"}, + {"ip6", "::1"}, + {"ip6:", "::1"}, + {"ip6::", "::1"}, + }, + shouldFail: true, + }, + } + if testableNetwork("ip") { + priv := test{shouldFail: false} + for _, tt := range []struct { + network, address string + args [2]string + }{ + {"ip4:47", "127.0.0.1", [2]string{"ip4:47", "127.0.0.1"}}, + {"ip6:47", "::1", [2]string{"ip6:47", "::1"}}, + } { + c, err := ListenPacket(tt.network, tt.address) + if err != nil { + continue + } + c.Close() + priv.argLists = append(priv.argLists, tt.args) + } + if len(priv.argLists) > 0 { + tests = append(tests, priv) + } + } + + for _, tt := range tests { + for _, args := range tt.argLists { + _, err := Dial(args[0], args[1]) + if tt.shouldFail != (err != nil) { + t.Errorf("Dial(%q, %q) = %v; want (err != nil) is %t", args[0], args[1], err, tt.shouldFail) + } + _, err = ListenPacket(args[0], args[1]) + if tt.shouldFail != (err != nil) { + t.Errorf("ListenPacket(%q, %q) = %v; want (err != nil) is %t", args[0], args[1], err, tt.shouldFail) + } + a, err := ResolveIPAddr("ip", args[1]) + if err != nil { + t.Errorf("ResolveIPAddr(\"ip\", %q) = %v", args[1], err) + continue + } + _, err = DialIP(args[0], nil, a) + if tt.shouldFail != (err != nil) { + t.Errorf("DialIP(%q, %v) = %v; want (err != nil) is %t", args[0], a, err, tt.shouldFail) + } + _, err = ListenIP(args[0], a) + if tt.shouldFail != (err != nil) { + t.Errorf("ListenIP(%q, %v) = %v; want (err != nil) is %t", args[0], a, err, tt.shouldFail) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/net/ipsock.go b/platform/dbops/binaries/go/go/src/net/ipsock.go new file mode 100644 index 0000000000000000000000000000000000000000..176dbc748e66c83cc311261400cc5496b34da2d4 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/net/ipsock.go @@ -0,0 +1,315 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "internal/bytealg" + "runtime" + "sync" +) + +// BUG(rsc,mikio): On DragonFly BSD and OpenBSD, listening on the +// "tcp" and "udp" networks does not listen for both IPv4 and IPv6 +// connections. This is due to the fact that IPv4 traffic will not be +// routed to an IPv6 socket - two separate sockets are required if +// both address families are to be supported. +// See inet6(4) for details. + +type ipStackCapabilities struct { + sync.Once // guards following + ipv4Enabled bool + ipv6Enabled bool + ipv4MappedIPv6Enabled bool +} + +var ipStackCaps ipStackCapabilities + +// supportsIPv4 reports whether the platform supports IPv4 networking +// functionality. +func supportsIPv4() bool { + ipStackCaps.Once.Do(ipStackCaps.probe) + return ipStackCaps.ipv4Enabled +} + +// supportsIPv6 reports whether the platform supports IPv6 networking +// functionality. +func supportsIPv6() bool { + ipStackCaps.Once.Do(ipStackCaps.probe) + return ipStackCaps.ipv6Enabled +} + +// supportsIPv4map reports whether the platform supports mapping an +// IPv4 address inside an IPv6 address at transport layer +// protocols. See RFC 4291, RFC 4038 and RFC 3493. +func supportsIPv4map() bool { + // Some operating systems provide no support for mapping IPv4 + // addresses to IPv6, and a runtime check is unnecessary. + switch runtime.GOOS { + case "dragonfly", "openbsd": + return false + } + + ipStackCaps.Once.Do(ipStackCaps.probe) + return ipStackCaps.ipv4MappedIPv6Enabled +} + +// An addrList represents a list of network endpoint addresses. +type addrList []Addr + +// isIPv4 reports whether addr contains an IPv4 address. +func isIPv4(addr Addr) bool { + switch addr := addr.(type) { + case *TCPAddr: + return addr.IP.To4() != nil + case *UDPAddr: + return addr.IP.To4() != nil + case *IPAddr: + return addr.IP.To4() != nil + } + return false +} + +// isNotIPv4 reports whether addr does not contain an IPv4 address. +func isNotIPv4(addr Addr) bool { return !isIPv4(addr) } + +// forResolve returns the most appropriate address in address for +// a call to ResolveTCPAddr, ResolveUDPAddr, or ResolveIPAddr. +// IPv4 is preferred, unless addr contains an IPv6 literal. +func (addrs addrList) forResolve(network, addr string) Addr { + var want6 bool + switch network { + case "ip": + // IPv6 literal (addr does NOT contain a port) + want6 = bytealg.CountString(addr, ':') > 0 + case "tcp", "udp": + // IPv6 literal. (addr contains a port, so look for '[') + want6 = bytealg.CountString(addr, '[') > 0 + } + if want6 { + return addrs.first(isNotIPv4) + } + return addrs.first(isIPv4) +} + +// first returns the first address which satisfies strategy, or if +// none do, then the first address of any kind. +func (addrs addrList) first(strategy func(Addr) bool) Addr { + for _, addr := range addrs { + if strategy(addr) { + return addr + } + } + return addrs[0] +} + +// partition divides an address list into two categories, using a +// strategy function to assign a boolean label to each address. +// The first address, and any with a matching label, are returned as +// primaries, while addresses with the opposite label are returned +// as fallbacks. For non-empty inputs, primaries is guaranteed to be +// non-empty. 
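+//
+// A worked example (editor's addition): partitioning [v4a, v6a, v4b] with
+// isIPv4 takes the first label (IPv4) as primary, so primaries is
+// [v4a, v4b] and fallbacks is [v6a]; for [v6a, v4a] the first label is
+// IPv6, so primaries is [v6a] and fallbacks is [v4a].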
+func (addrs addrList) partition(strategy func(Addr) bool) (primaries, fallbacks addrList) { + var primaryLabel bool + for i, addr := range addrs { + label := strategy(addr) + if i == 0 || label == primaryLabel { + primaryLabel = label + primaries = append(primaries, addr) + } else { + fallbacks = append(fallbacks, addr) + } + } + return +} + +// filterAddrList applies a filter to a list of IP addresses, +// yielding a list of Addr objects. Known filters are nil, ipv4only, +// and ipv6only. It returns every address when the filter is nil. +// The result contains at least one address when error is nil. +func filterAddrList(filter func(IPAddr) bool, ips []IPAddr, inetaddr func(IPAddr) Addr, originalAddr string) (addrList, error) { + var addrs addrList + for _, ip := range ips { + if filter == nil || filter(ip) { + addrs = append(addrs, inetaddr(ip)) + } + } + if len(addrs) == 0 { + return nil, &AddrError{Err: errNoSuitableAddress.Error(), Addr: originalAddr} + } + return addrs, nil +} + +// ipv4only reports whether addr is an IPv4 address. +func ipv4only(addr IPAddr) bool { + return addr.IP.To4() != nil +} + +// ipv6only reports whether addr is an IPv6 address except IPv4-mapped IPv6 address. +func ipv6only(addr IPAddr) bool { + return len(addr.IP) == IPv6len && addr.IP.To4() == nil +} + +// SplitHostPort splits a network address of the form "host:port", +// "host%zone:port", "[host]:port" or "[host%zone]:port" into host or +// host%zone and port. +// +// A literal IPv6 address in hostport must be enclosed in square +// brackets, as in "[::1]:80", "[::1%lo0]:80". +// +// See func Dial for a description of the hostport parameter, and host +// and port results. +func SplitHostPort(hostport string) (host, port string, err error) { + const ( + missingPort = "missing port in address" + tooManyColons = "too many colons in address" + ) + addrErr := func(addr, why string) (host, port string, err error) { + return "", "", &AddrError{Err: why, Addr: addr} + } + j, k := 0, 0 + + // The port starts after the last colon. + i := bytealg.LastIndexByteString(hostport, ':') + if i < 0 { + return addrErr(hostport, missingPort) + } + + if hostport[0] == '[' { + // Expect the first ']' just before the last ':'. + end := bytealg.IndexByteString(hostport, ']') + if end < 0 { + return addrErr(hostport, "missing ']' in address") + } + switch end + 1 { + case len(hostport): + // There can't be a ':' behind the ']' now. + return addrErr(hostport, missingPort) + case i: + // The expected result. + default: + // Either ']' isn't followed by a colon, or it is + // followed by a colon that is not the last one. + if hostport[end+1] == ':' { + return addrErr(hostport, tooManyColons) + } + return addrErr(hostport, missingPort) + } + host = hostport[1:end] + j, k = 1, end+1 // there can't be a '[' resp. ']' before these positions + } else { + host = hostport[:i] + if bytealg.IndexByteString(host, ':') >= 0 { + return addrErr(hostport, tooManyColons) + } + } + if bytealg.IndexByteString(hostport[j:], '[') >= 0 { + return addrErr(hostport, "unexpected '[' in address") + } + if bytealg.IndexByteString(hostport[k:], ']') >= 0 { + return addrErr(hostport, "unexpected ']' in address") + } + + port = hostport[i+1:] + return host, port, nil +} + +func splitHostZone(s string) (host, zone string) { + // The IPv6 scoped addressing zone identifier starts after the + // last percent sign. 
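+	// For example, "fe80::1%en0" splits into host "fe80::1" and zone "en0".
+	// A '%' at index 0 is not a separator, so splitHostZone("%en0")
+	// returns host "%en0" with an empty zone.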
+ if i := bytealg.LastIndexByteString(s, '%'); i > 0 { + host, zone = s[:i], s[i+1:] + } else { + host = s + } + return +} + +// JoinHostPort combines host and port into a network address of the +// form "host:port". If host contains a colon, as found in literal +// IPv6 addresses, then JoinHostPort returns "[host]:port". +// +// See func Dial for a description of the host and port parameters. +func JoinHostPort(host, port string) string { + // We assume that host is a literal IPv6 address if host has + // colons. + if bytealg.IndexByteString(host, ':') >= 0 { + return "[" + host + "]:" + port + } + return host + ":" + port +} + +// internetAddrList resolves addr, which may be a literal IP +// address or a DNS name, and returns a list of internet protocol +// family addresses. The result contains at least one address when +// error is nil. +func (r *Resolver) internetAddrList(ctx context.Context, net, addr string) (addrList, error) { + var ( + err error + host, port string + portnum int + ) + switch net { + case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6": + if addr != "" { + if host, port, err = SplitHostPort(addr); err != nil { + return nil, err + } + if portnum, err = r.LookupPort(ctx, net, port); err != nil { + return nil, err + } + } + case "ip", "ip4", "ip6": + if addr != "" { + host = addr + } + default: + return nil, UnknownNetworkError(net) + } + inetaddr := func(ip IPAddr) Addr { + switch net { + case "tcp", "tcp4", "tcp6": + return &TCPAddr{IP: ip.IP, Port: portnum, Zone: ip.Zone} + case "udp", "udp4", "udp6": + return &UDPAddr{IP: ip.IP, Port: portnum, Zone: ip.Zone} + case "ip", "ip4", "ip6": + return &IPAddr{IP: ip.IP, Zone: ip.Zone} + default: + panic("unexpected network: " + net) + } + } + if host == "" { + return addrList{inetaddr(IPAddr{})}, nil + } + + // Try as a literal IP address, then as a DNS name. + ips, err := r.lookupIPAddr(ctx, net, host) + if err != nil { + return nil, err + } + // Issue 18806: if the machine has halfway configured + // IPv6 such that it can bind on "::" (IPv6unspecified) + // but not connect back to that same address, fall + // back to dialing 0.0.0.0. + if len(ips) == 1 && ips[0].IP.Equal(IPv6unspecified) { + ips = append(ips, IPAddr{IP: IPv4zero}) + } + + var filter func(IPAddr) bool + if net != "" && net[len(net)-1] == '4' { + filter = ipv4only + } + if net != "" && net[len(net)-1] == '6' { + filter = ipv6only + } + return filterAddrList(filter, ips, inetaddr, host) +} + +func loopbackIP(net string) IP { + if net != "" && net[len(net)-1] == '6' { + return IPv6loopback + } + return IP{127, 0, 0, 1} +} diff --git a/platform/dbops/binaries/go/go/src/net/ipsock_plan9.go b/platform/dbops/binaries/go/go/src/net/ipsock_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..c8d01804361177c4248de405ba0a3cce87682685 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/ipsock_plan9.go @@ -0,0 +1,366 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "internal/bytealg" + "internal/itoa" + "io/fs" + "os" + "syscall" +) + +// probe probes IPv4, IPv6 and IPv4-mapped IPv6 communication +// capabilities. +// +// Plan 9 uses IPv6 natively, see ip(3). 
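+//
+// The probe helper further below scans the named file (here the routing
+// table netdir+"/iproute") and reports whether any line carries the query
+// as a whole field: "4i" for IPv4 capability, "6i" for IPv6 (editor's
+// gloss of the code that follows).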
+func (p *ipStackCapabilities) probe() { + p.ipv4Enabled = probe(netdir+"/iproute", "4i") + p.ipv6Enabled = probe(netdir+"/iproute", "6i") + if p.ipv4Enabled && p.ipv6Enabled { + p.ipv4MappedIPv6Enabled = true + } +} + +func probe(filename, query string) bool { + var file *file + var err error + if file, err = open(filename); err != nil { + return false + } + defer file.close() + + r := false + for line, ok := file.readLine(); ok && !r; line, ok = file.readLine() { + f := getFields(line) + if len(f) < 3 { + continue + } + for i := 0; i < len(f); i++ { + if query == f[i] { + r = true + break + } + } + } + return r +} + +// parsePlan9Addr parses address of the form [ip!]port (e.g. 127.0.0.1!80). +func parsePlan9Addr(s string) (ip IP, iport int, err error) { + addr := IPv4zero // address contains port only + i := bytealg.IndexByteString(s, '!') + if i >= 0 { + addr = ParseIP(s[:i]) + if addr == nil { + return nil, 0, &ParseError{Type: "IP address", Text: s} + } + } + p, plen, ok := dtoi(s[i+1:]) + if !ok { + return nil, 0, &ParseError{Type: "port", Text: s} + } + if p < 0 || p > 0xFFFF { + return nil, 0, &AddrError{Err: "invalid port", Addr: s[i+1 : i+1+plen]} + } + return addr, p, nil +} + +func readPlan9Addr(net, filename string) (addr Addr, err error) { + var buf [128]byte + + f, err := os.Open(filename) + if err != nil { + return + } + defer f.Close() + n, err := f.Read(buf[:]) + if err != nil { + return + } + ip, port, err := parsePlan9Addr(string(buf[:n])) + if err != nil { + return + } + switch net { + case "tcp4", "udp4": + if ip.Equal(IPv6zero) { + ip = ip[:IPv4len] + } + } + switch net { + case "tcp", "tcp4", "tcp6": + addr = &TCPAddr{IP: ip, Port: port} + case "udp", "udp4", "udp6": + addr = &UDPAddr{IP: ip, Port: port} + default: + return nil, UnknownNetworkError(net) + } + return addr, nil +} + +func startPlan9(ctx context.Context, net string, addr Addr) (ctl *os.File, dest, proto, name string, err error) { + var ( + ip IP + port int + ) + switch a := addr.(type) { + case *TCPAddr: + proto = "tcp" + ip = a.IP + port = a.Port + case *UDPAddr: + proto = "udp" + ip = a.IP + port = a.Port + default: + err = UnknownNetworkError(net) + return + } + + if port > 65535 { + err = InvalidAddrError("port should be < 65536") + return + } + + clone, dest, err := queryCS1(ctx, proto, ip, port) + if err != nil { + return + } + f, err := os.OpenFile(clone, os.O_RDWR, 0) + if err != nil { + return + } + var buf [16]byte + n, err := f.Read(buf[:]) + if err != nil { + f.Close() + return + } + return f, dest, proto, string(buf[:n]), nil +} + +func fixErr(err error) { + oe, ok := err.(*OpError) + if !ok { + return + } + nonNilInterface := func(a Addr) bool { + switch a := a.(type) { + case *TCPAddr: + return a == nil + case *UDPAddr: + return a == nil + case *IPAddr: + return a == nil + default: + return false + } + } + if nonNilInterface(oe.Source) { + oe.Source = nil + } + if nonNilInterface(oe.Addr) { + oe.Addr = nil + } + if pe, ok := oe.Err.(*fs.PathError); ok { + if _, ok = pe.Err.(syscall.ErrorString); ok { + oe.Err = pe.Err + } + } +} + +func dialPlan9(ctx context.Context, net string, laddr, raddr Addr) (fd *netFD, err error) { + defer func() { fixErr(err) }() + type res struct { + fd *netFD + err error + } + resc := make(chan res) + go func() { + fd, err := dialPlan9Blocking(ctx, net, laddr, raddr) + select { + case resc <- res{fd, err}: + case <-ctx.Done(): + if fd != nil { + fd.Close() + } + } + }() + select { + case res := <-resc: + return res.fd, res.err + case <-ctx.Done(): + return nil, 
mapErr(ctx.Err()) + } +} + +func dialPlan9Blocking(ctx context.Context, net string, laddr, raddr Addr) (fd *netFD, err error) { + if isWildcard(raddr) { + raddr = toLocal(raddr, net) + } + f, dest, proto, name, err := startPlan9(ctx, net, raddr) + if err != nil { + return nil, err + } + if la := plan9LocalAddr(laddr); la == "" { + err = hangupCtlWrite(ctx, proto, f, "connect "+dest) + } else { + err = hangupCtlWrite(ctx, proto, f, "connect "+dest+" "+la) + } + if err != nil { + f.Close() + return nil, err + } + data, err := os.OpenFile(netdir+"/"+proto+"/"+name+"/data", os.O_RDWR, 0) + if err != nil { + f.Close() + return nil, err + } + laddr, err = readPlan9Addr(net, netdir+"/"+proto+"/"+name+"/local") + if err != nil { + data.Close() + f.Close() + return nil, err + } + return newFD(proto, name, nil, f, data, laddr, raddr) +} + +func listenPlan9(ctx context.Context, net string, laddr Addr) (fd *netFD, err error) { + defer func() { fixErr(err) }() + f, dest, proto, name, err := startPlan9(ctx, net, laddr) + if err != nil { + return nil, err + } + _, err = f.WriteString("announce " + dest) + if err != nil { + f.Close() + return nil, &OpError{Op: "announce", Net: net, Source: laddr, Addr: nil, Err: err} + } + laddr, err = readPlan9Addr(net, netdir+"/"+proto+"/"+name+"/local") + if err != nil { + f.Close() + return nil, err + } + return newFD(proto, name, nil, f, nil, laddr, nil) +} + +func (fd *netFD) netFD() (*netFD, error) { + return newFD(fd.net, fd.n, fd.listen, fd.ctl, fd.data, fd.laddr, fd.raddr) +} + +func (fd *netFD) acceptPlan9() (nfd *netFD, err error) { + defer func() { fixErr(err) }() + if err := fd.pfd.ReadLock(); err != nil { + return nil, err + } + defer fd.pfd.ReadUnlock() + listen, err := os.Open(fd.dir + "/listen") + if err != nil { + return nil, err + } + var buf [16]byte + n, err := listen.Read(buf[:]) + if err != nil { + listen.Close() + return nil, err + } + name := string(buf[:n]) + ctl, err := os.OpenFile(netdir+"/"+fd.net+"/"+name+"/ctl", os.O_RDWR, 0) + if err != nil { + listen.Close() + return nil, err + } + data, err := os.OpenFile(netdir+"/"+fd.net+"/"+name+"/data", os.O_RDWR, 0) + if err != nil { + listen.Close() + ctl.Close() + return nil, err + } + raddr, err := readPlan9Addr(fd.net, netdir+"/"+fd.net+"/"+name+"/remote") + if err != nil { + listen.Close() + ctl.Close() + data.Close() + return nil, err + } + return newFD(fd.net, name, listen, ctl, data, fd.laddr, raddr) +} + +func isWildcard(a Addr) bool { + var wildcard bool + switch a := a.(type) { + case *TCPAddr: + wildcard = a.isWildcard() + case *UDPAddr: + wildcard = a.isWildcard() + case *IPAddr: + wildcard = a.isWildcard() + } + return wildcard +} + +func toLocal(a Addr, net string) Addr { + switch a := a.(type) { + case *TCPAddr: + a.IP = loopbackIP(net) + case *UDPAddr: + a.IP = loopbackIP(net) + case *IPAddr: + a.IP = loopbackIP(net) + } + return a +} + +// plan9LocalAddr returns a Plan 9 local address string. +// See setladdrport at https://9p.io/sources/plan9/sys/src/9/ip/devip.c. +func plan9LocalAddr(addr Addr) string { + var ip IP + port := 0 + switch a := addr.(type) { + case *TCPAddr: + if a != nil { + ip = a.IP + port = a.Port + } + case *UDPAddr: + if a != nil { + ip = a.IP + port = a.Port + } + } + if len(ip) == 0 || ip.IsUnspecified() { + if port == 0 { + return "" + } + return itoa.Itoa(port) + } + return ip.String() + "!" 
+ itoa.Itoa(port) +} + +func hangupCtlWrite(ctx context.Context, proto string, ctl *os.File, msg string) error { + if proto != "tcp" { + _, err := ctl.WriteString(msg) + return err + } + written := make(chan struct{}) + errc := make(chan error) + go func() { + select { + case <-ctx.Done(): + ctl.WriteString("hangup") + errc <- mapErr(ctx.Err()) + case <-written: + errc <- nil + } + }() + _, err := ctl.WriteString(msg) + close(written) + if e := <-errc; err == nil && e != nil { // we hung up + return e + } + return err +} diff --git a/platform/dbops/binaries/go/go/src/net/ipsock_plan9_test.go b/platform/dbops/binaries/go/go/src/net/ipsock_plan9_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e5fb9ff965dd8600d468f3e833e0b816e8e6f9b5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/ipsock_plan9_test.go @@ -0,0 +1,29 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import "testing" + +func TestTCP4ListenZero(t *testing.T) { + l, err := Listen("tcp4", "0.0.0.0:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + if a := l.Addr(); isNotIPv4(a) { + t.Errorf("address does not contain IPv4: %v", a) + } +} + +func TestUDP4ListenZero(t *testing.T) { + c, err := ListenPacket("udp4", "0.0.0.0:0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + if a := c.LocalAddr(); isNotIPv4(a) { + t.Errorf("address does not contain IPv4: %v", a) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/ipsock_posix.go b/platform/dbops/binaries/go/go/src/net/ipsock_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..67ce1479c647cd578464ddd7c2038c3295132707 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/ipsock_posix.go @@ -0,0 +1,244 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || js || wasip1 || windows + +package net + +import ( + "context" + "internal/poll" + "net/netip" + "runtime" + "syscall" +) + +// probe probes IPv4, IPv6 and IPv4-mapped IPv6 communication +// capabilities which are controlled by the IPV6_V6ONLY socket option +// and kernel configuration. +// +// Should we try to use the IPv4 socket interface if we're only +// dealing with IPv4 sockets? As long as the host system understands +// IPv4-mapped IPv6, it's okay to pass IPv4-mapped IPv6 addresses to +// the IPv6 interface. That simplifies our code and is most +// general. Unfortunately, we need to run on kernels built without +// IPv6 support too. So probe the kernel to figure it out. +func (p *ipStackCapabilities) probe() { + switch runtime.GOOS { + case "js", "wasip1": + // Both ipv4 and ipv6 are faked; see net_fake.go. 
+ p.ipv4Enabled = true + p.ipv6Enabled = true + p.ipv4MappedIPv6Enabled = true + return + } + + s, err := sysSocket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP) + switch err { + case syscall.EAFNOSUPPORT, syscall.EPROTONOSUPPORT: + case nil: + poll.CloseFunc(s) + p.ipv4Enabled = true + } + var probes = []struct { + laddr TCPAddr + value int + }{ + // IPv6 communication capability + {laddr: TCPAddr{IP: ParseIP("::1")}, value: 1}, + // IPv4-mapped IPv6 address communication capability + {laddr: TCPAddr{IP: IPv4(127, 0, 0, 1)}, value: 0}, + } + switch runtime.GOOS { + case "dragonfly", "openbsd": + // The latest DragonFly BSD and OpenBSD kernels don't + // support IPV6_V6ONLY=0. They always return an error + // and we don't need to probe the capability. + probes = probes[:1] + } + for i := range probes { + s, err := sysSocket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_TCP) + if err != nil { + continue + } + defer poll.CloseFunc(s) + syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, probes[i].value) + sa, err := probes[i].laddr.sockaddr(syscall.AF_INET6) + if err != nil { + continue + } + if err := syscall.Bind(s, sa); err != nil { + continue + } + if i == 0 { + p.ipv6Enabled = true + } else { + p.ipv4MappedIPv6Enabled = true + } + } +} + +// favoriteAddrFamily returns the appropriate address family for the +// given network, laddr, raddr and mode. +// +// If mode indicates "listen" and laddr is a wildcard, we assume that +// the user wants to make a passive-open connection with a wildcard +// address family, both AF_INET and AF_INET6, and a wildcard address +// like the following: +// +// - A listen for a wildcard communication domain, "tcp" or +// "udp", with a wildcard address: If the platform supports +// both IPv6 and IPv4-mapped IPv6 communication capabilities, +// or does not support IPv4, we use a dual stack, AF_INET6 and +// IPV6_V6ONLY=0, wildcard address listen. The dual stack +// wildcard address listen may fall back to an IPv6-only, +// AF_INET6 and IPV6_V6ONLY=1, wildcard address listen. +// Otherwise we prefer an IPv4-only, AF_INET, wildcard address +// listen. +// +// - A listen for a wildcard communication domain, "tcp" or +// "udp", with an IPv4 wildcard address: same as above. +// +// - A listen for a wildcard communication domain, "tcp" or +// "udp", with an IPv6 wildcard address: same as above. +// +// - A listen for an IPv4 communication domain, "tcp4" or "udp4", +// with an IPv4 wildcard address: We use an IPv4-only, AF_INET, +// wildcard address listen. +// +// - A listen for an IPv6 communication domain, "tcp6" or "udp6", +// with an IPv6 wildcard address: We use an IPv6-only, AF_INET6 +// and IPV6_V6ONLY=1, wildcard address listen. +// +// Otherwise guess: If the addresses are IPv4 then returns AF_INET, +// or else returns AF_INET6. It also returns a boolean value what +// designates IPV6_V6ONLY option. +// +// Note that the latest DragonFly BSD and OpenBSD kernels allow +// neither "net.inet6.ip6.v6only=1" change nor IPPROTO_IPV6 level +// IPV6_V6ONLY socket option setting. 
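+//
+// For example (hypothetical inputs, following the rules above):
+//
+//	favoriteAddrFamily("tcp4", nil, nil, "dial")  // AF_INET, false
+//	favoriteAddrFamily("tcp6", nil, nil, "dial")  // AF_INET6, true
+//	favoriteAddrFamily("tcp", nil, nil, "listen") // AF_INET6, false on a dual-stack host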
+func favoriteAddrFamily(network string, laddr, raddr sockaddr, mode string) (family int, ipv6only bool) { + switch network[len(network)-1] { + case '4': + return syscall.AF_INET, false + case '6': + return syscall.AF_INET6, true + } + + if mode == "listen" && (laddr == nil || laddr.isWildcard()) { + if supportsIPv4map() || !supportsIPv4() { + return syscall.AF_INET6, false + } + if laddr == nil { + return syscall.AF_INET, false + } + return laddr.family(), false + } + + if (laddr == nil || laddr.family() == syscall.AF_INET) && + (raddr == nil || raddr.family() == syscall.AF_INET) { + return syscall.AF_INET, false + } + return syscall.AF_INET6, false +} + +func internetSocket(ctx context.Context, net string, laddr, raddr sockaddr, sotype, proto int, mode string, ctrlCtxFn func(context.Context, string, string, syscall.RawConn) error) (fd *netFD, err error) { + switch runtime.GOOS { + case "aix", "windows", "openbsd", "js", "wasip1": + if mode == "dial" && raddr.isWildcard() { + raddr = raddr.toLocal(net) + } + } + family, ipv6only := favoriteAddrFamily(net, laddr, raddr, mode) + return socket(ctx, net, family, sotype, proto, ipv6only, laddr, raddr, ctrlCtxFn) +} + +func ipToSockaddrInet4(ip IP, port int) (syscall.SockaddrInet4, error) { + if len(ip) == 0 { + ip = IPv4zero + } + ip4 := ip.To4() + if ip4 == nil { + return syscall.SockaddrInet4{}, &AddrError{Err: "non-IPv4 address", Addr: ip.String()} + } + sa := syscall.SockaddrInet4{Port: port} + copy(sa.Addr[:], ip4) + return sa, nil +} + +func ipToSockaddrInet6(ip IP, port int, zone string) (syscall.SockaddrInet6, error) { + // In general, an IP wildcard address, which is either + // "0.0.0.0" or "::", means the entire IP addressing + // space. For some historical reason, it is used to + // specify "any available address" on some operations + // of IP node. + // + // When the IP node supports IPv4-mapped IPv6 address, + // we allow a listener to listen to the wildcard + // address of both IP addressing spaces by specifying + // IPv6 wildcard address. + if len(ip) == 0 || ip.Equal(IPv4zero) { + ip = IPv6zero + } + // We accept any IPv6 address including IPv4-mapped + // IPv6 address. + ip6 := ip.To16() + if ip6 == nil { + return syscall.SockaddrInet6{}, &AddrError{Err: "non-IPv6 address", Addr: ip.String()} + } + sa := syscall.SockaddrInet6{Port: port, ZoneId: uint32(zoneCache.index(zone))} + copy(sa.Addr[:], ip6) + return sa, nil +} + +func ipToSockaddr(family int, ip IP, port int, zone string) (syscall.Sockaddr, error) { + switch family { + case syscall.AF_INET: + sa, err := ipToSockaddrInet4(ip, port) + if err != nil { + return nil, err + } + return &sa, nil + case syscall.AF_INET6: + sa, err := ipToSockaddrInet6(ip, port, zone) + if err != nil { + return nil, err + } + return &sa, nil + } + return nil, &AddrError{Err: "invalid address family", Addr: ip.String()} +} + +func addrPortToSockaddrInet4(ap netip.AddrPort) (syscall.SockaddrInet4, error) { + // ipToSockaddrInet4 has special handling here for zero length slices. + // We do not, because netip has no concept of a generic zero IP address. + addr := ap.Addr() + if !addr.Is4() { + return syscall.SockaddrInet4{}, &AddrError{Err: "non-IPv4 address", Addr: addr.String()} + } + sa := syscall.SockaddrInet4{ + Addr: addr.As4(), + Port: int(ap.Port()), + } + return sa, nil +} + +func addrPortToSockaddrInet6(ap netip.AddrPort) (syscall.SockaddrInet6, error) { + // ipToSockaddrInet6 has special handling here for zero length slices. 
+ // We do not, because netip has no concept of a generic zero IP address. + // + // addr is allowed to be an IPv4 address, because As16 will convert it + // to an IPv4-mapped IPv6 address. + // The error message is kept consistent with ipToSockaddrInet6. + addr := ap.Addr() + if !addr.IsValid() { + return syscall.SockaddrInet6{}, &AddrError{Err: "non-IPv6 address", Addr: addr.String()} + } + sa := syscall.SockaddrInet6{ + Addr: addr.As16(), + Port: int(ap.Port()), + ZoneId: uint32(zoneCache.index(addr.Zone())), + } + return sa, nil +} diff --git a/platform/dbops/binaries/go/go/src/net/ipsock_test.go b/platform/dbops/binaries/go/go/src/net/ipsock_test.go new file mode 100644 index 0000000000000000000000000000000000000000..aede354844748449039aa71d0541ac70a0225f1c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/ipsock_test.go @@ -0,0 +1,282 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "reflect" + "testing" +) + +var testInetaddr = func(ip IPAddr) Addr { return &TCPAddr{IP: ip.IP, Port: 5682, Zone: ip.Zone} } + +var addrListTests = []struct { + filter func(IPAddr) bool + ips []IPAddr + inetaddr func(IPAddr) Addr + first Addr + primaries addrList + fallbacks addrList + err error +}{ + { + nil, + []IPAddr{ + {IP: IPv4(127, 0, 0, 1)}, + {IP: IPv6loopback}, + }, + testInetaddr, + &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}, + addrList{&TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}}, + addrList{&TCPAddr{IP: IPv6loopback, Port: 5682}}, + nil, + }, + { + nil, + []IPAddr{ + {IP: IPv6loopback}, + {IP: IPv4(127, 0, 0, 1)}, + }, + testInetaddr, + &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}, + addrList{&TCPAddr{IP: IPv6loopback, Port: 5682}}, + addrList{&TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}}, + nil, + }, + { + nil, + []IPAddr{ + {IP: IPv4(127, 0, 0, 1)}, + {IP: IPv4(192, 168, 0, 1)}, + }, + testInetaddr, + &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}, + addrList{ + &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}, + &TCPAddr{IP: IPv4(192, 168, 0, 1), Port: 5682}, + }, + nil, + nil, + }, + { + nil, + []IPAddr{ + {IP: IPv6loopback}, + {IP: ParseIP("fe80::1"), Zone: "eth0"}, + }, + testInetaddr, + &TCPAddr{IP: IPv6loopback, Port: 5682}, + addrList{ + &TCPAddr{IP: IPv6loopback, Port: 5682}, + &TCPAddr{IP: ParseIP("fe80::1"), Port: 5682, Zone: "eth0"}, + }, + nil, + nil, + }, + { + nil, + []IPAddr{ + {IP: IPv4(127, 0, 0, 1)}, + {IP: IPv4(192, 168, 0, 1)}, + {IP: IPv6loopback}, + {IP: ParseIP("fe80::1"), Zone: "eth0"}, + }, + testInetaddr, + &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}, + addrList{ + &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}, + &TCPAddr{IP: IPv4(192, 168, 0, 1), Port: 5682}, + }, + addrList{ + &TCPAddr{IP: IPv6loopback, Port: 5682}, + &TCPAddr{IP: ParseIP("fe80::1"), Port: 5682, Zone: "eth0"}, + }, + nil, + }, + { + nil, + []IPAddr{ + {IP: IPv6loopback}, + {IP: ParseIP("fe80::1"), Zone: "eth0"}, + {IP: IPv4(127, 0, 0, 1)}, + {IP: IPv4(192, 168, 0, 1)}, + }, + testInetaddr, + &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}, + addrList{ + &TCPAddr{IP: IPv6loopback, Port: 5682}, + &TCPAddr{IP: ParseIP("fe80::1"), Port: 5682, Zone: "eth0"}, + }, + addrList{ + &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}, + &TCPAddr{IP: IPv4(192, 168, 0, 1), Port: 5682}, + }, + nil, + }, + { + nil, + []IPAddr{ + {IP: IPv4(127, 0, 0, 1)}, + {IP: IPv6loopback}, + {IP: IPv4(192, 168, 0, 1)}, + {IP: ParseIP("fe80::1"), Zone: "eth0"}, + }, + testInetaddr, + 
&TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}, + addrList{ + &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}, + &TCPAddr{IP: IPv4(192, 168, 0, 1), Port: 5682}, + }, + addrList{ + &TCPAddr{IP: IPv6loopback, Port: 5682}, + &TCPAddr{IP: ParseIP("fe80::1"), Port: 5682, Zone: "eth0"}, + }, + nil, + }, + { + nil, + []IPAddr{ + {IP: IPv6loopback}, + {IP: IPv4(127, 0, 0, 1)}, + {IP: ParseIP("fe80::1"), Zone: "eth0"}, + {IP: IPv4(192, 168, 0, 1)}, + }, + testInetaddr, + &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}, + addrList{ + &TCPAddr{IP: IPv6loopback, Port: 5682}, + &TCPAddr{IP: ParseIP("fe80::1"), Port: 5682, Zone: "eth0"}, + }, + addrList{ + &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}, + &TCPAddr{IP: IPv4(192, 168, 0, 1), Port: 5682}, + }, + nil, + }, + + { + ipv4only, + []IPAddr{ + {IP: IPv4(127, 0, 0, 1)}, + {IP: IPv6loopback}, + }, + testInetaddr, + &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}, + addrList{&TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}}, + nil, + nil, + }, + { + ipv4only, + []IPAddr{ + {IP: IPv6loopback}, + {IP: IPv4(127, 0, 0, 1)}, + }, + testInetaddr, + &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}, + addrList{&TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 5682}}, + nil, + nil, + }, + + { + ipv6only, + []IPAddr{ + {IP: IPv4(127, 0, 0, 1)}, + {IP: IPv6loopback}, + }, + testInetaddr, + &TCPAddr{IP: IPv6loopback, Port: 5682}, + addrList{&TCPAddr{IP: IPv6loopback, Port: 5682}}, + nil, + nil, + }, + { + ipv6only, + []IPAddr{ + {IP: IPv6loopback}, + {IP: IPv4(127, 0, 0, 1)}, + }, + testInetaddr, + &TCPAddr{IP: IPv6loopback, Port: 5682}, + addrList{&TCPAddr{IP: IPv6loopback, Port: 5682}}, + nil, + nil, + }, + + {nil, nil, testInetaddr, nil, nil, nil, &AddrError{errNoSuitableAddress.Error(), "ADDR"}}, + + {ipv4only, nil, testInetaddr, nil, nil, nil, &AddrError{errNoSuitableAddress.Error(), "ADDR"}}, + {ipv4only, []IPAddr{{IP: IPv6loopback}}, testInetaddr, nil, nil, nil, &AddrError{errNoSuitableAddress.Error(), "ADDR"}}, + + {ipv6only, nil, testInetaddr, nil, nil, nil, &AddrError{errNoSuitableAddress.Error(), "ADDR"}}, + {ipv6only, []IPAddr{{IP: IPv4(127, 0, 0, 1)}}, testInetaddr, nil, nil, nil, &AddrError{errNoSuitableAddress.Error(), "ADDR"}}, +} + +func TestAddrList(t *testing.T) { + if !supportsIPv4() || !supportsIPv6() { + t.Skip("both IPv4 and IPv6 are required") + } + + for i, tt := range addrListTests { + addrs, err := filterAddrList(tt.filter, tt.ips, tt.inetaddr, "ADDR") + if !reflect.DeepEqual(err, tt.err) { + t.Errorf("#%v: got %v; want %v", i, err, tt.err) + } + if tt.err != nil { + if len(addrs) != 0 { + t.Errorf("#%v: got %v; want 0", i, len(addrs)) + } + continue + } + first := addrs.first(isIPv4) + if !reflect.DeepEqual(first, tt.first) { + t.Errorf("#%v: got %v; want %v", i, first, tt.first) + } + primaries, fallbacks := addrs.partition(isIPv4) + if !reflect.DeepEqual(primaries, tt.primaries) { + t.Errorf("#%v: got %v; want %v", i, primaries, tt.primaries) + } + if !reflect.DeepEqual(fallbacks, tt.fallbacks) { + t.Errorf("#%v: got %v; want %v", i, fallbacks, tt.fallbacks) + } + expectedLen := len(primaries) + len(fallbacks) + if len(addrs) != expectedLen { + t.Errorf("#%v: got %v; want %v", i, len(addrs), expectedLen) + } + } +} + +func TestAddrListPartition(t *testing.T) { + addrs := addrList{ + &IPAddr{IP: ParseIP("fe80::"), Zone: "eth0"}, + &IPAddr{IP: ParseIP("fe80::1"), Zone: "eth0"}, + &IPAddr{IP: ParseIP("fe80::2"), Zone: "eth0"}, + } + cases := []struct { + lastByte byte + primaries addrList + fallbacks addrList + }{ + {0, addrList{addrs[0]}, addrList{addrs[1], 
addrs[2]}}, + {1, addrList{addrs[0], addrs[2]}, addrList{addrs[1]}}, + {2, addrList{addrs[0], addrs[1]}, addrList{addrs[2]}}, + {3, addrList{addrs[0], addrs[1], addrs[2]}, nil}, + } + for i, tt := range cases { + // Inverting the function's output should not affect the outcome. + for _, invert := range []bool{false, true} { + primaries, fallbacks := addrs.partition(func(a Addr) bool { + ip := a.(*IPAddr).IP + return (ip[len(ip)-1] == tt.lastByte) != invert + }) + if !reflect.DeepEqual(primaries, tt.primaries) { + t.Errorf("#%v: got %v; want %v", i, primaries, tt.primaries) + } + if !reflect.DeepEqual(fallbacks, tt.fallbacks) { + t.Errorf("#%v: got %v; want %v", i, fallbacks, tt.fallbacks) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/net/listen_test.go b/platform/dbops/binaries/go/go/src/net/listen_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9100b3d9f7becdba6d556584bbe88e71f868fd4c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/listen_test.go @@ -0,0 +1,750 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !plan9 + +package net + +import ( + "fmt" + "internal/testenv" + "os" + "runtime" + "syscall" + "testing" + "time" +) + +func (ln *TCPListener) port() string { + _, port, err := SplitHostPort(ln.Addr().String()) + if err != nil { + return "" + } + return port +} + +func (c *UDPConn) port() string { + _, port, err := SplitHostPort(c.LocalAddr().String()) + if err != nil { + return "" + } + return port +} + +var tcpListenerTests = []struct { + network string + address string +}{ + {"tcp", ""}, + {"tcp", "0.0.0.0"}, + {"tcp", "::ffff:0.0.0.0"}, + {"tcp", "::"}, + + {"tcp", "127.0.0.1"}, + {"tcp", "::ffff:127.0.0.1"}, + {"tcp", "::1"}, + + {"tcp4", ""}, + {"tcp4", "0.0.0.0"}, + {"tcp4", "::ffff:0.0.0.0"}, + + {"tcp4", "127.0.0.1"}, + {"tcp4", "::ffff:127.0.0.1"}, + + {"tcp6", ""}, + {"tcp6", "::"}, + + {"tcp6", "::1"}, +} + +// TestTCPListener tests both single and double listen to a test +// listener with same address family, same listening address and +// same port. +func TestTCPListener(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + for _, tt := range tcpListenerTests { + if !testableListenArgs(tt.network, JoinHostPort(tt.address, "0"), "") { + t.Logf("skipping %s test", tt.network+" "+tt.address) + continue + } + + ln1, err := Listen(tt.network, JoinHostPort(tt.address, "0")) + if err != nil { + t.Fatal(err) + } + if err := checkFirstListener(tt.network, ln1); err != nil { + ln1.Close() + t.Fatal(err) + } + ln2, err := Listen(tt.network, JoinHostPort(tt.address, ln1.(*TCPListener).port())) + if err == nil { + ln2.Close() + } + if err := checkSecondListener(tt.network, tt.address, err); err != nil { + ln1.Close() + t.Fatal(err) + } + ln1.Close() + } +} + +var udpListenerTests = []struct { + network string + address string +}{ + {"udp", ""}, + {"udp", "0.0.0.0"}, + {"udp", "::ffff:0.0.0.0"}, + {"udp", "::"}, + + {"udp", "127.0.0.1"}, + {"udp", "::ffff:127.0.0.1"}, + {"udp", "::1"}, + + {"udp4", ""}, + {"udp4", "0.0.0.0"}, + {"udp4", "::ffff:0.0.0.0"}, + + {"udp4", "127.0.0.1"}, + {"udp4", "::ffff:127.0.0.1"}, + + {"udp6", ""}, + {"udp6", "::"}, + + {"udp6", "::1"}, +} + +// TestUDPListener tests both single and double listen to a test +// listener with same address family, same listening address and +// same port. 
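+//
+// The second listen is expected to fail; a hypothetical sketch of what
+// the loop below exercises:
+//
+//	c1, _ := ListenPacket("udp", "127.0.0.1:0")
+//	_, err := ListenPacket("udp", c1.LocalAddr().String()) // err expected non-nil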
+func TestUDPListener(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + for _, tt := range udpListenerTests { + if !testableListenArgs(tt.network, JoinHostPort(tt.address, "0"), "") { + t.Logf("skipping %s test", tt.network+" "+tt.address) + continue + } + + c1, err := ListenPacket(tt.network, JoinHostPort(tt.address, "0")) + if err != nil { + t.Fatal(err) + } + if err := checkFirstListener(tt.network, c1); err != nil { + c1.Close() + t.Fatal(err) + } + c2, err := ListenPacket(tt.network, JoinHostPort(tt.address, c1.(*UDPConn).port())) + if err == nil { + c2.Close() + } + if err := checkSecondListener(tt.network, tt.address, err); err != nil { + c1.Close() + t.Fatal(err) + } + c1.Close() + } +} + +var dualStackTCPListenerTests = []struct { + network1, address1 string // first listener + network2, address2 string // second listener + xerr error // expected error value, nil or other +}{ + // Test cases and expected results for the attempting 2nd listen on the same port + // 1st listen 2nd listen darwin freebsd linux openbsd + // ------------------------------------------------------------------------------------ + // "tcp" "" "tcp" "" - - - - + // "tcp" "" "tcp" "0.0.0.0" - - - - + // "tcp" "0.0.0.0" "tcp" "" - - - - + // ------------------------------------------------------------------------------------ + // "tcp" "" "tcp" "[::]" - - - ok + // "tcp" "[::]" "tcp" "" - - - ok + // "tcp" "0.0.0.0" "tcp" "[::]" - - - ok + // "tcp" "[::]" "tcp" "0.0.0.0" - - - ok + // "tcp" "[::ffff:0.0.0.0]" "tcp" "[::]" - - - ok + // "tcp" "[::]" "tcp" "[::ffff:0.0.0.0]" - - - ok + // ------------------------------------------------------------------------------------ + // "tcp4" "" "tcp6" "" ok ok ok ok + // "tcp6" "" "tcp4" "" ok ok ok ok + // "tcp4" "0.0.0.0" "tcp6" "[::]" ok ok ok ok + // "tcp6" "[::]" "tcp4" "0.0.0.0" ok ok ok ok + // ------------------------------------------------------------------------------------ + // "tcp" "127.0.0.1" "tcp" "[::1]" ok ok ok ok + // "tcp" "[::1]" "tcp" "127.0.0.1" ok ok ok ok + // "tcp4" "127.0.0.1" "tcp6" "[::1]" ok ok ok ok + // "tcp6" "[::1]" "tcp4" "127.0.0.1" ok ok ok ok + // + // Platform default configurations: + // darwin, kernel version 11.3.0 + // net.inet6.ip6.v6only=0 (overridable by sysctl or IPV6_V6ONLY option) + // freebsd, kernel version 8.2 + // net.inet6.ip6.v6only=1 (overridable by sysctl or IPV6_V6ONLY option) + // linux, kernel version 3.0.0 + // net.ipv6.bindv6only=0 (overridable by sysctl or IPV6_V6ONLY option) + // openbsd, kernel version 5.0 + // net.inet6.ip6.v6only=1 (overriding is prohibited) + + {"tcp", "", "tcp", "", syscall.EADDRINUSE}, + {"tcp", "", "tcp", "0.0.0.0", syscall.EADDRINUSE}, + {"tcp", "0.0.0.0", "tcp", "", syscall.EADDRINUSE}, + + {"tcp", "", "tcp", "::", syscall.EADDRINUSE}, + {"tcp", "::", "tcp", "", syscall.EADDRINUSE}, + {"tcp", "0.0.0.0", "tcp", "::", syscall.EADDRINUSE}, + {"tcp", "::", "tcp", "0.0.0.0", syscall.EADDRINUSE}, + {"tcp", "::ffff:0.0.0.0", "tcp", "::", syscall.EADDRINUSE}, + {"tcp", "::", "tcp", "::ffff:0.0.0.0", syscall.EADDRINUSE}, + + {"tcp4", "", "tcp6", "", nil}, + {"tcp6", "", "tcp4", "", nil}, + {"tcp4", "0.0.0.0", "tcp6", "::", nil}, + {"tcp6", "::", "tcp4", "0.0.0.0", nil}, + + {"tcp", "127.0.0.1", "tcp", "::1", nil}, + {"tcp", "::1", "tcp", "127.0.0.1", nil}, + {"tcp4", "127.0.0.1", "tcp6", "::1", nil}, + {"tcp6", "::1", "tcp4", "127.0.0.1", nil}, +} + +// TestDualStackTCPListener tests both single and double listen +// to a test 
listener with various address families, different +// listening address and same port. +// +// On DragonFly BSD, we expect the kernel version of node under test +// to be greater than or equal to 4.4. +func TestDualStackTCPListener(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv4() || !supportsIPv6() { + t.Skip("both IPv4 and IPv6 are required") + } + + for _, tt := range dualStackTCPListenerTests { + if !testableListenArgs(tt.network1, JoinHostPort(tt.address1, "0"), "") { + t.Logf("skipping %s test", tt.network1+" "+tt.address1) + continue + } + + if !supportsIPv4map() && differentWildcardAddr(tt.address1, tt.address2) { + tt.xerr = nil + } + var firstErr, secondErr error + for i := 0; i < 5; i++ { + lns, err := newDualStackListener() + if err != nil { + t.Fatal(err) + } + port := lns[0].port() + for _, ln := range lns { + ln.Close() + } + var ln1 Listener + ln1, firstErr = Listen(tt.network1, JoinHostPort(tt.address1, port)) + if firstErr != nil { + continue + } + if err := checkFirstListener(tt.network1, ln1); err != nil { + ln1.Close() + t.Fatal(err) + } + ln2, err := Listen(tt.network2, JoinHostPort(tt.address2, ln1.(*TCPListener).port())) + if err == nil { + ln2.Close() + } + if secondErr = checkDualStackSecondListener(tt.network2, tt.address2, err, tt.xerr); secondErr != nil { + ln1.Close() + continue + } + ln1.Close() + break + } + if firstErr != nil { + t.Error(firstErr) + } + if secondErr != nil { + t.Error(secondErr) + } + } +} + +var dualStackUDPListenerTests = []struct { + network1, address1 string // first listener + network2, address2 string // second listener + xerr error // expected error value, nil or other +}{ + {"udp", "", "udp", "", syscall.EADDRINUSE}, + {"udp", "", "udp", "0.0.0.0", syscall.EADDRINUSE}, + {"udp", "0.0.0.0", "udp", "", syscall.EADDRINUSE}, + + {"udp", "", "udp", "::", syscall.EADDRINUSE}, + {"udp", "::", "udp", "", syscall.EADDRINUSE}, + {"udp", "0.0.0.0", "udp", "::", syscall.EADDRINUSE}, + {"udp", "::", "udp", "0.0.0.0", syscall.EADDRINUSE}, + {"udp", "::ffff:0.0.0.0", "udp", "::", syscall.EADDRINUSE}, + {"udp", "::", "udp", "::ffff:0.0.0.0", syscall.EADDRINUSE}, + + {"udp4", "", "udp6", "", nil}, + {"udp6", "", "udp4", "", nil}, + {"udp4", "0.0.0.0", "udp6", "::", nil}, + {"udp6", "::", "udp4", "0.0.0.0", nil}, + + {"udp", "127.0.0.1", "udp", "::1", nil}, + {"udp", "::1", "udp", "127.0.0.1", nil}, + {"udp4", "127.0.0.1", "udp6", "::1", nil}, + {"udp6", "::1", "udp4", "127.0.0.1", nil}, +} + +// TestDualStackUDPListener tests both single and double listen +// to a test listener with various address families, different +// listening address and same port. +// +// On DragonFly BSD, we expect the kernel version of node under test +// to be greater than or equal to 4.4. 
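+//
+// Per the expectations table above, two listens on a shared port but
+// different address families should both succeed, e.g. (hypothetical
+// port number):
+//
+//	c1, _ := ListenPacket("udp6", "[::1]:12345")
+//	c2, err := ListenPacket("udp4", "127.0.0.1:12345") // err expected nil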
+func TestDualStackUDPListener(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv4() || !supportsIPv6() { + t.Skip("both IPv4 and IPv6 are required") + } + + for _, tt := range dualStackUDPListenerTests { + if !testableListenArgs(tt.network1, JoinHostPort(tt.address1, "0"), "") { + t.Logf("skipping %s test", tt.network1+" "+tt.address1) + continue + } + + if !supportsIPv4map() && differentWildcardAddr(tt.address1, tt.address2) { + tt.xerr = nil + } + var firstErr, secondErr error + for i := 0; i < 5; i++ { + cs, err := newDualStackPacketListener() + if err != nil { + t.Fatal(err) + } + port := cs[0].port() + for _, c := range cs { + c.Close() + } + var c1 PacketConn + c1, firstErr = ListenPacket(tt.network1, JoinHostPort(tt.address1, port)) + if firstErr != nil { + continue + } + if err := checkFirstListener(tt.network1, c1); err != nil { + c1.Close() + t.Fatal(err) + } + c2, err := ListenPacket(tt.network2, JoinHostPort(tt.address2, c1.(*UDPConn).port())) + if err == nil { + c2.Close() + } + if secondErr = checkDualStackSecondListener(tt.network2, tt.address2, err, tt.xerr); secondErr != nil { + c1.Close() + continue + } + c1.Close() + break + } + if firstErr != nil { + t.Error(firstErr) + } + if secondErr != nil { + t.Error(secondErr) + } + } +} + +func differentWildcardAddr(i, j string) bool { + if (i == "" || i == "0.0.0.0" || i == "::ffff:0.0.0.0") && (j == "" || j == "0.0.0.0" || j == "::ffff:0.0.0.0") { + return false + } + if i == "[::]" && j == "[::]" { + return false + } + return true +} + +func checkFirstListener(network string, ln any) error { + switch network { + case "tcp": + fd := ln.(*TCPListener).fd + if err := checkDualStackAddrFamily(fd); err != nil { + return err + } + case "tcp4": + fd := ln.(*TCPListener).fd + if fd.family != syscall.AF_INET { + return fmt.Errorf("%v got %v; want %v", fd.laddr, fd.family, syscall.AF_INET) + } + case "tcp6": + fd := ln.(*TCPListener).fd + if fd.family != syscall.AF_INET6 { + return fmt.Errorf("%v got %v; want %v", fd.laddr, fd.family, syscall.AF_INET6) + } + case "udp": + fd := ln.(*UDPConn).fd + if err := checkDualStackAddrFamily(fd); err != nil { + return err + } + case "udp4": + fd := ln.(*UDPConn).fd + if fd.family != syscall.AF_INET { + return fmt.Errorf("%v got %v; want %v", fd.laddr, fd.family, syscall.AF_INET) + } + case "udp6": + fd := ln.(*UDPConn).fd + if fd.family != syscall.AF_INET6 { + return fmt.Errorf("%v got %v; want %v", fd.laddr, fd.family, syscall.AF_INET6) + } + default: + return UnknownNetworkError(network) + } + return nil +} + +func checkSecondListener(network, address string, err error) error { + switch network { + case "tcp", "tcp4", "tcp6": + if err == nil { + return fmt.Errorf("%s should fail", network+" "+address) + } + case "udp", "udp4", "udp6": + if err == nil { + return fmt.Errorf("%s should fail", network+" "+address) + } + default: + return UnknownNetworkError(network) + } + return nil +} + +func checkDualStackSecondListener(network, address string, err, xerr error) error { + switch network { + case "tcp", "tcp4", "tcp6": + if xerr == nil && err != nil || xerr != nil && err == nil { + return fmt.Errorf("%s got %v; want %v", network+" "+address, err, xerr) + } + case "udp", "udp4", "udp6": + if xerr == nil && err != nil || xerr != nil && err == nil { + return fmt.Errorf("%s got %v; want %v", network+" "+address, err, xerr) + } + default: + return UnknownNetworkError(network) + } + return nil +} + +func checkDualStackAddrFamily(fd 
*netFD) error { + switch a := fd.laddr.(type) { + case *TCPAddr: + // If a node under test supports both IPv6 capability + // and IPv6 IPv4-mapping capability, we can assume + // that the node listens on a wildcard address with an + // AF_INET6 socket. + if supportsIPv4map() && fd.laddr.(*TCPAddr).isWildcard() { + if fd.family != syscall.AF_INET6 { + return fmt.Errorf("Listen(%s, %v) returns %v; want %v", fd.net, fd.laddr, fd.family, syscall.AF_INET6) + } + } else { + if fd.family != a.family() { + return fmt.Errorf("Listen(%s, %v) returns %v; want %v", fd.net, fd.laddr, fd.family, a.family()) + } + } + case *UDPAddr: + // If a node under test supports both IPv6 capability + // and IPv6 IPv4-mapping capability, we can assume + // that the node listens on a wildcard address with an + // AF_INET6 socket. + if supportsIPv4map() && fd.laddr.(*UDPAddr).isWildcard() { + if fd.family != syscall.AF_INET6 { + return fmt.Errorf("ListenPacket(%s, %v) returns %v; want %v", fd.net, fd.laddr, fd.family, syscall.AF_INET6) + } + } else { + if fd.family != a.family() { + return fmt.Errorf("ListenPacket(%s, %v) returns %v; want %v", fd.net, fd.laddr, fd.family, a.family()) + } + } + default: + return fmt.Errorf("unexpected protocol address type: %T", a) + } + return nil +} + +func TestWildWildcardListener(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + defer func() { + if p := recover(); p != nil { + t.Fatalf("panicked: %v", p) + } + }() + + if ln, err := Listen("tcp", ""); err == nil { + ln.Close() + } + if ln, err := ListenPacket("udp", ""); err == nil { + ln.Close() + } + if ln, err := ListenTCP("tcp", nil); err == nil { + ln.Close() + } + if ln, err := ListenUDP("udp", nil); err == nil { + ln.Close() + } + if ln, err := ListenIP("ip:icmp", nil); err == nil { + ln.Close() + } +} + +var ipv4MulticastListenerTests = []struct { + net string + gaddr *UDPAddr // see RFC 4727 +}{ + {"udp", &UDPAddr{IP: IPv4(224, 0, 0, 254), Port: 12345}}, + + {"udp4", &UDPAddr{IP: IPv4(224, 0, 0, 254), Port: 12345}}, +} + +// TestIPv4MulticastListener tests both single and double listen to a +// test listener with same address family, same group address and same +// port. +func TestIPv4MulticastListener(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + switch runtime.GOOS { + case "android", "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv4() { + t.Skip("IPv4 is not supported") + } + + closer := func(cs []*UDPConn) { + for _, c := range cs { + if c != nil { + c.Close() + } + } + } + + for _, ifi := range []*Interface{loopbackInterface(), nil} { + // Note that multicast interface assignment by system + // is not recommended because it usually relies on + // routing stuff for finding out an appropriate + // nexthop containing both network and link layer + // adjacencies. 
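+ // A hypothetical explicit join mirroring the loop body, using the
+ // RFC 4727 test group address (the interface name is illustrative):
+ //
+ //	ifi, _ := InterfaceByName("lo0")
+ //	c, _ := ListenMulticastUDP("udp4", ifi, &UDPAddr{IP: IPv4(224, 0, 0, 254), Port: 12345})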
+ if ifi == nil || !*testIPv4 { + continue + } + for _, tt := range ipv4MulticastListenerTests { + var err error + cs := make([]*UDPConn, 2) + if cs[0], err = ListenMulticastUDP(tt.net, ifi, tt.gaddr); err != nil { + t.Fatal(err) + } + if err := checkMulticastListener(cs[0], tt.gaddr.IP); err != nil { + closer(cs) + t.Fatal(err) + } + if cs[1], err = ListenMulticastUDP(tt.net, ifi, tt.gaddr); err != nil { + closer(cs) + t.Fatal(err) + } + if err := checkMulticastListener(cs[1], tt.gaddr.IP); err != nil { + closer(cs) + t.Fatal(err) + } + closer(cs) + } + } +} + +var ipv6MulticastListenerTests = []struct { + net string + gaddr *UDPAddr // see RFC 4727 +}{ + {"udp", &UDPAddr{IP: ParseIP("ff01::114"), Port: 12345}}, + {"udp", &UDPAddr{IP: ParseIP("ff02::114"), Port: 12345}}, + {"udp", &UDPAddr{IP: ParseIP("ff04::114"), Port: 12345}}, + {"udp", &UDPAddr{IP: ParseIP("ff05::114"), Port: 12345}}, + {"udp", &UDPAddr{IP: ParseIP("ff08::114"), Port: 12345}}, + {"udp", &UDPAddr{IP: ParseIP("ff0e::114"), Port: 12345}}, + + {"udp6", &UDPAddr{IP: ParseIP("ff01::114"), Port: 12345}}, + {"udp6", &UDPAddr{IP: ParseIP("ff02::114"), Port: 12345}}, + {"udp6", &UDPAddr{IP: ParseIP("ff04::114"), Port: 12345}}, + {"udp6", &UDPAddr{IP: ParseIP("ff05::114"), Port: 12345}}, + {"udp6", &UDPAddr{IP: ParseIP("ff08::114"), Port: 12345}}, + {"udp6", &UDPAddr{IP: ParseIP("ff0e::114"), Port: 12345}}, +} + +// TestIPv6MulticastListener tests both single and double listen to a +// test listener with same address family, same group address and same +// port. +func TestIPv6MulticastListener(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6() { + t.Skip("IPv6 is not supported") + } + if os.Getuid() != 0 { + t.Skip("must be root") + } + + closer := func(cs []*UDPConn) { + for _, c := range cs { + if c != nil { + c.Close() + } + } + } + + for _, ifi := range []*Interface{loopbackInterface(), nil} { + // Note that multicast interface assignment by system + // is not recommended because it usually relies on + // routing stuff for finding out an appropriate + // nexthop containing both network and link layer + // adjacencies. 
+ if ifi == nil && !*testIPv6 { + continue + } + for _, tt := range ipv6MulticastListenerTests { + var err error + cs := make([]*UDPConn, 2) + if cs[0], err = ListenMulticastUDP(tt.net, ifi, tt.gaddr); err != nil { + t.Fatal(err) + } + if err := checkMulticastListener(cs[0], tt.gaddr.IP); err != nil { + closer(cs) + t.Fatal(err) + } + if cs[1], err = ListenMulticastUDP(tt.net, ifi, tt.gaddr); err != nil { + closer(cs) + t.Fatal(err) + } + if err := checkMulticastListener(cs[1], tt.gaddr.IP); err != nil { + closer(cs) + t.Fatal(err) + } + closer(cs) + } + } +} + +func checkMulticastListener(c *UDPConn, ip IP) error { + if ok, err := multicastRIBContains(ip); err != nil { + return err + } else if !ok { + return fmt.Errorf("%s not found in multicast rib", ip.String()) + } + la := c.LocalAddr() + if la, ok := la.(*UDPAddr); !ok || la.Port == 0 { + return fmt.Errorf("got %v; want a proper address with non-zero port number", la) + } + return nil +} + +func multicastRIBContains(ip IP) (bool, error) { + switch runtime.GOOS { + case "aix", "dragonfly", "netbsd", "openbsd", "plan9", "solaris", "illumos", "windows": + return true, nil // not implemented yet + case "linux": + if runtime.GOARCH == "arm" || runtime.GOARCH == "alpha" { + return true, nil // not implemented yet + } + } + ift, err := Interfaces() + if err != nil { + return false, err + } + for _, ifi := range ift { + ifmat, err := ifi.MulticastAddrs() + if err != nil { + return false, err + } + for _, ifma := range ifmat { + if ifma.(*IPAddr).IP.Equal(ip) { + return true, nil + } + } + } + return false, nil +} + +// Issue 21856. +func TestClosingListener(t *testing.T) { + ln := newLocalListener(t, "tcp") + addr := ln.Addr() + + go func() { + for { + c, err := ln.Accept() + if err != nil { + return + } + c.Close() + } + }() + + // Let the goroutine start. We don't sleep long: if the + // goroutine doesn't start, the test will pass without really + // testing anything, which is OK. + time.Sleep(time.Millisecond) + + ln.Close() + + ln2, err := Listen("tcp", addr.String()) + if err != nil { + t.Fatal(err) + } + ln2.Close() +} + +func TestListenConfigControl(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + t.Run("StreamListen", func(t *testing.T) { + for _, network := range []string{"tcp", "tcp4", "tcp6", "unix", "unixpacket"} { + if !testableNetwork(network) { + continue + } + ln := newLocalListener(t, network, &ListenConfig{Control: controlOnConnSetup}) + ln.Close() + } + }) + t.Run("PacketListen", func(t *testing.T) { + for _, network := range []string{"udp", "udp4", "udp6", "unixgram"} { + if !testableNetwork(network) { + continue + } + c := newLocalPacketListener(t, network, &ListenConfig{Control: controlOnConnSetup}) + c.Close() + } + }) +} diff --git a/platform/dbops/binaries/go/go/src/net/lookup.go b/platform/dbops/binaries/go/go/src/net/lookup.go new file mode 100644 index 0000000000000000000000000000000000000000..3ec2660786094ef3360b108e8a510fd194b95c06 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/lookup.go @@ -0,0 +1,920 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package net + +import ( + "context" + "errors" + "internal/nettrace" + "internal/singleflight" + "net/netip" + "sync" + + "golang.org/x/net/dns/dnsmessage" +) + +// protocols contains minimal mappings between internet protocol +// names and numbers for platforms that don't have a complete list of +// protocol numbers. +// +// See https://www.iana.org/assignments/protocol-numbers +// +// On Unix, this map is augmented by readProtocols via lookupProtocol. +var protocols = map[string]int{ + "icmp": 1, + "igmp": 2, + "tcp": 6, + "udp": 17, + "ipv6-icmp": 58, +} + +// services contains minimal mappings between services names and port +// numbers for platforms that don't have a complete list of port numbers. +// +// See https://www.iana.org/assignments/service-names-port-numbers +// +// On Unix, this map is augmented by readServices via goLookupPort. +var services = map[string]map[string]int{ + "udp": { + "domain": 53, + }, + "tcp": { + "ftp": 21, + "ftps": 990, + "gopher": 70, // ʕ◔ϖ◔ʔ + "http": 80, + "https": 443, + "imap2": 143, + "imap3": 220, + "imaps": 993, + "pop3": 110, + "pop3s": 995, + "smtp": 25, + "submissions": 465, + "ssh": 22, + "telnet": 23, + }, +} + +// dnsWaitGroup can be used by tests to wait for all DNS goroutines to +// complete. This avoids races on the test hooks. +var dnsWaitGroup sync.WaitGroup + +const maxProtoLength = len("RSVP-E2E-IGNORE") + 10 // with room to grow + +func lookupProtocolMap(name string) (int, error) { + var lowerProtocol [maxProtoLength]byte + n := copy(lowerProtocol[:], name) + lowerASCIIBytes(lowerProtocol[:n]) + proto, found := protocols[string(lowerProtocol[:n])] + if !found || n != len(name) { + return 0, &AddrError{Err: "unknown IP protocol specified", Addr: name} + } + return proto, nil +} + +// maxPortBufSize is the longest reasonable name of a service +// (non-numeric port). +// Currently the longest known IANA-unregistered name is +// "mobility-header", so we use that length, plus some slop in case +// something longer is added in the future. +const maxPortBufSize = len("mobility-header") + 10 + +func lookupPortMap(network, service string) (port int, error error) { + switch network { + case "ip": // no hints + if p, err := lookupPortMapWithNetwork("tcp", "ip", service); err == nil { + return p, nil + } + return lookupPortMapWithNetwork("udp", "ip", service) + case "tcp", "tcp4", "tcp6": + return lookupPortMapWithNetwork("tcp", "tcp", service) + case "udp", "udp4", "udp6": + return lookupPortMapWithNetwork("udp", "udp", service) + } + return 0, &DNSError{Err: "unknown network", Name: network + "/" + service} +} + +func lookupPortMapWithNetwork(network, errNetwork, service string) (port int, error error) { + if m, ok := services[network]; ok { + var lowerService [maxPortBufSize]byte + n := copy(lowerService[:], service) + lowerASCIIBytes(lowerService[:n]) + if port, ok := m[string(lowerService[:n])]; ok && n == len(service) { + return port, nil + } + return 0, &DNSError{Err: "unknown port", Name: errNetwork + "/" + service, IsNotFound: true} + } + return 0, &DNSError{Err: "unknown network", Name: errNetwork + "/" + service} +} + +// ipVersion returns the provided network's IP version: '4', '6' or 0 +// if network does not end in a '4' or '6' byte. +func ipVersion(network string) byte { + if network == "" { + return 0 + } + n := network[len(network)-1] + if n != '4' && n != '6' { + n = 0 + } + return n +} + +// DefaultResolver is the resolver used by the package-level Lookup +// functions and by Dialers without a specified Resolver. 
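+//
+// Callers needing different behavior can construct their own Resolver;
+// for example (the upstream DNS server address is a hypothetical
+// placeholder from the documentation range):
+//
+//	r := &Resolver{
+//		PreferGo: true,
+//		Dial: func(ctx context.Context, network, address string) (Conn, error) {
+//			var d Dialer
+//			return d.DialContext(ctx, network, "192.0.2.53:53")
+//		},
+//	}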
+var DefaultResolver = &Resolver{} + +// A Resolver looks up names and numbers. +// +// A nil *Resolver is equivalent to a zero Resolver. +type Resolver struct { + // PreferGo controls whether Go's built-in DNS resolver is preferred + // on platforms where it's available. It is equivalent to setting + // GODEBUG=netdns=go, but scoped to just this resolver. + PreferGo bool + + // StrictErrors controls the behavior of temporary errors + // (including timeout, socket errors, and SERVFAIL) when using + // Go's built-in resolver. For a query composed of multiple + // sub-queries (such as an A+AAAA address lookup, or walking the + // DNS search list), this option causes such errors to abort the + // whole query instead of returning a partial result. This is + // not enabled by default because it may affect compatibility + // with resolvers that process AAAA queries incorrectly. + StrictErrors bool + + // Dial optionally specifies an alternate dialer for use by + // Go's built-in DNS resolver to make TCP and UDP connections + // to DNS services. The host in the address parameter will + // always be a literal IP address and not a host name, and the + // port in the address parameter will be a literal port number + // and not a service name. + // If the Conn returned is also a PacketConn, sent and received DNS + // messages must adhere to RFC 1035 section 4.2.1, "UDP usage". + // Otherwise, DNS messages transmitted over Conn must adhere + // to RFC 7766 section 5, "Transport Protocol Selection". + // If nil, the default dialer is used. + Dial func(ctx context.Context, network, address string) (Conn, error) + + // lookupGroup merges LookupIPAddr calls together for lookups for the same + // host. The lookupGroup key is the LookupIPAddr.host argument. + // The return values are ([]IPAddr, error). + lookupGroup singleflight.Group + + // TODO(bradfitz): optional interface impl override hook + // TODO(bradfitz): Timeout time.Duration? +} + +func (r *Resolver) preferGo() bool { return r != nil && r.PreferGo } +func (r *Resolver) strictErrors() bool { return r != nil && r.StrictErrors } + +func (r *Resolver) getLookupGroup() *singleflight.Group { + if r == nil { + return &DefaultResolver.lookupGroup + } + return &r.lookupGroup +} + +// LookupHost looks up the given host using the local resolver. +// It returns a slice of that host's addresses. +// +// LookupHost uses [context.Background] internally; to specify the context, use +// [Resolver.LookupHost]. +func LookupHost(host string) (addrs []string, err error) { + return DefaultResolver.LookupHost(context.Background(), host) +} + +// LookupHost looks up the given host using the local resolver. +// It returns a slice of that host's addresses. +func (r *Resolver) LookupHost(ctx context.Context, host string) (addrs []string, err error) { + // Make sure that no matter what we do later, host=="" is rejected. + if host == "" { + return nil, &DNSError{Err: errNoSuchHost.Error(), Name: host, IsNotFound: true} + } + if _, err := netip.ParseAddr(host); err == nil { + return []string{host}, nil + } + return r.lookupHost(ctx, host) +} + +// LookupIP looks up host using the local resolver. +// It returns a slice of that host's IPv4 and IPv6 addresses. +func LookupIP(host string) ([]IP, error) { + addrs, err := DefaultResolver.LookupIPAddr(context.Background(), host) + if err != nil { + return nil, err + } + ips := make([]IP, len(addrs)) + for i, ia := range addrs { + ips[i] = ia.IP + } + return ips, nil +} + +// LookupIPAddr looks up host using the local resolver. 
+// It returns a slice of that host's IPv4 and IPv6 addresses. +func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, error) { + return r.lookupIPAddr(ctx, "ip", host) +} + +// LookupIP looks up host for the given network using the local resolver. +// It returns a slice of that host's IP addresses of the type specified by +// network. +// network must be one of "ip", "ip4" or "ip6". +func (r *Resolver) LookupIP(ctx context.Context, network, host string) ([]IP, error) { + afnet, _, err := parseNetwork(ctx, network, false) + if err != nil { + return nil, err + } + switch afnet { + case "ip", "ip4", "ip6": + default: + return nil, UnknownNetworkError(network) + } + + if host == "" { + return nil, &DNSError{Err: errNoSuchHost.Error(), Name: host, IsNotFound: true} + } + addrs, err := r.internetAddrList(ctx, afnet, host) + if err != nil { + return nil, err + } + + ips := make([]IP, 0, len(addrs)) + for _, addr := range addrs { + ips = append(ips, addr.(*IPAddr).IP) + } + return ips, nil +} + +// LookupNetIP looks up host using the local resolver. +// It returns a slice of that host's IP addresses of the type specified by +// network. +// The network must be one of "ip", "ip4" or "ip6". +func (r *Resolver) LookupNetIP(ctx context.Context, network, host string) ([]netip.Addr, error) { + // TODO(bradfitz): make this efficient, making the internal net package + // type throughout be netip.Addr and only converting to the net.IP slice + // version at the edge. But for now (2021-10-20), this is a wrapper around + // the old way. + ips, err := r.LookupIP(ctx, network, host) + if err != nil { + return nil, err + } + ret := make([]netip.Addr, 0, len(ips)) + for _, ip := range ips { + if a, ok := netip.AddrFromSlice(ip); ok { + ret = append(ret, a) + } + } + return ret, nil +} + +// onlyValuesCtx is a context that uses an underlying context +// for value lookup if the underlying context hasn't yet expired. +type onlyValuesCtx struct { + context.Context + lookupValues context.Context +} + +var _ context.Context = (*onlyValuesCtx)(nil) + +// Value performs a lookup if the original context hasn't expired. +func (ovc *onlyValuesCtx) Value(key any) any { + select { + case <-ovc.lookupValues.Done(): + return nil + default: + return ovc.lookupValues.Value(key) + } +} + +// withUnexpiredValuesPreserved returns a context.Context that only uses lookupCtx +// for its values, otherwise it is never canceled and has no deadline. +// If the lookup context expires, any looked up values will return nil. +// See Issue 28600. +func withUnexpiredValuesPreserved(lookupCtx context.Context) context.Context { + return &onlyValuesCtx{Context: context.Background(), lookupValues: lookupCtx} +} + +// lookupIPAddr looks up host using the local resolver and particular network. +// It returns a slice of that host's IPv4 and IPv6 addresses. +func (r *Resolver) lookupIPAddr(ctx context.Context, network, host string) ([]IPAddr, error) { + // Make sure that no matter what we do later, host=="" is rejected. + if host == "" { + return nil, &DNSError{Err: errNoSuchHost.Error(), Name: host, IsNotFound: true} + } + if ip, err := netip.ParseAddr(host); err == nil { + return []IPAddr{{IP: IP(ip.AsSlice()).To16(), Zone: ip.Zone()}}, nil + } + trace, _ := ctx.Value(nettrace.TraceKey{}).(*nettrace.Trace) + if trace != nil && trace.DNSStart != nil { + trace.DNSStart(host) + } + // The underlying resolver func is lookupIP by default but it + // can be overridden by tests. 
This is needed by net/http, so it + // uses a context key instead of unexported variables. + resolverFunc := r.lookupIP + if alt, _ := ctx.Value(nettrace.LookupIPAltResolverKey{}).(func(context.Context, string, string) ([]IPAddr, error)); alt != nil { + resolverFunc = alt + } + + // We don't want a cancellation of ctx to affect the + // lookupGroup operation. Otherwise if our context gets + // canceled it might cause an error to be returned to a lookup + // using a completely different context. However we need to preserve + // only the values in context. See Issue 28600. + lookupGroupCtx, lookupGroupCancel := context.WithCancel(withUnexpiredValuesPreserved(ctx)) + + lookupKey := network + "\000" + host + dnsWaitGroup.Add(1) + ch := r.getLookupGroup().DoChan(lookupKey, func() (any, error) { + return testHookLookupIP(lookupGroupCtx, resolverFunc, network, host) + }) + + dnsWaitGroupDone := func(ch <-chan singleflight.Result, cancelFn context.CancelFunc) { + <-ch + dnsWaitGroup.Done() + cancelFn() + } + select { + case <-ctx.Done(): + // Our context was canceled. If we are the only + // goroutine looking up this key, then drop the key + // from the lookupGroup and cancel the lookup. + // If there are other goroutines looking up this key, + // let the lookup continue uncanceled, and let later + // lookups with the same key share the result. + // See issues 8602, 20703, 22724. + if r.getLookupGroup().ForgetUnshared(lookupKey) { + lookupGroupCancel() + go dnsWaitGroupDone(ch, func() {}) + } else { + go dnsWaitGroupDone(ch, lookupGroupCancel) + } + ctxErr := ctx.Err() + err := &DNSError{ + Err: mapErr(ctxErr).Error(), + Name: host, + IsTimeout: ctxErr == context.DeadlineExceeded, + } + if trace != nil && trace.DNSDone != nil { + trace.DNSDone(nil, false, err) + } + return nil, err + case r := <-ch: + dnsWaitGroup.Done() + lookupGroupCancel() + err := r.Err + if err != nil { + if _, ok := err.(*DNSError); !ok { + isTimeout := false + if err == context.DeadlineExceeded { + isTimeout = true + } else if terr, ok := err.(timeout); ok { + isTimeout = terr.Timeout() + } + err = &DNSError{ + Err: err.Error(), + Name: host, + IsTimeout: isTimeout, + } + } + } + if trace != nil && trace.DNSDone != nil { + addrs, _ := r.Val.([]IPAddr) + trace.DNSDone(ipAddrsEface(addrs), r.Shared, err) + } + return lookupIPReturn(r.Val, err, r.Shared) + } +} + +// lookupIPReturn turns the return values from singleflight.Do into +// the return values from LookupIP. +func lookupIPReturn(addrsi any, err error, shared bool) ([]IPAddr, error) { + if err != nil { + return nil, err + } + addrs := addrsi.([]IPAddr) + if shared { + clone := make([]IPAddr, len(addrs)) + copy(clone, addrs) + addrs = clone + } + return addrs, nil +} + +// ipAddrsEface returns an empty interface slice of addrs. +func ipAddrsEface(addrs []IPAddr) []any { + s := make([]any, len(addrs)) + for i, v := range addrs { + s[i] = v + } + return s +} + +// LookupPort looks up the port for the given network and service. +// +// LookupPort uses [context.Background] internally; to specify the context, use +// [Resolver.LookupPort]. +func LookupPort(network, service string) (port int, err error) { + return DefaultResolver.LookupPort(context.Background(), network, service) +} + +// LookupPort looks up the port for the given network and service. +// +// The network must be one of "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6" or "ip". 
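+//
+// For example (hypothetical calls; "https" resolves via the built-in
+// services table when no system database is available):
+//
+//	port, _ := r.LookupPort(ctx, "tcp", "https") // 443
+//	port, _ = r.LookupPort(ctx, "tcp", "8080")   // 8080: numeric, no lookup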
+func (r *Resolver) LookupPort(ctx context.Context, network, service string) (port int, err error) { + port, needsLookup := parsePort(service) + if needsLookup { + switch network { + case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6", "ip": + case "": // a hint wildcard for Go 1.0 undocumented behavior + network = "ip" + default: + return 0, &AddrError{Err: "unknown network", Addr: network} + } + port, err = r.lookupPort(ctx, network, service) + if err != nil { + return 0, err + } + } + if 0 > port || port > 65535 { + return 0, &AddrError{Err: "invalid port", Addr: service} + } + return port, nil +} + +// LookupCNAME returns the canonical name for the given host. +// Callers that do not care about the canonical name can call +// [LookupHost] or [LookupIP] directly; both take care of resolving +// the canonical name as part of the lookup. +// +// A canonical name is the final name after following zero +// or more CNAME records. +// LookupCNAME does not return an error if host does not +// contain DNS "CNAME" records, as long as host resolves to +// address records. +// +// The returned canonical name is validated to be a properly +// formatted presentation-format domain name. +// +// LookupCNAME uses [context.Background] internally; to specify the context, use +// [Resolver.LookupCNAME]. +func LookupCNAME(host string) (cname string, err error) { + return DefaultResolver.LookupCNAME(context.Background(), host) +} + +// LookupCNAME returns the canonical name for the given host. +// Callers that do not care about the canonical name can call +// [LookupHost] or [LookupIP] directly; both take care of resolving +// the canonical name as part of the lookup. +// +// A canonical name is the final name after following zero +// or more CNAME records. +// LookupCNAME does not return an error if host does not +// contain DNS "CNAME" records, as long as host resolves to +// address records. +// +// The returned canonical name is validated to be a properly +// formatted presentation-format domain name. +func (r *Resolver) LookupCNAME(ctx context.Context, host string) (string, error) { + cname, err := r.lookupCNAME(ctx, host) + if err != nil { + return "", err + } + if !isDomainName(cname) { + return "", &DNSError{Err: errMalformedDNSRecordsDetail, Name: host} + } + return cname, nil +} + +// LookupSRV tries to resolve an [SRV] query of the given service, +// protocol, and domain name. The proto is "tcp" or "udp". +// The returned records are sorted by priority and randomized +// by weight within a priority. +// +// LookupSRV constructs the DNS name to look up following RFC 2782. +// That is, it looks up _service._proto.name. To accommodate services +// publishing SRV records under non-standard names, if both service +// and proto are empty strings, LookupSRV looks up name directly. +// +// The returned service names are validated to be properly +// formatted presentation-format domain names. If the response contains +// invalid names, those records are filtered out and an error +// will be returned alongside the remaining results, if any. +func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err error) { + return DefaultResolver.LookupSRV(context.Background(), service, proto, name) +} + +// LookupSRV tries to resolve an [SRV] query of the given service, +// protocol, and domain name. The proto is "tcp" or "udp". +// The returned records are sorted by priority and randomized +// by weight within a priority. +// +// LookupSRV constructs the DNS name to look up following RFC 2782. 
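+//
+// Illustrative sketch (added commentary, not upstream text; the domain is a
+// placeholder): a call such as
+//
+//	cname, srvs, err := net.DefaultResolver.LookupSRV(ctx, "ldap", "tcp", "example.com")
+//
+// issues a query for _ldap._tcp.example.com.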
+// That is, it looks up _service._proto.name. To accommodate services +// publishing SRV records under non-standard names, if both service +// and proto are empty strings, LookupSRV looks up name directly. +// +// The returned service names are validated to be properly +// formatted presentation-format domain names. If the response contains +// invalid names, those records are filtered out and an error +// will be returned alongside the remaining results, if any. +func (r *Resolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*SRV, error) { + cname, addrs, err := r.lookupSRV(ctx, service, proto, name) + if err != nil { + return "", nil, err + } + if cname != "" && !isDomainName(cname) { + return "", nil, &DNSError{Err: "SRV header name is invalid", Name: name} + } + filteredAddrs := make([]*SRV, 0, len(addrs)) + for _, addr := range addrs { + if addr == nil { + continue + } + if !isDomainName(addr.Target) { + continue + } + filteredAddrs = append(filteredAddrs, addr) + } + if len(addrs) != len(filteredAddrs) { + return cname, filteredAddrs, &DNSError{Err: errMalformedDNSRecordsDetail, Name: name} + } + return cname, filteredAddrs, nil +} + +// LookupMX returns the DNS MX records for the given domain name sorted by preference. +// +// The returned mail server names are validated to be properly +// formatted presentation-format domain names. If the response contains +// invalid names, those records are filtered out and an error +// will be returned alongside the remaining results, if any. +// +// LookupMX uses [context.Background] internally; to specify the context, use +// [Resolver.LookupMX]. +func LookupMX(name string) ([]*MX, error) { + return DefaultResolver.LookupMX(context.Background(), name) +} + +// LookupMX returns the DNS MX records for the given domain name sorted by preference. +// +// The returned mail server names are validated to be properly +// formatted presentation-format domain names. If the response contains +// invalid names, those records are filtered out and an error +// will be returned alongside the remaining results, if any. +func (r *Resolver) LookupMX(ctx context.Context, name string) ([]*MX, error) { + records, err := r.lookupMX(ctx, name) + if err != nil { + return nil, err + } + filteredMX := make([]*MX, 0, len(records)) + for _, mx := range records { + if mx == nil { + continue + } + if !isDomainName(mx.Host) { + continue + } + filteredMX = append(filteredMX, mx) + } + if len(records) != len(filteredMX) { + return filteredMX, &DNSError{Err: errMalformedDNSRecordsDetail, Name: name} + } + return filteredMX, nil +} + +// LookupNS returns the DNS NS records for the given domain name. +// +// The returned name server names are validated to be properly +// formatted presentation-format domain names. If the response contains +// invalid names, those records are filtered out and an error +// will be returned alongside the remaining results, if any. +// +// LookupNS uses [context.Background] internally; to specify the context, use +// [Resolver.LookupNS]. +func LookupNS(name string) ([]*NS, error) { + return DefaultResolver.LookupNS(context.Background(), name) +} + +// LookupNS returns the DNS NS records for the given domain name. +// +// The returned name server names are validated to be properly +// formatted presentation-format domain names. If the response contains +// invalid names, those records are filtered out and an error +// will be returned alongside the remaining results, if any. 
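+//
+// Illustrative usage (a sketch added for clarity, not upstream text; the
+// domain is a placeholder):
+//
+//	nss, err := net.DefaultResolver.LookupNS(context.Background(), "example.org")
+//	if err == nil {
+//		for _, ns := range nss {
+//			fmt.Println(ns.Host) // e.g. "a.iana-servers.net."
+//		}
+//	}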
+func (r *Resolver) LookupNS(ctx context.Context, name string) ([]*NS, error) { + records, err := r.lookupNS(ctx, name) + if err != nil { + return nil, err + } + filteredNS := make([]*NS, 0, len(records)) + for _, ns := range records { + if ns == nil { + continue + } + if !isDomainName(ns.Host) { + continue + } + filteredNS = append(filteredNS, ns) + } + if len(records) != len(filteredNS) { + return filteredNS, &DNSError{Err: errMalformedDNSRecordsDetail, Name: name} + } + return filteredNS, nil +} + +// LookupTXT returns the DNS TXT records for the given domain name. +// +// LookupTXT uses [context.Background] internally; to specify the context, use +// [Resolver.LookupTXT]. +func LookupTXT(name string) ([]string, error) { + return DefaultResolver.lookupTXT(context.Background(), name) +} + +// LookupTXT returns the DNS TXT records for the given domain name. +func (r *Resolver) LookupTXT(ctx context.Context, name string) ([]string, error) { + return r.lookupTXT(ctx, name) +} + +// LookupAddr performs a reverse lookup for the given address, returning a list +// of names mapping to that address. +// +// The returned names are validated to be properly formatted presentation-format +// domain names. If the response contains invalid names, those records are filtered +// out and an error will be returned alongside the remaining results, if any. +// +// When using the host C library resolver, at most one result will be +// returned. To bypass the host resolver, use a custom [Resolver]. +// +// LookupAddr uses [context.Background] internally; to specify the context, use +// [Resolver.LookupAddr]. +func LookupAddr(addr string) (names []string, err error) { + return DefaultResolver.LookupAddr(context.Background(), addr) +} + +// LookupAddr performs a reverse lookup for the given address, returning a list +// of names mapping to that address. +// +// The returned names are validated to be properly formatted presentation-format +// domain names. If the response contains invalid names, those records are filtered +// out and an error will be returned alongside the remaining results, if any. +func (r *Resolver) LookupAddr(ctx context.Context, addr string) ([]string, error) { + names, err := r.lookupAddr(ctx, addr) + if err != nil { + return nil, err + } + filteredNames := make([]string, 0, len(names)) + for _, name := range names { + if isDomainName(name) { + filteredNames = append(filteredNames, name) + } + } + if len(names) != len(filteredNames) { + return filteredNames, &DNSError{Err: errMalformedDNSRecordsDetail, Name: addr} + } + return filteredNames, nil +} + +// errMalformedDNSRecordsDetail is the DNSError detail which is returned when a Resolver.Lookup... +// method receives DNS records which contain invalid DNS names. This may be returned alongside +// results which have had the malformed records filtered out. +var errMalformedDNSRecordsDetail = "DNS response contained records which contain invalid names" + +// dial makes a new connection to the provided server (which must be +// an IP address) with the provided network type, using either r.Dial +// (if both r and r.Dial are non-nil) or else Dialer.DialContext. +func (r *Resolver) dial(ctx context.Context, network, server string) (Conn, error) { + // Calling Dial here is scary -- we have to be sure not to + // dial a name that will require a DNS lookup, or Dial will + // call back here to translate it. The DNS config parser has + // already checked that all the cfg.servers are IP + // addresses, which Dial will use without a DNS lookup. 
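+	// Illustrative note (added commentary, not upstream): a caller-supplied
+	// Resolver.Dial is what takes the first branch below, e.g. to pin DNS
+	// traffic to one server (192.0.2.53 is a documentation-range placeholder):
+	//
+	//	r := &net.Resolver{
+	//		PreferGo: true,
+	//		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
+	//			var d net.Dialer
+	//			return d.DialContext(ctx, network, "192.0.2.53:53")
+	//		},
+	//	}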
+ var c Conn + var err error + if r != nil && r.Dial != nil { + c, err = r.Dial(ctx, network, server) + } else { + var d Dialer + c, err = d.DialContext(ctx, network, server) + } + if err != nil { + return nil, mapErr(err) + } + return c, nil +} + +// goLookupSRV returns the SRV records for a target name, built either +// from its component service ("sip"), protocol ("tcp"), and name +// ("example.com."), or from name directly (if service and proto are +// both empty). +// +// In either case, the returned target name ("_sip._tcp.example.com.") +// is also returned on success. +// +// The records are sorted by weight. +func (r *Resolver) goLookupSRV(ctx context.Context, service, proto, name string) (target string, srvs []*SRV, err error) { + if service == "" && proto == "" { + target = name + } else { + target = "_" + service + "._" + proto + "." + name + } + p, server, err := r.lookup(ctx, target, dnsmessage.TypeSRV, nil) + if err != nil { + return "", nil, err + } + var cname dnsmessage.Name + for { + h, err := p.AnswerHeader() + if err == dnsmessage.ErrSectionDone { + break + } + if err != nil { + return "", nil, &DNSError{ + Err: "cannot unmarshal DNS message", + Name: name, + Server: server, + } + } + if h.Type != dnsmessage.TypeSRV { + if err := p.SkipAnswer(); err != nil { + return "", nil, &DNSError{ + Err: "cannot unmarshal DNS message", + Name: name, + Server: server, + } + } + continue + } + if cname.Length == 0 && h.Name.Length != 0 { + cname = h.Name + } + srv, err := p.SRVResource() + if err != nil { + return "", nil, &DNSError{ + Err: "cannot unmarshal DNS message", + Name: name, + Server: server, + } + } + srvs = append(srvs, &SRV{Target: srv.Target.String(), Port: srv.Port, Priority: srv.Priority, Weight: srv.Weight}) + } + byPriorityWeight(srvs).sort() + return cname.String(), srvs, nil +} + +// goLookupMX returns the MX records for name. +func (r *Resolver) goLookupMX(ctx context.Context, name string) ([]*MX, error) { + p, server, err := r.lookup(ctx, name, dnsmessage.TypeMX, nil) + if err != nil { + return nil, err + } + var mxs []*MX + for { + h, err := p.AnswerHeader() + if err == dnsmessage.ErrSectionDone { + break + } + if err != nil { + return nil, &DNSError{ + Err: "cannot unmarshal DNS message", + Name: name, + Server: server, + } + } + if h.Type != dnsmessage.TypeMX { + if err := p.SkipAnswer(); err != nil { + return nil, &DNSError{ + Err: "cannot unmarshal DNS message", + Name: name, + Server: server, + } + } + continue + } + mx, err := p.MXResource() + if err != nil { + return nil, &DNSError{ + Err: "cannot unmarshal DNS message", + Name: name, + Server: server, + } + } + mxs = append(mxs, &MX{Host: mx.MX.String(), Pref: mx.Pref}) + + } + byPref(mxs).sort() + return mxs, nil +} + +// goLookupNS returns the NS records for name. 
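+//
+// Like goLookupSRV and goLookupMX above, it walks the answer section with
+// p.AnswerHeader(), skips records of other types via p.SkipAnswer(), and
+// reports any parse failure as a "cannot unmarshal DNS message" DNSError.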
+func (r *Resolver) goLookupNS(ctx context.Context, name string) ([]*NS, error) { + p, server, err := r.lookup(ctx, name, dnsmessage.TypeNS, nil) + if err != nil { + return nil, err + } + var nss []*NS + for { + h, err := p.AnswerHeader() + if err == dnsmessage.ErrSectionDone { + break + } + if err != nil { + return nil, &DNSError{ + Err: "cannot unmarshal DNS message", + Name: name, + Server: server, + } + } + if h.Type != dnsmessage.TypeNS { + if err := p.SkipAnswer(); err != nil { + return nil, &DNSError{ + Err: "cannot unmarshal DNS message", + Name: name, + Server: server, + } + } + continue + } + ns, err := p.NSResource() + if err != nil { + return nil, &DNSError{ + Err: "cannot unmarshal DNS message", + Name: name, + Server: server, + } + } + nss = append(nss, &NS{Host: ns.NS.String()}) + } + return nss, nil +} + +// goLookupTXT returns the TXT records from name. +func (r *Resolver) goLookupTXT(ctx context.Context, name string) ([]string, error) { + p, server, err := r.lookup(ctx, name, dnsmessage.TypeTXT, nil) + if err != nil { + return nil, err + } + var txts []string + for { + h, err := p.AnswerHeader() + if err == dnsmessage.ErrSectionDone { + break + } + if err != nil { + return nil, &DNSError{ + Err: "cannot unmarshal DNS message", + Name: name, + Server: server, + } + } + if h.Type != dnsmessage.TypeTXT { + if err := p.SkipAnswer(); err != nil { + return nil, &DNSError{ + Err: "cannot unmarshal DNS message", + Name: name, + Server: server, + } + } + continue + } + txt, err := p.TXTResource() + if err != nil { + return nil, &DNSError{ + Err: "cannot unmarshal DNS message", + Name: name, + Server: server, + } + } + // Multiple strings in one TXT record need to be + // concatenated without separator to be consistent + // with previous Go resolver. + n := 0 + for _, s := range txt.TXT { + n += len(s) + } + txtJoin := make([]byte, 0, n) + for _, s := range txt.TXT { + txtJoin = append(txtJoin, s...) + } + if len(txts) == 0 { + txts = make([]string, 0, 1) + } + txts = append(txts, string(txtJoin)) + } + return txts, nil +} + +func parseCNAMEFromResources(resources []dnsmessage.Resource) (string, error) { + if len(resources) == 0 { + return "", errors.New("no CNAME record received") + } + c, ok := resources[0].Body.(*dnsmessage.CNAMEResource) + if !ok { + return "", errors.New("could not parse CNAME record") + } + return c.CNAME.String(), nil +} diff --git a/platform/dbops/binaries/go/go/src/net/lookup_plan9.go b/platform/dbops/binaries/go/go/src/net/lookup_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..8cfc4f6bb3b02d3d99d073ed850be6b84483666f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/lookup_plan9.go @@ -0,0 +1,394 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "errors" + "internal/bytealg" + "internal/itoa" + "io" + "os" +) + +// cgoAvailable set to true to indicate that the cgo resolver +// is available on Plan 9. Note that on Plan 9 the cgo resolver +// does not actually use cgo. 
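+//
+// (Added note: on Plan 9, "cgo-style" resolution means querying the system's
+// ndb/cs and ndb/dns services through the /net file system, as the query
+// helpers below do, rather than calling into C code.)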
+const cgoAvailable = true + +func query(ctx context.Context, filename, query string, bufSize int) (addrs []string, err error) { + queryAddrs := func() (addrs []string, err error) { + file, err := os.OpenFile(filename, os.O_RDWR, 0) + if err != nil { + return nil, err + } + defer file.Close() + + _, err = file.Seek(0, io.SeekStart) + if err != nil { + return nil, err + } + _, err = file.WriteString(query) + if err != nil { + return nil, err + } + _, err = file.Seek(0, io.SeekStart) + if err != nil { + return nil, err + } + buf := make([]byte, bufSize) + for { + n, _ := file.Read(buf) + if n <= 0 { + break + } + addrs = append(addrs, string(buf[:n])) + } + return addrs, nil + } + + type ret struct { + addrs []string + err error + } + + ch := make(chan ret, 1) + go func() { + addrs, err := queryAddrs() + ch <- ret{addrs: addrs, err: err} + }() + + select { + case r := <-ch: + return r.addrs, r.err + case <-ctx.Done(): + return nil, &DNSError{ + Name: query, + Err: ctx.Err().Error(), + IsTimeout: ctx.Err() == context.DeadlineExceeded, + } + } +} + +func queryCS(ctx context.Context, net, host, service string) (res []string, err error) { + switch net { + case "tcp4", "tcp6": + net = "tcp" + case "udp4", "udp6": + net = "udp" + } + if host == "" { + host = "*" + } + return query(ctx, netdir+"/cs", net+"!"+host+"!"+service, 128) +} + +func queryCS1(ctx context.Context, net string, ip IP, port int) (clone, dest string, err error) { + ips := "*" + if len(ip) != 0 && !ip.IsUnspecified() { + ips = ip.String() + } + lines, err := queryCS(ctx, net, ips, itoa.Itoa(port)) + if err != nil { + return + } + f := getFields(lines[0]) + if len(f) < 2 { + return "", "", errors.New("bad response from ndb/cs") + } + clone, dest = f[0], f[1] + return +} + +func queryDNS(ctx context.Context, addr string, typ string) (res []string, err error) { + return query(ctx, netdir+"/dns", addr+" "+typ, 1024) +} + +func handlePlan9DNSError(err error, name string) error { + if stringsHasSuffix(err.Error(), "dns: name does not exist") || + stringsHasSuffix(err.Error(), "dns: resource does not exist; negrcode 0") || + stringsHasSuffix(err.Error(), "dns: resource does not exist; negrcode") { + return &DNSError{ + Err: errNoSuchHost.Error(), + Name: name, + IsNotFound: true, + } + } + return &DNSError{ + Err: err.Error(), + Name: name, + } +} + +// toLower returns a lower-case version of in. Restricting us to +// ASCII is sufficient to handle the IP protocol names and allow +// us to not depend on the strings and unicode packages. +func toLower(in string) string { + for _, c := range in { + if 'A' <= c && c <= 'Z' { + // Has upper case; need to fix. + out := []byte(in) + for i := 0; i < len(in); i++ { + c := in[i] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + out[i] = c + } + return string(out) + } + } + return in +} + +// lookupProtocol looks up IP protocol name and returns +// the corresponding protocol number. 
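+//
+// Illustrative sketch of the exchange (added commentary; the reply shown is a
+// hypothetical example of ndb/cs output, not captured from a real system):
+//
+//	write to /net/cs: "!protocol=tcp"
+//	read back:        "/net/tcp tcp=6"
+//
+// The digits after '=' in the second field (here 6) become the result.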
+func lookupProtocol(ctx context.Context, name string) (proto int, err error) { + lines, err := query(ctx, netdir+"/cs", "!protocol="+toLower(name), 128) + if err != nil { + return 0, err + } + if len(lines) == 0 { + return 0, UnknownNetworkError(name) + } + f := getFields(lines[0]) + if len(f) < 2 { + return 0, UnknownNetworkError(name) + } + s := f[1] + if n, _, ok := dtoi(s[bytealg.IndexByteString(s, '=')+1:]); ok { + return n, nil + } + return 0, UnknownNetworkError(name) +} + +func (*Resolver) lookupHost(ctx context.Context, host string) (addrs []string, err error) { + // Use netdir/cs instead of netdir/dns because cs knows about + // host names in local network (e.g. from /lib/ndb/local) + lines, err := queryCS(ctx, "net", host, "1") + if err != nil { + if stringsHasSuffix(err.Error(), "dns failure") { + return nil, &DNSError{Err: errNoSuchHost.Error(), Name: host, IsNotFound: true} + } + return nil, handlePlan9DNSError(err, host) + } +loop: + for _, line := range lines { + f := getFields(line) + if len(f) < 2 { + continue + } + addr := f[1] + if i := bytealg.IndexByteString(addr, '!'); i >= 0 { + addr = addr[:i] // remove port + } + if ParseIP(addr) == nil { + continue + } + // only return unique addresses + for _, a := range addrs { + if a == addr { + continue loop + } + } + addrs = append(addrs, addr) + } + return +} + +func (r *Resolver) lookupIP(ctx context.Context, network, host string) (addrs []IPAddr, err error) { + if order, conf := systemConf().hostLookupOrder(r, host); order != hostLookupCgo { + return r.goLookupIP(ctx, network, host, order, conf) + } + + lits, err := r.lookupHost(ctx, host) + if err != nil { + return + } + for _, lit := range lits { + host, zone := splitHostZone(lit) + if ip := ParseIP(host); ip != nil { + addr := IPAddr{IP: ip, Zone: zone} + addrs = append(addrs, addr) + } + } + return +} + +func (r *Resolver) lookupPort(ctx context.Context, network, service string) (port int, err error) { + switch network { + case "ip": // no hints + if p, err := r.lookupPortWithNetwork(ctx, "tcp", "ip", service); err == nil { + return p, nil + } + return r.lookupPortWithNetwork(ctx, "udp", "ip", service) + case "tcp", "tcp4", "tcp6": + return r.lookupPortWithNetwork(ctx, "tcp", "tcp", service) + case "udp", "udp4", "udp6": + return r.lookupPortWithNetwork(ctx, "udp", "udp", service) + default: + return 0, &DNSError{Err: "unknown network", Name: network + "/" + service} + } +} + +func (*Resolver) lookupPortWithNetwork(ctx context.Context, network, errNetwork, service string) (port int, err error) { + lines, err := queryCS(ctx, network, "127.0.0.1", toLower(service)) + if err != nil { + if stringsHasSuffix(err.Error(), "can't translate service") { + return 0, &DNSError{Err: "unknown port", Name: errNetwork + "/" + service, IsNotFound: true} + } + return + } + if len(lines) == 0 { + return 0, &DNSError{Err: "unknown port", Name: errNetwork + "/" + service, IsNotFound: true} + } + f := getFields(lines[0]) + if len(f) < 2 { + return 0, &DNSError{Err: "unknown port", Name: errNetwork + "/" + service, IsNotFound: true} + } + s := f[1] + if i := bytealg.IndexByteString(s, '!'); i >= 0 { + s = s[i+1:] // remove address + } + if n, _, ok := dtoi(s); ok { + return n, nil + } + return 0, &DNSError{Err: "unknown port", Name: errNetwork + "/" + service, IsNotFound: true} +} + +func (r *Resolver) lookupCNAME(ctx context.Context, name string) (cname string, err error) { + if order, conf := systemConf().hostLookupOrder(r, name); order != hostLookupCgo { + return r.goLookupCNAME(ctx, 
name, order, conf) + } + + lines, err := queryDNS(ctx, name, "cname") + if err != nil { + if stringsHasSuffix(err.Error(), "dns failure") || stringsHasSuffix(err.Error(), "resource does not exist; negrcode 0") { + return absDomainName(name), nil + } + return "", handlePlan9DNSError(err, cname) + } + if len(lines) > 0 { + if f := getFields(lines[0]); len(f) >= 3 { + return f[2] + ".", nil + } + } + return "", errors.New("bad response from ndb/dns") +} + +func (r *Resolver) lookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*SRV, err error) { + if systemConf().mustUseGoResolver(r) { + return r.goLookupSRV(ctx, service, proto, name) + } + var target string + if service == "" && proto == "" { + target = name + } else { + target = "_" + service + "._" + proto + "." + name + } + lines, err := queryDNS(ctx, target, "srv") + if err != nil { + return "", nil, handlePlan9DNSError(err, name) + } + for _, line := range lines { + f := getFields(line) + if len(f) < 6 { + continue + } + port, _, portOk := dtoi(f[4]) + priority, _, priorityOk := dtoi(f[3]) + weight, _, weightOk := dtoi(f[2]) + if !(portOk && priorityOk && weightOk) { + continue + } + addrs = append(addrs, &SRV{absDomainName(f[5]), uint16(port), uint16(priority), uint16(weight)}) + cname = absDomainName(f[0]) + } + byPriorityWeight(addrs).sort() + return +} + +func (r *Resolver) lookupMX(ctx context.Context, name string) (mx []*MX, err error) { + if systemConf().mustUseGoResolver(r) { + return r.goLookupMX(ctx, name) + } + lines, err := queryDNS(ctx, name, "mx") + if err != nil { + return nil, handlePlan9DNSError(err, name) + } + for _, line := range lines { + f := getFields(line) + if len(f) < 4 { + continue + } + if pref, _, ok := dtoi(f[2]); ok { + mx = append(mx, &MX{absDomainName(f[3]), uint16(pref)}) + } + } + byPref(mx).sort() + return +} + +func (r *Resolver) lookupNS(ctx context.Context, name string) (ns []*NS, err error) { + if systemConf().mustUseGoResolver(r) { + return r.goLookupNS(ctx, name) + } + lines, err := queryDNS(ctx, name, "ns") + if err != nil { + return nil, handlePlan9DNSError(err, name) + } + for _, line := range lines { + f := getFields(line) + if len(f) < 3 { + continue + } + ns = append(ns, &NS{absDomainName(f[2])}) + } + return +} + +func (r *Resolver) lookupTXT(ctx context.Context, name string) (txt []string, err error) { + if systemConf().mustUseGoResolver(r) { + return r.goLookupTXT(ctx, name) + } + lines, err := queryDNS(ctx, name, "txt") + if err != nil { + return nil, handlePlan9DNSError(err, name) + } + for _, line := range lines { + if i := bytealg.IndexByteString(line, '\t'); i >= 0 { + txt = append(txt, line[i+1:]) + } + } + return +} + +func (r *Resolver) lookupAddr(ctx context.Context, addr string) (name []string, err error) { + if order, conf := systemConf().addrLookupOrder(r, addr); order != hostLookupCgo { + return r.goLookupPTR(ctx, addr, order, conf) + } + arpa, err := reverseaddr(addr) + if err != nil { + return + } + lines, err := queryDNS(ctx, arpa, "ptr") + if err != nil { + return nil, handlePlan9DNSError(err, addr) + } + for _, line := range lines { + f := getFields(line) + if len(f) < 3 { + continue + } + name = append(name, absDomainName(f[2])) + } + return +} + +// concurrentThreadsLimit returns the number of threads we permit to +// run concurrently doing DNS lookups. 
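+//
+// A cap like this is typically enforced with a counting semaphore around the
+// blocking calls; a minimal sketch of the pattern (illustrative only, the
+// package's actual limiter may differ in detail):
+//
+//	sem := make(chan struct{}, concurrentThreadsLimit())
+//	sem <- struct{}{}        // acquire a slot before a blocking lookup
+//	defer func() { <-sem }() // release it when the lookup returns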
+func concurrentThreadsLimit() int { + return 500 +} diff --git a/platform/dbops/binaries/go/go/src/net/lookup_test.go b/platform/dbops/binaries/go/go/src/net/lookup_test.go new file mode 100644 index 0000000000000000000000000000000000000000..57ac9a933a0d3492e79276298ac5cc1ae3b9a383 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/lookup_test.go @@ -0,0 +1,1648 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "errors" + "fmt" + "internal/testenv" + "net/netip" + "reflect" + "runtime" + "sort" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +func hasSuffixFold(s, suffix string) bool { + return strings.HasSuffix(strings.ToLower(s), strings.ToLower(suffix)) +} + +func lookupLocalhost(ctx context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { + switch host { + case "localhost": + return []IPAddr{ + {IP: IPv4(127, 0, 0, 1)}, + {IP: IPv6loopback}, + }, nil + default: + return fn(ctx, network, host) + } +} + +// The Lookup APIs use various sources such as local database, DNS or +// mDNS, and may use platform-dependent DNS stub resolver if possible. +// The APIs accept any of forms for a query; host name in various +// encodings, UTF-8 encoded net name, domain name, FQDN or absolute +// FQDN, but the result would be one of the forms and it depends on +// the circumstances. + +var lookupGoogleSRVTests = []struct { + service, proto, name string + cname, target string +}{ + { + "ldap", "tcp", "google.com", + "google.com.", "google.com.", + }, + { + "ldap", "tcp", "google.com.", + "google.com.", "google.com.", + }, + + // non-standard back door + { + "", "", "_ldap._tcp.google.com", + "google.com.", "google.com.", + }, + { + "", "", "_ldap._tcp.google.com.", + "google.com.", "google.com.", + }, +} + +var backoffDuration = [...]time.Duration{time.Second, 5 * time.Second, 30 * time.Second} + +func TestLookupGoogleSRV(t *testing.T) { + t.Parallel() + mustHaveExternalNetwork(t) + + if runtime.GOOS == "ios" { + t.Skip("no resolv.conf on iOS") + } + + if !supportsIPv4() || !*testIPv4 { + t.Skip("IPv4 is required") + } + + attempts := 0 + for i := 0; i < len(lookupGoogleSRVTests); i++ { + tt := lookupGoogleSRVTests[i] + cname, srvs, err := LookupSRV(tt.service, tt.proto, tt.name) + if err != nil { + testenv.SkipFlakyNet(t) + if attempts < len(backoffDuration) { + dur := backoffDuration[attempts] + t.Logf("backoff %v after failure %v\n", dur, err) + time.Sleep(dur) + attempts++ + i-- + continue + } + t.Fatal(err) + } + if len(srvs) == 0 { + t.Error("got no record") + } + if !hasSuffixFold(cname, tt.cname) { + t.Errorf("got %s; want %s", cname, tt.cname) + } + for _, srv := range srvs { + if !hasSuffixFold(srv.Target, tt.target) { + t.Errorf("got %v; want a record containing %s", srv, tt.target) + } + } + } +} + +var lookupGmailMXTests = []struct { + name, host string +}{ + {"gmail.com", "google.com."}, + {"gmail.com.", "google.com."}, +} + +func TestLookupGmailMX(t *testing.T) { + t.Parallel() + mustHaveExternalNetwork(t) + + if runtime.GOOS == "ios" { + t.Skip("no resolv.conf on iOS") + } + + if !supportsIPv4() || !*testIPv4 { + t.Skip("IPv4 is required") + } + + attempts := 0 + for i := 0; i < len(lookupGmailMXTests); i++ { + tt := lookupGmailMXTests[i] + mxs, err := LookupMX(tt.name) + if err != nil { + testenv.SkipFlakyNet(t) + if attempts < len(backoffDuration) { 
+ dur := backoffDuration[attempts] + t.Logf("backoff %v after failure %v\n", dur, err) + time.Sleep(dur) + attempts++ + i-- + continue + } + t.Fatal(err) + } + if len(mxs) == 0 { + t.Error("got no record") + } + for _, mx := range mxs { + if !hasSuffixFold(mx.Host, tt.host) { + t.Errorf("got %v; want a record containing %s", mx, tt.host) + } + } + } +} + +var lookupGmailNSTests = []struct { + name, host string +}{ + {"gmail.com", "google.com."}, + {"gmail.com.", "google.com."}, +} + +func TestLookupGmailNS(t *testing.T) { + t.Parallel() + mustHaveExternalNetwork(t) + + if runtime.GOOS == "ios" { + t.Skip("no resolv.conf on iOS") + } + + if !supportsIPv4() || !*testIPv4 { + t.Skip("IPv4 is required") + } + + attempts := 0 + for i := 0; i < len(lookupGmailNSTests); i++ { + tt := lookupGmailNSTests[i] + nss, err := LookupNS(tt.name) + if err != nil { + testenv.SkipFlakyNet(t) + if attempts < len(backoffDuration) { + dur := backoffDuration[attempts] + t.Logf("backoff %v after failure %v\n", dur, err) + time.Sleep(dur) + attempts++ + i-- + continue + } + t.Fatal(err) + } + if len(nss) == 0 { + t.Error("got no record") + } + for _, ns := range nss { + if !hasSuffixFold(ns.Host, tt.host) { + t.Errorf("got %v; want a record containing %s", ns, tt.host) + } + } + } +} + +var lookupGmailTXTTests = []struct { + name, txt, host string +}{ + {"gmail.com", "spf", "google.com"}, + {"gmail.com.", "spf", "google.com"}, +} + +func TestLookupGmailTXT(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; see https://golang.org/issue/29722") + } + t.Parallel() + mustHaveExternalNetwork(t) + + if runtime.GOOS == "ios" { + t.Skip("no resolv.conf on iOS") + } + + if !supportsIPv4() || !*testIPv4 { + t.Skip("IPv4 is required") + } + + attempts := 0 + for i := 0; i < len(lookupGmailTXTTests); i++ { + tt := lookupGmailTXTTests[i] + txts, err := LookupTXT(tt.name) + if err != nil { + testenv.SkipFlakyNet(t) + if attempts < len(backoffDuration) { + dur := backoffDuration[attempts] + t.Logf("backoff %v after failure %v\n", dur, err) + time.Sleep(dur) + attempts++ + i-- + continue + } + t.Fatal(err) + } + if len(txts) == 0 { + t.Error("got no record") + } + found := false + for _, txt := range txts { + if strings.Contains(txt, tt.txt) && (strings.HasSuffix(txt, tt.host) || strings.HasSuffix(txt, tt.host+".")) { + found = true + break + } + } + if !found { + t.Errorf("got %v; want a record containing %s, %s", txts, tt.txt, tt.host) + } + } +} + +var lookupGooglePublicDNSAddrTests = []string{ + "8.8.8.8", + "8.8.4.4", + "2001:4860:4860::8888", + "2001:4860:4860::8844", +} + +func TestLookupGooglePublicDNSAddr(t *testing.T) { + mustHaveExternalNetwork(t) + + if !supportsIPv4() || !supportsIPv6() || !*testIPv4 || !*testIPv6 { + t.Skip("both IPv4 and IPv6 are required") + } + + defer dnsWaitGroup.Wait() + + for _, ip := range lookupGooglePublicDNSAddrTests { + names, err := LookupAddr(ip) + if err != nil { + t.Fatal(err) + } + if len(names) == 0 { + t.Error("got no record") + } + for _, name := range names { + if !hasSuffixFold(name, ".google.com.") && !hasSuffixFold(name, ".google.") { + t.Errorf("got %q; want a record ending in .google.com. 
or .google.", name) + } + } + } +} + +func TestLookupIPv6LinkLocalAddr(t *testing.T) { + if !supportsIPv6() || !*testIPv6 { + t.Skip("IPv6 is required") + } + + defer dnsWaitGroup.Wait() + + addrs, err := LookupHost("localhost") + if err != nil { + t.Fatal(err) + } + found := false + for _, addr := range addrs { + if addr == "fe80::1%lo0" { + found = true + break + } + } + if !found { + t.Skipf("not supported on %s", runtime.GOOS) + } + if _, err := LookupAddr("fe80::1%lo0"); err != nil { + t.Error(err) + } +} + +func TestLookupIPv6LinkLocalAddrWithZone(t *testing.T) { + if !supportsIPv6() || !*testIPv6 { + t.Skip("IPv6 is required") + } + + ipaddrs, err := DefaultResolver.LookupIPAddr(context.Background(), "fe80::1%lo0") + if err != nil { + t.Error(err) + } + for _, addr := range ipaddrs { + if e, a := "lo0", addr.Zone; e != a { + t.Errorf("wrong zone: want %q, got %q", e, a) + } + } + + addrs, err := DefaultResolver.LookupHost(context.Background(), "fe80::1%lo0") + if err != nil { + t.Error(err) + } + for _, addr := range addrs { + if e, a := "fe80::1%lo0", addr; e != a { + t.Errorf("wrong host: want %q got %q", e, a) + } + } +} + +var lookupCNAMETests = []struct { + name, cname string +}{ + {"www.iana.org", "icann.org."}, + {"www.iana.org.", "icann.org."}, + {"www.google.com", "google.com."}, + {"google.com", "google.com."}, + {"cname-to-txt.go4.org", "test-txt-record.go4.org."}, +} + +func TestLookupCNAME(t *testing.T) { + mustHaveExternalNetwork(t) + testenv.SkipFlakyNet(t) + + if !supportsIPv4() || !*testIPv4 { + t.Skip("IPv4 is required") + } + + defer dnsWaitGroup.Wait() + + attempts := 0 + for i := 0; i < len(lookupCNAMETests); i++ { + tt := lookupCNAMETests[i] + cname, err := LookupCNAME(tt.name) + if err != nil { + testenv.SkipFlakyNet(t) + if attempts < len(backoffDuration) { + dur := backoffDuration[attempts] + t.Logf("backoff %v after failure %v\n", dur, err) + time.Sleep(dur) + attempts++ + i-- + continue + } + t.Fatal(err) + } + if !hasSuffixFold(cname, tt.cname) { + t.Errorf("got %s; want a record containing %s", cname, tt.cname) + } + } +} + +var lookupGoogleHostTests = []struct { + name string +}{ + {"google.com"}, + {"google.com."}, +} + +func TestLookupGoogleHost(t *testing.T) { + mustHaveExternalNetwork(t) + testenv.SkipFlakyNet(t) + + if !supportsIPv4() || !*testIPv4 { + t.Skip("IPv4 is required") + } + + defer dnsWaitGroup.Wait() + + for _, tt := range lookupGoogleHostTests { + addrs, err := LookupHost(tt.name) + if err != nil { + t.Fatal(err) + } + if len(addrs) == 0 { + t.Error("got no record") + } + for _, addr := range addrs { + if ParseIP(addr) == nil { + t.Errorf("got %q; want a literal IP address", addr) + } + } + } +} + +func TestLookupLongTXT(t *testing.T) { + testenv.SkipFlaky(t, 22857) + mustHaveExternalNetwork(t) + + defer dnsWaitGroup.Wait() + + txts, err := LookupTXT("golang.rsc.io") + if err != nil { + t.Fatal(err) + } + sort.Strings(txts) + want := []string{ + strings.Repeat("abcdefghijklmnopqrstuvwxyABCDEFGHJIKLMNOPQRSTUVWXY", 10), + "gophers rule", + } + if !reflect.DeepEqual(txts, want) { + t.Fatalf("LookupTXT golang.rsc.io incorrect\nhave %q\nwant %q", txts, want) + } +} + +var lookupGoogleIPTests = []struct { + name string +}{ + {"google.com"}, + {"google.com."}, +} + +func TestLookupGoogleIP(t *testing.T) { + mustHaveExternalNetwork(t) + testenv.SkipFlakyNet(t) + + if !supportsIPv4() || !*testIPv4 { + t.Skip("IPv4 is required") + } + + defer dnsWaitGroup.Wait() + + for _, tt := range lookupGoogleIPTests { + ips, err := LookupIP(tt.name) + if 
err != nil {
+			t.Fatal(err)
+		}
+		if len(ips) == 0 {
+			t.Error("got no record")
+		}
+		for _, ip := range ips {
+			if ip.To4() == nil && ip.To16() == nil {
+				t.Errorf("got %v; want an IP address", ip)
+			}
+		}
+	}
+}
+
+var revAddrTests = []struct {
+	Addr      string
+	Reverse   string
+	ErrPrefix string
+}{
+	{"1.2.3.4", "4.3.2.1.in-addr.arpa.", ""},
+	{"245.110.36.114", "114.36.110.245.in-addr.arpa.", ""},
+	{"::ffff:12.34.56.78", "78.56.34.12.in-addr.arpa.", ""},
+	{"::1", "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.", ""},
+	{"1::", "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.ip6.arpa.", ""},
+	{"1234:567::89a:bcde", "e.d.c.b.a.9.8.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.7.6.5.0.4.3.2.1.ip6.arpa.", ""},
+	{"1234:567:fefe:bcbc:adad:9e4a:89a:bcde", "e.d.c.b.a.9.8.0.a.4.e.9.d.a.d.a.c.b.c.b.e.f.e.f.7.6.5.0.4.3.2.1.ip6.arpa.", ""},
+	{"1.2.3", "", "unrecognized address"},
+	{"1.2.3.4.5", "", "unrecognized address"},
+	{"1234:567:bcbca::89a:bcde", "", "unrecognized address"},
+	{"1234:567::bcbc:adad::89a:bcde", "", "unrecognized address"},
+}
+
+func TestReverseAddress(t *testing.T) {
+	defer dnsWaitGroup.Wait()
+	for i, tt := range revAddrTests {
+		a, err := reverseaddr(tt.Addr)
+		if len(tt.ErrPrefix) > 0 && err == nil {
+			t.Errorf("#%d: expected %q, got <nil> (error)", i, tt.ErrPrefix)
+			continue
+		}
+		if len(tt.ErrPrefix) == 0 && err != nil {
+			t.Errorf("#%d: expected <nil>, got %q (error)", i, err)
+		}
+		if err != nil && err.(*DNSError).Err != tt.ErrPrefix {
+			t.Errorf("#%d: expected %q, got %q (mismatched error)", i, tt.ErrPrefix, err.(*DNSError).Err)
+		}
+		if a != tt.Reverse {
+			t.Errorf("#%d: expected %q, got %q (reverse address)", i, tt.Reverse, a)
+		}
+	}
+}
+
+func TestDNSFlood(t *testing.T) {
+	if !*testDNSFlood {
+		t.Skip("test disabled; use -dnsflood to enable")
+	}
+
+	defer dnsWaitGroup.Wait()
+
+	var N = 5000
+	if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
+		// On Darwin this test consumes kernel threads much
+		// more than other platforms for some reason.
+		// When we monitor the number of allocated Ms by
+		// observing on runtime.newm calls, we can see that it
+		// easily reaches the per process ceiling
+		// kern.num_threads when CGO_ENABLED=1 and
+		// GODEBUG=netdns=go.
+ N = 500 + } + + const timeout = 3 * time.Second + ctxHalfTimeout, cancel := context.WithTimeout(context.Background(), timeout/2) + defer cancel() + ctxTimeout, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + c := make(chan error, 2*N) + for i := 0; i < N; i++ { + name := fmt.Sprintf("%d.net-test.golang.org", i) + go func() { + _, err := DefaultResolver.LookupIPAddr(ctxHalfTimeout, name) + c <- err + }() + go func() { + _, err := DefaultResolver.LookupIPAddr(ctxTimeout, name) + c <- err + }() + } + qstats := struct { + succeeded, failed int + timeout, temporary, other int + unknown int + }{} + deadline := time.After(timeout + time.Second) + for i := 0; i < 2*N; i++ { + select { + case <-deadline: + t.Fatal("deadline exceeded") + case err := <-c: + switch err := err.(type) { + case nil: + qstats.succeeded++ + case Error: + qstats.failed++ + if err.Timeout() { + qstats.timeout++ + } + if err.Temporary() { + qstats.temporary++ + } + if !err.Timeout() && !err.Temporary() { + qstats.other++ + } + default: + qstats.failed++ + qstats.unknown++ + } + } + } + + // A high volume of DNS queries for sub-domain of golang.org + // would be coordinated by authoritative or recursive server, + // or stub resolver which implements query-response rate + // limitation, so we can expect some query successes and more + // failures including timeout, temporary and other here. + // As a rule, unknown must not be shown but it might possibly + // happen due to issue 4856 for now. + t.Logf("%v succeeded, %v failed (%v timeout, %v temporary, %v other, %v unknown)", qstats.succeeded, qstats.failed, qstats.timeout, qstats.temporary, qstats.other, qstats.unknown) +} + +func TestLookupDotsWithLocalSource(t *testing.T) { + if !supportsIPv4() || !*testIPv4 { + t.Skip("IPv4 is required") + } + + mustHaveExternalNetwork(t) + + defer dnsWaitGroup.Wait() + + for i, fn := range []func() func(){forceGoDNS, forceCgoDNS} { + fixup := fn() + if fixup == nil { + continue + } + names, err := LookupAddr("127.0.0.1") + fixup() + if err != nil { + t.Logf("#%d: %v", i, err) + continue + } + mode := "netgo" + if i == 1 { + mode = "netcgo" + } + loop: + for i, name := range names { + if strings.Index(name, ".") == len(name)-1 { // "localhost" not "localhost." + for j := range names { + if j == i { + continue + } + if names[j] == name[:len(name)-1] { + // It's OK if we find the name without the dot, + // as some systems say 127.0.0.1 localhost localhost. + continue loop + } + } + t.Errorf("%s: got %s; want %s", mode, name, name[:len(name)-1]) + } else if strings.Contains(name, ".") && !strings.HasSuffix(name, ".") { // "localhost.localdomain." 
not "localhost.localdomain" + t.Errorf("%s: got %s; want name ending with trailing dot", mode, name) + } + } + } +} + +func TestLookupDotsWithRemoteSource(t *testing.T) { + if runtime.GOOS == "darwin" || runtime.GOOS == "ios" { + testenv.SkipFlaky(t, 27992) + } + mustHaveExternalNetwork(t) + testenv.SkipFlakyNet(t) + + if !supportsIPv4() || !*testIPv4 { + t.Skip("IPv4 is required") + } + + if runtime.GOOS == "ios" { + t.Skip("no resolv.conf on iOS") + } + + defer dnsWaitGroup.Wait() + + if fixup := forceGoDNS(); fixup != nil { + testDots(t, "go") + fixup() + } + if fixup := forceCgoDNS(); fixup != nil { + testDots(t, "cgo") + fixup() + } +} + +func testDots(t *testing.T, mode string) { + names, err := LookupAddr("8.8.8.8") // Google dns server + if err != nil { + t.Errorf("LookupAddr(8.8.8.8): %v (mode=%v)", err, mode) + } else { + for _, name := range names { + if !hasSuffixFold(name, ".google.com.") && !hasSuffixFold(name, ".google.") { + t.Errorf("LookupAddr(8.8.8.8) = %v, want names ending in .google.com or .google with trailing dot (mode=%v)", names, mode) + break + } + } + } + + cname, err := LookupCNAME("www.mit.edu") + if err != nil { + t.Errorf("LookupCNAME(www.mit.edu, mode=%v): %v", mode, err) + } else if !strings.HasSuffix(cname, ".") { + t.Errorf("LookupCNAME(www.mit.edu) = %v, want cname ending in . with trailing dot (mode=%v)", cname, mode) + } + + mxs, err := LookupMX("google.com") + if err != nil { + t.Errorf("LookupMX(google.com): %v (mode=%v)", err, mode) + } else { + for _, mx := range mxs { + if !hasSuffixFold(mx.Host, ".google.com.") { + t.Errorf("LookupMX(google.com) = %v, want names ending in .google.com. with trailing dot (mode=%v)", mxString(mxs), mode) + break + } + } + } + + nss, err := LookupNS("google.com") + if err != nil { + t.Errorf("LookupNS(google.com): %v (mode=%v)", err, mode) + } else { + for _, ns := range nss { + if !hasSuffixFold(ns.Host, ".google.com.") { + t.Errorf("LookupNS(google.com) = %v, want names ending in .google.com. with trailing dot (mode=%v)", nsString(nss), mode) + break + } + } + } + + cname, srvs, err := LookupSRV("ldap", "tcp", "google.com") + if err != nil { + t.Errorf("LookupSRV(ldap, tcp, google.com): %v (mode=%v)", err, mode) + } else { + if !hasSuffixFold(cname, ".google.com.") { + t.Errorf("LookupSRV(ldap, tcp, google.com) returned cname=%v, want name ending in .google.com. with trailing dot (mode=%v)", cname, mode) + } + for _, srv := range srvs { + if !hasSuffixFold(srv.Target, ".google.com.") { + t.Errorf("LookupSRV(ldap, tcp, google.com) returned addrs=%v, want names ending in .google.com. 
with trailing dot (mode=%v)", srvString(srvs), mode) + break + } + } + } +} + +func mxString(mxs []*MX) string { + var buf strings.Builder + sep := "" + fmt.Fprintf(&buf, "[") + for _, mx := range mxs { + fmt.Fprintf(&buf, "%s%s:%d", sep, mx.Host, mx.Pref) + sep = " " + } + fmt.Fprintf(&buf, "]") + return buf.String() +} + +func nsString(nss []*NS) string { + var buf strings.Builder + sep := "" + fmt.Fprintf(&buf, "[") + for _, ns := range nss { + fmt.Fprintf(&buf, "%s%s", sep, ns.Host) + sep = " " + } + fmt.Fprintf(&buf, "]") + return buf.String() +} + +func srvString(srvs []*SRV) string { + var buf strings.Builder + sep := "" + fmt.Fprintf(&buf, "[") + for _, srv := range srvs { + fmt.Fprintf(&buf, "%s%s:%d:%d:%d", sep, srv.Target, srv.Port, srv.Priority, srv.Weight) + sep = " " + } + fmt.Fprintf(&buf, "]") + return buf.String() +} + +func TestLookupPort(t *testing.T) { + // See https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml + // + // Please be careful about adding new test cases. + // There are platforms which have incomplete mappings for + // restricted resource access and security reasons. + type test struct { + network string + name string + port int + ok bool + } + var tests = []test{ + {"tcp", "0", 0, true}, + {"udp", "0", 0, true}, + {"udp", "domain", 53, true}, + + {"--badnet--", "zzz", 0, false}, + {"tcp", "--badport--", 0, false}, + {"tcp", "-1", 0, false}, + {"tcp", "65536", 0, false}, + {"udp", "-1", 0, false}, + {"udp", "65536", 0, false}, + {"tcp", "123456789", 0, false}, + + // Issue 13610: LookupPort("tcp", "") + {"tcp", "", 0, true}, + {"tcp4", "", 0, true}, + {"tcp6", "", 0, true}, + {"udp", "", 0, true}, + {"udp4", "", 0, true}, + {"udp6", "", 0, true}, + } + + switch runtime.GOOS { + case "android": + if netGoBuildTag { + t.Skipf("not supported on %s without cgo; see golang.org/issues/14576", runtime.GOOS) + } + default: + tests = append(tests, test{"tcp", "http", 80, true}) + } + + for _, tt := range tests { + port, err := LookupPort(tt.network, tt.name) + if port != tt.port || (err == nil) != tt.ok { + t.Errorf("LookupPort(%q, %q) = %d, %v; want %d, error=%t", tt.network, tt.name, port, err, tt.port, !tt.ok) + } + if err != nil { + if perr := parseLookupPortError(err); perr != nil { + t.Error(perr) + } + } + } +} + +// Like TestLookupPort but with minimal tests that should always pass +// because the answers are baked-in to the net package. 
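+//
+// e.g. (illustrative):
+//
+//	port, err := net.LookupPort("tcp", "https") // 443, err == nil, no resolver needed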
+func TestLookupPort_Minimal(t *testing.T) { + type test struct { + network string + name string + port int + } + var tests = []test{ + {"tcp", "http", 80}, + {"tcp", "HTTP", 80}, // case shouldn't matter + {"tcp", "https", 443}, + {"tcp", "ssh", 22}, + {"tcp", "gopher", 70}, + {"tcp4", "http", 80}, + {"tcp6", "http", 80}, + } + + for _, tt := range tests { + port, err := LookupPort(tt.network, tt.name) + if port != tt.port || err != nil { + t.Errorf("LookupPort(%q, %q) = %d, %v; want %d, error=nil", tt.network, tt.name, port, err, tt.port) + } + } +} + +func TestLookupProtocol_Minimal(t *testing.T) { + type test struct { + name string + want int + } + var tests = []test{ + {"tcp", 6}, + {"TcP", 6}, // case shouldn't matter + {"icmp", 1}, + {"igmp", 2}, + {"udp", 17}, + {"ipv6-icmp", 58}, + } + + for _, tt := range tests { + got, err := lookupProtocol(context.Background(), tt.name) + if got != tt.want || err != nil { + t.Errorf("LookupProtocol(%q) = %d, %v; want %d, error=nil", tt.name, got, err, tt.want) + } + } + +} + +func TestLookupNonLDH(t *testing.T) { + defer dnsWaitGroup.Wait() + + if fixup := forceGoDNS(); fixup != nil { + defer fixup() + } + + // "LDH" stands for letters, digits, and hyphens and is the usual + // description of standard DNS names. + // This test is checking that other kinds of names are reported + // as not found, not reported as invalid names. + addrs, err := LookupHost("!!!.###.bogus..domain.") + if err == nil { + t.Fatalf("lookup succeeded: %v", addrs) + } + if !strings.HasSuffix(err.Error(), errNoSuchHost.Error()) { + t.Fatalf("lookup error = %v, want %v", err, errNoSuchHost) + } + if !err.(*DNSError).IsNotFound { + t.Fatalf("lookup error = %v, want true", err.(*DNSError).IsNotFound) + } +} + +func TestLookupContextCancel(t *testing.T) { + mustHaveExternalNetwork(t) + testenv.SkipFlakyNet(t) + + origTestHookLookupIP := testHookLookupIP + defer func() { + dnsWaitGroup.Wait() + testHookLookupIP = origTestHookLookupIP + }() + + lookupCtx, cancelLookup := context.WithCancel(context.Background()) + unblockLookup := make(chan struct{}) + + // Set testHookLookupIP to start a new, concurrent call to LookupIPAddr + // and cancel the original one, then block until the canceled call has returned + // (ensuring that it has performed any synchronous cleanup). + testHookLookupIP = func( + ctx context.Context, + fn func(context.Context, string, string) ([]IPAddr, error), + network string, + host string, + ) ([]IPAddr, error) { + select { + case <-unblockLookup: + default: + // Start a concurrent LookupIPAddr for the same host while the caller is + // still blocked, and sleep a little to give it time to be deduplicated + // before we cancel (and unblock) the caller. + // (If the timing doesn't quite work out, we'll end up testing sequential + // calls instead of concurrent ones, but the test should still pass.) + t.Logf("starting concurrent LookupIPAddr") + dnsWaitGroup.Add(1) + go func() { + defer dnsWaitGroup.Done() + _, err := DefaultResolver.LookupIPAddr(context.Background(), host) + if err != nil { + t.Error(err) + } + }() + time.Sleep(1 * time.Millisecond) + } + + cancelLookup() + <-unblockLookup + // If the concurrent lookup above is deduplicated to this one + // (as we expect to happen most of the time), it is important + // that the original call does not cancel the shared Context. + // (See https://go.dev/issue/22724.) Explicitly check for + // cancellation now, just in case fn itself doesn't notice it. 
+ if err := ctx.Err(); err != nil { + t.Logf("testHookLookupIP canceled") + return nil, err + } + t.Logf("testHookLookupIP performing lookup") + return fn(ctx, network, host) + } + + _, err := DefaultResolver.LookupIPAddr(lookupCtx, "google.com") + if dnsErr, ok := err.(*DNSError); !ok || dnsErr.Err != errCanceled.Error() { + t.Errorf("unexpected error from canceled, blocked LookupIPAddr: %v", err) + } + close(unblockLookup) +} + +// Issue 24330: treat the nil *Resolver like a zero value. Verify nothing +// crashes if nil is used. +func TestNilResolverLookup(t *testing.T) { + mustHaveExternalNetwork(t) + var r *Resolver = nil + ctx := context.Background() + + // Don't care about the results, just that nothing panics: + r.LookupAddr(ctx, "8.8.8.8") + r.LookupCNAME(ctx, "google.com") + r.LookupHost(ctx, "google.com") + r.LookupIPAddr(ctx, "google.com") + r.LookupIP(ctx, "ip", "google.com") + r.LookupMX(ctx, "gmail.com") + r.LookupNS(ctx, "google.com") + r.LookupPort(ctx, "tcp", "smtp") + r.LookupSRV(ctx, "service", "proto", "name") + r.LookupTXT(ctx, "gmail.com") +} + +// TestLookupHostCancel verifies that lookup works even after many +// canceled lookups (see golang.org/issue/24178 for details). +func TestLookupHostCancel(t *testing.T) { + mustHaveExternalNetwork(t) + testenv.SkipFlakyNet(t) + t.Parallel() // Executes 600ms worth of sequential sleeps. + + const ( + google = "www.google.com" + invalidDomain = "invalid.invalid" // RFC 2606 reserves .invalid + n = 600 // this needs to be larger than threadLimit size + ) + + _, err := LookupHost(google) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + for i := 0; i < n; i++ { + addr, err := DefaultResolver.LookupHost(ctx, invalidDomain) + if err == nil { + t.Fatalf("LookupHost(%q): returns %v, but should fail", invalidDomain, addr) + } + + // Don't verify what the actual error is. + // We know that it must be non-nil because the domain is invalid, + // but we don't have any guarantee that LookupHost actually bothers + // to check for cancellation on the fast path. + // (For example, it could use a local cache to avoid blocking entirely.) + + // The lookup may deduplicate in-flight requests, so give it time to settle + // in between. + time.Sleep(time.Millisecond * 1) + } + + _, err = LookupHost(google) + if err != nil { + t.Fatal(err) + } +} + +type lookupCustomResolver struct { + *Resolver + mu sync.RWMutex + dialed bool +} + +func (lcr *lookupCustomResolver) dial() func(ctx context.Context, network, address string) (Conn, error) { + return func(ctx context.Context, network, address string) (Conn, error) { + lcr.mu.Lock() + lcr.dialed = true + lcr.mu.Unlock() + return Dial(network, address) + } +} + +// TestConcurrentPreferGoResolversDial tests that multiple resolvers with the +// PreferGo option used concurrently are all dialed properly. +func TestConcurrentPreferGoResolversDial(t *testing.T) { + switch runtime.GOOS { + case "plan9": + // TODO: plan9 implementation of the resolver uses the Dial function since + // https://go.dev/cl/409234, this test could probably be reenabled. 
+ t.Skipf("skip on %v", runtime.GOOS) + } + + testenv.MustHaveExternalNetwork(t) + testenv.SkipFlakyNet(t) + + defer dnsWaitGroup.Wait() + + resolvers := make([]*lookupCustomResolver, 2) + for i := range resolvers { + cs := lookupCustomResolver{Resolver: &Resolver{PreferGo: true}} + cs.Dial = cs.dial() + resolvers[i] = &cs + } + + var wg sync.WaitGroup + wg.Add(len(resolvers)) + for i, resolver := range resolvers { + go func(r *Resolver, index int) { + defer wg.Done() + _, err := r.LookupIPAddr(context.Background(), "google.com") + if err != nil { + t.Errorf("lookup failed for resolver %d: %q", index, err) + } + }(resolver.Resolver, i) + } + wg.Wait() + + if t.Failed() { + t.FailNow() + } + + for i, resolver := range resolvers { + if !resolver.dialed { + t.Errorf("custom resolver %d not dialed during lookup", i) + } + } +} + +var ipVersionTests = []struct { + network string + version byte +}{ + {"tcp", 0}, + {"tcp4", '4'}, + {"tcp6", '6'}, + {"udp", 0}, + {"udp4", '4'}, + {"udp6", '6'}, + {"ip", 0}, + {"ip4", '4'}, + {"ip6", '6'}, + {"ip7", 0}, + {"", 0}, +} + +func TestIPVersion(t *testing.T) { + for _, tt := range ipVersionTests { + if version := ipVersion(tt.network); version != tt.version { + t.Errorf("Family for: %s. Expected: %s, Got: %s", tt.network, + string(tt.version), string(version)) + } + } +} + +// Issue 28600: The context that is used to lookup ips should always +// preserve the values from the context that was passed into LookupIPAddr. +func TestLookupIPAddrPreservesContextValues(t *testing.T) { + origTestHookLookupIP := testHookLookupIP + defer func() { testHookLookupIP = origTestHookLookupIP }() + + keyValues := []struct { + key, value any + }{ + {"key-1", 12}, + {384, "value2"}, + {new(float64), 137}, + } + ctx := context.Background() + for _, kv := range keyValues { + ctx = context.WithValue(ctx, kv.key, kv.value) + } + + wantIPs := []IPAddr{ + {IP: IPv4(127, 0, 0, 1)}, + {IP: IPv6loopback}, + } + + checkCtxValues := func(ctx_ context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { + for _, kv := range keyValues { + g, w := ctx_.Value(kv.key), kv.value + if !reflect.DeepEqual(g, w) { + t.Errorf("Value lookup:\n\tGot: %v\n\tWant: %v", g, w) + } + } + return wantIPs, nil + } + testHookLookupIP = checkCtxValues + + resolvers := []*Resolver{ + nil, + new(Resolver), + } + + for i, resolver := range resolvers { + gotIPs, err := resolver.LookupIPAddr(ctx, "golang.org") + if err != nil { + t.Errorf("Resolver #%d: unexpected error: %v", i, err) + } + if !reflect.DeepEqual(gotIPs, wantIPs) { + t.Errorf("#%d: mismatched IPAddr results\n\tGot: %v\n\tWant: %v", i, gotIPs, wantIPs) + } + } +} + +// Issue 30521: The lookup group should call the resolver for each network. 
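+//
+// (Added note: lookupIPAddr keys its singleflight group on
+// network + "\000" + host, so e.g. "udp4"/"golang.org" and
+// "udp6"/"golang.org" are distinct in-flight lookups instead of sharing one
+// result; the test below exercises exactly that.)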
+func TestLookupIPAddrConcurrentCallsForNetworks(t *testing.T) { + origTestHookLookupIP := testHookLookupIP + defer func() { testHookLookupIP = origTestHookLookupIP }() + + queries := [][]string{ + {"udp", "golang.org"}, + {"udp4", "golang.org"}, + {"udp6", "golang.org"}, + {"udp", "golang.org"}, + {"udp", "golang.org"}, + } + results := map[[2]string][]IPAddr{ + {"udp", "golang.org"}: { + {IP: IPv4(127, 0, 0, 1)}, + {IP: IPv6loopback}, + }, + {"udp4", "golang.org"}: { + {IP: IPv4(127, 0, 0, 1)}, + }, + {"udp6", "golang.org"}: { + {IP: IPv6loopback}, + }, + } + calls := int32(0) + waitCh := make(chan struct{}) + testHookLookupIP = func(ctx context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { + // We'll block until this is called one time for each different + // expected result. This will ensure that the lookup group would wait + // for the existing call if it was to be reused. + if atomic.AddInt32(&calls, 1) == int32(len(results)) { + close(waitCh) + } + select { + case <-waitCh: + case <-ctx.Done(): + return nil, ctx.Err() + } + return results[[2]string{network, host}], nil + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + wg := sync.WaitGroup{} + for _, q := range queries { + network := q[0] + host := q[1] + wg.Add(1) + go func() { + defer wg.Done() + gotIPs, err := DefaultResolver.lookupIPAddr(ctx, network, host) + if err != nil { + t.Errorf("lookupIPAddr(%v, %v): unexpected error: %v", network, host, err) + } + wantIPs := results[[2]string{network, host}] + if !reflect.DeepEqual(gotIPs, wantIPs) { + t.Errorf("lookupIPAddr(%v, %v): mismatched IPAddr results\n\tGot: %v\n\tWant: %v", network, host, gotIPs, wantIPs) + } + }() + } + wg.Wait() +} + +// Issue 53995: Resolver.LookupIP should return error for empty host name. +func TestResolverLookupIPWithEmptyHost(t *testing.T) { + _, err := DefaultResolver.LookupIP(context.Background(), "ip", "") + if err == nil { + t.Fatal("DefaultResolver.LookupIP for empty host success, want no host error") + } + if !strings.HasSuffix(err.Error(), errNoSuchHost.Error()) { + t.Fatalf("lookup error = %v, want %v", err, errNoSuchHost) + } +} + +func TestWithUnexpiredValuesPreserved(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + // Insert a value into it. + key, value := "key-1", 2 + ctx = context.WithValue(ctx, key, value) + + // Now use the "values preserving context" like + // we would for LookupIPAddr. See Issue 28600. + ctx = withUnexpiredValuesPreserved(ctx) + + // Lookup before expiry. + if g, w := ctx.Value(key), value; g != w { + t.Errorf("Lookup before expiry: Got %v Want %v", g, w) + } + + // Cancel the context. 
+ cancel() + + // Lookup after expiry should return nil + if g := ctx.Value(key); g != nil { + t.Errorf("Lookup after expiry: Got %v want nil", g) + } +} + +// Issue 31597: don't panic on null byte in name +func TestLookupNullByte(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + testenv.SkipFlakyNet(t) + LookupHost("foo\x00bar") // check that it doesn't panic; it used to on Windows +} + +func TestResolverLookupIP(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + v4Ok := supportsIPv4() && *testIPv4 + v6Ok := supportsIPv6() && *testIPv6 + + defer dnsWaitGroup.Wait() + + for _, impl := range []struct { + name string + fn func() func() + }{ + {"go", forceGoDNS}, + {"cgo", forceCgoDNS}, + } { + t.Run("implementation: "+impl.name, func(t *testing.T) { + fixup := impl.fn() + if fixup == nil { + t.Skip("not supported") + } + defer fixup() + + for _, network := range []string{"ip", "ip4", "ip6"} { + t.Run("network: "+network, func(t *testing.T) { + switch { + case network == "ip4" && !v4Ok: + t.Skip("IPv4 is not supported") + case network == "ip6" && !v6Ok: + t.Skip("IPv6 is not supported") + } + + // google.com has both A and AAAA records. + const host = "google.com" + ips, err := DefaultResolver.LookupIP(context.Background(), network, host) + if err != nil { + testenv.SkipFlakyNet(t) + t.Fatalf("DefaultResolver.LookupIP(%q, %q): failed with unexpected error: %v", network, host, err) + } + + var v4Addrs []netip.Addr + var v6Addrs []netip.Addr + for _, ip := range ips { + if addr, ok := netip.AddrFromSlice(ip); ok { + if addr.Is4() { + v4Addrs = append(v4Addrs, addr) + } else { + v6Addrs = append(v6Addrs, addr) + } + } else { + t.Fatalf("IP=%q is neither IPv4 nor IPv6", ip) + } + } + + // Check that we got the expected addresses. + if network == "ip4" || network == "ip" && v4Ok { + if len(v4Addrs) == 0 { + t.Errorf("DefaultResolver.LookupIP(%q, %q): no IPv4 addresses", network, host) + } + } + if network == "ip6" || network == "ip" && v6Ok { + if len(v6Addrs) == 0 { + t.Errorf("DefaultResolver.LookupIP(%q, %q): no IPv6 addresses", network, host) + } + } + + // Check that we didn't get any unexpected addresses. + if network == "ip6" && len(v4Addrs) > 0 { + t.Errorf("DefaultResolver.LookupIP(%q, %q): unexpected IPv4 addresses: %v", network, host, v4Addrs) + } + if network == "ip4" && len(v6Addrs) > 0 { + t.Errorf("DefaultResolver.LookupIP(%q, %q): unexpected IPv6 or IPv4-mapped IPv6 addresses: %v", network, host, v6Addrs) + } + }) + } + }) + } +} + +// A context timeout should still return a DNSError. +func TestDNSTimeout(t *testing.T) { + origTestHookLookupIP := testHookLookupIP + defer func() { testHookLookupIP = origTestHookLookupIP }() + defer dnsWaitGroup.Wait() + + timeoutHookGo := make(chan bool, 1) + timeoutHook := func(ctx context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { + <-timeoutHookGo + return nil, context.DeadlineExceeded + } + testHookLookupIP = timeoutHook + + checkErr := func(err error) { + t.Helper() + if err == nil { + t.Error("expected an error") + } else if dnserr, ok := err.(*DNSError); !ok { + t.Errorf("got error type %T, want %T", err, (*DNSError)(nil)) + } else if !dnserr.IsTimeout { + t.Errorf("got error %#v, want IsTimeout == true", dnserr) + } else if isTimeout := dnserr.Timeout(); !isTimeout { + t.Errorf("got err.Timeout() == %t, want true", isTimeout) + } + } + + // Single lookup. + timeoutHookGo <- true + _, err := LookupIP("golang.org") + checkErr(err) + + // Double lookup. 
+ var err1, err2 error + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + _, err1 = LookupIP("golang1.org") + }() + go func() { + defer wg.Done() + _, err2 = LookupIP("golang1.org") + }() + close(timeoutHookGo) + wg.Wait() + checkErr(err1) + checkErr(err2) + + // Double lookup with context. + timeoutHookGo = make(chan bool) + ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) + wg.Add(2) + go func() { + defer wg.Done() + _, err1 = DefaultResolver.LookupIPAddr(ctx, "golang2.org") + }() + go func() { + defer wg.Done() + _, err2 = DefaultResolver.LookupIPAddr(ctx, "golang2.org") + }() + time.Sleep(10 * time.Nanosecond) + close(timeoutHookGo) + wg.Wait() + checkErr(err1) + checkErr(err2) + cancel() +} + +func TestLookupNoData(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("not supported on plan9") + } + + mustHaveExternalNetwork(t) + + testLookupNoData(t, "default resolver") + + func() { + defer forceGoDNS()() + testLookupNoData(t, "forced go resolver") + }() + + func() { + defer forceCgoDNS()() + testLookupNoData(t, "forced cgo resolver") + }() +} + +func testLookupNoData(t *testing.T, prefix string) { + attempts := 0 + for { + // Domain that doesn't have any A/AAAA RRs, but has different one (in this case a TXT), + // so that it returns an empty response without any error codes (NXDOMAIN). + _, err := LookupHost("golang.rsc.io.") + if err == nil { + t.Errorf("%v: unexpected success", prefix) + return + } + + var dnsErr *DNSError + if errors.As(err, &dnsErr) { + succeeded := true + if !dnsErr.IsNotFound { + succeeded = false + t.Logf("%v: IsNotFound is set to false", prefix) + } + + if dnsErr.Err != errNoSuchHost.Error() { + succeeded = false + t.Logf("%v: error message is not equal to: %v", prefix, errNoSuchHost.Error()) + } + + if succeeded { + return + } + } + + testenv.SkipFlakyNet(t) + if attempts < len(backoffDuration) { + dur := backoffDuration[attempts] + t.Logf("%v: backoff %v after failure %v\n", prefix, dur, err) + time.Sleep(dur) + attempts++ + continue + } + + t.Errorf("%v: unexpected error: %v", prefix, err) + return + } +} + +func TestLookupPortNotFound(t *testing.T) { + allResolvers(t, func(t *testing.T) { + _, err := LookupPort("udp", "_-unknown-service-") + var dnsErr *DNSError + if !errors.As(err, &dnsErr) || !dnsErr.IsNotFound { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +// submissions service is only available through a tcp network, see: +// https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=submissions +var tcpOnlyService = func() string { + // plan9 does not have submissions service defined in the service database. 
+ if runtime.GOOS == "plan9" { + return "https" + } + return "submissions" +}() + +func TestLookupPortDifferentNetwork(t *testing.T) { + allResolvers(t, func(t *testing.T) { + _, err := LookupPort("udp", tcpOnlyService) + var dnsErr *DNSError + if !errors.As(err, &dnsErr) || !dnsErr.IsNotFound { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestLookupPortEmptyNetworkString(t *testing.T) { + allResolvers(t, func(t *testing.T) { + _, err := LookupPort("", tcpOnlyService) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestLookupPortIPNetworkString(t *testing.T) { + allResolvers(t, func(t *testing.T) { + _, err := LookupPort("ip", tcpOnlyService) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func allResolvers(t *testing.T, f func(t *testing.T)) { + t.Run("default resolver", f) + t.Run("forced go resolver", func(t *testing.T) { + if fixup := forceGoDNS(); fixup != nil { + defer fixup() + f(t) + } + }) + t.Run("forced cgo resolver", func(t *testing.T) { + if fixup := forceCgoDNS(); fixup != nil { + defer fixup() + f(t) + } + }) +} + +func TestLookupNoSuchHost(t *testing.T) { + mustHaveExternalNetwork(t) + + const testNXDOMAIN = "invalid.invalid." + const testNODATA = "_ldap._tcp.google.com." + + tests := []struct { + name string + query func() error + }{ + { + name: "LookupCNAME NXDOMAIN", + query: func() error { + _, err := LookupCNAME(testNXDOMAIN) + return err + }, + }, + { + name: "LookupHost NXDOMAIN", + query: func() error { + _, err := LookupHost(testNXDOMAIN) + return err + }, + }, + { + name: "LookupHost NODATA", + query: func() error { + _, err := LookupHost(testNODATA) + return err + }, + }, + { + name: "LookupMX NXDOMAIN", + query: func() error { + _, err := LookupMX(testNXDOMAIN) + return err + }, + }, + { + name: "LookupMX NODATA", + query: func() error { + _, err := LookupMX(testNODATA) + return err + }, + }, + { + name: "LookupNS NXDOMAIN", + query: func() error { + _, err := LookupNS(testNXDOMAIN) + return err + }, + }, + { + name: "LookupNS NODATA", + query: func() error { + _, err := LookupNS(testNODATA) + return err + }, + }, + { + name: "LookupSRV NXDOMAIN", + query: func() error { + _, _, err := LookupSRV("unknown", "tcp", testNXDOMAIN) + return err + }, + }, + { + name: "LookupTXT NXDOMAIN", + query: func() error { + _, err := LookupTXT(testNXDOMAIN) + return err + }, + }, + { + name: "LookupTXT NODATA", + query: func() error { + _, err := LookupTXT(testNODATA) + return err + }, + }, + } + + for _, v := range tests { + t.Run(v.name, func(t *testing.T) { + allResolvers(t, func(t *testing.T) { + attempts := 0 + for { + err := v.query() + if err == nil { + t.Errorf("unexpected success") + return + } + if dnsErr, ok := err.(*DNSError); ok { + succeeded := true + if !dnsErr.IsNotFound { + succeeded = false + t.Log("IsNotFound is set to false") + } + if dnsErr.Err != errNoSuchHost.Error() { + succeeded = false + t.Logf("error message is not equal to: %v", errNoSuchHost.Error()) + } + if succeeded { + return + } + } + testenv.SkipFlakyNet(t) + if attempts < len(backoffDuration) { + dur := backoffDuration[attempts] + t.Logf("backoff %v after failure %v\n", dur, err) + time.Sleep(dur) + attempts++ + continue + } + t.Errorf("unexpected error: %v", err) + return + } + }) + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/lookup_unix.go b/platform/dbops/binaries/go/go/src/net/lookup_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..382a2d44bb5cfdbc58a53b121875b98e7c7f28e7 --- 
/dev/null +++ b/platform/dbops/binaries/go/go/src/net/lookup_unix.go @@ -0,0 +1,121 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || js || wasip1 + +package net + +import ( + "context" + "internal/bytealg" + "sync" +) + +var onceReadProtocols sync.Once + +// readProtocols loads contents of /etc/protocols into protocols map +// for quick access. +func readProtocols() { + file, err := open("/etc/protocols") + if err != nil { + return + } + defer file.close() + + for line, ok := file.readLine(); ok; line, ok = file.readLine() { + // tcp 6 TCP # transmission control protocol + if i := bytealg.IndexByteString(line, '#'); i >= 0 { + line = line[0:i] + } + f := getFields(line) + if len(f) < 2 { + continue + } + if proto, _, ok := dtoi(f[1]); ok { + if _, ok := protocols[f[0]]; !ok { + protocols[f[0]] = proto + } + for _, alias := range f[2:] { + if _, ok := protocols[alias]; !ok { + protocols[alias] = proto + } + } + } + } +} + +// lookupProtocol looks up IP protocol name in /etc/protocols and +// returns correspondent protocol number. +func lookupProtocol(_ context.Context, name string) (int, error) { + onceReadProtocols.Do(readProtocols) + return lookupProtocolMap(name) +} + +func (r *Resolver) lookupHost(ctx context.Context, host string) (addrs []string, err error) { + order, conf := systemConf().hostLookupOrder(r, host) + if order == hostLookupCgo { + return cgoLookupHost(ctx, host) + } + return r.goLookupHostOrder(ctx, host, order, conf) +} + +func (r *Resolver) lookupIP(ctx context.Context, network, host string) (addrs []IPAddr, err error) { + order, conf := systemConf().hostLookupOrder(r, host) + if order == hostLookupCgo { + return cgoLookupIP(ctx, network, host) + } + ips, _, err := r.goLookupIPCNAMEOrder(ctx, network, host, order, conf) + return ips, err +} + +func (r *Resolver) lookupPort(ctx context.Context, network, service string) (int, error) { + // Port lookup is not a DNS operation. + // Prefer the cgo resolver if possible. + if !systemConf().mustUseGoResolver(r) { + port, err := cgoLookupPort(ctx, network, service) + if err != nil { + // Issue 18213: if cgo fails, first check to see whether we + // have the answer baked-in to the net package. 
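+ // goLookupPort consults the package's services table, which is pre-seeded
+ // with well-known entries, so common service names still resolve even when
+ // the system database is unavailable.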
+ if port, err := goLookupPort(network, service); err == nil { + return port, nil + } + } + return port, err + } + return goLookupPort(network, service) +} + +func (r *Resolver) lookupCNAME(ctx context.Context, name string) (string, error) { + order, conf := systemConf().hostLookupOrder(r, name) + if order == hostLookupCgo { + if cname, err, ok := cgoLookupCNAME(ctx, name); ok { + return cname, err + } + } + return r.goLookupCNAME(ctx, name, order, conf) +} + +func (r *Resolver) lookupSRV(ctx context.Context, service, proto, name string) (string, []*SRV, error) { + return r.goLookupSRV(ctx, service, proto, name) +} + +func (r *Resolver) lookupMX(ctx context.Context, name string) ([]*MX, error) { + return r.goLookupMX(ctx, name) +} + +func (r *Resolver) lookupNS(ctx context.Context, name string) ([]*NS, error) { + return r.goLookupNS(ctx, name) +} + +func (r *Resolver) lookupTXT(ctx context.Context, name string) ([]string, error) { + return r.goLookupTXT(ctx, name) +} + +func (r *Resolver) lookupAddr(ctx context.Context, addr string) ([]string, error) { + order, conf := systemConf().addrLookupOrder(r, addr) + if order == hostLookupCgo { + return cgoLookupPTR(ctx, addr) + } + return r.goLookupPTR(ctx, addr, order, conf) +} diff --git a/platform/dbops/binaries/go/go/src/net/lookup_windows.go b/platform/dbops/binaries/go/go/src/net/lookup_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..3048f3269b003e05c34cb049411f3db72ff746c8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/lookup_windows.go @@ -0,0 +1,471 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "internal/syscall/windows" + "os" + "runtime" + "syscall" + "time" + "unsafe" +) + +// cgoAvailable set to true to indicate that the cgo resolver +// is available on Windows. Note that on Windows the cgo resolver +// does not actually use cgo. +const cgoAvailable = true + +const ( + _DNS_ERROR_RCODE_NAME_ERROR = syscall.Errno(9003) + _DNS_INFO_NO_RECORDS = syscall.Errno(9501) + + _WSAHOST_NOT_FOUND = syscall.Errno(11001) + _WSATRY_AGAIN = syscall.Errno(11002) + _WSATYPE_NOT_FOUND = syscall.Errno(10109) +) + +func winError(call string, err error) error { + switch err { + case _WSAHOST_NOT_FOUND, _DNS_ERROR_RCODE_NAME_ERROR, _DNS_INFO_NO_RECORDS: + return errNoSuchHost + } + return os.NewSyscallError(call, err) +} + +func getprotobyname(name string) (proto int, err error) { + p, err := syscall.GetProtoByName(name) + if err != nil { + return 0, winError("getprotobyname", err) + } + return int(p.Proto), nil +} + +// lookupProtocol looks up IP protocol name and returns correspondent protocol number. +func lookupProtocol(ctx context.Context, name string) (int, error) { + // GetProtoByName return value is stored in thread local storage. + // Start new os thread before the call to prevent races. 
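+ // Concretely: the worker goroutine locks itself to a fresh OS thread for
+ // the duration of the call, and both the send and the receive select on
+ // ctx.Done() so neither side can block past the caller's deadline (the
+ // result channel is unbuffered).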
+ type result struct { + proto int + err error + } + ch := make(chan result) // unbuffered + go func() { + acquireThread() + defer releaseThread() + runtime.LockOSThread() + defer runtime.UnlockOSThread() + proto, err := getprotobyname(name) + select { + case ch <- result{proto: proto, err: err}: + case <-ctx.Done(): + } + }() + select { + case r := <-ch: + if r.err != nil { + if proto, err := lookupProtocolMap(name); err == nil { + return proto, nil + } + + dnsError := &DNSError{Err: r.err.Error(), Name: name} + if r.err == errNoSuchHost { + dnsError.IsNotFound = true + } + r.err = dnsError + } + return r.proto, r.err + case <-ctx.Done(): + return 0, mapErr(ctx.Err()) + } +} + +func (r *Resolver) lookupHost(ctx context.Context, name string) ([]string, error) { + ips, err := r.lookupIP(ctx, "ip", name) + if err != nil { + return nil, err + } + addrs := make([]string, 0, len(ips)) + for _, ip := range ips { + addrs = append(addrs, ip.String()) + } + return addrs, nil +} + +func (r *Resolver) lookupIP(ctx context.Context, network, name string) ([]IPAddr, error) { + if order, conf := systemConf().hostLookupOrder(r, name); order != hostLookupCgo { + return r.goLookupIP(ctx, network, name, order, conf) + } + + // TODO(bradfitz,brainman): use ctx more. See TODO below. + + var family int32 = syscall.AF_UNSPEC + switch ipVersion(network) { + case '4': + family = syscall.AF_INET + case '6': + family = syscall.AF_INET6 + } + + getaddr := func() ([]IPAddr, error) { + acquireThread() + defer releaseThread() + hints := syscall.AddrinfoW{ + Family: family, + Socktype: syscall.SOCK_STREAM, + Protocol: syscall.IPPROTO_IP, + } + var result *syscall.AddrinfoW + name16p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, &DNSError{Name: name, Err: err.Error()} + } + + dnsConf := getSystemDNSConfig() + start := time.Now() + + var e error + for i := 0; i < dnsConf.attempts; i++ { + e = syscall.GetAddrInfoW(name16p, nil, &hints, &result) + if e == nil || e != _WSATRY_AGAIN || time.Since(start) > dnsConf.timeout { + break + } + } + if e != nil { + err := winError("getaddrinfow", e) + dnsError := &DNSError{Err: err.Error(), Name: name} + if err == errNoSuchHost { + dnsError.IsNotFound = true + } + return nil, dnsError + } + defer syscall.FreeAddrInfoW(result) + addrs := make([]IPAddr, 0, 5) + for ; result != nil; result = result.Next { + addr := unsafe.Pointer(result.Addr) + switch result.Family { + case syscall.AF_INET: + a := (*syscall.RawSockaddrInet4)(addr).Addr + addrs = append(addrs, IPAddr{IP: copyIP(a[:])}) + case syscall.AF_INET6: + a := (*syscall.RawSockaddrInet6)(addr).Addr + zone := zoneCache.name(int((*syscall.RawSockaddrInet6)(addr).Scope_id)) + addrs = append(addrs, IPAddr{IP: copyIP(a[:]), Zone: zone}) + default: + return nil, &DNSError{Err: syscall.EWINDOWS.Error(), Name: name} + } + } + return addrs, nil + } + + type ret struct { + addrs []IPAddr + err error + } + + var ch chan ret + if ctx.Err() == nil { + ch = make(chan ret, 1) + go func() { + addr, err := getaddr() + ch <- ret{addrs: addr, err: err} + }() + } + + select { + case r := <-ch: + return r.addrs, r.err + case <-ctx.Done(): + // TODO(bradfitz,brainman): cancel the ongoing + // GetAddrInfoW? It would require conditionally using + // GetAddrInfoEx with lpOverlapped, which requires + // Windows 8 or newer. I guess we'll need oldLookupIP, + // newLookupIP, and newerLookUP. + // + // For now we just let it finish and write to the + // buffered channel. 
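+ // (ch has capacity 1, so that final write cannot block a goroutine
+ // forever once we have returned.)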
+ return nil, &DNSError{ + Name: name, + Err: ctx.Err().Error(), + IsTimeout: ctx.Err() == context.DeadlineExceeded, + } + } +} + +func (r *Resolver) lookupPort(ctx context.Context, network, service string) (int, error) { + if systemConf().mustUseGoResolver(r) { + return lookupPortMap(network, service) + } + + // TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this. + acquireThread() + defer releaseThread() + + var hints syscall.AddrinfoW + + switch network { + case "ip": // no hints + case "tcp", "tcp4", "tcp6": + hints.Socktype = syscall.SOCK_STREAM + hints.Protocol = syscall.IPPROTO_TCP + case "udp", "udp4", "udp6": + hints.Socktype = syscall.SOCK_DGRAM + hints.Protocol = syscall.IPPROTO_UDP + default: + return 0, &DNSError{Err: "unknown network", Name: network + "/" + service} + } + + switch ipVersion(network) { + case '4': + hints.Family = syscall.AF_INET + case '6': + hints.Family = syscall.AF_INET6 + } + + var result *syscall.AddrinfoW + e := syscall.GetAddrInfoW(nil, syscall.StringToUTF16Ptr(service), &hints, &result) + if e != nil { + if port, err := lookupPortMap(network, service); err == nil { + return port, nil + } + + // The _WSATYPE_NOT_FOUND error is returned by GetAddrInfoW + // when the service name is unknown. We are also checking + // for _WSAHOST_NOT_FOUND here to match the cgo (unix) version + // cgo_unix.go (cgoLookupServicePort). + if e == _WSATYPE_NOT_FOUND || e == _WSAHOST_NOT_FOUND { + return 0, &DNSError{Err: "unknown port", Name: network + "/" + service, IsNotFound: true} + } + err := os.NewSyscallError("getaddrinfow", e) + return 0, &DNSError{Err: err.Error(), Name: network + "/" + service} + } + defer syscall.FreeAddrInfoW(result) + if result == nil { + return 0, &DNSError{Err: syscall.EINVAL.Error(), Name: network + "/" + service} + } + addr := unsafe.Pointer(result.Addr) + switch result.Family { + case syscall.AF_INET: + a := (*syscall.RawSockaddrInet4)(addr) + return int(syscall.Ntohs(a.Port)), nil + case syscall.AF_INET6: + a := (*syscall.RawSockaddrInet6)(addr) + return int(syscall.Ntohs(a.Port)), nil + } + return 0, &DNSError{Err: syscall.EINVAL.Error(), Name: network + "/" + service} +} + +func (r *Resolver) lookupCNAME(ctx context.Context, name string) (string, error) { + if order, conf := systemConf().hostLookupOrder(r, name); order != hostLookupCgo { + return r.goLookupCNAME(ctx, name, order, conf) + } + + // TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this. + acquireThread() + defer releaseThread() + var rec *syscall.DNSRecord + e := syscall.DnsQuery(name, syscall.DNS_TYPE_CNAME, 0, nil, &rec, nil) + // windows returns DNS_INFO_NO_RECORDS if there are no CNAME-s + if errno, ok := e.(syscall.Errno); ok && errno == syscall.DNS_INFO_NO_RECORDS { + // if there are no aliases, the canonical name is the input name + return absDomainName(name), nil + } + if e != nil { + err := winError("dnsquery", e) + return "", &DNSError{Err: err.Error(), Name: name, IsNotFound: err == errNoSuchHost} + } + defer syscall.DnsRecordListFree(rec, 1) + + resolved := resolveCNAME(syscall.StringToUTF16Ptr(name), rec) + cname := windows.UTF16PtrToString(resolved) + return absDomainName(cname), nil +} + +func (r *Resolver) lookupSRV(ctx context.Context, service, proto, name string) (string, []*SRV, error) { + if systemConf().mustUseGoResolver(r) { + return r.goLookupSRV(ctx, service, proto, name) + } + // TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this. 
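+ // acquireThread/releaseThread gate these blocking DNS calls behind a
+ // semaphore sized by concurrentThreadsLimit (defined at the end of this
+ // file) so lookups cannot tie up an unbounded number of OS threads.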
+ acquireThread() + defer releaseThread() + var target string + if service == "" && proto == "" { + target = name + } else { + target = "_" + service + "._" + proto + "." + name + } + var rec *syscall.DNSRecord + e := syscall.DnsQuery(target, syscall.DNS_TYPE_SRV, 0, nil, &rec, nil) + if e != nil { + err := winError("dnsquery", e) + return "", nil, &DNSError{Err: err.Error(), Name: name, IsNotFound: err == errNoSuchHost} + } + defer syscall.DnsRecordListFree(rec, 1) + + srvs := make([]*SRV, 0, 10) + for _, p := range validRecs(rec, syscall.DNS_TYPE_SRV, target) { + v := (*syscall.DNSSRVData)(unsafe.Pointer(&p.Data[0])) + srvs = append(srvs, &SRV{absDomainName(syscall.UTF16ToString((*[256]uint16)(unsafe.Pointer(v.Target))[:])), v.Port, v.Priority, v.Weight}) + } + byPriorityWeight(srvs).sort() + return absDomainName(target), srvs, nil +} + +func (r *Resolver) lookupMX(ctx context.Context, name string) ([]*MX, error) { + if systemConf().mustUseGoResolver(r) { + return r.goLookupMX(ctx, name) + } + // TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this. + acquireThread() + defer releaseThread() + var rec *syscall.DNSRecord + e := syscall.DnsQuery(name, syscall.DNS_TYPE_MX, 0, nil, &rec, nil) + if e != nil { + err := winError("dnsquery", e) + return nil, &DNSError{Err: err.Error(), Name: name, IsNotFound: err == errNoSuchHost} + } + defer syscall.DnsRecordListFree(rec, 1) + + mxs := make([]*MX, 0, 10) + for _, p := range validRecs(rec, syscall.DNS_TYPE_MX, name) { + v := (*syscall.DNSMXData)(unsafe.Pointer(&p.Data[0])) + mxs = append(mxs, &MX{absDomainName(windows.UTF16PtrToString(v.NameExchange)), v.Preference}) + } + byPref(mxs).sort() + return mxs, nil +} + +func (r *Resolver) lookupNS(ctx context.Context, name string) ([]*NS, error) { + if systemConf().mustUseGoResolver(r) { + return r.goLookupNS(ctx, name) + } + // TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this. + acquireThread() + defer releaseThread() + var rec *syscall.DNSRecord + e := syscall.DnsQuery(name, syscall.DNS_TYPE_NS, 0, nil, &rec, nil) + if e != nil { + err := winError("dnsquery", e) + return nil, &DNSError{Err: err.Error(), Name: name, IsNotFound: err == errNoSuchHost} + } + defer syscall.DnsRecordListFree(rec, 1) + + nss := make([]*NS, 0, 10) + for _, p := range validRecs(rec, syscall.DNS_TYPE_NS, name) { + v := (*syscall.DNSPTRData)(unsafe.Pointer(&p.Data[0])) + nss = append(nss, &NS{absDomainName(syscall.UTF16ToString((*[256]uint16)(unsafe.Pointer(v.Host))[:]))}) + } + return nss, nil +} + +func (r *Resolver) lookupTXT(ctx context.Context, name string) ([]string, error) { + if systemConf().mustUseGoResolver(r) { + return r.goLookupTXT(ctx, name) + } + // TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this. 
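+ // A DNS TXT record may carry several character-strings; the loop below
+ // concatenates them into a single string per record.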
+ acquireThread() + defer releaseThread() + var rec *syscall.DNSRecord + e := syscall.DnsQuery(name, syscall.DNS_TYPE_TEXT, 0, nil, &rec, nil) + if e != nil { + err := winError("dnsquery", e) + return nil, &DNSError{Err: err.Error(), Name: name, IsNotFound: err == errNoSuchHost} + } + defer syscall.DnsRecordListFree(rec, 1) + + txts := make([]string, 0, 10) + for _, p := range validRecs(rec, syscall.DNS_TYPE_TEXT, name) { + d := (*syscall.DNSTXTData)(unsafe.Pointer(&p.Data[0])) + s := "" + for _, v := range (*[1 << 10]*uint16)(unsafe.Pointer(&(d.StringArray[0])))[:d.StringCount:d.StringCount] { + s += windows.UTF16PtrToString(v) + } + txts = append(txts, s) + } + return txts, nil +} + +func (r *Resolver) lookupAddr(ctx context.Context, addr string) ([]string, error) { + if order, conf := systemConf().addrLookupOrder(r, addr); order != hostLookupCgo { + return r.goLookupPTR(ctx, addr, order, conf) + } + + // TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this. + acquireThread() + defer releaseThread() + arpa, err := reverseaddr(addr) + if err != nil { + return nil, err + } + var rec *syscall.DNSRecord + e := syscall.DnsQuery(arpa, syscall.DNS_TYPE_PTR, 0, nil, &rec, nil) + if e != nil { + err := winError("dnsquery", e) + return nil, &DNSError{Err: err.Error(), Name: addr, IsNotFound: err == errNoSuchHost} + } + defer syscall.DnsRecordListFree(rec, 1) + + ptrs := make([]string, 0, 10) + for _, p := range validRecs(rec, syscall.DNS_TYPE_PTR, arpa) { + v := (*syscall.DNSPTRData)(unsafe.Pointer(&p.Data[0])) + ptrs = append(ptrs, absDomainName(windows.UTF16PtrToString(v.Host))) + } + return ptrs, nil +} + +const dnsSectionMask = 0x0003 + +// returns only results applicable to name and resolves CNAME entries. +func validRecs(r *syscall.DNSRecord, dnstype uint16, name string) []*syscall.DNSRecord { + cname := syscall.StringToUTF16Ptr(name) + if dnstype != syscall.DNS_TYPE_CNAME { + cname = resolveCNAME(cname, r) + } + rec := make([]*syscall.DNSRecord, 0, 10) + for p := r; p != nil; p = p.Next { + // in case of a local machine, DNS records are returned with DNSREC_QUESTION flag instead of DNS_ANSWER + if p.Dw&dnsSectionMask != syscall.DnsSectionAnswer && p.Dw&dnsSectionMask != syscall.DnsSectionQuestion { + continue + } + if p.Type != dnstype { + continue + } + if !syscall.DnsNameCompare(cname, p.Name) { + continue + } + rec = append(rec, p) + } + return rec +} + +// returns the last CNAME in chain. +func resolveCNAME(name *uint16, r *syscall.DNSRecord) *uint16 { + // limit cname resolving to 10 in case of an infinite CNAME loop +Cname: + for cnameloop := 0; cnameloop < 10; cnameloop++ { + for p := r; p != nil; p = p.Next { + if p.Dw&dnsSectionMask != syscall.DnsSectionAnswer { + continue + } + if p.Type != syscall.DNS_TYPE_CNAME { + continue + } + if !syscall.DnsNameCompare(name, p.Name) { + continue + } + name = (*syscall.DNSPTRData)(unsafe.Pointer(&r.Data[0])).Host + continue Cname + } + break + } + return name +} + +// concurrentThreadsLimit returns the number of threads we permit to +// run concurrently doing DNS lookups. +func concurrentThreadsLimit() int { + return 500 +} diff --git a/platform/dbops/binaries/go/go/src/net/lookup_windows_test.go b/platform/dbops/binaries/go/go/src/net/lookup_windows_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c618a05bb44e44a22ff2d0382ab58a0a7b8e002b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/lookup_windows_test.go @@ -0,0 +1,340 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "internal/testenv" + "os/exec" + "reflect" + "regexp" + "sort" + "strings" + "syscall" + "testing" +) + +var nslookupTestServers = []string{"mail.golang.com", "gmail.com"} +var lookupTestIPs = []string{"8.8.8.8", "1.1.1.1"} + +func toJson(v any) string { + data, _ := json.Marshal(v) + return string(data) +} + +func testLookup(t *testing.T, fn func(*testing.T, *Resolver, string)) { + for _, def := range []bool{true, false} { + def := def + for _, server := range nslookupTestServers { + server := server + var name string + if def { + name = "default/" + } else { + name = "go/" + } + t.Run(name+server, func(t *testing.T) { + t.Parallel() + r := DefaultResolver + if !def { + r = &Resolver{PreferGo: true} + } + fn(t, r, server) + }) + } + } +} + +func TestNSLookupMX(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + testLookup(t, func(t *testing.T, r *Resolver, server string) { + mx, err := r.LookupMX(context.Background(), server) + if err != nil { + t.Fatal(err) + } + if len(mx) == 0 { + t.Fatal("no results") + } + expected, err := nslookupMX(server) + if err != nil { + t.Skipf("skipping failed nslookup %s test: %s", server, err) + } + sort.Sort(byPrefAndHost(expected)) + sort.Sort(byPrefAndHost(mx)) + if !reflect.DeepEqual(expected, mx) { + t.Errorf("different results %s:\texp:%v\tgot:%v", server, toJson(expected), toJson(mx)) + } + }) +} + +func TestNSLookupCNAME(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + testLookup(t, func(t *testing.T, r *Resolver, server string) { + cname, err := r.LookupCNAME(context.Background(), server) + if err != nil { + t.Fatalf("failed %s: %s", server, err) + } + if cname == "" { + t.Fatalf("no result %s", server) + } + expected, err := nslookupCNAME(server) + if err != nil { + t.Skipf("skipping failed nslookup %s test: %s", server, err) + } + if expected != cname { + t.Errorf("different results %s:\texp:%v\tgot:%v", server, expected, cname) + } + }) +} + +func TestNSLookupNS(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + testLookup(t, func(t *testing.T, r *Resolver, server string) { + ns, err := r.LookupNS(context.Background(), server) + if err != nil { + t.Fatalf("failed %s: %s", server, err) + } + if len(ns) == 0 { + t.Fatal("no results") + } + expected, err := nslookupNS(server) + if err != nil { + t.Skipf("skipping failed nslookup %s test: %s", server, err) + } + sort.Sort(byHost(expected)) + sort.Sort(byHost(ns)) + if !reflect.DeepEqual(expected, ns) { + t.Errorf("different results %s:\texp:%v\tgot:%v", toJson(server), toJson(expected), ns) + } + }) +} + +func TestNSLookupTXT(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + testLookup(t, func(t *testing.T, r *Resolver, server string) { + txt, err := r.LookupTXT(context.Background(), server) + if err != nil { + t.Fatalf("failed %s: %s", server, err) + } + if len(txt) == 0 { + t.Fatalf("no results") + } + expected, err := nslookupTXT(server) + if err != nil { + t.Skipf("skipping failed nslookup %s test: %s", server, err) + } + sort.Strings(expected) + sort.Strings(txt) + if !reflect.DeepEqual(expected, txt) { + t.Errorf("different results %s:\texp:%v\tgot:%v", server, toJson(expected), toJson(txt)) + } + }) +} + +func TestLookupLocalPTR(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + addr, err := localIP() + if err != nil { + t.Errorf("failed to get local ip: %s", err) + } + names, 
err := LookupAddr(addr.String()) + if err != nil { + t.Errorf("failed %s: %s", addr, err) + } + if len(names) == 0 { + t.Errorf("no results") + } + expected, err := lookupPTR(addr.String()) + if err != nil { + t.Skipf("skipping failed lookup %s test: %s", addr.String(), err) + } + sort.Strings(expected) + sort.Strings(names) + if !reflect.DeepEqual(expected, names) { + t.Errorf("different results %s:\texp:%v\tgot:%v", addr, toJson(expected), toJson(names)) + } +} + +func TestLookupPTR(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + for _, addr := range lookupTestIPs { + names, err := LookupAddr(addr) + if err != nil { + // The DNSError type stores the error as a string, so it cannot wrap the + // original error code and we cannot check for it here. However, we can at + // least use its error string to identify the correct localized text for + // the error to skip. + var DNS_ERROR_RCODE_SERVER_FAILURE syscall.Errno = 9002 + if strings.HasSuffix(err.Error(), DNS_ERROR_RCODE_SERVER_FAILURE.Error()) { + testenv.SkipFlaky(t, 38111) + } + t.Errorf("failed %s: %s", addr, err) + } + if len(names) == 0 { + t.Errorf("no results") + } + expected, err := lookupPTR(addr) + if err != nil { + t.Logf("skipping failed lookup %s test: %s", addr, err) + continue + } + sort.Strings(expected) + sort.Strings(names) + if !reflect.DeepEqual(expected, names) { + t.Errorf("different results %s:\texp:%v\tgot:%v", addr, toJson(expected), toJson(names)) + } + } +} + +type byPrefAndHost []*MX + +func (s byPrefAndHost) Len() int { return len(s) } +func (s byPrefAndHost) Less(i, j int) bool { + if s[i].Pref != s[j].Pref { + return s[i].Pref < s[j].Pref + } + return s[i].Host < s[j].Host +} +func (s byPrefAndHost) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type byHost []*NS + +func (s byHost) Len() int { return len(s) } +func (s byHost) Less(i, j int) bool { return s[i].Host < s[j].Host } +func (s byHost) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func nslookup(qtype, name string) (string, error) { + var out strings.Builder + var err strings.Builder + cmd := exec.Command("nslookup", "-querytype="+qtype, name) + cmd.Stdout = &out + cmd.Stderr = &err + if err := cmd.Run(); err != nil { + return "", err + } + r := strings.ReplaceAll(out.String(), "\r\n", "\n") + // nslookup stderr output contains also debug information such as + // "Non-authoritative answer" and it doesn't return the correct errcode + if strings.Contains(err.String(), "can't find") { + return r, errors.New(err.String()) + } + return r, nil +} + +func nslookupMX(name string) (mx []*MX, err error) { + var r string + if r, err = nslookup("mx", name); err != nil { + return + } + mx = make([]*MX, 0, 10) + // linux nslookup syntax + // golang.org mail exchanger = 2 alt1.aspmx.l.google.com. 
+ rx := regexp.MustCompile(`(?m)^([a-z0-9.\-]+)\s+mail exchanger\s*=\s*([0-9]+)\s*([a-z0-9.\-]+)$`) + for _, ans := range rx.FindAllStringSubmatch(r, -1) { + pref, _, _ := dtoi(ans[2]) + mx = append(mx, &MX{absDomainName(ans[3]), uint16(pref)}) + } + // windows nslookup syntax + // gmail.com MX preference = 30, mail exchanger = alt3.gmail-smtp-in.l.google.com + rx = regexp.MustCompile(`(?m)^([a-z0-9.\-]+)\s+MX preference\s*=\s*([0-9]+)\s*,\s*mail exchanger\s*=\s*([a-z0-9.\-]+)$`) + for _, ans := range rx.FindAllStringSubmatch(r, -1) { + pref, _, _ := dtoi(ans[2]) + mx = append(mx, &MX{absDomainName(ans[3]), uint16(pref)}) + } + return +} + +func nslookupNS(name string) (ns []*NS, err error) { + var r string + if r, err = nslookup("ns", name); err != nil { + return + } + ns = make([]*NS, 0, 10) + // golang.org nameserver = ns1.google.com. + rx := regexp.MustCompile(`(?m)^([a-z0-9.\-]+)\s+nameserver\s*=\s*([a-z0-9.\-]+)$`) + for _, ans := range rx.FindAllStringSubmatch(r, -1) { + ns = append(ns, &NS{absDomainName(ans[2])}) + } + return +} + +func nslookupCNAME(name string) (cname string, err error) { + var r string + if r, err = nslookup("cname", name); err != nil { + return + } + // mail.golang.com canonical name = golang.org. + rx := regexp.MustCompile(`(?m)^([a-z0-9.\-]+)\s+canonical name\s*=\s*([a-z0-9.\-]+)$`) + // assumes the last CNAME is the correct one + last := name + for _, ans := range rx.FindAllStringSubmatch(r, -1) { + last = ans[2] + } + return absDomainName(last), nil +} + +func nslookupTXT(name string) (txt []string, err error) { + var r string + if r, err = nslookup("txt", name); err != nil { + return + } + txt = make([]string, 0, 10) + // linux + // golang.org text = "v=spf1 redirect=_spf.google.com" + + // windows + // golang.org text = + // + // "v=spf1 redirect=_spf.google.com" + rx := regexp.MustCompile(`(?m)^([a-z0-9.\-]+)\s+text\s*=\s*"(.*)"$`) + for _, ans := range rx.FindAllStringSubmatch(r, -1) { + txt = append(txt, ans[2]) + } + return +} + +func ping(name string) (string, error) { + cmd := exec.Command("ping", "-n", "1", "-a", name) + stdoutStderr, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("%v: %v", err, string(stdoutStderr)) + } + r := strings.ReplaceAll(string(stdoutStderr), "\r\n", "\n") + return r, nil +} + +func lookupPTR(name string) (ptr []string, err error) { + var r string + if r, err = ping(name); err != nil { + return + } + ptr = make([]string, 0, 10) + rx := regexp.MustCompile(`(?m)^Pinging\s+([a-zA-Z0-9.\-]+)\s+\[.*$`) + for _, ans := range rx.FindAllStringSubmatch(r, -1) { + ptr = append(ptr, absDomainName(ans[1])) + } + return +} + +func localIP() (ip IP, err error) { + conn, err := Dial("udp", "golang.org:80") + if err != nil { + return nil, err + } + defer conn.Close() + + localAddr := conn.LocalAddr().(*UDPAddr) + + return localAddr.IP, nil +} diff --git a/platform/dbops/binaries/go/go/src/net/mac.go b/platform/dbops/binaries/go/go/src/net/mac.go new file mode 100644 index 0000000000000000000000000000000000000000..53d5b2dbf596b43b8b05b25169bfce8b6705cb39 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/mac.go @@ -0,0 +1,86 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +const hexDigit = "0123456789abcdef" + +// A HardwareAddr represents a physical hardware address. 
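+// For example, ParseMAC("00:00:5e:00:53:01") yields
+// HardwareAddr{0x00, 0x00, 0x5e, 0x00, 0x53, 0x01}, and String renders it
+// back as "00:00:5e:00:53:01".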
+type HardwareAddr []byte + +func (a HardwareAddr) String() string { + if len(a) == 0 { + return "" + } + buf := make([]byte, 0, len(a)*3-1) + for i, b := range a { + if i > 0 { + buf = append(buf, ':') + } + buf = append(buf, hexDigit[b>>4]) + buf = append(buf, hexDigit[b&0xF]) + } + return string(buf) +} + +// ParseMAC parses s as an IEEE 802 MAC-48, EUI-48, EUI-64, or a 20-octet +// IP over InfiniBand link-layer address using one of the following formats: +// +// 00:00:5e:00:53:01 +// 02:00:5e:10:00:00:00:01 +// 00:00:00:00:fe:80:00:00:00:00:00:00:02:00:5e:10:00:00:00:01 +// 00-00-5e-00-53-01 +// 02-00-5e-10-00-00-00-01 +// 00-00-00-00-fe-80-00-00-00-00-00-00-02-00-5e-10-00-00-00-01 +// 0000.5e00.5301 +// 0200.5e10.0000.0001 +// 0000.0000.fe80.0000.0000.0000.0200.5e10.0000.0001 +func ParseMAC(s string) (hw HardwareAddr, err error) { + if len(s) < 14 { + goto error + } + + if s[2] == ':' || s[2] == '-' { + if (len(s)+1)%3 != 0 { + goto error + } + n := (len(s) + 1) / 3 + if n != 6 && n != 8 && n != 20 { + goto error + } + hw = make(HardwareAddr, n) + for x, i := 0, 0; i < n; i++ { + var ok bool + if hw[i], ok = xtoi2(s[x:], s[2]); !ok { + goto error + } + x += 3 + } + } else if s[4] == '.' { + if (len(s)+1)%5 != 0 { + goto error + } + n := 2 * (len(s) + 1) / 5 + if n != 6 && n != 8 && n != 20 { + goto error + } + hw = make(HardwareAddr, n) + for x, i := 0, 0; i < n; i += 2 { + var ok bool + if hw[i], ok = xtoi2(s[x:x+2], 0); !ok { + goto error + } + if hw[i+1], ok = xtoi2(s[x+2:], s[4]); !ok { + goto error + } + x += 5 + } + } else { + goto error + } + return hw, nil + +error: + return nil, &AddrError{Err: "invalid MAC address", Addr: s} +} diff --git a/platform/dbops/binaries/go/go/src/net/mac_test.go b/platform/dbops/binaries/go/go/src/net/mac_test.go new file mode 100644 index 0000000000000000000000000000000000000000..cad884fcf5d42df96fefd720e63f4823ae86c388 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/mac_test.go @@ -0,0 +1,109 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "reflect" + "strings" + "testing" +) + +var parseMACTests = []struct { + in string + out HardwareAddr + err string +}{ + // See RFC 7042, Section 2.1.1. + {"00:00:5e:00:53:01", HardwareAddr{0x00, 0x00, 0x5e, 0x00, 0x53, 0x01}, ""}, + {"00-00-5e-00-53-01", HardwareAddr{0x00, 0x00, 0x5e, 0x00, 0x53, 0x01}, ""}, + {"0000.5e00.5301", HardwareAddr{0x00, 0x00, 0x5e, 0x00, 0x53, 0x01}, ""}, + + // See RFC 7042, Section 2.2.2. + {"02:00:5e:10:00:00:00:01", HardwareAddr{0x02, 0x00, 0x5e, 0x10, 0x00, 0x00, 0x00, 0x01}, ""}, + {"02-00-5e-10-00-00-00-01", HardwareAddr{0x02, 0x00, 0x5e, 0x10, 0x00, 0x00, 0x00, 0x01}, ""}, + {"0200.5e10.0000.0001", HardwareAddr{0x02, 0x00, 0x5e, 0x10, 0x00, 0x00, 0x00, 0x01}, ""}, + + // See RFC 4391, Section 9.1.1. 
+ { + "00:00:00:00:fe:80:00:00:00:00:00:00:02:00:5e:10:00:00:00:01", + HardwareAddr{ + 0x00, 0x00, 0x00, 0x00, + 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x02, 0x00, 0x5e, 0x10, 0x00, 0x00, 0x00, 0x01, + }, + "", + }, + { + "00-00-00-00-fe-80-00-00-00-00-00-00-02-00-5e-10-00-00-00-01", + HardwareAddr{ + 0x00, 0x00, 0x00, 0x00, + 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x02, 0x00, 0x5e, 0x10, 0x00, 0x00, 0x00, 0x01, + }, + "", + }, + { + "0000.0000.fe80.0000.0000.0000.0200.5e10.0000.0001", + HardwareAddr{ + 0x00, 0x00, 0x00, 0x00, + 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x02, 0x00, 0x5e, 0x10, 0x00, 0x00, 0x00, 0x01, + }, + "", + }, + + {"ab:cd:ef:AB:CD:EF", HardwareAddr{0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef}, ""}, + {"ab:cd:ef:AB:CD:EF:ab:cd", HardwareAddr{0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef, 0xab, 0xcd}, ""}, + { + "ab:cd:ef:AB:CD:EF:ab:cd:ef:AB:CD:EF:ab:cd:ef:AB:CD:EF:ab:cd", + HardwareAddr{ + 0xab, 0xcd, 0xef, 0xab, + 0xcd, 0xef, 0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef, + 0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef, 0xab, 0xcd, + }, + "", + }, + + {"01.02.03.04.05.06", nil, "invalid MAC address"}, + {"01:02:03:04:05:06:", nil, "invalid MAC address"}, + {"x1:02:03:04:05:06", nil, "invalid MAC address"}, + {"01002:03:04:05:06", nil, "invalid MAC address"}, + {"01:02003:04:05:06", nil, "invalid MAC address"}, + {"01:02:03004:05:06", nil, "invalid MAC address"}, + {"01:02:03:04005:06", nil, "invalid MAC address"}, + {"01:02:03:04:05006", nil, "invalid MAC address"}, + {"01-02:03:04:05:06", nil, "invalid MAC address"}, + {"01:02-03-04-05-06", nil, "invalid MAC address"}, + {"0123:4567:89AF", nil, "invalid MAC address"}, + {"0123-4567-89AF", nil, "invalid MAC address"}, +} + +func TestParseMAC(t *testing.T) { + match := func(err error, s string) bool { + if s == "" { + return err == nil + } + return err != nil && strings.Contains(err.Error(), s) + } + + for i, tt := range parseMACTests { + out, err := ParseMAC(tt.in) + if !reflect.DeepEqual(out, tt.out) || !match(err, tt.err) { + t.Errorf("ParseMAC(%q) = %v, %v, want %v, %v", tt.in, out, err, tt.out, tt.err) + } + if tt.err == "" { + // Verify that serialization works too, and that it round-trips. + s := out.String() + out2, err := ParseMAC(s) + if err != nil { + t.Errorf("%d. ParseMAC(%q) = %v", i, s, err) + continue + } + if !reflect.DeepEqual(out2, out) { + t.Errorf("%d. ParseMAC(%q) = %v, want %v", i, s, out2, out) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/net/main_cloexec_test.go b/platform/dbops/binaries/go/go/src/net/main_cloexec_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6ea99ad6469cc954b8e4489554b37fdd269a3b2f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/main_cloexec_test.go @@ -0,0 +1,27 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris + +package net + +import "internal/poll" + +func init() { + extraTestHookInstallers = append(extraTestHookInstallers, installAccept4TestHook) + extraTestHookUninstallers = append(extraTestHookUninstallers, uninstallAccept4TestHook) +} + +var ( + // Placeholders for saving original socket system calls. 
+ origAccept4 = poll.Accept4Func +) + +func installAccept4TestHook() { + poll.Accept4Func = sw.Accept4 +} + +func uninstallAccept4TestHook() { + poll.Accept4Func = origAccept4 +} diff --git a/platform/dbops/binaries/go/go/src/net/main_conf_test.go b/platform/dbops/binaries/go/go/src/net/main_conf_test.go new file mode 100644 index 0000000000000000000000000000000000000000..307ff5dd8c47cb2d38492210fa38ad4be67bed4a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/main_conf_test.go @@ -0,0 +1,59 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !plan9 + +package net + +import "testing" + +// forceGoDNS forces the resolver configuration to use the pure Go resolver +// and returns a fixup function to restore the old settings. +func forceGoDNS() func() { + c := systemConf() + oldGo := c.netGo + oldCgo := c.netCgo + fixup := func() { + c.netGo = oldGo + c.netCgo = oldCgo + } + c.netGo = true + c.netCgo = false + return fixup +} + +// forceCgoDNS forces the resolver configuration to use the cgo resolver +// and returns a fixup function to restore the old settings. +// (On non-Unix systems forceCgoDNS returns nil.) +func forceCgoDNS() func() { + c := systemConf() + oldGo := c.netGo + oldCgo := c.netCgo + fixup := func() { + c.netGo = oldGo + c.netCgo = oldCgo + } + c.netGo = false + c.netCgo = true + return fixup +} + +func TestForceCgoDNS(t *testing.T) { + if !cgoAvailable { + t.Skip("cgo resolver not available") + } + defer forceCgoDNS()() + order, _ := systemConf().hostLookupOrder(nil, "go.dev") + if order != hostLookupCgo { + t.Fatalf("hostLookupOrder returned: %v, want cgo", order) + } +} + +func TestForceGoDNS(t *testing.T) { + defer forceGoDNS()() + order, _ := systemConf().hostLookupOrder(nil, "go.dev") + if order == hostLookupCgo { + t.Fatalf("hostLookupOrder returned: %v, want go resolver order", order) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/main_noconf_test.go b/platform/dbops/binaries/go/go/src/net/main_noconf_test.go new file mode 100644 index 0000000000000000000000000000000000000000..cdd7c5480566cdf5c93133a8cd8fb540aa3d135f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/main_noconf_test.go @@ -0,0 +1,22 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build plan9 + +package net + +import "runtime" + +// See main_conf_test.go for what these (don't) do. +func forceGoDNS() func() { + switch runtime.GOOS { + case "plan9": + return func() {} + default: + return nil + } +} + +// See main_conf_test.go for what these (don't) do. +func forceCgoDNS() func() { return nil } diff --git a/platform/dbops/binaries/go/go/src/net/main_plan9_test.go b/platform/dbops/binaries/go/go/src/net/main_plan9_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2bc5be88be6481b815b422cdfdde60882e60210f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/main_plan9_test.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +func installTestHooks() {} + +func uninstallTestHooks() {} + +// forceCloseSockets must be called only from TestMain. 
+func forceCloseSockets() {} + +func enableSocketConnect() {} + +func disableSocketConnect(network string) {} diff --git a/platform/dbops/binaries/go/go/src/net/main_posix_test.go b/platform/dbops/binaries/go/go/src/net/main_posix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..24a2a556605f738a6dd91705123a68832df49ac8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/main_posix_test.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !plan9 + +package net + +import ( + "net/internal/socktest" + "strings" + "syscall" +) + +func enableSocketConnect() { + sw.Set(socktest.FilterConnect, nil) +} + +func disableSocketConnect(network string) { + net, _, _ := strings.Cut(network, ":") + sw.Set(socktest.FilterConnect, func(so *socktest.Status) (socktest.AfterFilter, error) { + switch net { + case "tcp4": + if so.Cookie.Family() == syscall.AF_INET && so.Cookie.Type() == syscall.SOCK_STREAM { + return nil, syscall.EHOSTUNREACH + } + case "udp4": + if so.Cookie.Family() == syscall.AF_INET && so.Cookie.Type() == syscall.SOCK_DGRAM { + return nil, syscall.EHOSTUNREACH + } + case "ip4": + if so.Cookie.Family() == syscall.AF_INET && so.Cookie.Type() == syscall.SOCK_RAW { + return nil, syscall.EHOSTUNREACH + } + case "tcp6": + if so.Cookie.Family() == syscall.AF_INET6 && so.Cookie.Type() == syscall.SOCK_STREAM { + return nil, syscall.EHOSTUNREACH + } + case "udp6": + if so.Cookie.Family() == syscall.AF_INET6 && so.Cookie.Type() == syscall.SOCK_DGRAM { + return nil, syscall.EHOSTUNREACH + } + case "ip6": + if so.Cookie.Family() == syscall.AF_INET6 && so.Cookie.Type() == syscall.SOCK_RAW { + return nil, syscall.EHOSTUNREACH + } + } + return nil, nil + }) +} diff --git a/platform/dbops/binaries/go/go/src/net/main_test.go b/platform/dbops/binaries/go/go/src/net/main_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7dc1e3ee0dadabf4215457e57eddca6c7bf31359 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/main_test.go @@ -0,0 +1,222 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "flag" + "fmt" + "net/internal/socktest" + "os" + "runtime" + "sort" + "strings" + "sync" + "testing" + "time" +) + +var ( + sw socktest.Switch + + // uninstallTestHooks runs just before a run of benchmarks. + testHookUninstaller sync.Once +) + +var ( + testTCPBig = flag.Bool("tcpbig", false, "whether to test massive size of data per read or write call on TCP connection") + + testDNSFlood = flag.Bool("dnsflood", false, "whether to test DNS query flooding") + + // If external IPv4 connectivity exists, we can try dialing + // non-node/interface local scope IPv4 addresses. + // On Windows, Lookup APIs may not return IPv4-related + // resource records when a node has no external IPv4 + // connectivity. + testIPv4 = flag.Bool("ipv4", true, "assume external IPv4 connectivity exists") + + // If external IPv6 connectivity exists, we can try dialing + // non-node/interface local scope IPv6 addresses. + // On Windows, Lookup APIs may not return IPv6-related + // resource records when a node has no external IPv6 + // connectivity. 
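+ // Opt in with the -ipv6 test flag on hosts that have working IPv6.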
+ testIPv6 = flag.Bool("ipv6", false, "assume external IPv6 connectivity exists") +) + +func TestMain(m *testing.M) { + setupTestData() + installTestHooks() + + st := m.Run() + + testHookUninstaller.Do(uninstallTestHooks) + if testing.Verbose() { + printRunningGoroutines() + printInflightSockets() + printSocketStats() + } + forceCloseSockets() + os.Exit(st) +} + +// mustSetDeadline calls the bound method m to set a deadline on a Conn. +// If the call fails, mustSetDeadline skips t if the current GOOS is believed +// not to support deadlines, or fails the test otherwise. +func mustSetDeadline(t testing.TB, m func(time.Time) error, d time.Duration) { + err := m(time.Now().Add(d)) + if err != nil { + t.Helper() + if runtime.GOOS == "plan9" { + t.Skipf("skipping: %s does not support deadlines", runtime.GOOS) + } + t.Fatal(err) + } +} + +type ipv6LinkLocalUnicastTest struct { + network, address string + nameLookup bool +} + +var ( + ipv6LinkLocalUnicastTCPTests []ipv6LinkLocalUnicastTest + ipv6LinkLocalUnicastUDPTests []ipv6LinkLocalUnicastTest +) + +func setupTestData() { + if supportsIPv4() { + resolveTCPAddrTests = append(resolveTCPAddrTests, []resolveTCPAddrTest{ + {"tcp", "localhost:1", &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 1}, nil}, + {"tcp4", "localhost:2", &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 2}, nil}, + }...) + resolveUDPAddrTests = append(resolveUDPAddrTests, []resolveUDPAddrTest{ + {"udp", "localhost:1", &UDPAddr{IP: IPv4(127, 0, 0, 1), Port: 1}, nil}, + {"udp4", "localhost:2", &UDPAddr{IP: IPv4(127, 0, 0, 1), Port: 2}, nil}, + }...) + resolveIPAddrTests = append(resolveIPAddrTests, []resolveIPAddrTest{ + {"ip", "localhost", &IPAddr{IP: IPv4(127, 0, 0, 1)}, nil}, + {"ip4", "localhost", &IPAddr{IP: IPv4(127, 0, 0, 1)}, nil}, + }...) + } + + if supportsIPv6() { + resolveTCPAddrTests = append(resolveTCPAddrTests, resolveTCPAddrTest{"tcp6", "localhost:3", &TCPAddr{IP: IPv6loopback, Port: 3}, nil}) + resolveUDPAddrTests = append(resolveUDPAddrTests, resolveUDPAddrTest{"udp6", "localhost:3", &UDPAddr{IP: IPv6loopback, Port: 3}, nil}) + resolveIPAddrTests = append(resolveIPAddrTests, resolveIPAddrTest{"ip6", "localhost", &IPAddr{IP: IPv6loopback}, nil}) + + // Issue 20911: don't return IPv4 addresses for + // Resolve*Addr calls of the IPv6 unspecified address. + resolveTCPAddrTests = append(resolveTCPAddrTests, resolveTCPAddrTest{"tcp", "[::]:4", &TCPAddr{IP: IPv6unspecified, Port: 4}, nil}) + resolveUDPAddrTests = append(resolveUDPAddrTests, resolveUDPAddrTest{"udp", "[::]:4", &UDPAddr{IP: IPv6unspecified, Port: 4}, nil}) + resolveIPAddrTests = append(resolveIPAddrTests, resolveIPAddrTest{"ip", "::", &IPAddr{IP: IPv6unspecified}, nil}) + } + + ifi := loopbackInterface() + if ifi != nil { + index := fmt.Sprintf("%v", ifi.Index) + resolveTCPAddrTests = append(resolveTCPAddrTests, []resolveTCPAddrTest{ + {"tcp6", "[fe80::1%" + ifi.Name + "]:1", &TCPAddr{IP: ParseIP("fe80::1"), Port: 1, Zone: zoneCache.name(ifi.Index)}, nil}, + {"tcp6", "[fe80::1%" + index + "]:2", &TCPAddr{IP: ParseIP("fe80::1"), Port: 2, Zone: index}, nil}, + }...) + resolveUDPAddrTests = append(resolveUDPAddrTests, []resolveUDPAddrTest{ + {"udp6", "[fe80::1%" + ifi.Name + "]:1", &UDPAddr{IP: ParseIP("fe80::1"), Port: 1, Zone: zoneCache.name(ifi.Index)}, nil}, + {"udp6", "[fe80::1%" + index + "]:2", &UDPAddr{IP: ParseIP("fe80::1"), Port: 2, Zone: index}, nil}, + }...) 
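+ // As above, each pair exercises zone parsing both by interface name and
+ // by numeric index.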
+ resolveIPAddrTests = append(resolveIPAddrTests, []resolveIPAddrTest{ + {"ip6", "fe80::1%" + ifi.Name, &IPAddr{IP: ParseIP("fe80::1"), Zone: zoneCache.name(ifi.Index)}, nil}, + {"ip6", "fe80::1%" + index, &IPAddr{IP: ParseIP("fe80::1"), Zone: index}, nil}, + }...) + } + + addr := ipv6LinkLocalUnicastAddr(ifi) + if addr != "" { + if runtime.GOOS != "dragonfly" { + ipv6LinkLocalUnicastTCPTests = append(ipv6LinkLocalUnicastTCPTests, []ipv6LinkLocalUnicastTest{ + {"tcp", "[" + addr + "%" + ifi.Name + "]:0", false}, + }...) + ipv6LinkLocalUnicastUDPTests = append(ipv6LinkLocalUnicastUDPTests, []ipv6LinkLocalUnicastTest{ + {"udp", "[" + addr + "%" + ifi.Name + "]:0", false}, + }...) + } + ipv6LinkLocalUnicastTCPTests = append(ipv6LinkLocalUnicastTCPTests, []ipv6LinkLocalUnicastTest{ + {"tcp6", "[" + addr + "%" + ifi.Name + "]:0", false}, + }...) + ipv6LinkLocalUnicastUDPTests = append(ipv6LinkLocalUnicastUDPTests, []ipv6LinkLocalUnicastTest{ + {"udp6", "[" + addr + "%" + ifi.Name + "]:0", false}, + }...) + switch runtime.GOOS { + case "darwin", "ios", "dragonfly", "freebsd", "openbsd", "netbsd": + ipv6LinkLocalUnicastTCPTests = append(ipv6LinkLocalUnicastTCPTests, []ipv6LinkLocalUnicastTest{ + {"tcp", "[localhost%" + ifi.Name + "]:0", true}, + {"tcp6", "[localhost%" + ifi.Name + "]:0", true}, + }...) + ipv6LinkLocalUnicastUDPTests = append(ipv6LinkLocalUnicastUDPTests, []ipv6LinkLocalUnicastTest{ + {"udp", "[localhost%" + ifi.Name + "]:0", true}, + {"udp6", "[localhost%" + ifi.Name + "]:0", true}, + }...) + case "linux": + ipv6LinkLocalUnicastTCPTests = append(ipv6LinkLocalUnicastTCPTests, []ipv6LinkLocalUnicastTest{ + {"tcp", "[ip6-localhost%" + ifi.Name + "]:0", true}, + {"tcp6", "[ip6-localhost%" + ifi.Name + "]:0", true}, + }...) + ipv6LinkLocalUnicastUDPTests = append(ipv6LinkLocalUnicastUDPTests, []ipv6LinkLocalUnicastTest{ + {"udp", "[ip6-localhost%" + ifi.Name + "]:0", true}, + {"udp6", "[ip6-localhost%" + ifi.Name + "]:0", true}, + }...) + } + } +} + +func printRunningGoroutines() { + gss := runningGoroutines() + if len(gss) == 0 { + return + } + fmt.Fprintf(os.Stderr, "Running goroutines:\n") + for _, gs := range gss { + fmt.Fprintf(os.Stderr, "%v\n", gs) + } + fmt.Fprintf(os.Stderr, "\n") +} + +// runningGoroutines returns a list of remaining goroutines. +func runningGoroutines() []string { + var gss []string + b := make([]byte, 2<<20) + b = b[:runtime.Stack(b, true)] + for _, s := range strings.Split(string(b), "\n\n") { + _, stack, _ := strings.Cut(s, "\n") + stack = strings.TrimSpace(stack) + if !strings.Contains(stack, "created by net") { + continue + } + gss = append(gss, stack) + } + sort.Strings(gss) + return gss +} + +func printInflightSockets() { + sos := sw.Sockets() + if len(sos) == 0 { + return + } + fmt.Fprintf(os.Stderr, "Inflight sockets:\n") + for s, so := range sos { + fmt.Fprintf(os.Stderr, "%v: %v\n", s, so) + } + fmt.Fprintf(os.Stderr, "\n") +} + +func printSocketStats() { + sts := sw.Stats() + if len(sts) == 0 { + return + } + fmt.Fprintf(os.Stderr, "Socket statistical information:\n") + for _, st := range sts { + fmt.Fprintf(os.Stderr, "%v\n", st) + } + fmt.Fprintf(os.Stderr, "\n") +} diff --git a/platform/dbops/binaries/go/go/src/net/main_unix_test.go b/platform/dbops/binaries/go/go/src/net/main_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e7a5b4fe9ad4eface4f990349e5e9abfc80099be --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/main_unix_test.go @@ -0,0 +1,55 @@ +// Copyright 2015 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package net + +import "internal/poll" + +var ( + // Placeholders for saving original socket system calls. + origSocket = socketFunc + origClose = poll.CloseFunc + origConnect = connectFunc + origListen = listenFunc + origAccept = poll.AcceptFunc + origGetsockoptInt = getsockoptIntFunc + + extraTestHookInstallers []func() + extraTestHookUninstallers []func() +) + +func installTestHooks() { + socketFunc = sw.Socket + poll.CloseFunc = sw.Close + connectFunc = sw.Connect + listenFunc = sw.Listen + poll.AcceptFunc = sw.Accept + getsockoptIntFunc = sw.GetsockoptInt + + for _, fn := range extraTestHookInstallers { + fn() + } +} + +func uninstallTestHooks() { + socketFunc = origSocket + poll.CloseFunc = origClose + connectFunc = origConnect + listenFunc = origListen + poll.AcceptFunc = origAccept + getsockoptIntFunc = origGetsockoptInt + + for _, fn := range extraTestHookUninstallers { + fn() + } +} + +// forceCloseSockets must be called only from TestMain. +func forceCloseSockets() { + for s := range sw.Sockets() { + poll.CloseFunc(s) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/main_wasm_test.go b/platform/dbops/binaries/go/go/src/net/main_wasm_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b8196bb283dc1095923189bbcfef18cff67fcc43 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/main_wasm_test.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasip1 || js + +package net + +func installTestHooks() {} + +func uninstallTestHooks() {} + +func forceCloseSockets() {} diff --git a/platform/dbops/binaries/go/go/src/net/main_windows_test.go b/platform/dbops/binaries/go/go/src/net/main_windows_test.go new file mode 100644 index 0000000000000000000000000000000000000000..bc024c0bbd82d01939bc0fd191739875a4423f4f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/main_windows_test.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import "internal/poll" + +var ( + // Placeholders for saving original socket system calls. + origWSASocket = wsaSocketFunc + origClosesocket = poll.CloseFunc + origConnect = connectFunc + origConnectEx = poll.ConnectExFunc + origListen = listenFunc + origAccept = poll.AcceptFunc +) + +func installTestHooks() { + wsaSocketFunc = sw.WSASocket + poll.CloseFunc = sw.Closesocket + connectFunc = sw.Connect + poll.ConnectExFunc = sw.ConnectEx + listenFunc = sw.Listen + poll.AcceptFunc = sw.AcceptEx +} + +func uninstallTestHooks() { + wsaSocketFunc = origWSASocket + poll.CloseFunc = origClosesocket + connectFunc = origConnect + poll.ConnectExFunc = origConnectEx + listenFunc = origListen + poll.AcceptFunc = origAccept +} + +// forceCloseSockets must be called only from TestMain. 
+func forceCloseSockets() { + for s := range sw.Sockets() { + poll.CloseFunc(s) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/mockserver_test.go b/platform/dbops/binaries/go/go/src/net/mockserver_test.go new file mode 100644 index 0000000000000000000000000000000000000000..46b2a57321107b4d9ea2c8aeec892a6ea61e65eb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/mockserver_test.go @@ -0,0 +1,508 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "sync" + "testing" + "time" +) + +// testUnixAddr uses os.MkdirTemp to get a name that is unique. +func testUnixAddr(t testing.TB) string { + // Pass an empty pattern to get a directory name that is as short as possible. + // If we end up with a name longer than the sun_path field in the sockaddr_un + // struct, we won't be able to make the syscall to open the socket. + d, err := os.MkdirTemp("", "") + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := os.RemoveAll(d); err != nil { + t.Error(err) + } + }) + return filepath.Join(d, "sock") +} + +func newLocalListener(t testing.TB, network string, lcOpt ...*ListenConfig) Listener { + var lc *ListenConfig + switch len(lcOpt) { + case 0: + lc = new(ListenConfig) + case 1: + lc = lcOpt[0] + default: + t.Helper() + t.Fatal("too many ListenConfigs passed to newLocalListener: want 0 or 1") + } + + listen := func(net, addr string) Listener { + ln, err := lc.Listen(context.Background(), net, addr) + if err != nil { + t.Helper() + t.Fatal(err) + } + return ln + } + + switch network { + case "tcp": + if supportsIPv4() { + if !supportsIPv6() { + return listen("tcp4", "127.0.0.1:0") + } + if ln, err := Listen("tcp4", "127.0.0.1:0"); err == nil { + return ln + } + } + if supportsIPv6() { + return listen("tcp6", "[::1]:0") + } + case "tcp4": + if supportsIPv4() { + return listen("tcp4", "127.0.0.1:0") + } + case "tcp6": + if supportsIPv6() { + return listen("tcp6", "[::1]:0") + } + case "unix", "unixpacket": + return listen(network, testUnixAddr(t)) + } + + t.Helper() + t.Fatalf("%s is not supported", network) + return nil +} + +func newDualStackListener() (lns []*TCPListener, err error) { + var args = []struct { + network string + TCPAddr + }{ + {"tcp4", TCPAddr{IP: IPv4(127, 0, 0, 1)}}, + {"tcp6", TCPAddr{IP: IPv6loopback}}, + } + for i := 0; i < 64; i++ { + var port int + var lns []*TCPListener + for _, arg := range args { + arg.TCPAddr.Port = port + ln, err := ListenTCP(arg.network, &arg.TCPAddr) + if err != nil { + continue + } + port = ln.Addr().(*TCPAddr).Port + lns = append(lns, ln) + } + if len(lns) != len(args) { + for _, ln := range lns { + ln.Close() + } + continue + } + return lns, nil + } + return nil, errors.New("no dualstack port available") +} + +type localServer struct { + lnmu sync.RWMutex + Listener + done chan bool // signal that indicates server stopped + cl []Conn // accepted connection list +} + +func (ls *localServer) buildup(handler func(*localServer, Listener)) error { + go func() { + handler(ls, ls.Listener) + close(ls.done) + }() + return nil +} + +func (ls *localServer) teardown() error { + ls.lnmu.Lock() + defer ls.lnmu.Unlock() + if ls.Listener != nil { + network := ls.Listener.Addr().Network() + address := ls.Listener.Addr().String() + ls.Listener.Close() + for _, c := range ls.cl { + if err := c.Close(); err != nil { + return err + } + } + <-ls.done 
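+ // The handler goroutine has signaled completion on done, so no further
+ // connections will arrive; it is now safe to drop the listener.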
+ ls.Listener = nil + switch network { + case "unix", "unixpacket": + os.Remove(address) + } + } + return nil +} + +func newLocalServer(t testing.TB, network string) *localServer { + t.Helper() + ln := newLocalListener(t, network) + return &localServer{Listener: ln, done: make(chan bool)} +} + +type streamListener struct { + network, address string + Listener + done chan bool // signal that indicates server stopped +} + +func (sl *streamListener) newLocalServer() *localServer { + return &localServer{Listener: sl.Listener, done: make(chan bool)} +} + +type dualStackServer struct { + lnmu sync.RWMutex + lns []streamListener + port string + + cmu sync.RWMutex + cs []Conn // established connections at the passive open side +} + +func (dss *dualStackServer) buildup(handler func(*dualStackServer, Listener)) error { + for i := range dss.lns { + go func(i int) { + handler(dss, dss.lns[i].Listener) + close(dss.lns[i].done) + }(i) + } + return nil +} + +func (dss *dualStackServer) teardownNetwork(network string) error { + dss.lnmu.Lock() + for i := range dss.lns { + if network == dss.lns[i].network && dss.lns[i].Listener != nil { + dss.lns[i].Listener.Close() + <-dss.lns[i].done + dss.lns[i].Listener = nil + } + } + dss.lnmu.Unlock() + return nil +} + +func (dss *dualStackServer) teardown() error { + dss.lnmu.Lock() + for i := range dss.lns { + if dss.lns[i].Listener != nil { + dss.lns[i].Listener.Close() + <-dss.lns[i].done + } + } + dss.lns = dss.lns[:0] + dss.lnmu.Unlock() + dss.cmu.Lock() + for _, c := range dss.cs { + c.Close() + } + dss.cs = dss.cs[:0] + dss.cmu.Unlock() + return nil +} + +func newDualStackServer() (*dualStackServer, error) { + lns, err := newDualStackListener() + if err != nil { + return nil, err + } + _, port, err := SplitHostPort(lns[0].Addr().String()) + if err != nil { + lns[0].Close() + lns[1].Close() + return nil, err + } + return &dualStackServer{ + lns: []streamListener{ + {network: "tcp4", address: lns[0].Addr().String(), Listener: lns[0], done: make(chan bool)}, + {network: "tcp6", address: lns[1].Addr().String(), Listener: lns[1], done: make(chan bool)}, + }, + port: port, + }, nil +} + +func (ls *localServer) transponder(ln Listener, ch chan<- error) { + defer close(ch) + + switch ln := ln.(type) { + case *TCPListener: + ln.SetDeadline(time.Now().Add(someTimeout)) + case *UnixListener: + ln.SetDeadline(time.Now().Add(someTimeout)) + } + c, err := ln.Accept() + if err != nil { + if perr := parseAcceptError(err); perr != nil { + ch <- perr + } + ch <- err + return + } + ls.cl = append(ls.cl, c) + + network := ln.Addr().Network() + if c.LocalAddr().Network() != network || c.RemoteAddr().Network() != network { + ch <- fmt.Errorf("got %v->%v; expected %v->%v", c.LocalAddr().Network(), c.RemoteAddr().Network(), network, network) + return + } + c.SetDeadline(time.Now().Add(someTimeout)) + c.SetReadDeadline(time.Now().Add(someTimeout)) + c.SetWriteDeadline(time.Now().Add(someTimeout)) + + b := make([]byte, 256) + n, err := c.Read(b) + if err != nil { + if perr := parseReadError(err); perr != nil { + ch <- perr + } + ch <- err + return + } + if _, err := c.Write(b[:n]); err != nil { + if perr := parseWriteError(err); perr != nil { + ch <- perr + } + ch <- err + return + } +} + +func transceiver(c Conn, wb []byte, ch chan<- error) { + defer close(ch) + + c.SetDeadline(time.Now().Add(someTimeout)) + c.SetReadDeadline(time.Now().Add(someTimeout)) + c.SetWriteDeadline(time.Now().Add(someTimeout)) + + n, err := c.Write(wb) + if err != nil { + if perr := parseWriteError(err); 
perr != nil { + ch <- perr + } + ch <- err + return + } + if n != len(wb) { + ch <- fmt.Errorf("wrote %d; want %d", n, len(wb)) + } + rb := make([]byte, len(wb)) + n, err = c.Read(rb) + if err != nil { + if perr := parseReadError(err); perr != nil { + ch <- perr + } + ch <- err + return + } + if n != len(wb) { + ch <- fmt.Errorf("read %d; want %d", n, len(wb)) + } +} + +func newLocalPacketListener(t testing.TB, network string, lcOpt ...*ListenConfig) PacketConn { + var lc *ListenConfig + switch len(lcOpt) { + case 0: + lc = new(ListenConfig) + case 1: + lc = lcOpt[0] + default: + t.Helper() + t.Fatal("too many ListenConfigs passed to newLocalListener: want 0 or 1") + } + + listenPacket := func(net, addr string) PacketConn { + c, err := lc.ListenPacket(context.Background(), net, addr) + if err != nil { + t.Helper() + t.Fatal(err) + } + return c + } + + t.Helper() + switch network { + case "udp": + if supportsIPv4() { + return listenPacket("udp4", "127.0.0.1:0") + } + if supportsIPv6() { + return listenPacket("udp6", "[::1]:0") + } + case "udp4": + if supportsIPv4() { + return listenPacket("udp4", "127.0.0.1:0") + } + case "udp6": + if supportsIPv6() { + return listenPacket("udp6", "[::1]:0") + } + case "unixgram": + return listenPacket(network, testUnixAddr(t)) + } + + t.Fatalf("%s is not supported", network) + return nil +} + +func newDualStackPacketListener() (cs []*UDPConn, err error) { + var args = []struct { + network string + UDPAddr + }{ + {"udp4", UDPAddr{IP: IPv4(127, 0, 0, 1)}}, + {"udp6", UDPAddr{IP: IPv6loopback}}, + } + for i := 0; i < 64; i++ { + var port int + var cs []*UDPConn + for _, arg := range args { + arg.UDPAddr.Port = port + c, err := ListenUDP(arg.network, &arg.UDPAddr) + if err != nil { + continue + } + port = c.LocalAddr().(*UDPAddr).Port + cs = append(cs, c) + } + if len(cs) != len(args) { + for _, c := range cs { + c.Close() + } + continue + } + return cs, nil + } + return nil, errors.New("no dualstack port available") +} + +type localPacketServer struct { + pcmu sync.RWMutex + PacketConn + done chan bool // signal that indicates server stopped +} + +func (ls *localPacketServer) buildup(handler func(*localPacketServer, PacketConn)) error { + go func() { + handler(ls, ls.PacketConn) + close(ls.done) + }() + return nil +} + +func (ls *localPacketServer) teardown() error { + ls.pcmu.Lock() + if ls.PacketConn != nil { + network := ls.PacketConn.LocalAddr().Network() + address := ls.PacketConn.LocalAddr().String() + ls.PacketConn.Close() + <-ls.done + ls.PacketConn = nil + switch network { + case "unixgram": + os.Remove(address) + } + } + ls.pcmu.Unlock() + return nil +} + +func newLocalPacketServer(t testing.TB, network string) *localPacketServer { + t.Helper() + c := newLocalPacketListener(t, network) + return &localPacketServer{PacketConn: c, done: make(chan bool)} +} + +type packetListener struct { + PacketConn +} + +func (pl *packetListener) newLocalServer() *localPacketServer { + return &localPacketServer{PacketConn: pl.PacketConn, done: make(chan bool)} +} + +func packetTransponder(c PacketConn, ch chan<- error) { + defer close(ch) + + c.SetDeadline(time.Now().Add(someTimeout)) + c.SetReadDeadline(time.Now().Add(someTimeout)) + c.SetWriteDeadline(time.Now().Add(someTimeout)) + + b := make([]byte, 256) + n, peer, err := c.ReadFrom(b) + if err != nil { + if perr := parseReadError(err); perr != nil { + ch <- perr + } + ch <- err + return + } + if peer == nil { // for connected-mode sockets + switch c.LocalAddr().Network() { + case "udp": + peer, err = 
ResolveUDPAddr("udp", string(b[:n])) + case "unixgram": + peer, err = ResolveUnixAddr("unixgram", string(b[:n])) + } + if err != nil { + ch <- err + return + } + } + if _, err := c.WriteTo(b[:n], peer); err != nil { + if perr := parseWriteError(err); perr != nil { + ch <- perr + } + ch <- err + return + } +} + +func packetTransceiver(c PacketConn, wb []byte, dst Addr, ch chan<- error) { + defer close(ch) + + c.SetDeadline(time.Now().Add(someTimeout)) + c.SetReadDeadline(time.Now().Add(someTimeout)) + c.SetWriteDeadline(time.Now().Add(someTimeout)) + + n, err := c.WriteTo(wb, dst) + if err != nil { + if perr := parseWriteError(err); perr != nil { + ch <- perr + } + ch <- err + return + } + if n != len(wb) { + ch <- fmt.Errorf("wrote %d; want %d", n, len(wb)) + } + rb := make([]byte, len(wb)) + n, _, err = c.ReadFrom(rb) + if err != nil { + if perr := parseReadError(err); perr != nil { + ch <- perr + } + ch <- err + return + } + if n != len(wb) { + ch <- fmt.Errorf("read %d; want %d", n, len(wb)) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/mptcpsock_linux.go b/platform/dbops/binaries/go/go/src/net/mptcpsock_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..b2ac3ee7182a09c27d8a3e6400f470e6419c4ee8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/mptcpsock_linux.go @@ -0,0 +1,127 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "errors" + "internal/poll" + "internal/syscall/unix" + "sync" + "syscall" +) + +var ( + mptcpOnce sync.Once + mptcpAvailable bool + hasSOLMPTCP bool +) + +// These constants aren't in the syscall package, which is frozen +const ( + _IPPROTO_MPTCP = 0x106 + _SOL_MPTCP = 0x11c + _MPTCP_INFO = 0x1 +) + +func supportsMultipathTCP() bool { + mptcpOnce.Do(initMPTCPavailable) + return mptcpAvailable +} + +// Check that MPTCP is supported by attempting to create an MPTCP socket and by +// looking at the returned error if any. +func initMPTCPavailable() { + s, err := sysSocket(syscall.AF_INET, syscall.SOCK_STREAM, _IPPROTO_MPTCP) + switch { + case errors.Is(err, syscall.EPROTONOSUPPORT): // Not supported: >= v5.6 + case errors.Is(err, syscall.EINVAL): // Not supported: < v5.6 + case err == nil: // Supported and no error + poll.CloseFunc(s) + fallthrough + default: + // another error: MPTCP was not available but it might be later + mptcpAvailable = true + } + + major, minor := unix.KernelVersion() + // SOL_MPTCP only supported from kernel 5.16 + hasSOLMPTCP = major > 5 || (major == 5 && minor >= 16) +} + +func (sd *sysDialer) dialMPTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) { + if supportsMultipathTCP() { + if conn, err := sd.doDialTCPProto(ctx, laddr, raddr, _IPPROTO_MPTCP); err == nil { + return conn, nil + } + } + + // Fallback to dialTCP if Multipath TCP isn't supported on this operating + // system. But also fallback in case of any error with MPTCP. + // + // Possible MPTCP specific error: ENOPROTOOPT (sysctl net.mptcp.enabled=0) + // But just in case MPTCP is blocked differently (SELinux, etc.), just + // retry with "plain" TCP. 
+	return sd.dialTCP(ctx, laddr, raddr)
+}
+
+func (sl *sysListener) listenMPTCP(ctx context.Context, laddr *TCPAddr) (*TCPListener, error) {
+	if supportsMultipathTCP() {
+		if dial, err := sl.listenTCPProto(ctx, laddr, _IPPROTO_MPTCP); err == nil {
+			return dial, nil
+		}
+	}
+
+	// Fall back to listenTCP if Multipath TCP isn't supported on this operating
+	// system, and also fall back on any error with MPTCP.
+	//
+	// A possible MPTCP-specific error is ENOPROTOOPT (sysctl net.mptcp.enabled=0),
+	// but in case MPTCP is blocked differently (SELinux, etc.), just retry with
+	// "plain" TCP.
+	return sl.listenTCP(ctx, laddr)
+}
+
+// hasFallenBack reports whether the MPTCP connection has fallen back to "plain"
+// TCP.
+//
+// A connection can fall back to TCP for different reasons: the other peer
+// doesn't support it, a middlebox "accidentally" drops the option, etc.
+//
+// If the MPTCP protocol has not been requested when creating the socket, this
+// method will return true: MPTCP is not being used.
+//
+// Kernels >= 5.16 return EOPNOTSUPP/ENOPROTOOPT in case of fallback.
+// Older kernels always return them even if MPTCP is used, so they are not
+// usable for this check.
+func hasFallenBack(fd *netFD) bool {
+	_, err := fd.pfd.GetsockoptInt(_SOL_MPTCP, _MPTCP_INFO)
+
+	// Two errors are expected in case of fallback, depending on the address
+	// family:
+	// - AF_INET: EOPNOTSUPP
+	// - AF_INET6: ENOPROTOOPT
+	return err == syscall.EOPNOTSUPP || err == syscall.ENOPROTOOPT
+}
+
+// isUsingMPTCPProto reports whether the socket protocol is MPTCP.
+//
+// Compared to the hasFallenBack method, only the socket protocol is checked
+// here: the protocol can be MPTCP even when MPTCP is no longer used on the
+// wire because a fallback to TCP has occurred.
+func isUsingMPTCPProto(fd *netFD) bool {
+	proto, _ := fd.pfd.GetsockoptInt(syscall.SOL_SOCKET, syscall.SO_PROTOCOL)
+
+	return proto == _IPPROTO_MPTCP
+}
+
+// isUsingMultipathTCP reports whether MPTCP is still being used.
+//
+// See the descriptions of the hasFallenBack (kernel >= 5.16) and
+// isUsingMPTCPProto methods for details about what is being checked here.
+func isUsingMultipathTCP(fd *netFD) bool {
+	if hasSOLMPTCP {
+		return !hasFallenBack(fd)
+	}
+
+	return isUsingMPTCPProto(fd)
+}
diff --git a/platform/dbops/binaries/go/go/src/net/mptcpsock_linux_test.go b/platform/dbops/binaries/go/go/src/net/mptcpsock_linux_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..5134aba75eabb5d36e168fa1c5733ae7a01f6695
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/net/mptcpsock_linux_test.go
@@ -0,0 +1,192 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package net + +import ( + "bytes" + "context" + "errors" + "syscall" + "testing" +) + +func newLocalListenerMPTCP(t *testing.T, envVar bool) Listener { + lc := &ListenConfig{} + + if envVar { + if !lc.MultipathTCP() { + t.Fatal("MultipathTCP Listen is not on despite GODEBUG=multipathtcp=1") + } + } else { + if lc.MultipathTCP() { + t.Error("MultipathTCP should be off by default") + } + + lc.SetMultipathTCP(true) + if !lc.MultipathTCP() { + t.Fatal("MultipathTCP is not on after having been forced to on") + } + } + + ln, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + return ln +} + +func postAcceptMPTCP(ls *localServer, ch chan<- error) { + defer close(ch) + + if len(ls.cl) == 0 { + ch <- errors.New("no accepted stream") + return + } + + c := ls.cl[0] + + tcp, ok := c.(*TCPConn) + if !ok { + ch <- errors.New("struct is not a TCPConn") + return + } + + mptcp, err := tcp.MultipathTCP() + if err != nil { + ch <- err + return + } + + if !mptcp { + ch <- errors.New("incoming connection is not with MPTCP") + return + } + + // Also check the method for the older kernels if not tested before + if hasSOLMPTCP && !isUsingMPTCPProto(tcp.fd) { + ch <- errors.New("incoming connection is not an MPTCP proto") + return + } +} + +func dialerMPTCP(t *testing.T, addr string, envVar bool) { + d := &Dialer{} + + if envVar { + if !d.MultipathTCP() { + t.Fatal("MultipathTCP Dialer is not on despite GODEBUG=multipathtcp=1") + } + } else { + if d.MultipathTCP() { + t.Error("MultipathTCP should be off by default") + } + + d.SetMultipathTCP(true) + if !d.MultipathTCP() { + t.Fatal("MultipathTCP is not on after having been forced to on") + } + } + + c, err := d.Dial("tcp", addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + tcp, ok := c.(*TCPConn) + if !ok { + t.Fatal("struct is not a TCPConn") + } + + // Transfer a bit of data to make sure everything is still OK + snt := []byte("MPTCP TEST") + if _, err := c.Write(snt); err != nil { + t.Fatal(err) + } + b := make([]byte, len(snt)) + if _, err := c.Read(b); err != nil { + t.Fatal(err) + } + if !bytes.Equal(snt, b) { + t.Errorf("sent bytes (%s) are different from received ones (%s)", snt, b) + } + + mptcp, err := tcp.MultipathTCP() + if err != nil { + t.Fatal(err) + } + + t.Logf("outgoing connection from %s with mptcp: %t", addr, mptcp) + + if !mptcp { + t.Error("outgoing connection is not with MPTCP") + } + + // Also check the method for the older kernels if not tested before + if hasSOLMPTCP && !isUsingMPTCPProto(tcp.fd) { + t.Error("outgoing connection is not an MPTCP proto") + } +} + +func canCreateMPTCPSocket() bool { + // We want to know if we can create an MPTCP socket, not just if it is + // available (mptcpAvailable()): it could be blocked by the admin + fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, _IPPROTO_MPTCP) + if err != nil { + return false + } + + syscall.Close(fd) + return true +} + +func testMultiPathTCP(t *testing.T, envVar bool) { + if envVar { + t.Log("Test with GODEBUG=multipathtcp=1") + t.Setenv("GODEBUG", "multipathtcp=1") + } else { + t.Log("Test with GODEBUG=multipathtcp=0") + t.Setenv("GODEBUG", "multipathtcp=0") + } + + ln := newLocalListenerMPTCP(t, envVar) + + // similar to tcpsock_test:TestIPv6LinkLocalUnicastTCP + ls := (&streamListener{Listener: ln}).newLocalServer() + defer ls.teardown() + + if g, w := ls.Listener.Addr().Network(), "tcp"; g != w { + t.Fatalf("Network type mismatch: got %q, want %q", g, w) + } + + genericCh := make(chan error) + 
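// genericCh carries the echo handler's result; mptcpCh carries the result + // of the MPTCP-specific post-accept checks in postAcceptMPTCP. +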
mptcpCh := make(chan error) + handler := func(ls *localServer, ln Listener) { + ls.transponder(ln, genericCh) + postAcceptMPTCP(ls, mptcpCh) + } + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + dialerMPTCP(t, ln.Addr().String(), envVar) + + if err := <-genericCh; err != nil { + t.Error(err) + } + if err := <-mptcpCh; err != nil { + t.Error(err) + } +} + +func TestMultiPathTCP(t *testing.T) { + if !canCreateMPTCPSocket() { + t.Skip("Cannot create MPTCP sockets") + } + + for _, envVar := range []bool{false, true} { + testMultiPathTCP(t, envVar) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/mptcpsock_stub.go b/platform/dbops/binaries/go/go/src/net/mptcpsock_stub.go new file mode 100644 index 0000000000000000000000000000000000000000..458c1530d75d6c80fcab2ef0767a3451cd084692 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/mptcpsock_stub.go @@ -0,0 +1,23 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux + +package net + +import ( + "context" +) + +func (sd *sysDialer) dialMPTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) { + return sd.dialTCP(ctx, laddr, raddr) +} + +func (sl *sysListener) listenMPTCP(ctx context.Context, laddr *TCPAddr) (*TCPListener, error) { + return sl.listenTCP(ctx, laddr) +} + +func isUsingMultipathTCP(fd *netFD) bool { + return false +} diff --git a/platform/dbops/binaries/go/go/src/net/net.go b/platform/dbops/binaries/go/go/src/net/net.go new file mode 100644 index 0000000000000000000000000000000000000000..2dd1b5865e5dc60b898288c2fc379830e152374e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/net.go @@ -0,0 +1,815 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package net provides a portable interface for network I/O, including +TCP/IP, UDP, domain name resolution, and Unix domain sockets. + +Although the package provides access to low-level networking +primitives, most clients will need only the basic interface provided +by the [Dial], [Listen], and Accept functions and the associated +[Conn] and [Listener] interfaces. The crypto/tls package uses +the same interfaces and similar Dial and Listen functions. + +The Dial function connects to a server: + + conn, err := net.Dial("tcp", "golang.org:80") + if err != nil { + // handle error + } + fmt.Fprintf(conn, "GET / HTTP/1.0\r\n\r\n") + status, err := bufio.NewReader(conn).ReadString('\n') + // ... + +The Listen function creates servers: + + ln, err := net.Listen("tcp", ":8080") + if err != nil { + // handle error + } + for { + conn, err := ln.Accept() + if err != nil { + // handle error + } + go handleConnection(conn) + } + +# Name Resolution + +The method for resolving domain names, whether indirectly with functions like Dial +or directly with functions like [LookupHost] and [LookupAddr], varies by operating system. + +On Unix systems, the resolver has two options for resolving names. +It can use a pure Go resolver that sends DNS requests directly to the servers +listed in /etc/resolv.conf, or it can use a cgo-based resolver that calls C +library routines such as getaddrinfo and getnameinfo. + +By default the pure Go resolver is used, because a blocked DNS request consumes +only a goroutine, while a blocked C call consumes an operating system thread. 
+When cgo is available, the cgo-based resolver is used instead under a variety of +conditions: on systems that do not let programs make direct DNS requests (OS X), +when the LOCALDOMAIN environment variable is present (even if empty), +when the RES_OPTIONS or HOSTALIASES environment variable is non-empty, +when the ASR_CONFIG environment variable is non-empty (OpenBSD only), +when /etc/resolv.conf or /etc/nsswitch.conf specify the use of features that the +Go resolver does not implement, and when the name being looked up ends in .local +or is an mDNS name. + +The resolver decision can be overridden by setting the netdns value of the +GODEBUG environment variable (see package runtime) to go or cgo, as in: + + export GODEBUG=netdns=go # force pure Go resolver + export GODEBUG=netdns=cgo # force native resolver (cgo, win32) + +The decision can also be forced while building the Go source tree +by setting the netgo or netcgo build tag. + +A numeric netdns setting, as in GODEBUG=netdns=1, causes the resolver +to print debugging information about its decisions. +To force a particular resolver while also printing debugging information, +join the two settings by a plus sign, as in GODEBUG=netdns=go+1. + +The Go resolver will send an EDNS0 additional header with a DNS request, +to signal a willingness to accept a larger DNS packet size. +This can reportedly cause sporadic failures with the DNS server run +by some modems and routers. Setting GODEBUG=netedns0=0 will disable +sending the additional header. + +On macOS, if Go code that uses the net package is built with +-buildmode=c-archive, linking the resulting archive into a C program +requires passing -lresolv when linking the C code. + +On Plan 9, the resolver always accesses /net/cs and /net/dns. + +On Windows, in Go 1.18.x and earlier, the resolver always used C +library functions, such as GetAddrInfo and DnsQuery. +*/ +package net + +import ( + "context" + "errors" + "internal/poll" + "io" + "os" + "sync" + "syscall" + "time" +) + +// Addr represents a network end point address. +// +// The two methods [Addr.Network] and [Addr.String] conventionally return strings +// that can be passed as the arguments to [Dial], but the exact form +// and meaning of the strings is up to the implementation. +type Addr interface { + Network() string // name of the network (for example, "tcp", "udp") + String() string // string form of address (for example, "192.0.2.1:25", "[2001:db8::1]:80") +} + +// Conn is a generic stream-oriented network connection. +// +// Multiple goroutines may invoke methods on a Conn simultaneously. +type Conn interface { + // Read reads data from the connection. + // Read can be made to time out and return an error after a fixed + // time limit; see SetDeadline and SetReadDeadline. + Read(b []byte) (n int, err error) + + // Write writes data to the connection. + // Write can be made to time out and return an error after a fixed + // time limit; see SetDeadline and SetWriteDeadline. + Write(b []byte) (n int, err error) + + // Close closes the connection. + // Any blocked Read or Write operations will be unblocked and return errors. + Close() error + + // LocalAddr returns the local network address, if known. + LocalAddr() Addr + + // RemoteAddr returns the remote network address, if known. + RemoteAddr() Addr + + // SetDeadline sets the read and write deadlines associated + // with the connection. It is equivalent to calling both + // SetReadDeadline and SetWriteDeadline. 
+ // + // A deadline is an absolute time after which I/O operations + // fail instead of blocking. The deadline applies to all future + // and pending I/O, not just the immediately following call to + // Read or Write. After a deadline has been exceeded, the + // connection can be refreshed by setting a deadline in the future. + // + // If the deadline is exceeded a call to Read or Write or to other + // I/O methods will return an error that wraps os.ErrDeadlineExceeded. + // This can be tested using errors.Is(err, os.ErrDeadlineExceeded). + // The error's Timeout method will return true, but note that there + // are other possible errors for which the Timeout method will + // return true even if the deadline has not been exceeded. + // + // An idle timeout can be implemented by repeatedly extending + // the deadline after successful Read or Write calls. + // + // A zero value for t means I/O operations will not time out. + SetDeadline(t time.Time) error + + // SetReadDeadline sets the deadline for future Read calls + // and any currently-blocked Read call. + // A zero value for t means Read will not time out. + SetReadDeadline(t time.Time) error + + // SetWriteDeadline sets the deadline for future Write calls + // and any currently-blocked Write call. + // Even if write times out, it may return n > 0, indicating that + // some of the data was successfully written. + // A zero value for t means Write will not time out. + SetWriteDeadline(t time.Time) error +} + +type conn struct { + fd *netFD +} + +func (c *conn) ok() bool { return c != nil && c.fd != nil } + +// Implementation of the Conn interface. + +// Read implements the Conn Read method. +func (c *conn) Read(b []byte) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + n, err := c.fd.Read(b) + if err != nil && err != io.EOF { + err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return n, err +} + +// Write implements the Conn Write method. +func (c *conn) Write(b []byte) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + n, err := c.fd.Write(b) + if err != nil { + err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return n, err +} + +// Close closes the connection. +func (c *conn) Close() error { + if !c.ok() { + return syscall.EINVAL + } + err := c.fd.Close() + if err != nil { + err = &OpError{Op: "close", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return err +} + +// LocalAddr returns the local network address. +// The Addr returned is shared by all invocations of LocalAddr, so +// do not modify it. +func (c *conn) LocalAddr() Addr { + if !c.ok() { + return nil + } + return c.fd.laddr +} + +// RemoteAddr returns the remote network address. +// The Addr returned is shared by all invocations of RemoteAddr, so +// do not modify it. +func (c *conn) RemoteAddr() Addr { + if !c.ok() { + return nil + } + return c.fd.raddr +} + +// SetDeadline implements the Conn SetDeadline method. +func (c *conn) SetDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + if err := c.fd.SetDeadline(t); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err} + } + return nil +} + +// SetReadDeadline implements the Conn SetReadDeadline method. 
+func (c *conn) SetReadDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + if err := c.fd.SetReadDeadline(t); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err} + } + return nil +} + +// SetWriteDeadline implements the Conn SetWriteDeadline method. +func (c *conn) SetWriteDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + if err := c.fd.SetWriteDeadline(t); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err} + } + return nil +} + +// SetReadBuffer sets the size of the operating system's +// receive buffer associated with the connection. +func (c *conn) SetReadBuffer(bytes int) error { + if !c.ok() { + return syscall.EINVAL + } + if err := setReadBuffer(c.fd, bytes); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err} + } + return nil +} + +// SetWriteBuffer sets the size of the operating system's +// transmit buffer associated with the connection. +func (c *conn) SetWriteBuffer(bytes int) error { + if !c.ok() { + return syscall.EINVAL + } + if err := setWriteBuffer(c.fd, bytes); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err} + } + return nil +} + +// File returns a copy of the underlying [os.File]. +// It is the caller's responsibility to close f when finished. +// Closing c does not affect f, and closing f does not affect c. +// +// The returned os.File's file descriptor is different from the connection's. +// Attempting to change properties of the original using this duplicate +// may or may not have the desired effect. +func (c *conn) File() (f *os.File, err error) { + f, err = c.fd.dup() + if err != nil { + err = &OpError{Op: "file", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return +} + +// PacketConn is a generic packet-oriented network connection. +// +// Multiple goroutines may invoke methods on a PacketConn simultaneously. +type PacketConn interface { + // ReadFrom reads a packet from the connection, + // copying the payload into p. It returns the number of + // bytes copied into p and the return address that + // was on the packet. + // It returns the number of bytes read (0 <= n <= len(p)) + // and any error encountered. Callers should always process + // the n > 0 bytes returned before considering the error err. + // ReadFrom can be made to time out and return an error after a + // fixed time limit; see SetDeadline and SetReadDeadline. + ReadFrom(p []byte) (n int, addr Addr, err error) + + // WriteTo writes a packet with payload p to addr. + // WriteTo can be made to time out and return an Error after a + // fixed time limit; see SetDeadline and SetWriteDeadline. + // On packet-oriented connections, write timeouts are rare. + WriteTo(p []byte, addr Addr) (n int, err error) + + // Close closes the connection. + // Any blocked ReadFrom or WriteTo operations will be unblocked and return errors. + Close() error + + // LocalAddr returns the local network address, if known. + LocalAddr() Addr + + // SetDeadline sets the read and write deadlines associated + // with the connection. It is equivalent to calling both + // SetReadDeadline and SetWriteDeadline. + // + // A deadline is an absolute time after which I/O operations + // fail instead of blocking. The deadline applies to all future + // and pending I/O, not just the immediately following call to + // Read or Write. 
After a deadline has been exceeded, the + // connection can be refreshed by setting a deadline in the future. + // + // If the deadline is exceeded a call to Read or Write or to other + // I/O methods will return an error that wraps os.ErrDeadlineExceeded. + // This can be tested using errors.Is(err, os.ErrDeadlineExceeded). + // The error's Timeout method will return true, but note that there + // are other possible errors for which the Timeout method will + // return true even if the deadline has not been exceeded. + // + // An idle timeout can be implemented by repeatedly extending + // the deadline after successful ReadFrom or WriteTo calls. + // + // A zero value for t means I/O operations will not time out. + SetDeadline(t time.Time) error + + // SetReadDeadline sets the deadline for future ReadFrom calls + // and any currently-blocked ReadFrom call. + // A zero value for t means ReadFrom will not time out. + SetReadDeadline(t time.Time) error + + // SetWriteDeadline sets the deadline for future WriteTo calls + // and any currently-blocked WriteTo call. + // Even if write times out, it may return n > 0, indicating that + // some of the data was successfully written. + // A zero value for t means WriteTo will not time out. + SetWriteDeadline(t time.Time) error +} + +var listenerBacklogCache struct { + sync.Once + val int +} + +// listenerBacklog is a caching wrapper around maxListenerBacklog. +func listenerBacklog() int { + listenerBacklogCache.Do(func() { listenerBacklogCache.val = maxListenerBacklog() }) + return listenerBacklogCache.val +} + +// A Listener is a generic network listener for stream-oriented protocols. +// +// Multiple goroutines may invoke methods on a Listener simultaneously. +type Listener interface { + // Accept waits for and returns the next connection to the listener. + Accept() (Conn, error) + + // Close closes the listener. + // Any blocked Accept operations will be unblocked and return errors. + Close() error + + // Addr returns the listener's network address. + Addr() Addr +} + +// An Error represents a network error. +type Error interface { + error + Timeout() bool // Is the error a timeout? + + // Deprecated: Temporary errors are not well-defined. + // Most "temporary" errors are timeouts, and the few exceptions are surprising. + // Do not use this method. + Temporary() bool +} + +// Various errors contained in OpError. +var ( + // For connection setup operations. + errNoSuitableAddress = errors.New("no suitable address found") + + // For connection setup and write operations. + errMissingAddress = errors.New("missing address") + + // For both read and write operations. + errCanceled = canceledError{} + ErrWriteToConnected = errors.New("use of WriteTo with pre-connected connection") +) + +// canceledError lets us return the same error string we have always +// returned, while still being Is context.Canceled. +type canceledError struct{} + +func (canceledError) Error() string { return "operation was canceled" } + +func (canceledError) Is(err error) bool { return err == context.Canceled } + +// mapErr maps from the context errors to the historical internal net +// error values. +func mapErr(err error) error { + switch err { + case context.Canceled: + return errCanceled + case context.DeadlineExceeded: + return errTimeout + default: + return err + } +} + +// OpError is the error type usually returned by functions in the net +// package. It describes the operation, network type, and address of +// an error. 
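+//
+// For example, a timed-out read may produce an error string such as
+// "read tcp 10.0.0.1:3456->10.0.0.2:80: i/o timeout" (addresses are
+// illustrative); the Error method below shows the exact layout.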
+type OpError struct { + // Op is the operation which caused the error, such as + // "read" or "write". + Op string + + // Net is the network type on which this error occurred, + // such as "tcp" or "udp6". + Net string + + // For operations involving a remote network connection, like + // Dial, Read, or Write, Source is the corresponding local + // network address. + Source Addr + + // Addr is the network address for which this error occurred. + // For local operations, like Listen or SetDeadline, Addr is + // the address of the local endpoint being manipulated. + // For operations involving a remote network connection, like + // Dial, Read, or Write, Addr is the remote address of that + // connection. + Addr Addr + + // Err is the error that occurred during the operation. + // The Error method panics if the error is nil. + Err error +} + +func (e *OpError) Unwrap() error { return e.Err } + +func (e *OpError) Error() string { + if e == nil { + return "" + } + s := e.Op + if e.Net != "" { + s += " " + e.Net + } + if e.Source != nil { + s += " " + e.Source.String() + } + if e.Addr != nil { + if e.Source != nil { + s += "->" + } else { + s += " " + } + s += e.Addr.String() + } + s += ": " + e.Err.Error() + return s +} + +var ( + // aLongTimeAgo is a non-zero time, far in the past, used for + // immediate cancellation of dials. + aLongTimeAgo = time.Unix(1, 0) + + // noDeadline and noCancel are just zero values for + // readability with functions taking too many parameters. + noDeadline = time.Time{} + noCancel = (chan struct{})(nil) +) + +type timeout interface { + Timeout() bool +} + +func (e *OpError) Timeout() bool { + if ne, ok := e.Err.(*os.SyscallError); ok { + t, ok := ne.Err.(timeout) + return ok && t.Timeout() + } + t, ok := e.Err.(timeout) + return ok && t.Timeout() +} + +type temporary interface { + Temporary() bool +} + +func (e *OpError) Temporary() bool { + // Treat ECONNRESET and ECONNABORTED as temporary errors when + // they come from calling accept. See issue 6163. + if e.Op == "accept" && isConnError(e.Err) { + return true + } + + if ne, ok := e.Err.(*os.SyscallError); ok { + t, ok := ne.Err.(temporary) + return ok && t.Temporary() + } + t, ok := e.Err.(temporary) + return ok && t.Temporary() +} + +// A ParseError is the error type of literal network address parsers. +type ParseError struct { + // Type is the type of string that was expected, such as + // "IP address", "CIDR address". + Type string + + // Text is the malformed text string. 
+ Text string +} + +func (e *ParseError) Error() string { return "invalid " + e.Type + ": " + e.Text } + +func (e *ParseError) Timeout() bool { return false } +func (e *ParseError) Temporary() bool { return false } + +type AddrError struct { + Err string + Addr string +} + +func (e *AddrError) Error() string { + if e == nil { + return "" + } + s := e.Err + if e.Addr != "" { + s = "address " + e.Addr + ": " + s + } + return s +} + +func (e *AddrError) Timeout() bool { return false } +func (e *AddrError) Temporary() bool { return false } + +type UnknownNetworkError string + +func (e UnknownNetworkError) Error() string { return "unknown network " + string(e) } +func (e UnknownNetworkError) Timeout() bool { return false } +func (e UnknownNetworkError) Temporary() bool { return false } + +type InvalidAddrError string + +func (e InvalidAddrError) Error() string { return string(e) } +func (e InvalidAddrError) Timeout() bool { return false } +func (e InvalidAddrError) Temporary() bool { return false } + +// errTimeout exists to return the historical "i/o timeout" string +// for context.DeadlineExceeded. See mapErr. +// It is also used when Dialer.Deadline is exceeded. +// error.Is(errTimeout, context.DeadlineExceeded) returns true. +// +// TODO(iant): We could consider changing this to os.ErrDeadlineExceeded +// in the future, if we make +// +// errors.Is(os.ErrDeadlineExceeded, context.DeadlineExceeded) +// +// return true. +var errTimeout error = &timeoutError{} + +type timeoutError struct{} + +func (e *timeoutError) Error() string { return "i/o timeout" } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } + +func (e *timeoutError) Is(err error) bool { + return err == context.DeadlineExceeded +} + +// DNSConfigError represents an error reading the machine's DNS configuration. +// (No longer used; kept for compatibility.) +type DNSConfigError struct { + Err error +} + +func (e *DNSConfigError) Unwrap() error { return e.Err } +func (e *DNSConfigError) Error() string { return "error reading DNS config: " + e.Err.Error() } +func (e *DNSConfigError) Timeout() bool { return false } +func (e *DNSConfigError) Temporary() bool { return false } + +// Various errors contained in DNSError. +var ( + errNoSuchHost = errors.New("no such host") +) + +// DNSError represents a DNS lookup error. +type DNSError struct { + Err string // description of the error + Name string // name looked for + Server string // server used + IsTimeout bool // if true, timed out; not all timeouts set this + IsTemporary bool // if true, error is temporary; not all errors set this + + // IsNotFound is set to true when the requested name does not + // contain any records of the requested type (data not found), + // or the name itself was not found (NXDOMAIN). + IsNotFound bool +} + +func (e *DNSError) Error() string { + if e == nil { + return "" + } + s := "lookup " + e.Name + if e.Server != "" { + s += " on " + e.Server + } + s += ": " + e.Err + return s +} + +// Timeout reports whether the DNS lookup is known to have timed out. +// This is not always known; a DNS lookup may fail due to a timeout +// and return a [DNSError] for which Timeout returns false. +func (e *DNSError) Timeout() bool { return e.IsTimeout } + +// Temporary reports whether the DNS error is known to be temporary. +// This is not always known; a DNS lookup may fail due to a temporary +// error and return a [DNSError] for which Temporary returns false. 
+func (e *DNSError) Temporary() bool { return e.IsTimeout || e.IsTemporary } + +// errClosed exists just so that the docs for ErrClosed don't mention +// the internal package poll. +var errClosed = poll.ErrNetClosing + +// ErrClosed is the error returned by an I/O call on a network +// connection that has already been closed, or that is closed by +// another goroutine before the I/O is completed. This may be wrapped +// in another error, and should normally be tested using +// errors.Is(err, net.ErrClosed). +var ErrClosed error = errClosed + +// noReadFrom can be embedded alongside another type to +// hide the ReadFrom method of that other type. +type noReadFrom struct{} + +// ReadFrom hides another ReadFrom method. +// It should never be called. +func (noReadFrom) ReadFrom(io.Reader) (int64, error) { + panic("can't happen") +} + +// tcpConnWithoutReadFrom implements all the methods of *TCPConn other +// than ReadFrom. This is used to permit ReadFrom to call io.Copy +// without leading to a recursive call to ReadFrom. +type tcpConnWithoutReadFrom struct { + noReadFrom + *TCPConn +} + +// Fallback implementation of io.ReaderFrom's ReadFrom, when sendfile isn't +// applicable. +func genericReadFrom(c *TCPConn, r io.Reader) (n int64, err error) { + // Use wrapper to hide existing r.ReadFrom from io.Copy. + return io.Copy(tcpConnWithoutReadFrom{TCPConn: c}, r) +} + +// noWriteTo can be embedded alongside another type to +// hide the WriteTo method of that other type. +type noWriteTo struct{} + +// WriteTo hides another WriteTo method. +// It should never be called. +func (noWriteTo) WriteTo(io.Writer) (int64, error) { + panic("can't happen") +} + +// tcpConnWithoutWriteTo implements all the methods of *TCPConn other +// than WriteTo. This is used to permit WriteTo to call io.Copy +// without leading to a recursive call to WriteTo. +type tcpConnWithoutWriteTo struct { + noWriteTo + *TCPConn +} + +// Fallback implementation of io.WriterTo's WriteTo, when zero-copy isn't applicable. +func genericWriteTo(c *TCPConn, w io.Writer) (n int64, err error) { + // Use wrapper to hide existing w.WriteTo from io.Copy. + return io.Copy(w, tcpConnWithoutWriteTo{TCPConn: c}) +} + +// Limit the number of concurrent cgo-using goroutines, because +// each will block an entire operating system thread. The usual culprit +// is resolving many DNS names in separate goroutines but the DNS +// server is not responding. Then the many lookups each use a different +// thread, and the system or the program runs out of threads. + +var threadLimit chan struct{} + +var threadOnce sync.Once + +func acquireThread() { + threadOnce.Do(func() { + threadLimit = make(chan struct{}, concurrentThreadsLimit()) + }) + threadLimit <- struct{}{} +} + +func releaseThread() { + <-threadLimit +} + +// buffersWriter is the interface implemented by Conns that support a +// "writev"-like batch write optimization. +// writeBuffers should fully consume and write all chunks from the +// provided Buffers, else it should report a non-nil error. +type buffersWriter interface { + writeBuffers(*Buffers) (int64, error) +} + +// Buffers contains zero or more runs of bytes to write. +// +// On certain machines, for certain types of connections, this is +// optimized into an OS-specific batch write operation (such as +// "writev"). +type Buffers [][]byte + +var ( + _ io.WriterTo = (*Buffers)(nil) + _ io.Reader = (*Buffers)(nil) +) + +// WriteTo writes contents of the buffers to w. +// +// WriteTo implements [io.WriterTo] for [Buffers]. 
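+//
+// An illustrative use, assuming w is an io.Writer such as a *TCPConn:
+//
+//	bufs := Buffers{[]byte("HTTP/1.1 200 OK\r\n"), []byte("\r\n")}
+//	_, err := bufs.WriteTo(w)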
+// +// WriteTo modifies the slice v as well as v[i] for 0 <= i < len(v), +// but does not modify v[i][j] for any i, j. +func (v *Buffers) WriteTo(w io.Writer) (n int64, err error) { + if wv, ok := w.(buffersWriter); ok { + return wv.writeBuffers(v) + } + for _, b := range *v { + nb, err := w.Write(b) + n += int64(nb) + if err != nil { + v.consume(n) + return n, err + } + } + v.consume(n) + return n, nil +} + +// Read from the buffers. +// +// Read implements [io.Reader] for [Buffers]. +// +// Read modifies the slice v as well as v[i] for 0 <= i < len(v), +// but does not modify v[i][j] for any i, j. +func (v *Buffers) Read(p []byte) (n int, err error) { + for len(p) > 0 && len(*v) > 0 { + n0 := copy(p, (*v)[0]) + v.consume(int64(n0)) + p = p[n0:] + n += n0 + } + if len(*v) == 0 { + err = io.EOF + } + return +} + +func (v *Buffers) consume(n int64) { + for len(*v) > 0 { + ln0 := int64(len((*v)[0])) + if ln0 > n { + (*v)[0] = (*v)[0][n:] + return + } + n -= ln0 + (*v)[0] = nil + *v = (*v)[1:] + } +} diff --git a/platform/dbops/binaries/go/go/src/net/net_fake.go b/platform/dbops/binaries/go/go/src/net/net_fake.go new file mode 100644 index 0000000000000000000000000000000000000000..525ff3229683342315cbdc66eb400f1c52e00785 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/net_fake.go @@ -0,0 +1,1170 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Fake networking for js/wasm and wasip1/wasm. +// It is intended to allow tests of other package to pass. + +//go:build js || wasip1 + +package net + +import ( + "context" + "errors" + "io" + "os" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" +) + +var ( + sockets sync.Map // fakeSockAddr → *netFD + fakeSocketIDs sync.Map // fakeNetFD.id → *netFD + fakePorts sync.Map // int (port #) → *netFD + nextPortCounter atomic.Int32 +) + +const defaultBuffer = 65535 + +type fakeSockAddr struct { + family int + address string +} + +func fakeAddr(sa sockaddr) fakeSockAddr { + return fakeSockAddr{ + family: sa.family(), + address: sa.String(), + } +} + +// socket returns a network file descriptor that is ready for +// I/O using the fake network. 
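+// A nil raddr makes the returned socket a listener on laddr; a non-nil raddr
+// dials. Combining a non-nil raddr with a ctrlCtxFn is rejected with ENOTSUP.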
+func socket(ctx context.Context, net string, family, sotype, proto int, ipv6only bool, laddr, raddr sockaddr, ctrlCtxFn func(context.Context, string, string, syscall.RawConn) error) (*netFD, error) { + if raddr != nil && ctrlCtxFn != nil { + return nil, os.NewSyscallError("socket", syscall.ENOTSUP) + } + switch sotype { + case syscall.SOCK_STREAM, syscall.SOCK_SEQPACKET, syscall.SOCK_DGRAM: + default: + return nil, os.NewSyscallError("socket", syscall.ENOTSUP) + } + + fd := &netFD{ + family: family, + sotype: sotype, + net: net, + } + fd.fakeNetFD = newFakeNetFD(fd) + + if raddr == nil { + if err := fakeListen(fd, laddr); err != nil { + fd.Close() + return nil, err + } + return fd, nil + } + + if err := fakeConnect(ctx, fd, laddr, raddr); err != nil { + fd.Close() + return nil, err + } + return fd, nil +} + +func validateResolvedAddr(net string, family int, sa sockaddr) error { + validateIP := func(ip IP) error { + switch family { + case syscall.AF_INET: + if len(ip) != 4 { + return &AddrError{ + Err: "non-IPv4 address", + Addr: ip.String(), + } + } + case syscall.AF_INET6: + if len(ip) != 16 { + return &AddrError{ + Err: "non-IPv6 address", + Addr: ip.String(), + } + } + default: + panic("net: unexpected address family in validateResolvedAddr") + } + return nil + } + + switch net { + case "tcp", "tcp4", "tcp6": + sa, ok := sa.(*TCPAddr) + if !ok { + return &AddrError{ + Err: "non-TCP address for " + net + " network", + Addr: sa.String(), + } + } + if err := validateIP(sa.IP); err != nil { + return err + } + if sa.Port <= 0 || sa.Port >= 1<<16 { + return &AddrError{ + Err: "port out of range", + Addr: sa.String(), + } + } + return nil + + case "udp", "udp4", "udp6": + sa, ok := sa.(*UDPAddr) + if !ok { + return &AddrError{ + Err: "non-UDP address for " + net + " network", + Addr: sa.String(), + } + } + if err := validateIP(sa.IP); err != nil { + return err + } + if sa.Port <= 0 || sa.Port >= 1<<16 { + return &AddrError{ + Err: "port out of range", + Addr: sa.String(), + } + } + return nil + + case "unix", "unixgram", "unixpacket": + sa, ok := sa.(*UnixAddr) + if !ok { + return &AddrError{ + Err: "non-Unix address for " + net + " network", + Addr: sa.String(), + } + } + if sa.Name != "" { + i := len(sa.Name) - 1 + for i > 0 && !os.IsPathSeparator(sa.Name[i]) { + i-- + } + for i > 0 && os.IsPathSeparator(sa.Name[i]) { + i-- + } + if i <= 0 { + return &AddrError{ + Err: "unix socket name missing path component", + Addr: sa.Name, + } + } + if _, err := os.Stat(sa.Name[:i+1]); err != nil { + return &AddrError{ + Err: err.Error(), + Addr: sa.Name, + } + } + } + return nil + + default: + return &AddrError{ + Err: syscall.EAFNOSUPPORT.Error(), + Addr: sa.String(), + } + } +} + +func matchIPFamily(family int, addr sockaddr) sockaddr { + convertIP := func(ip IP) IP { + switch family { + case syscall.AF_INET: + return ip.To4() + case syscall.AF_INET6: + return ip.To16() + default: + return ip + } + } + + switch addr := addr.(type) { + case *TCPAddr: + ip := convertIP(addr.IP) + if ip == nil || len(ip) == len(addr.IP) { + return addr + } + return &TCPAddr{IP: ip, Port: addr.Port, Zone: addr.Zone} + case *UDPAddr: + ip := convertIP(addr.IP) + if ip == nil || len(ip) == len(addr.IP) { + return addr + } + return &UDPAddr{IP: ip, Port: addr.Port, Zone: addr.Zone} + default: + return addr + } +} + +type fakeNetFD struct { + fd *netFD + assignedPort int // 0 if no port has been assigned for this socket + + queue *packetQueue // incoming packets + peer *netFD // connected peer (for outgoing packets); nil 
for listeners and PacketConns + readDeadline atomic.Pointer[deadlineTimer] + writeDeadline atomic.Pointer[deadlineTimer] + + fakeAddr fakeSockAddr // cached fakeSockAddr equivalent of fd.laddr + + // The incoming channels hold incoming connections that have not yet been accepted. + // All of these channels are 1-buffered. + incoming chan []*netFD // holds the queue when it has >0 but = q.readBufferBytes: + pq.full <- q + case q.head == nil: + if q.nBytes > 0 { + defer panic("net: put with nil packet list and nonzero nBytes") + } + pq.empty <- q + default: + pq.ready <- q + } +} + +func (pq *packetQueue) closeRead() error { + q := pq.get() + + // Discard any unread packets. + for q.head != nil { + p := q.head + q.head = p.next + p.clear() + packetPool.Put(p) + } + q.nBytes = 0 + + q.readClosed = true + pq.put(q) + return nil +} + +func (pq *packetQueue) closeWrite() error { + q := pq.get() + q.writeClosed = true + pq.put(q) + return nil +} + +func (pq *packetQueue) setLinger(linger bool) error { + q := pq.get() + defer func() { pq.put(q) }() + + if q.writeClosed { + return ErrClosed + } + q.noLinger = !linger + return nil +} + +func (pq *packetQueue) write(dt *deadlineTimer, b []byte, from sockaddr) (n int, err error) { + for { + dn := len(b) + if dn > maxPacketSize { + dn = maxPacketSize + } + + dn, err = pq.send(dt, b[:dn], from, true) + n += dn + if err != nil { + return n, err + } + + b = b[dn:] + if len(b) == 0 { + return n, nil + } + } +} + +func (pq *packetQueue) send(dt *deadlineTimer, b []byte, from sockaddr, block bool) (n int, err error) { + if from == nil { + return 0, os.NewSyscallError("send", syscall.EINVAL) + } + if len(b) > maxPacketSize { + return 0, os.NewSyscallError("send", syscall.EMSGSIZE) + } + + var q packetQueueState + var full chan packetQueueState + if !block { + full = pq.full + } + + // Before we check dt.expired, yield to other goroutines. + // This may help to prevent starvation of the goroutine that runs the + // deadlineTimer's time.After callback. + // + // TODO(#65178): Remove this when the runtime scheduler no longer starves + // runnable goroutines. + runtime.Gosched() + + select { + case <-dt.expired: + return 0, os.ErrDeadlineExceeded + + case q = <-full: + pq.put(q) + return 0, os.NewSyscallError("send", syscall.ENOBUFS) + + case q = <-pq.empty: + case q = <-pq.ready: + } + defer func() { pq.put(q) }() + + // Don't allow a packet to be sent if the deadline has expired, + // even if the select above chose a different branch. + select { + case <-dt.expired: + return 0, os.ErrDeadlineExceeded + default: + } + if q.writeClosed { + return 0, ErrClosed + } else if q.readClosed { + return 0, os.NewSyscallError("send", syscall.ECONNRESET) + } + + p := packetPool.Get().(*packet) + p.buf = append(p.buf[:0], b...) + p.from = from + + if q.head == nil { + q.head = p + } else { + q.tail.next = p + } + q.tail = p + q.nBytes += len(p.buf) + + return len(b), nil +} + +func (pq *packetQueue) recvfrom(dt *deadlineTimer, b []byte, wholePacket bool, checkFrom func(sockaddr) error) (n int, from sockaddr, err error) { + var q packetQueueState + var empty chan packetQueueState + if len(b) == 0 { + // For consistency with the implementation on Unix platforms, + // allow a zero-length Read to proceed if the queue is empty. + // (Without this, TestZeroByteRead deadlocks.) + empty = pq.empty + } + + // Before we check dt.expired, yield to other goroutines. + // This may help to prevent starvation of the goroutine that runs the + // deadlineTimer's time.After callback. 
+ // + // TODO(#65178): Remove this when the runtime scheduler no longer starves + // runnable goroutines. + runtime.Gosched() + + select { + case <-dt.expired: + return 0, nil, os.ErrDeadlineExceeded + case q = <-empty: + case q = <-pq.ready: + case q = <-pq.full: + } + defer func() { pq.put(q) }() + + p := q.head + if p == nil { + switch { + case q.readClosed: + return 0, nil, ErrClosed + case q.writeClosed: + if q.noLinger { + return 0, nil, os.NewSyscallError("recvfrom", syscall.ECONNRESET) + } + return 0, nil, io.EOF + case len(b) == 0: + return 0, nil, nil + default: + // This should be impossible: pq.full should only contain a non-empty list, + // pq.ready should either contain a non-empty list or indicate that the + // connection is closed, and we should only receive from pq.empty if + // len(b) == 0. + panic("net: nil packet list from non-closed packetQueue") + } + } + + select { + case <-dt.expired: + return 0, nil, os.ErrDeadlineExceeded + default: + } + + if checkFrom != nil { + if err := checkFrom(p.from); err != nil { + return 0, nil, err + } + } + + n = copy(b, p.buf[p.bufOffset:]) + from = p.from + if wholePacket || p.bufOffset+n == len(p.buf) { + q.head = p.next + q.nBytes -= len(p.buf) + p.clear() + packetPool.Put(p) + } else { + p.bufOffset += n + } + + return n, from, nil +} + +// setReadBuffer sets a soft limit on the number of bytes available to read +// from the pipe. +func (pq *packetQueue) setReadBuffer(bytes int) error { + if bytes <= 0 { + return os.NewSyscallError("setReadBuffer", syscall.EINVAL) + } + q := pq.get() // Use the queue as a lock. + q.readBufferBytes = bytes + pq.put(q) + return nil +} + +type deadlineTimer struct { + timer chan *time.Timer + expired chan struct{} +} + +func newDeadlineTimer(deadline time.Time) *deadlineTimer { + dt := &deadlineTimer{ + timer: make(chan *time.Timer, 1), + expired: make(chan struct{}), + } + dt.timer <- nil + dt.Reset(deadline) + return dt +} + +// Reset attempts to reset the timer. +// If the timer has already expired, Reset returns false. +func (dt *deadlineTimer) Reset(deadline time.Time) bool { + timer := <-dt.timer + defer func() { dt.timer <- timer }() + + if deadline.Equal(noDeadline) { + if timer != nil && timer.Stop() { + timer = nil + } + return timer == nil + } + + d := time.Until(deadline) + if d < 0 { + // Ensure that a deadline in the past takes effect immediately. + defer func() { <-dt.expired }() + } + + if timer == nil { + timer = time.AfterFunc(d, func() { close(dt.expired) }) + return true + } + if !timer.Stop() { + return false + } + timer.Reset(d) + return true +} + +func sysSocket(family, sotype, proto int) (int, error) { + return 0, os.NewSyscallError("sysSocket", syscall.ENOSYS) +} + +func fakeListen(fd *netFD, laddr sockaddr) (err error) { + wrapErr := func(err error) error { + if errno, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("listen", errno) + } + if errors.Is(err, syscall.EADDRINUSE) { + return err + } + if laddr != nil { + if _, ok := err.(*AddrError); !ok { + err = &AddrError{ + Err: err.Error(), + Addr: laddr.String(), + } + } + } + return err + } + + ffd := newFakeNetFD(fd) + defer func() { + if fd.fakeNetFD != ffd { + // Failed to register listener; clean up. 
+ ffd.Close() + } + }() + + if err := ffd.assignFakeAddr(matchIPFamily(fd.family, laddr)); err != nil { + return wrapErr(err) + } + + ffd.fakeAddr = fakeAddr(fd.laddr.(sockaddr)) + switch fd.sotype { + case syscall.SOCK_STREAM, syscall.SOCK_SEQPACKET: + ffd.incoming = make(chan []*netFD, 1) + ffd.incomingFull = make(chan []*netFD, 1) + ffd.incomingEmpty = make(chan bool, 1) + ffd.incomingEmpty <- true + case syscall.SOCK_DGRAM: + ffd.queue = newPacketQueue(defaultBuffer) + default: + return wrapErr(syscall.EINVAL) + } + + fd.fakeNetFD = ffd + if _, dup := sockets.LoadOrStore(ffd.fakeAddr, fd); dup { + fd.fakeNetFD = nil + return wrapErr(syscall.EADDRINUSE) + } + + return nil +} + +func fakeConnect(ctx context.Context, fd *netFD, laddr, raddr sockaddr) error { + wrapErr := func(err error) error { + if errno, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("connect", errno) + } + if errors.Is(err, syscall.EADDRINUSE) { + return err + } + if terr, ok := err.(interface{ Timeout() bool }); !ok || !terr.Timeout() { + // For consistency with the net implementation on other platforms, + // if we don't need to preserve the Timeout-ness of err we should + // wrap it in an AddrError. (Unfortunately we can't wrap errors + // that convey structured information, because AddrError reduces + // the wrapped Err to a flat string.) + if _, ok := err.(*AddrError); !ok { + err = &AddrError{ + Err: err.Error(), + Addr: raddr.String(), + } + } + } + return err + } + + if fd.isConnected { + return wrapErr(syscall.EISCONN) + } + if ctx.Err() != nil { + return wrapErr(syscall.ETIMEDOUT) + } + + fd.raddr = matchIPFamily(fd.family, raddr) + if err := validateResolvedAddr(fd.net, fd.family, fd.raddr.(sockaddr)); err != nil { + return wrapErr(err) + } + + if err := fd.fakeNetFD.assignFakeAddr(laddr); err != nil { + return wrapErr(err) + } + fd.fakeNetFD.queue = newPacketQueue(defaultBuffer) + + switch fd.sotype { + case syscall.SOCK_DGRAM: + if ua, ok := fd.laddr.(*UnixAddr); !ok || ua.Name != "" { + fd.fakeNetFD.fakeAddr = fakeAddr(fd.laddr.(sockaddr)) + if _, dup := sockets.LoadOrStore(fd.fakeNetFD.fakeAddr, fd); dup { + return wrapErr(syscall.EADDRINUSE) + } + } + fd.isConnected = true + return nil + + case syscall.SOCK_STREAM, syscall.SOCK_SEQPACKET: + default: + return wrapErr(syscall.EINVAL) + } + + fa := fakeAddr(raddr) + lni, ok := sockets.Load(fa) + if !ok { + return wrapErr(syscall.ECONNREFUSED) + } + ln := lni.(*netFD) + if ln.sotype != fd.sotype { + return wrapErr(syscall.EPROTOTYPE) + } + if ln.incoming == nil { + return wrapErr(syscall.ECONNREFUSED) + } + + peer := &netFD{ + family: ln.family, + sotype: ln.sotype, + net: ln.net, + laddr: ln.laddr, + raddr: fd.laddr, + isConnected: true, + } + peer.fakeNetFD = newFakeNetFD(fd) + peer.fakeNetFD.queue = newPacketQueue(defaultBuffer) + defer func() { + if fd.peer != peer { + // Failed to connect; clean up. 
+ peer.Close() + } + }() + + var incoming []*netFD + select { + case <-ctx.Done(): + return wrapErr(syscall.ETIMEDOUT) + case ok = <-ln.incomingEmpty: + case incoming, ok = <-ln.incoming: + } + if !ok { + return wrapErr(syscall.ECONNREFUSED) + } + + fd.isConnected = true + fd.peer = peer + peer.peer = fd + + incoming = append(incoming, peer) + if len(incoming) >= listenerBacklog() { + ln.incomingFull <- incoming + } else { + ln.incoming <- incoming + } + return nil +} + +func (ffd *fakeNetFD) assignFakeAddr(addr sockaddr) error { + validate := func(sa sockaddr) error { + if err := validateResolvedAddr(ffd.fd.net, ffd.fd.family, sa); err != nil { + return err + } + ffd.fd.laddr = sa + return nil + } + + assignIP := func(addr sockaddr) error { + var ( + ip IP + port int + zone string + ) + switch addr := addr.(type) { + case *TCPAddr: + if addr != nil { + ip = addr.IP + port = addr.Port + zone = addr.Zone + } + case *UDPAddr: + if addr != nil { + ip = addr.IP + port = addr.Port + zone = addr.Zone + } + default: + return validate(addr) + } + + if ip == nil { + ip = IPv4(127, 0, 0, 1) + } + switch ffd.fd.family { + case syscall.AF_INET: + if ip4 := ip.To4(); ip4 != nil { + ip = ip4 + } + case syscall.AF_INET6: + if ip16 := ip.To16(); ip16 != nil { + ip = ip16 + } + } + if ip == nil { + return syscall.EINVAL + } + + if port == 0 { + var prevPort int32 + portWrapped := false + nextPort := func() (int, bool) { + for { + port := nextPortCounter.Add(1) + if port <= 0 || port >= 1<<16 { + // nextPortCounter ran off the end of the port space. + // Bump it back into range. + for { + if nextPortCounter.CompareAndSwap(port, 0) { + break + } + if port = nextPortCounter.Load(); port >= 0 && port < 1<<16 { + break + } + } + if portWrapped { + // This is the second wraparound, so we've scanned the whole port space + // at least once already and it's time to give up. + return 0, false + } + portWrapped = true + prevPort = 0 + continue + } + + if port <= prevPort { + // nextPortCounter has wrapped around since the last time we read it. + if portWrapped { + // This is the second wraparound, so we've scanned the whole port space + // at least once already and it's time to give up. 
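+ // (Concretely: nextPortCounter hands out 1, 2, ..., 65535 and is swapped
+ // back to 0 at the boundary, so observing port <= prevPort means some
+ // goroutine wrapped the counter since our last read. A second observed
+ // wrap without finding a free slot in fakePorts means every port was
+ // probed at least once.)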
+ return 0, false + } else { + portWrapped = true + } + } + + prevPort = port + return int(port), true + } + } + + for { + var ok bool + port, ok = nextPort() + if !ok { + ffd.assignedPort = 0 + return syscall.EADDRINUSE + } + + ffd.assignedPort = int(port) + if _, dup := fakePorts.LoadOrStore(ffd.assignedPort, ffd.fd); !dup { + break + } + } + } + + switch addr.(type) { + case *TCPAddr: + return validate(&TCPAddr{IP: ip, Port: port, Zone: zone}) + case *UDPAddr: + return validate(&UDPAddr{IP: ip, Port: port, Zone: zone}) + default: + panic("unreachable") + } + } + + switch ffd.fd.net { + case "tcp", "tcp4", "tcp6": + if addr == nil { + return assignIP(new(TCPAddr)) + } + return assignIP(addr) + + case "udp", "udp4", "udp6": + if addr == nil { + return assignIP(new(UDPAddr)) + } + return assignIP(addr) + + case "unix", "unixgram", "unixpacket": + uaddr, ok := addr.(*UnixAddr) + if !ok && addr != nil { + return &AddrError{ + Err: "non-Unix address for " + ffd.fd.net + " network", + Addr: addr.String(), + } + } + if uaddr == nil { + return validate(&UnixAddr{Net: ffd.fd.net}) + } + return validate(&UnixAddr{Net: ffd.fd.net, Name: uaddr.Name}) + + default: + return &AddrError{ + Err: syscall.EAFNOSUPPORT.Error(), + Addr: addr.String(), + } + } +} + +func (ffd *fakeNetFD) readFrom(p []byte) (n int, sa syscall.Sockaddr, err error) { + if ffd.queue == nil { + return 0, nil, os.NewSyscallError("readFrom", syscall.EINVAL) + } + + n, from, err := ffd.queue.recvfrom(ffd.readDeadline.Load(), p, true, nil) + + if from != nil { + // Convert the net.sockaddr to a syscall.Sockaddr type. + var saErr error + sa, saErr = from.sockaddr(ffd.fd.family) + if err == nil { + err = saErr + } + } + + return n, sa, err +} + +func (ffd *fakeNetFD) readFromInet4(p []byte, sa *syscall.SockaddrInet4) (n int, err error) { + n, _, err = ffd.queue.recvfrom(ffd.readDeadline.Load(), p, true, func(from sockaddr) error { + fromSA, err := from.sockaddr(syscall.AF_INET) + if err != nil { + return err + } + if fromSA == nil { + return os.NewSyscallError("readFromInet4", syscall.EINVAL) + } + *sa = *(fromSA.(*syscall.SockaddrInet4)) + return nil + }) + return n, err +} + +func (ffd *fakeNetFD) readFromInet6(p []byte, sa *syscall.SockaddrInet6) (n int, err error) { + n, _, err = ffd.queue.recvfrom(ffd.readDeadline.Load(), p, true, func(from sockaddr) error { + fromSA, err := from.sockaddr(syscall.AF_INET6) + if err != nil { + return err + } + if fromSA == nil { + return os.NewSyscallError("readFromInet6", syscall.EINVAL) + } + *sa = *(fromSA.(*syscall.SockaddrInet6)) + return nil + }) + return n, err +} + +func (ffd *fakeNetFD) readMsg(p []byte, oob []byte, flags int) (n, oobn, retflags int, sa syscall.Sockaddr, err error) { + if flags != 0 { + return 0, 0, 0, nil, os.NewSyscallError("readMsg", syscall.ENOTSUP) + } + n, sa, err = ffd.readFrom(p) + return n, 0, 0, sa, err +} + +func (ffd *fakeNetFD) readMsgInet4(p []byte, oob []byte, flags int, sa *syscall.SockaddrInet4) (n, oobn, retflags int, err error) { + if flags != 0 { + return 0, 0, 0, os.NewSyscallError("readMsgInet4", syscall.ENOTSUP) + } + n, err = ffd.readFromInet4(p, sa) + return n, 0, 0, err +} + +func (ffd *fakeNetFD) readMsgInet6(p []byte, oob []byte, flags int, sa *syscall.SockaddrInet6) (n, oobn, retflags int, err error) { + if flags != 0 { + return 0, 0, 0, os.NewSyscallError("readMsgInet6", syscall.ENOTSUP) + } + n, err = ffd.readFromInet6(p, sa) + return n, 0, 0, err +} + +func (ffd *fakeNetFD) writeMsg(p []byte, oob []byte, sa syscall.Sockaddr) (n int, oobn int, 
err error) { + if len(oob) > 0 { + return 0, 0, os.NewSyscallError("writeMsg", syscall.ENOTSUP) + } + n, err = ffd.writeTo(p, sa) + return n, 0, err +} + +func (ffd *fakeNetFD) writeMsgInet4(p []byte, oob []byte, sa *syscall.SockaddrInet4) (n int, oobn int, err error) { + return ffd.writeMsg(p, oob, sa) +} + +func (ffd *fakeNetFD) writeMsgInet6(p []byte, oob []byte, sa *syscall.SockaddrInet6) (n int, oobn int, err error) { + return ffd.writeMsg(p, oob, sa) +} + +func (ffd *fakeNetFD) writeTo(p []byte, sa syscall.Sockaddr) (n int, err error) { + raddr := ffd.fd.raddr + if sa != nil { + if ffd.fd.isConnected { + return 0, os.NewSyscallError("writeTo", syscall.EISCONN) + } + raddr = ffd.fd.addrFunc()(sa) + } + if raddr == nil { + return 0, os.NewSyscallError("writeTo", syscall.EINVAL) + } + + peeri, _ := sockets.Load(fakeAddr(raddr.(sockaddr))) + if peeri == nil { + if len(ffd.fd.net) >= 3 && ffd.fd.net[:3] == "udp" { + return len(p), nil + } + return 0, os.NewSyscallError("writeTo", syscall.ECONNRESET) + } + peer := peeri.(*netFD) + if peer.queue == nil { + if len(ffd.fd.net) >= 3 && ffd.fd.net[:3] == "udp" { + return len(p), nil + } + return 0, os.NewSyscallError("writeTo", syscall.ECONNRESET) + } + + block := true + if len(ffd.fd.net) >= 3 && ffd.fd.net[:3] == "udp" { + block = false + } + return peer.queue.send(ffd.writeDeadline.Load(), p, ffd.fd.laddr.(sockaddr), block) +} + +func (ffd *fakeNetFD) writeToInet4(p []byte, sa *syscall.SockaddrInet4) (n int, err error) { + return ffd.writeTo(p, sa) +} + +func (ffd *fakeNetFD) writeToInet6(p []byte, sa *syscall.SockaddrInet6) (n int, err error) { + return ffd.writeTo(p, sa) +} + +func (ffd *fakeNetFD) dup() (f *os.File, err error) { + return nil, os.NewSyscallError("dup", syscall.ENOSYS) +} + +func (ffd *fakeNetFD) setReadBuffer(bytes int) error { + if ffd.queue == nil { + return os.NewSyscallError("setReadBuffer", syscall.EINVAL) + } + ffd.queue.setReadBuffer(bytes) + return nil +} + +func (ffd *fakeNetFD) setWriteBuffer(bytes int) error { + return os.NewSyscallError("setWriteBuffer", syscall.ENOTSUP) +} + +func (ffd *fakeNetFD) setLinger(sec int) error { + if sec < 0 || ffd.peer == nil { + return os.NewSyscallError("setLinger", syscall.EINVAL) + } + ffd.peer.queue.setLinger(sec > 0) + return nil +} diff --git a/platform/dbops/binaries/go/go/src/net/net_fake_test.go b/platform/dbops/binaries/go/go/src/net/net_fake_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4542228fbc5827f458dac13b033e29111467ddf3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/net_fake_test.go @@ -0,0 +1,107 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build js || wasip1 + +package net + +// GOOS=js and GOOS=wasip1 do not have typical socket networking capabilities +// found on other platforms. To help run test suites of the stdlib packages, +// an in-memory "fake network" facility is implemented. +// +// The tests in this file are intended to validate the behavior of the fake
// network stack on these platforms.
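packetQueue above serializes access to its state without a mutex: a single packetQueueState value is parked on one of three 1-buffered channels (empty, ready, full), receiving it acts as acquiring the lock, and put re-parks it on whichever channel matches its new contents, so blocked senders and receivers wake exactly when the state they need exists. A minimal standalone sketch of the same pattern, with two parking channels instead of three and illustrative names:

```go
package main

import "fmt"

type state struct{ items []int }

type queue struct {
	empty chan state // parks the state while items is empty
	ready chan state // parks the state while items is non-empty
}

func newQueue() *queue {
	q := &queue{empty: make(chan state, 1), ready: make(chan state, 1)}
	q.empty <- state{} // the state starts out parked on the "empty" channel
	return q
}

// get acquires the state from whichever channel currently holds it,
// which doubles as taking the lock.
func (q *queue) get() state {
	select {
	case s := <-q.empty:
		return s
	case s := <-q.ready:
		return s
	}
}

// put releases the state onto the channel matching its contents, so a
// consumer blocked on <-q.ready is woken exactly when data exists.
func (q *queue) put(s state) {
	if len(s.items) == 0 {
		q.empty <- s
		return
	}
	q.ready <- s
}

func main() {
	q := newQueue()

	s := q.get()
	s.items = append(s.items, 42)
	q.put(s)

	s = <-q.ready // waits for data with no mutex or condition variable
	fmt.Println(s.items)
	q.put(s)
}
```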
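deadlineTimer above reduces read and write deadlines to something a select statement can race against: the live *time.Timer is itself parked in a 1-buffered channel (the same channel-as-lock trick), and expiry is announced by closing expired, which releases every current and future receiver at once. The core move, boiled down to a runnable fragment outside the package's types:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	expired := make(chan struct{})
	// Arm the deadline. Closing the channel makes every pending and future
	// receive succeed, so any number of blocked readers and writers can
	// observe a single deadline.
	time.AfterFunc(50*time.Millisecond, func() { close(expired) })

	data := make(chan []byte) // nothing is ever sent, so the deadline wins

	select {
	case b := <-data:
		fmt.Println("read", len(b), "bytes")
	case <-expired:
		fmt.Println("deadline exceeded") // printed after roughly 50ms
	}
}
```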
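matchIPFamily and assignFakeAddr above rely on the standard net.IP contract: To4 returns a 4-byte slice only when the address is representable in AF_INET (and nil otherwise), while To16 yields the 16-byte form for any valid address, so converting and comparing lengths is enough to detect a family mismatch. A quick demonstration of that contract:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	v4 := net.ParseIP("192.0.2.1") // stored internally in 16-byte form
	v6 := net.ParseIP("2001:db8::1")

	fmt.Println(len(v4.To4()), len(v4.To16())) // 4 16: fits either family
	fmt.Println(v6.To4() == nil)               // true: cannot be coerced to AF_INET
	fmt.Println(len(v6.To16()))                // 16
}
```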
+ +import ( + "errors" + "syscall" + "testing" +) + +func TestFakePortExhaustion(t *testing.T) { + if testing.Short() { + t.Skipf("skipping test that opens 1<<16 connections") + } + + ln := newLocalListener(t, "tcp") + done := make(chan struct{}) + go func() { + var accepted []Conn + defer func() { + for _, c := range accepted { + c.Close() + } + close(done) + }() + + for { + c, err := ln.Accept() + if err != nil { + return + } + accepted = append(accepted, c) + } + }() + + var dialed []Conn + defer func() { + ln.Close() + for _, c := range dialed { + c.Close() + } + <-done + }() + + // Since this test is not running in parallel, we expect to be able to open + // all 65535 valid (fake) ports. The listener is already using one, so + // we should be able to Dial the remaining 65534. + for len(dialed) < (1<<16)-2 { + c, err := Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Fatalf("unexpected error from Dial with %v connections: %v", len(dialed), err) + } + dialed = append(dialed, c) + if testing.Verbose() && len(dialed)%(1<<12) == 0 { + t.Logf("dialed %d connections", len(dialed)) + } + } + t.Logf("dialed %d connections", len(dialed)) + + // Now that all of the ports are in use, dialing another should fail due + // to port exhaustion, which (for POSIX-like socket APIs) should return + // an EADDRINUSE error. + c, err := Dial(ln.Addr().Network(), ln.Addr().String()) + if err == nil { + c.Close() + } + if errors.Is(err, syscall.EADDRINUSE) { + t.Logf("Dial returned expected error: %v", err) + } else { + t.Errorf("unexpected error from Dial: %v\nwant: %v", err, syscall.EADDRINUSE) + } + + // Opening a Listener should fail at this point too. + ln2, err := Listen("tcp", "localhost:0") + if err == nil { + ln2.Close() + } + if errors.Is(err, syscall.EADDRINUSE) { + t.Logf("Listen returned expected error: %v", err) + } else { + t.Errorf("unexpected error from Listen: %v\nwant: %v", err, syscall.EADDRINUSE) + } + + // When we close an arbitrary connection, we should be able to reuse its port + // even if the server hasn't yet seen the ECONNRESET for the connection. + dialed[0].Close() + dialed = dialed[1:] + t.Logf("closed one connection") + c, err = Dial(ln.Addr().Network(), ln.Addr().String()) + if err == nil { + c.Close() + t.Logf("Dial succeeded") + } else { + t.Errorf("unexpected error from Dial: %v", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/net_test.go b/platform/dbops/binaries/go/go/src/net/net_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b448a79cce79d8926e80b97a391197185fe1b61d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/net_test.go @@ -0,0 +1,601 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
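The loop bound in TestFakePortExhaustion above falls out of the fake allocator's range: ports 1 through 65535 are valid, the listener pins one, and each successful Dial pins exactly one more, so (1<<16)-2 dials must succeed before EADDRINUSE. As a check:

```go
package main

import "fmt"

func main() {
	const maxFakePort = 1<<16 - 1      // 65535: the allocator hands out ports 1..65535
	const dialable = maxFakePort - 1   // one port is already held by the listener
	fmt.Println(dialable == (1<<16)-2) // true: the bound used by the test's dial loop
}
```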
+ +package net + +import ( + "errors" + "fmt" + "io" + "net/internal/socktest" + "os" + "runtime" + "testing" + "time" +) + +func TestCloseRead(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Parallel() + + for _, network := range []string{"tcp", "unix", "unixpacket"} { + network := network + t.Run(network, func(t *testing.T) { + if !testableNetwork(network) { + t.Skipf("network %s is not testable on the current platform", network) + } + t.Parallel() + + ln := newLocalListener(t, network) + switch network { + case "unix", "unixpacket": + defer os.Remove(ln.Addr().String()) + } + defer ln.Close() + + c, err := Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + switch network { + case "unix", "unixpacket": + defer os.Remove(c.LocalAddr().String()) + } + defer c.Close() + + switch c := c.(type) { + case *TCPConn: + err = c.CloseRead() + case *UnixConn: + err = c.CloseRead() + } + if err != nil { + if perr := parseCloseError(err, true); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + var b [1]byte + n, err := c.Read(b[:]) + if n != 0 || err == nil { + t.Fatalf("got (%d, %v); want (0, error)", n, err) + } + }) + } +} + +func TestCloseWrite(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + t.Parallel() + deadline, _ := t.Deadline() + if !deadline.IsZero() { + // Leave 10% headroom on the deadline to report errors and clean up. + deadline = deadline.Add(-time.Until(deadline) / 10) + } + + for _, network := range []string{"tcp", "unix", "unixpacket"} { + network := network + t.Run(network, func(t *testing.T) { + if !testableNetwork(network) { + t.Skipf("network %s is not testable on the current platform", network) + } + t.Parallel() + + handler := func(ls *localServer, ln Listener) { + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + + // Workaround for https://go.dev/issue/49352. + // On arm64 macOS (current as of macOS 12.4), + // reading from a socket at the same time as the client + // is closing it occasionally hangs for 60 seconds before + // returning ECONNRESET. Sleep for a bit to give the + // socket time to close before trying to read from it. 
+ if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" { + time.Sleep(10 * time.Millisecond) + } + + if !deadline.IsZero() { + c.SetDeadline(deadline) + } + defer c.Close() + + var b [1]byte + n, err := c.Read(b[:]) + if n != 0 || err != io.EOF { + t.Errorf("got (%d, %v); want (0, io.EOF)", n, err) + return + } + switch c := c.(type) { + case *TCPConn: + err = c.CloseWrite() + case *UnixConn: + err = c.CloseWrite() + } + if err != nil { + if perr := parseCloseError(err, true); perr != nil { + t.Error(perr) + } + t.Error(err) + return + } + n, err = c.Write(b[:]) + if err == nil { + t.Errorf("got (%d, %v); want (any, error)", n, err) + return + } + } + + ls := newLocalServer(t, network) + defer ls.teardown() + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + if !deadline.IsZero() { + c.SetDeadline(deadline) + } + switch network { + case "unix", "unixpacket": + defer os.Remove(c.LocalAddr().String()) + } + defer c.Close() + + switch c := c.(type) { + case *TCPConn: + err = c.CloseWrite() + case *UnixConn: + err = c.CloseWrite() + } + if err != nil { + if perr := parseCloseError(err, true); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + var b [1]byte + n, err := c.Read(b[:]) + if n != 0 || err != io.EOF { + t.Fatalf("got (%d, %v); want (0, io.EOF)", n, err) + } + n, err = c.Write(b[:]) + if err == nil { + t.Fatalf("got (%d, %v); want (any, error)", n, err) + } + }) + } +} + +func TestConnClose(t *testing.T) { + t.Parallel() + for _, network := range []string{"tcp", "unix", "unixpacket"} { + network := network + t.Run(network, func(t *testing.T) { + if !testableNetwork(network) { + t.Skipf("network %s is not testable on the current platform", network) + } + t.Parallel() + + ln := newLocalListener(t, network) + switch network { + case "unix", "unixpacket": + defer os.Remove(ln.Addr().String()) + } + defer ln.Close() + + c, err := Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + switch network { + case "unix", "unixpacket": + defer os.Remove(c.LocalAddr().String()) + } + defer c.Close() + + if err := c.Close(); err != nil { + if perr := parseCloseError(err, false); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + var b [1]byte + n, err := c.Read(b[:]) + if n != 0 || err == nil { + t.Fatalf("got (%d, %v); want (0, error)", n, err) + } + }) + } +} + +func TestListenerClose(t *testing.T) { + t.Parallel() + for _, network := range []string{"tcp", "unix", "unixpacket"} { + network := network + t.Run(network, func(t *testing.T) { + if !testableNetwork(network) { + t.Skipf("network %s is not testable on the current platform", network) + } + t.Parallel() + + ln := newLocalListener(t, network) + switch network { + case "unix", "unixpacket": + defer os.Remove(ln.Addr().String()) + } + + if err := ln.Close(); err != nil { + if perr := parseCloseError(err, false); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + c, err := ln.Accept() + if err == nil { + c.Close() + t.Fatal("should fail") + } + + // Note: we cannot ensure that a subsequent Dial does not succeed, because + // we do not in general have any guarantee that ln.Addr is not immediately + // reused. (TCP sockets enter a TIME_WAIT state when closed, but that only + // applies to existing connections for the port — it does not prevent the + // port itself from being used for entirely new connections in the + // meantime.) 
+ }) + } +} + +func TestPacketConnClose(t *testing.T) { + t.Parallel() + for _, network := range []string{"udp", "unixgram"} { + network := network + t.Run(network, func(t *testing.T) { + if !testableNetwork(network) { + t.Skipf("network %s is not testable on the current platform", network) + } + t.Parallel() + + c := newLocalPacketListener(t, network) + switch network { + case "unixgram": + defer os.Remove(c.LocalAddr().String()) + } + defer c.Close() + + if err := c.Close(); err != nil { + if perr := parseCloseError(err, false); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + var b [1]byte + n, _, err := c.ReadFrom(b[:]) + if n != 0 || err == nil { + t.Fatalf("got (%d, %v); want (0, error)", n, err) + } + }) + } +} + +func TestListenCloseListen(t *testing.T) { + const maxTries = 10 + for tries := 0; tries < maxTries; tries++ { + ln := newLocalListener(t, "tcp") + addr := ln.Addr().String() + // TODO: This is racy. The selected address could be reused in between this + // Close and the subsequent Listen. + if err := ln.Close(); err != nil { + if perr := parseCloseError(err, false); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + ln, err := Listen("tcp", addr) + if err == nil { + // Success. (This test didn't always make it here earlier.) + ln.Close() + return + } + t.Errorf("failed on try %d/%d: %v", tries+1, maxTries, err) + } + t.Fatalf("failed to listen/close/listen on same address after %d tries", maxTries) +} + +// See golang.org/issue/6163, golang.org/issue/6987. +func TestAcceptIgnoreAbortedConnRequest(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("%s does not have full support of socktest", runtime.GOOS) + } + + syserr := make(chan error) + go func() { + defer close(syserr) + for _, err := range abortedConnRequestErrors { + syserr <- err + } + }() + sw.Set(socktest.FilterAccept, func(so *socktest.Status) (socktest.AfterFilter, error) { + if err, ok := <-syserr; ok { + return nil, err + } + return nil, nil + }) + defer sw.Set(socktest.FilterAccept, nil) + + operr := make(chan error, 1) + handler := func(ls *localServer, ln Listener) { + defer close(operr) + c, err := ln.Accept() + if err != nil { + if perr := parseAcceptError(err); perr != nil { + operr <- perr + } + operr <- err + return + } + c.Close() + } + ls := newLocalServer(t, "tcp") + defer ls.teardown() + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + c.Close() + + for err := range operr { + t.Error(err) + } +} + +func TestZeroByteRead(t *testing.T) { + t.Parallel() + for _, network := range []string{"tcp", "unix", "unixpacket"} { + network := network + t.Run(network, func(t *testing.T) { + if !testableNetwork(network) { + t.Skipf("network %s is not testable on the current platform", network) + } + t.Parallel() + + ln := newLocalListener(t, network) + connc := make(chan Conn, 1) + defer func() { + ln.Close() + for c := range connc { + if c != nil { + c.Close() + } + } + }() + go func() { + defer close(connc) + c, err := ln.Accept() + if err != nil { + t.Error(err) + } + connc <- c // might be nil + }() + c, err := Dial(network, ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + sc := <-connc + if sc == nil { + return + } + defer sc.Close() + + if runtime.GOOS == "windows" { + // A zero byte read on Windows caused a wait for readability first. + // Rather than change that behavior, satisfy it in this test. + // See Issue 15735. 
+ go io.WriteString(sc, "a") + } + + n, err := c.Read(nil) + if n != 0 || err != nil { + t.Errorf("%s: zero byte client read = %v, %v; want 0, nil", network, n, err) + } + + if runtime.GOOS == "windows" { + // Same as comment above. + go io.WriteString(c, "a") + } + n, err = sc.Read(nil) + if n != 0 || err != nil { + t.Errorf("%s: zero byte server read = %v, %v; want 0, nil", network, n, err) + } + }) + } +} + +// withTCPConnPair sets up a TCP connection between two peers, then +// runs peer1 and peer2 concurrently. withTCPConnPair returns when +// both have completed. +func withTCPConnPair(t *testing.T, peer1, peer2 func(c *TCPConn) error) { + t.Helper() + ln := newLocalListener(t, "tcp") + defer ln.Close() + errc := make(chan error, 2) + go func() { + c1, err := ln.Accept() + if err != nil { + errc <- err + return + } + err = peer1(c1.(*TCPConn)) + c1.Close() + errc <- err + }() + go func() { + c2, err := Dial("tcp", ln.Addr().String()) + if err != nil { + errc <- err + return + } + err = peer2(c2.(*TCPConn)) + c2.Close() + errc <- err + }() + for i := 0; i < 2; i++ { + if err := <-errc; err != nil { + t.Error(err) + } + } +} + +// Tests that a blocked Read is interrupted by a concurrent SetReadDeadline +// modifying that Conn's read deadline to the past. +// See golang.org/cl/30164 which documented this. The net/http package +// depends on this. +func TestReadTimeoutUnblocksRead(t *testing.T) { + serverDone := make(chan struct{}) + server := func(cs *TCPConn) error { + defer close(serverDone) + errc := make(chan error, 1) + go func() { + defer close(errc) + go func() { + // TODO: find a better way to wait + // until we're blocked in the cs.Read + // call below. Sleep is lame. + time.Sleep(100 * time.Millisecond) + + // Interrupt the upcoming Read, unblocking it: + cs.SetReadDeadline(time.Unix(123, 0)) // time in the past + }() + var buf [1]byte + n, err := cs.Read(buf[:1]) + if n != 0 || err == nil { + errc <- fmt.Errorf("Read = %v, %v; want 0, non-nil", n, err) + } + }() + select { + case err := <-errc: + return err + case <-time.After(5 * time.Second): + buf := make([]byte, 2<<20) + buf = buf[:runtime.Stack(buf, true)] + println("Stacks at timeout:\n", string(buf)) + return errors.New("timeout waiting for Read to finish") + } + + } + // Do nothing in the client. Never write. Just wait for the + // server's half to be done. + client := func(*TCPConn) error { + <-serverDone + return nil + } + withTCPConnPair(t, client, server) +} + +// Issue 17695: verify that a blocked Read is woken up by a Close. +func TestCloseUnblocksRead(t *testing.T) { + t.Parallel() + server := func(cs *TCPConn) error { + // Give the client time to get stuck in a Read: + time.Sleep(20 * time.Millisecond) + cs.Close() + return nil + } + client := func(ss *TCPConn) error { + n, err := ss.Read([]byte{0}) + if n != 0 || err != io.EOF { + return fmt.Errorf("Read = %v, %v; want 0, EOF", n, err) + } + return nil + } + withTCPConnPair(t, client, server) +} + +// Issue 24808: verify that ECONNRESET is not temporary for read. 
+func TestNotTemporaryRead(t *testing.T) { + t.Parallel() + + ln := newLocalListener(t, "tcp") + serverDone := make(chan struct{}) + dialed := make(chan struct{}) + go func() { + defer close(serverDone) + + cs, err := ln.Accept() + if err != nil { + return + } + <-dialed + cs.(*TCPConn).SetLinger(0) + cs.Close() + }() + defer func() { + ln.Close() + <-serverDone + }() + + ss, err := Dial("tcp", ln.Addr().String()) + close(dialed) + if err != nil { + t.Fatal(err) + } + defer ss.Close() + + _, err = ss.Read([]byte{0}) + if err == nil { + t.Fatal("Read succeeded unexpectedly") + } else if err == io.EOF { + // This happens on Plan 9, but for some reason (prior to CL 385314) it was + // accepted everywhere else too. + if runtime.GOOS == "plan9" { + return + } + t.Fatal("Read unexpectedly returned io.EOF after socket was abruptly closed") + } + if ne, ok := err.(Error); !ok { + t.Errorf("Read error does not implement net.Error: %v", err) + } else if ne.Temporary() { + t.Errorf("Read error is unexpectedly temporary: %v", err) + } +} + +// The various errors should implement the Error interface. +func TestErrors(t *testing.T) { + var ( + _ Error = &OpError{} + _ Error = &ParseError{} + _ Error = &AddrError{} + _ Error = UnknownNetworkError("") + _ Error = InvalidAddrError("") + _ Error = &timeoutError{} + _ Error = &DNSConfigError{} + _ Error = &DNSError{} + ) + + // ErrClosed was introduced as type error, so we can't check + // it using a declaration. + if _, ok := ErrClosed.(Error); !ok { + t.Fatal("ErrClosed does not implement Error") + } +} diff --git a/platform/dbops/binaries/go/go/src/net/net_windows_test.go b/platform/dbops/binaries/go/go/src/net/net_windows_test.go new file mode 100644 index 0000000000000000000000000000000000000000..947dda56f28abf7b45f9d8296ba1cf4286a848f9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/net_windows_test.go @@ -0,0 +1,631 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "bufio" + "bytes" + "fmt" + "internal/testenv" + "io" + "os" + "os/exec" + "regexp" + "sort" + "strings" + "syscall" + "testing" + "time" +) + +func toErrno(err error) (syscall.Errno, bool) { + operr, ok := err.(*OpError) + if !ok { + return 0, false + } + syserr, ok := operr.Err.(*os.SyscallError) + if !ok { + return 0, false + } + errno, ok := syserr.Err.(syscall.Errno) + if !ok { + return 0, false + } + return errno, true +} + +// TestAcceptIgnoreSomeErrors tests that windows TCPListener.AcceptTCP +// handles broken connections. It verifies that broken connections do +// not affect future connections. +func TestAcceptIgnoreSomeErrors(t *testing.T) { + recv := func(ln Listener, ignoreSomeReadErrors bool) (string, error) { + c, err := ln.Accept() + if err != nil { + // Display windows errno in error message. 
+ errno, ok := toErrno(err) + if !ok { + return "", err + } + return "", fmt.Errorf("%v (windows errno=%d)", err, errno) + } + defer c.Close() + + b := make([]byte, 100) + n, err := c.Read(b) + if err == nil || err == io.EOF { + return string(b[:n]), nil + } + errno, ok := toErrno(err) + if ok && ignoreSomeReadErrors && (errno == syscall.ERROR_NETNAME_DELETED || errno == syscall.WSAECONNRESET) { + return "", nil + } + return "", err + } + + send := func(addr string, data string) error { + c, err := Dial("tcp", addr) + if err != nil { + return err + } + defer c.Close() + + b := []byte(data) + n, err := c.Write(b) + if err != nil { + return err + } + if n != len(b) { + return fmt.Errorf(`Only %d chars of string "%s" sent`, n, data) + } + return nil + } + + if envaddr := os.Getenv("GOTEST_DIAL_ADDR"); envaddr != "" { + // In child process. + c, err := Dial("tcp", envaddr) + if err != nil { + t.Fatal(err) + } + fmt.Printf("sleeping\n") + time.Sleep(time.Minute) // process will be killed here + c.Close() + } + + ln, err := Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + // Start child process that connects to our listener. + cmd := exec.Command(os.Args[0], "-test.run=TestAcceptIgnoreSomeErrors") + cmd.Env = append(os.Environ(), "GOTEST_DIAL_ADDR="+ln.Addr().String()) + stdout, err := cmd.StdoutPipe() + if err != nil { + t.Fatalf("cmd.StdoutPipe failed: %v", err) + } + err = cmd.Start() + if err != nil { + t.Fatalf("cmd.Start failed: %v\n", err) + } + outReader := bufio.NewReader(stdout) + for { + s, err := outReader.ReadString('\n') + if err != nil { + t.Fatalf("reading stdout failed: %v", err) + } + if s == "sleeping\n" { + break + } + } + defer cmd.Wait() // ignore error - we know it is getting killed + + const alittle = 100 * time.Millisecond + time.Sleep(alittle) + cmd.Process.Kill() // the only way to trigger the errors + time.Sleep(alittle) + + // Send second connection data (with delay in a separate goroutine). + result := make(chan error) + go func() { + time.Sleep(alittle) + err := send(ln.Addr().String(), "abc") + if err != nil { + result <- err + } + result <- nil + }() + defer func() { + err := <-result + if err != nil { + t.Fatalf("send failed: %v", err) + } + }() + + // Receive first or second connection. + s, err := recv(ln, true) + if err != nil { + t.Fatalf("recv failed: %v", err) + } + switch s { + case "": + // First connection data is received, let's get second connection data. + case "abc": + // First connection is lost forever, but that is ok. + return + default: + t.Fatalf(`"%s" received from recv, but "" or "abc" expected`, s) + } + + // Get second connection data. 
+ s, err = recv(ln, false) + if err != nil { + t.Fatalf("recv failed: %v", err) + } + if s != "abc" { + t.Fatalf(`"%s" received from recv, but "abc" expected`, s) + } +} + +func runCmd(args ...string) ([]byte, error) { + removeUTF8BOM := func(b []byte) []byte { + if len(b) >= 3 && b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF { + return b[3:] + } + return b + } + f, err := os.CreateTemp("", "netcmd") + if err != nil { + return nil, err + } + f.Close() + defer os.Remove(f.Name()) + cmd := fmt.Sprintf(`%s | Out-File "%s" -encoding UTF8`, strings.Join(args, " "), f.Name()) + out, err := exec.Command("powershell", "-Command", cmd).CombinedOutput() + if err != nil { + if len(out) != 0 { + return nil, fmt.Errorf("%s failed: %v: %q", args[0], err, string(removeUTF8BOM(out))) + } + var err2 error + out, err2 = os.ReadFile(f.Name()) + if err2 != nil { + return nil, err2 + } + if len(out) != 0 { + return nil, fmt.Errorf("%s failed: %v: %q", args[0], err, string(removeUTF8BOM(out))) + } + return nil, fmt.Errorf("%s failed: %v", args[0], err) + } + out, err = os.ReadFile(f.Name()) + if err != nil { + return nil, err + } + return removeUTF8BOM(out), nil +} + +func checkNetsh(t *testing.T) { + if testenv.Builder() == "windows-arm64-10" { + // netsh was observed to sometimes hang on this builder. + // We have not observed failures on windows-arm64-11, so for the + // moment we are leaving the test enabled elsewhere on the theory + // that it may have been a platform bug fixed in Windows 11. + testenv.SkipFlaky(t, 52082) + } + out, err := runCmd("netsh", "help") + if err != nil { + t.Fatal(err) + } + if bytes.Contains(out, []byte("The following helper DLL cannot be loaded")) { + t.Skipf("powershell failure:\n%s", err) + } + if !bytes.Contains(out, []byte("The following commands are available:")) { + t.Skipf("powershell does not speak English:\n%s", out) + } +} + +func netshInterfaceIPShowInterface(ipver string, ifaces map[string]bool) error { + out, err := runCmd("netsh", "interface", ipver, "show", "interface", "level=verbose") + if err != nil { + return err + } + // interface information is listed like: + // + //Interface Local Area Connection Parameters + //---------------------------------------------- + //IfLuid : ethernet_6 + //IfIndex : 11 + //State : connected + //Metric : 10 + //... 
+ var name string + lines := bytes.Split(out, []byte{'\r', '\n'}) + for _, line := range lines { + if bytes.HasPrefix(line, []byte("Interface ")) && bytes.HasSuffix(line, []byte(" Parameters")) { + f := line[len("Interface "):] + f = f[:len(f)-len(" Parameters")] + name = string(f) + continue + } + var isup bool + switch string(line) { + case "State : connected": + isup = true + case "State : disconnected": + isup = false + default: + continue + } + if name != "" { + if v, ok := ifaces[name]; ok && v != isup { + return fmt.Errorf("%s:%s isup=%v: ipv4 and ipv6 report different interface state", ipver, name, isup) + } + ifaces[name] = isup + name = "" + } + } + return nil +} + +func TestInterfacesWithNetsh(t *testing.T) { + checkNetsh(t) + + toString := func(name string, isup bool) string { + if isup { + return name + ":up" + } + return name + ":down" + } + + ift, err := Interfaces() + if err != nil { + t.Fatal(err) + } + have := make([]string, 0) + for _, ifi := range ift { + have = append(have, toString(ifi.Name, ifi.Flags&FlagUp != 0)) + } + sort.Strings(have) + + ifaces := make(map[string]bool) + err = netshInterfaceIPShowInterface("ipv6", ifaces) + if err != nil { + t.Fatal(err) + } + err = netshInterfaceIPShowInterface("ipv4", ifaces) + if err != nil { + t.Fatal(err) + } + want := make([]string, 0) + for name, isup := range ifaces { + want = append(want, toString(name, isup)) + } + sort.Strings(want) + + if strings.Join(want, "/") != strings.Join(have, "/") { + t.Fatalf("unexpected interface list %q, want %q", have, want) + } +} + +func netshInterfaceIPv4ShowAddress(name string, netshOutput []byte) []string { + // Address information is listed like: + // + //Configuration for interface "Local Area Connection" + // DHCP enabled: Yes + // IP Address: 10.0.0.2 + // Subnet Prefix: 10.0.0.0/24 (mask 255.255.255.0) + // IP Address: 10.0.0.3 + // Subnet Prefix: 10.0.0.0/24 (mask 255.255.255.0) + // Default Gateway: 10.0.0.254 + // Gateway Metric: 0 + // InterfaceMetric: 10 + // + //Configuration for interface "Loopback Pseudo-Interface 1" + // DHCP enabled: No + // IP Address: 127.0.0.1 + // Subnet Prefix: 127.0.0.0/8 (mask 255.0.0.0) + // InterfaceMetric: 50 + // + addrs := make([]string, 0) + var addr, subnetprefix string + var processingOurInterface bool + lines := bytes.Split(netshOutput, []byte{'\r', '\n'}) + for _, line := range lines { + if !processingOurInterface { + if !bytes.HasPrefix(line, []byte("Configuration for interface")) { + continue + } + if !bytes.Contains(line, []byte(`"`+name+`"`)) { + continue + } + processingOurInterface = true + continue + } + if len(line) == 0 { + break + } + if bytes.Contains(line, []byte("Subnet Prefix:")) { + f := bytes.Split(line, []byte{':'}) + if len(f) == 2 { + f = bytes.Split(f[1], []byte{'('}) + if len(f) == 2 { + f = bytes.Split(f[0], []byte{'/'}) + if len(f) == 2 { + subnetprefix = string(bytes.TrimSpace(f[1])) + if addr != "" && subnetprefix != "" { + addrs = append(addrs, addr+"/"+subnetprefix) + } + } + } + } + } + addr = "" + if bytes.Contains(line, []byte("IP Address:")) { + f := bytes.Split(line, []byte{':'}) + if len(f) == 2 { + addr = string(bytes.TrimSpace(f[1])) + } + } + } + return addrs +} + +func netshInterfaceIPv6ShowAddress(name string, netshOutput []byte) []string { + // Address information is listed like: + // + //Address ::1 Parameters + //--------------------------------------------------------- + //Interface Luid : Loopback Pseudo-Interface 1 + //Scope Id : 0.0 + //Valid Lifetime : infinite + //Preferred Lifetime : 
infinite + //DAD State : Preferred + //Address Type : Other + //Skip as Source : false + // + //Address XXXX::XXXX:XXXX:XXXX:XXXX%11 Parameters + //--------------------------------------------------------- + //Interface Luid : Local Area Connection + //Scope Id : 0.11 + //Valid Lifetime : infinite + //Preferred Lifetime : infinite + //DAD State : Preferred + //Address Type : Other + //Skip as Source : false + // + + // TODO: need to test ipv6 netmask too, but netsh does not outputs it + var addr string + addrs := make([]string, 0) + lines := bytes.Split(netshOutput, []byte{'\r', '\n'}) + for _, line := range lines { + if addr != "" { + if len(line) == 0 { + addr = "" + continue + } + if string(line) != "Interface Luid : "+name { + continue + } + addrs = append(addrs, addr) + addr = "" + continue + } + if !bytes.HasPrefix(line, []byte("Address")) { + continue + } + if !bytes.HasSuffix(line, []byte("Parameters")) { + continue + } + f := bytes.Split(line, []byte{' '}) + if len(f) != 3 { + continue + } + // remove scope ID if present + f = bytes.Split(f[1], []byte{'%'}) + + // netsh can create IPv4-embedded IPv6 addresses, like fe80::5efe:192.168.140.1. + // Convert these to all hexadecimal fe80::5efe:c0a8:8c01 for later string comparisons. + ipv4Tail := regexp.MustCompile(`:\d+\.\d+\.\d+\.\d+$`) + if ipv4Tail.Match(f[0]) { + f[0] = []byte(ParseIP(string(f[0])).String()) + } + + addr = string(bytes.ToLower(bytes.TrimSpace(f[0]))) + } + return addrs +} + +func TestInterfaceAddrsWithNetsh(t *testing.T) { + checkNetsh(t) + + outIPV4, err := runCmd("netsh", "interface", "ipv4", "show", "address") + if err != nil { + t.Fatal(err) + } + outIPV6, err := runCmd("netsh", "interface", "ipv6", "show", "address", "level=verbose") + if err != nil { + t.Fatal(err) + } + + ift, err := Interfaces() + if err != nil { + t.Fatal(err) + } + for _, ifi := range ift { + // Skip the interface if it's down. + if (ifi.Flags & FlagUp) == 0 { + continue + } + have := make([]string, 0) + addrs, err := ifi.Addrs() + if err != nil { + t.Fatal(err) + } + for _, addr := range addrs { + switch addr := addr.(type) { + case *IPNet: + if addr.IP.To4() != nil { + have = append(have, addr.String()) + } + if addr.IP.To16() != nil && addr.IP.To4() == nil { + // netsh does not output netmask for ipv6, so ignore ipv6 mask + have = append(have, addr.IP.String()) + } + case *IPAddr: + if addr.IP.To4() != nil { + have = append(have, addr.String()) + } + if addr.IP.To16() != nil && addr.IP.To4() == nil { + // netsh does not output netmask for ipv6, so ignore ipv6 mask + have = append(have, addr.IP.String()) + } + } + } + sort.Strings(have) + + want := netshInterfaceIPv4ShowAddress(ifi.Name, outIPV4) + wantIPv6 := netshInterfaceIPv6ShowAddress(ifi.Name, outIPV6) + want = append(want, wantIPv6...) + sort.Strings(want) + + if strings.Join(want, "/") != strings.Join(have, "/") { + t.Errorf("%s: unexpected addresses list %q, want %q", ifi.Name, have, want) + } + } +} + +// check that getmac exists as a powershell command, and that it +// speaks English. 
+func checkGetmac(t *testing.T) { + out, err := runCmd("getmac", "/?") + if err != nil { + if strings.Contains(err.Error(), "term 'getmac' is not recognized as the name of a cmdlet") { + t.Skipf("getmac not available") + } + t.Fatal(err) + } + if !bytes.Contains(out, []byte("network adapters on a system")) { + t.Skipf("skipping test on non-English system") + } +} + +func TestInterfaceHardwareAddrWithGetmac(t *testing.T) { + checkGetmac(t) + + ift, err := Interfaces() + if err != nil { + t.Fatal(err) + } + have := make(map[string]string) + for _, ifi := range ift { + if ifi.Flags&FlagLoopback != 0 { + // no MAC address for loopback interfaces + continue + } + have[ifi.Name] = ifi.HardwareAddr.String() + } + + out, err := runCmd("getmac", "/fo", "list", "/v") + if err != nil { + t.Fatal(err) + } + // getmac output looks like: + // + //Connection Name: Local Area Connection + //Network Adapter: Intel Gigabit Network Connection + //Physical Address: XX-XX-XX-XX-XX-XX + //Transport Name: \Device\Tcpip_{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} + // + //Connection Name: Wireless Network Connection + //Network Adapter: Wireles WLAN Card + //Physical Address: XX-XX-XX-XX-XX-XX + //Transport Name: Media disconnected + // + //Connection Name: Bluetooth Network Connection + //Network Adapter: Bluetooth Device (Personal Area Network) + //Physical Address: N/A + //Transport Name: Hardware not present + // + //Connection Name: VMware Network Adapter VMnet8 + //Network Adapter: VMware Virtual Ethernet Adapter for VMnet8 + //Physical Address: Disabled + //Transport Name: Disconnected + // + want := make(map[string]string) + group := make(map[string]string) // name / values for single adapter + getValue := func(name string) string { + value, found := group[name] + if !found { + t.Fatalf("%q has no %q line in it", group, name) + } + if value == "" { + t.Fatalf("%q has empty %q value", group, name) + } + return value + } + processGroup := func() { + if len(group) == 0 { + return + } + tname := strings.ToLower(getValue("Transport Name")) + if tname == "n/a" { + // skip these + return + } + addr := strings.ToLower(getValue("Physical Address")) + if addr == "disabled" || addr == "n/a" { + // skip these + return + } + addr = strings.ReplaceAll(addr, "-", ":") + cname := getValue("Connection Name") + want[cname] = addr + group = make(map[string]string) + } + lines := bytes.Split(out, []byte{'\r', '\n'}) + for _, line := range lines { + if len(line) == 0 { + processGroup() + continue + } + i := bytes.IndexByte(line, ':') + if i == -1 { + t.Fatalf("line %q has no : in it", line) + } + group[string(line[:i])] = string(bytes.TrimSpace(line[i+1:])) + } + processGroup() + + dups := make(map[string][]string) + for name, addr := range want { + if _, ok := dups[addr]; !ok { + dups[addr] = make([]string, 0) + } + dups[addr] = append(dups[addr], name) + } + +nextWant: + for name, wantAddr := range want { + if haveAddr, ok := have[name]; ok { + if haveAddr != wantAddr { + t.Errorf("unexpected MAC address for %q - %v, want %v", name, haveAddr, wantAddr) + } + continue + } + // We could not find the interface in getmac output by name. + // But sometimes getmac lists many interface names + // for the same MAC address. If that is the case here, + // and we can match at least one of those names, + // let's ignore the other names. 
+ if dupNames, ok := dups[wantAddr]; ok && len(dupNames) > 1 { + for _, dupName := range dupNames { + if haveAddr, ok := have[dupName]; ok && haveAddr == wantAddr { + continue nextWant + } + } + } + t.Errorf("getmac lists %q, but it could not be found among Go interfaces %v", name, have) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/netcgo_off.go b/platform/dbops/binaries/go/go/src/net/netcgo_off.go new file mode 100644 index 0000000000000000000000000000000000000000..54677dcac6cd7987c3eddd570aae8c8f18316e86 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/netcgo_off.go @@ -0,0 +1,9 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !netcgo + +package net + +const netCgoBuildTag = false diff --git a/platform/dbops/binaries/go/go/src/net/netcgo_on.go b/platform/dbops/binaries/go/go/src/net/netcgo_on.go new file mode 100644 index 0000000000000000000000000000000000000000..25d4bdca72a0449c5d8de847f2ec5b97c47d57f9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/netcgo_on.go @@ -0,0 +1,9 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build netcgo + +package net + +const netCgoBuildTag = true diff --git a/platform/dbops/binaries/go/go/src/net/netgo_netcgo.go b/platform/dbops/binaries/go/go/src/net/netgo_netcgo.go new file mode 100644 index 0000000000000000000000000000000000000000..7f3a5fd007fb43b3bdce55379cef74cd48415b2d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/netgo_netcgo.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build netgo && netcgo + +package net + +func init() { + // This will give a compile time error about the unused constant. + // The advantage of this approach is that the gc compiler + // actually prints the constant, making the problem obvious. + "Do not use both netgo and netcgo build tags." +} diff --git a/platform/dbops/binaries/go/go/src/net/netgo_off.go b/platform/dbops/binaries/go/go/src/net/netgo_off.go new file mode 100644 index 0000000000000000000000000000000000000000..e6bc2d7d069d11c9598b5e50748f34938e92247c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/netgo_off.go @@ -0,0 +1,9 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !netgo + +package net + +const netGoBuildTag = false diff --git a/platform/dbops/binaries/go/go/src/net/netgo_on.go b/platform/dbops/binaries/go/go/src/net/netgo_on.go new file mode 100644 index 0000000000000000000000000000000000000000..4f088de6e3f80bf83c289d3dcaba19fe5ec83538 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/netgo_on.go @@ -0,0 +1,9 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
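The tag shims here compile exactly one definition each of netCgoBuildTag and netGoBuildTag (netgo_on.go, whose header sits just above, supplies the true case below), and netgo_netcgo.go rejects the contradictory combination at compile time: a string literal in statement position is an "is not used" error, and since the gc compiler echoes the offending constant, the string itself becomes the diagnostic. The same trick in isolation, as a hypothetical file:

```go
//go:build tagA && tagB

package p

func init() {
	// This file refuses to compile whenever both tags are set; the build
	// error from gc quotes the string below, which is the whole point.
	"tagA and tagB are mutually exclusive build tags."
}
```

Downstream code can then branch on the resulting plain booleans instead of repeating build-tag logic in every file.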
+ +//go:build netgo + +package net + +const netGoBuildTag = true diff --git a/platform/dbops/binaries/go/go/src/net/nss.go b/platform/dbops/binaries/go/go/src/net/nss.go new file mode 100644 index 0000000000000000000000000000000000000000..092b515cc7d01ed6d68cf0cf17e1ab8e167c6ded --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/nss.go @@ -0,0 +1,249 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "errors" + "internal/bytealg" + "os" + "sync" + "time" +) + +const ( + nssConfigPath = "/etc/nsswitch.conf" +) + +var nssConfig nsswitchConfig + +type nsswitchConfig struct { + initOnce sync.Once // guards init of nsswitchConfig + + // ch is used as a semaphore that only allows one lookup at a + // time to recheck nsswitch.conf + ch chan struct{} // guards lastChecked and modTime + lastChecked time.Time // last time nsswitch.conf was checked + + mu sync.Mutex // protects nssConf + nssConf *nssConf +} + +func getSystemNSS() *nssConf { + nssConfig.tryUpdate() + nssConfig.mu.Lock() + conf := nssConfig.nssConf + nssConfig.mu.Unlock() + return conf +} + +// init initializes conf and is only called via conf.initOnce. +func (conf *nsswitchConfig) init() { + conf.nssConf = parseNSSConfFile("/etc/nsswitch.conf") + conf.lastChecked = time.Now() + conf.ch = make(chan struct{}, 1) +} + +// tryUpdate tries to update conf. +func (conf *nsswitchConfig) tryUpdate() { + conf.initOnce.Do(conf.init) + + // Ensure only one update at a time checks nsswitch.conf + if !conf.tryAcquireSema() { + return + } + defer conf.releaseSema() + + now := time.Now() + if conf.lastChecked.After(now.Add(-5 * time.Second)) { + return + } + conf.lastChecked = now + + var mtime time.Time + if fi, err := os.Stat(nssConfigPath); err == nil { + mtime = fi.ModTime() + } + if mtime.Equal(conf.nssConf.mtime) { + return + } + + nssConf := parseNSSConfFile(nssConfigPath) + conf.mu.Lock() + conf.nssConf = nssConf + conf.mu.Unlock() +} + +func (conf *nsswitchConfig) acquireSema() { + conf.ch <- struct{}{} +} + +func (conf *nsswitchConfig) tryAcquireSema() bool { + select { + case conf.ch <- struct{}{}: + return true + default: + return false + } +} + +func (conf *nsswitchConfig) releaseSema() { + <-conf.ch +} + +// nssConf represents the state of the machine's /etc/nsswitch.conf file. +type nssConf struct { + mtime time.Time // time of nsswitch.conf modification + err error // any error encountered opening or parsing the file + sources map[string][]nssSource // keyed by database (e.g. "hosts") +} + +type nssSource struct { + source string // e.g. "compat", "files", "mdns4_minimal" + criteria []nssCriterion +} + +// standardCriteria reports all specified criteria have the default +// status actions. +func (s nssSource) standardCriteria() bool { + for i, crit := range s.criteria { + if !crit.standardStatusAction(i == len(s.criteria)-1) { + return false + } + } + return true +} + +// nssCriterion is the parsed structure of one of the criteria in brackets +// after an NSS source name. +type nssCriterion struct { + negate bool // if "!" was present + status string // e.g. "success", "unavail" (lowercase) + action string // e.g. "return", "continue" (lowercase) +} + +// standardStatusAction reports whether c is equivalent to not +// specifying the criterion at all. last is whether this criteria is the +// last in the list. 
+func (c nssCriterion) standardStatusAction(last bool) bool { + if c.negate { + return false + } + var def string + switch c.status { + case "success": + def = "return" + case "notfound", "unavail", "tryagain": + def = "continue" + default: + // Unknown status + return false + } + if last && c.action == "return" { + return true + } + return c.action == def +} + +func parseNSSConfFile(file string) *nssConf { + f, err := open(file) + if err != nil { + return &nssConf{err: err} + } + defer f.close() + mtime, _, err := f.stat() + if err != nil { + return &nssConf{err: err} + } + + conf := parseNSSConf(f) + conf.mtime = mtime + return conf +} + +func parseNSSConf(f *file) *nssConf { + conf := new(nssConf) + for line, ok := f.readLine(); ok; line, ok = f.readLine() { + line = trimSpace(removeComment(line)) + if len(line) == 0 { + continue + } + colon := bytealg.IndexByteString(line, ':') + if colon == -1 { + conf.err = errors.New("no colon on line") + return conf + } + db := trimSpace(line[:colon]) + srcs := line[colon+1:] + for { + srcs = trimSpace(srcs) + if len(srcs) == 0 { + break + } + sp := bytealg.IndexByteString(srcs, ' ') + var src string + if sp == -1 { + src = srcs + srcs = "" // done + } else { + src = srcs[:sp] + srcs = trimSpace(srcs[sp+1:]) + } + var criteria []nssCriterion + // See if there's a criteria block in brackets. + if len(srcs) > 0 && srcs[0] == '[' { + bclose := bytealg.IndexByteString(srcs, ']') + if bclose == -1 { + conf.err = errors.New("unclosed criterion bracket") + return conf + } + var err error + criteria, err = parseCriteria(srcs[1:bclose]) + if err != nil { + conf.err = errors.New("invalid criteria: " + srcs[1:bclose]) + return conf + } + srcs = srcs[bclose+1:] + } + if conf.sources == nil { + conf.sources = make(map[string][]nssSource) + } + conf.sources[db] = append(conf.sources[db], nssSource{ + source: src, + criteria: criteria, + }) + } + } + return conf +} + +// parses "foo=bar !foo=bar" +func parseCriteria(x string) (c []nssCriterion, err error) { + err = foreachField(x, func(f string) error { + not := false + if len(f) > 0 && f[0] == '!' { + not = true + f = f[1:] + } + if len(f) < 3 { + return errors.New("criterion too short") + } + eq := bytealg.IndexByteString(f, '=') + if eq == -1 { + return errors.New("criterion lacks equal sign") + } + if hasUpperCase(f) { + lower := []byte(f) + lowerASCIIBytes(lower) + f = string(lower) + } + c = append(c, nssCriterion{ + negate: not, + status: f[:eq], + action: f[eq+1:], + }) + return nil + }) + return +} diff --git a/platform/dbops/binaries/go/go/src/net/nss_test.go b/platform/dbops/binaries/go/go/src/net/nss_test.go new file mode 100644 index 0000000000000000000000000000000000000000..94e6b5fc0a216ef0309c05dbb90c383889a0a5d3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/nss_test.go @@ -0,0 +1,172 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris + +package net + +import ( + "reflect" + "testing" + "time" +) + +const ubuntuTrustyAvahi = `# /etc/nsswitch.conf +# +# Example configuration of GNU Name Service Switch functionality. +# If you have the libc-doc-reference' and nfo' packages installed, try: +# nfo libc "Name Service Switch"' for information about this file. 
+ +passwd: compat +group: compat +shadow: compat + +hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4 +networks: files + +protocols: db files +services: db files +ethers: db files +rpc: db files + +netgroup: nis +` + +func TestParseNSSConf(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + in string + want *nssConf + }{ + { + name: "no_newline", + in: "foo: a b", + want: &nssConf{ + sources: map[string][]nssSource{ + "foo": {{source: "a"}, {source: "b"}}, + }, + }, + }, + { + name: "newline", + in: "foo: a b\n", + want: &nssConf{ + sources: map[string][]nssSource{ + "foo": {{source: "a"}, {source: "b"}}, + }, + }, + }, + { + name: "whitespace", + in: " foo:a b \n", + want: &nssConf{ + sources: map[string][]nssSource{ + "foo": {{source: "a"}, {source: "b"}}, + }, + }, + }, + { + name: "comment1", + in: " foo:a b#c\n", + want: &nssConf{ + sources: map[string][]nssSource{ + "foo": {{source: "a"}, {source: "b"}}, + }, + }, + }, + { + name: "comment2", + in: " foo:a b #c \n", + want: &nssConf{ + sources: map[string][]nssSource{ + "foo": {{source: "a"}, {source: "b"}}, + }, + }, + }, + { + name: "crit", + in: " foo:a b [!a=b X=Y ] c#d \n", + want: &nssConf{ + sources: map[string][]nssSource{ + "foo": { + {source: "a"}, + { + source: "b", + criteria: []nssCriterion{ + { + negate: true, + status: "a", + action: "b", + }, + { + status: "x", + action: "y", + }, + }, + }, + {source: "c"}, + }, + }, + }, + }, + + // Ubuntu Trusty w/ avahi-daemon, libavahi-* etc installed. + { + name: "ubuntu_trusty_avahi", + in: ubuntuTrustyAvahi, + want: &nssConf{ + sources: map[string][]nssSource{ + "passwd": {{source: "compat"}}, + "group": {{source: "compat"}}, + "shadow": {{source: "compat"}}, + "hosts": { + {source: "files"}, + { + source: "mdns4_minimal", + criteria: []nssCriterion{ + { + negate: false, + status: "notfound", + action: "return", + }, + }, + }, + {source: "dns"}, + {source: "mdns4"}, + }, + "networks": {{source: "files"}}, + "protocols": { + {source: "db"}, + {source: "files"}, + }, + "services": { + {source: "db"}, + {source: "files"}, + }, + "ethers": { + {source: "db"}, + {source: "files"}, + }, + "rpc": { + {source: "db"}, + {source: "files"}, + }, + "netgroup": { + {source: "nis"}, + }, + }, + }, + }, + } + + for _, tt := range tests { + gotConf := nssStr(t, tt.in) + gotConf.mtime = time.Time{} // ignore mtime in comparison + if !reflect.DeepEqual(gotConf, tt.want) { + t.Errorf("%s: mismatch\n got %#v\nwant %#v", tt.name, gotConf, tt.want) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/net/packetconn_test.go b/platform/dbops/binaries/go/go/src/net/packetconn_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e39e7de5d7f6f5bbd0bbff204bd4bbd94ed7275d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/packetconn_test.go @@ -0,0 +1,149 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements API tests across platforms and should never have a build +// constraint. 
+ +package net + +import ( + "os" + "testing" +) + +// The full stack test cases for IPConn have been moved to the +// following: +// golang.org/x/net/ipv4 +// golang.org/x/net/ipv6 +// golang.org/x/net/icmp + +func packetConnTestData(t *testing.T, network string) ([]byte, func()) { + if !testableNetwork(network) { + return nil, func() { t.Logf("skipping %s test", network) } + } + return []byte("PACKETCONN TEST"), nil +} + +func TestPacketConn(t *testing.T) { + var packetConnTests = []struct { + net string + addr1 string + addr2 string + }{ + {"udp", "127.0.0.1:0", "127.0.0.1:0"}, + {"unixgram", testUnixAddr(t), testUnixAddr(t)}, + } + + closer := func(c PacketConn, net, addr1, addr2 string) { + c.Close() + switch net { + case "unixgram": + os.Remove(addr1) + os.Remove(addr2) + } + } + + for _, tt := range packetConnTests { + wb, skipOrFatalFn := packetConnTestData(t, tt.net) + if skipOrFatalFn != nil { + skipOrFatalFn() + continue + } + + c1, err := ListenPacket(tt.net, tt.addr1) + if err != nil { + t.Fatal(err) + } + defer closer(c1, tt.net, tt.addr1, tt.addr2) + c1.LocalAddr() + + c2, err := ListenPacket(tt.net, tt.addr2) + if err != nil { + t.Fatal(err) + } + defer closer(c2, tt.net, tt.addr1, tt.addr2) + c2.LocalAddr() + rb2 := make([]byte, 128) + + if _, err := c1.WriteTo(wb, c2.LocalAddr()); err != nil { + t.Fatal(err) + } + if _, _, err := c2.ReadFrom(rb2); err != nil { + t.Fatal(err) + } + if _, err := c2.WriteTo(wb, c1.LocalAddr()); err != nil { + t.Fatal(err) + } + rb1 := make([]byte, 128) + if _, _, err := c1.ReadFrom(rb1); err != nil { + t.Fatal(err) + } + } +} + +func TestConnAndPacketConn(t *testing.T) { + var packetConnTests = []struct { + net string + addr1 string + addr2 string + }{ + {"udp", "127.0.0.1:0", "127.0.0.1:0"}, + {"unixgram", testUnixAddr(t), testUnixAddr(t)}, + } + + closer := func(c PacketConn, net, addr1, addr2 string) { + c.Close() + switch net { + case "unixgram": + os.Remove(addr1) + os.Remove(addr2) + } + } + + for _, tt := range packetConnTests { + var wb []byte + wb, skipOrFatalFn := packetConnTestData(t, tt.net) + if skipOrFatalFn != nil { + skipOrFatalFn() + continue + } + + c1, err := ListenPacket(tt.net, tt.addr1) + if err != nil { + t.Fatal(err) + } + defer closer(c1, tt.net, tt.addr1, tt.addr2) + c1.LocalAddr() + + c2, err := Dial(tt.net, c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer c2.Close() + c2.LocalAddr() + c2.RemoteAddr() + + if _, err := c2.Write(wb); err != nil { + t.Fatal(err) + } + rb1 := make([]byte, 128) + if _, _, err := c1.ReadFrom(rb1); err != nil { + t.Fatal(err) + } + var dst Addr + switch tt.net { + case "unixgram": + continue + default: + dst = c2.LocalAddr() + } + if _, err := c1.WriteTo(wb, dst); err != nil { + t.Fatal(err) + } + rb2 := make([]byte, 128) + if _, err := c2.Read(rb2); err != nil { + t.Fatal(err) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/net/parse.go b/platform/dbops/binaries/go/go/src/net/parse.go new file mode 100644 index 0000000000000000000000000000000000000000..29dffad43cf4fd12ac966c64d1efbf3f8871908a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/parse.go @@ -0,0 +1,283 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Simple file i/o and string manipulation, to avoid +// depending on strconv and bufio and strings. 
+ +package net + +import ( + "internal/bytealg" + "io" + "os" + "time" +) + +type file struct { + file *os.File + data []byte + atEOF bool +} + +func (f *file) close() { f.file.Close() } + +func (f *file) getLineFromData() (s string, ok bool) { + data := f.data + i := 0 + for i = 0; i < len(data); i++ { + if data[i] == '\n' { + s = string(data[0:i]) + ok = true + // move data + i++ + n := len(data) - i + copy(data[0:], data[i:]) + f.data = data[0:n] + return + } + } + if f.atEOF && len(f.data) > 0 { + // EOF, return all we have + s = string(data) + f.data = f.data[0:0] + ok = true + } + return +} + +func (f *file) readLine() (s string, ok bool) { + if s, ok = f.getLineFromData(); ok { + return + } + if len(f.data) < cap(f.data) { + ln := len(f.data) + n, err := io.ReadFull(f.file, f.data[ln:cap(f.data)]) + if n >= 0 { + f.data = f.data[0 : ln+n] + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + f.atEOF = true + } + } + s, ok = f.getLineFromData() + return +} + +func (f *file) stat() (mtime time.Time, size int64, err error) { + st, err := f.file.Stat() + if err != nil { + return time.Time{}, 0, err + } + return st.ModTime(), st.Size(), nil +} + +func open(name string) (*file, error) { + fd, err := os.Open(name) + if err != nil { + return nil, err + } + return &file{fd, make([]byte, 0, 64*1024), false}, nil +} + +func stat(name string) (mtime time.Time, size int64, err error) { + st, err := os.Stat(name) + if err != nil { + return time.Time{}, 0, err + } + return st.ModTime(), st.Size(), nil +} + +// Count occurrences in s of any bytes in t. +func countAnyByte(s string, t string) int { + n := 0 + for i := 0; i < len(s); i++ { + if bytealg.IndexByteString(t, s[i]) >= 0 { + n++ + } + } + return n +} + +// Split s at any bytes in t. +func splitAtBytes(s string, t string) []string { + a := make([]string, 1+countAnyByte(s, t)) + n := 0 + last := 0 + for i := 0; i < len(s); i++ { + if bytealg.IndexByteString(t, s[i]) >= 0 { + if last < i { + a[n] = s[last:i] + n++ + } + last = i + 1 + } + } + if last < len(s) { + a[n] = s[last:] + n++ + } + return a[0:n] +} + +func getFields(s string) []string { return splitAtBytes(s, " \r\t\n") } + +// Bigger than we need, not too big to worry about overflow +const big = 0xFFFFFF + +// Decimal to integer. +// Returns number, characters consumed, success. +func dtoi(s string) (n int, i int, ok bool) { + n = 0 + for i = 0; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ { + n = n*10 + int(s[i]-'0') + if n >= big { + return big, i, false + } + } + if i == 0 { + return 0, 0, false + } + return n, i, true +} + +// Hexadecimal to integer. +// Returns number, characters consumed, success. +func xtoi(s string) (n int, i int, ok bool) { + n = 0 + for i = 0; i < len(s); i++ { + if '0' <= s[i] && s[i] <= '9' { + n *= 16 + n += int(s[i] - '0') + } else if 'a' <= s[i] && s[i] <= 'f' { + n *= 16 + n += int(s[i]-'a') + 10 + } else if 'A' <= s[i] && s[i] <= 'F' { + n *= 16 + n += int(s[i]-'A') + 10 + } else { + break + } + if n >= big { + return 0, i, false + } + } + if i == 0 { + return 0, i, false + } + return n, i, true +} + +// xtoi2 converts the next two hex digits of s into a byte. +// If s is longer than 2 bytes then the third byte must be e. +// If the first two bytes of s are not hex digits or the third byte +// does not match e, false is returned. 
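+//
+// For example, xtoi2("1f:", ':') returns (0x1f, true), while
+// xtoi2("1f.", ':') returns (0, false) because the third byte does not
+// match e.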
+func xtoi2(s string, e byte) (byte, bool) { + if len(s) > 2 && s[2] != e { + return 0, false + } + n, ei, ok := xtoi(s[:2]) + return byte(n), ok && ei == 2 +} + +// hasUpperCase tells whether the given string contains at least one upper-case. +func hasUpperCase(s string) bool { + for i := range s { + if 'A' <= s[i] && s[i] <= 'Z' { + return true + } + } + return false +} + +// lowerASCIIBytes makes x ASCII lowercase in-place. +func lowerASCIIBytes(x []byte) { + for i, b := range x { + if 'A' <= b && b <= 'Z' { + x[i] += 'a' - 'A' + } + } +} + +// lowerASCII returns the ASCII lowercase version of b. +func lowerASCII(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// trimSpace returns x without any leading or trailing ASCII whitespace. +func trimSpace(x string) string { + for len(x) > 0 && isSpace(x[0]) { + x = x[1:] + } + for len(x) > 0 && isSpace(x[len(x)-1]) { + x = x[:len(x)-1] + } + return x +} + +// isSpace reports whether b is an ASCII space character. +func isSpace(b byte) bool { + return b == ' ' || b == '\t' || b == '\n' || b == '\r' +} + +// removeComment returns line, removing any '#' byte and any following +// bytes. +func removeComment(line string) string { + if i := bytealg.IndexByteString(line, '#'); i != -1 { + return line[:i] + } + return line +} + +// foreachField runs fn on each non-empty run of non-space bytes in x. +// It returns the first non-nil error returned by fn. +func foreachField(x string, fn func(field string) error) error { + x = trimSpace(x) + for len(x) > 0 { + sp := bytealg.IndexByteString(x, ' ') + if sp == -1 { + return fn(x) + } + if field := trimSpace(x[:sp]); len(field) > 0 { + if err := fn(field); err != nil { + return err + } + } + x = trimSpace(x[sp+1:]) + } + return nil +} + +// stringsHasSuffix is strings.HasSuffix. It reports whether s ends in +// suffix. +func stringsHasSuffix(s, suffix string) bool { + return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix +} + +// stringsHasSuffixFold reports whether s ends in suffix, +// ASCII-case-insensitively. +func stringsHasSuffixFold(s, suffix string) bool { + return len(s) >= len(suffix) && stringsEqualFold(s[len(s)-len(suffix):], suffix) +} + +// stringsHasPrefix is strings.HasPrefix. It reports whether s begins with prefix. +func stringsHasPrefix(s, prefix string) bool { + return len(s) >= len(prefix) && s[:len(prefix)] == prefix +} + +// stringsEqualFold is strings.EqualFold, ASCII only. It reports whether s and t +// are equal, ASCII-case-insensitively. +func stringsEqualFold(s, t string) bool { + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + if lowerASCII(s[i]) != lowerASCII(t[i]) { + return false + } + } + return true +} diff --git a/platform/dbops/binaries/go/go/src/net/parse_test.go b/platform/dbops/binaries/go/go/src/net/parse_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7c509a97f2d20dae9353ab7c7ae0521dc4f9a4cd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/parse_test.go @@ -0,0 +1,74 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "bufio" + "os" + "runtime" + "testing" +) + +func TestReadLine(t *testing.T) { + // /etc/services file does not exist on android, plan9, windows, or wasip1 + // where it would be required to be mounted from the host file system. 
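+	// The test reads /etc/services with both bufio and this package's
+	// readLine, comparing the two line by line.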
+ switch runtime.GOOS { + case "android", "plan9", "windows", "wasip1": + t.Skipf("not supported on %s", runtime.GOOS) + } + filename := "/etc/services" // a nice big file + + fd, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + defer fd.Close() + br := bufio.NewReader(fd) + + file, err := open(filename) + if file == nil { + t.Fatal(err) + } + defer file.close() + + lineno := 1 + byteno := 0 + for { + bline, berr := br.ReadString('\n') + if n := len(bline); n > 0 { + bline = bline[0 : n-1] + } + line, ok := file.readLine() + if (berr != nil) != !ok || bline != line { + t.Fatalf("%s:%d (#%d)\nbufio => %q, %v\nnet => %q, %v", filename, lineno, byteno, bline, berr, line, ok) + } + if !ok { + break + } + lineno++ + byteno += len(line) + 1 + } +} + +func TestDtoi(t *testing.T) { + for _, tt := range []struct { + in string + out int + off int + ok bool + }{ + {"", 0, 0, false}, + {"0", 0, 1, true}, + {"65536", 65536, 5, true}, + {"123456789", big, 8, false}, + {"-0", 0, 0, false}, + {"-1234", 0, 0, false}, + } { + n, i, ok := dtoi(tt.in) + if n != tt.out || i != tt.off || ok != tt.ok { + t.Errorf("got %d, %d, %v; want %d, %d, %v", n, i, ok, tt.out, tt.off, tt.ok) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/net/pipe.go b/platform/dbops/binaries/go/go/src/net/pipe.go new file mode 100644 index 0000000000000000000000000000000000000000..69955e4617c23a79a440b81e99c0e327adf7ceed --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/pipe.go @@ -0,0 +1,238 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "io" + "os" + "sync" + "time" +) + +// pipeDeadline is an abstraction for handling timeouts. +type pipeDeadline struct { + mu sync.Mutex // Guards timer and cancel + timer *time.Timer + cancel chan struct{} // Must be non-nil +} + +func makePipeDeadline() pipeDeadline { + return pipeDeadline{cancel: make(chan struct{})} +} + +// set sets the point in time when the deadline will time out. +// A timeout event is signaled by closing the channel returned by waiter. +// Once a timeout has occurred, the deadline can be refreshed by specifying a +// t value in the future. +// +// A zero value for t prevents timeout. +func (d *pipeDeadline) set(t time.Time) { + d.mu.Lock() + defer d.mu.Unlock() + + if d.timer != nil && !d.timer.Stop() { + <-d.cancel // Wait for the timer callback to finish and close cancel + } + d.timer = nil + + // Time is zero, then there is no deadline. + closed := isClosedChan(d.cancel) + if t.IsZero() { + if closed { + d.cancel = make(chan struct{}) + } + return + } + + // Time in the future, setup a timer to cancel in the future. + if dur := time.Until(t); dur > 0 { + if closed { + d.cancel = make(chan struct{}) + } + d.timer = time.AfterFunc(dur, func() { + close(d.cancel) + }) + return + } + + // Time in the past, so close immediately. + if !closed { + close(d.cancel) + } +} + +// wait returns a channel that is closed when the deadline is exceeded. 
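+//
+// Callers select on the returned channel alongside their data channels;
+// for example, read below returns os.ErrDeadlineExceeded once
+// p.readDeadline.wait() has been closed.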
+func (d *pipeDeadline) wait() chan struct{} { + d.mu.Lock() + defer d.mu.Unlock() + return d.cancel +} + +func isClosedChan(c <-chan struct{}) bool { + select { + case <-c: + return true + default: + return false + } +} + +type pipeAddr struct{} + +func (pipeAddr) Network() string { return "pipe" } +func (pipeAddr) String() string { return "pipe" } + +type pipe struct { + wrMu sync.Mutex // Serialize Write operations + + // Used by local Read to interact with remote Write. + // Successful receive on rdRx is always followed by send on rdTx. + rdRx <-chan []byte + rdTx chan<- int + + // Used by local Write to interact with remote Read. + // Successful send on wrTx is always followed by receive on wrRx. + wrTx chan<- []byte + wrRx <-chan int + + once sync.Once // Protects closing localDone + localDone chan struct{} + remoteDone <-chan struct{} + + readDeadline pipeDeadline + writeDeadline pipeDeadline +} + +// Pipe creates a synchronous, in-memory, full duplex +// network connection; both ends implement the [Conn] interface. +// Reads on one end are matched with writes on the other, +// copying data directly between the two; there is no internal +// buffering. +func Pipe() (Conn, Conn) { + cb1 := make(chan []byte) + cb2 := make(chan []byte) + cn1 := make(chan int) + cn2 := make(chan int) + done1 := make(chan struct{}) + done2 := make(chan struct{}) + + p1 := &pipe{ + rdRx: cb1, rdTx: cn1, + wrTx: cb2, wrRx: cn2, + localDone: done1, remoteDone: done2, + readDeadline: makePipeDeadline(), + writeDeadline: makePipeDeadline(), + } + p2 := &pipe{ + rdRx: cb2, rdTx: cn2, + wrTx: cb1, wrRx: cn1, + localDone: done2, remoteDone: done1, + readDeadline: makePipeDeadline(), + writeDeadline: makePipeDeadline(), + } + return p1, p2 +} + +func (*pipe) LocalAddr() Addr { return pipeAddr{} } +func (*pipe) RemoteAddr() Addr { return pipeAddr{} } + +func (p *pipe) Read(b []byte) (int, error) { + n, err := p.read(b) + if err != nil && err != io.EOF && err != io.ErrClosedPipe { + err = &OpError{Op: "read", Net: "pipe", Err: err} + } + return n, err +} + +func (p *pipe) read(b []byte) (n int, err error) { + switch { + case isClosedChan(p.localDone): + return 0, io.ErrClosedPipe + case isClosedChan(p.remoteDone): + return 0, io.EOF + case isClosedChan(p.readDeadline.wait()): + return 0, os.ErrDeadlineExceeded + } + + select { + case bw := <-p.rdRx: + nr := copy(b, bw) + p.rdTx <- nr + return nr, nil + case <-p.localDone: + return 0, io.ErrClosedPipe + case <-p.remoteDone: + return 0, io.EOF + case <-p.readDeadline.wait(): + return 0, os.ErrDeadlineExceeded + } +} + +func (p *pipe) Write(b []byte) (int, error) { + n, err := p.write(b) + if err != nil && err != io.ErrClosedPipe { + err = &OpError{Op: "write", Net: "pipe", Err: err} + } + return n, err +} + +func (p *pipe) write(b []byte) (n int, err error) { + switch { + case isClosedChan(p.localDone): + return 0, io.ErrClosedPipe + case isClosedChan(p.remoteDone): + return 0, io.ErrClosedPipe + case isClosedChan(p.writeDeadline.wait()): + return 0, os.ErrDeadlineExceeded + } + + p.wrMu.Lock() // Ensure entirety of b is written together + defer p.wrMu.Unlock() + for once := true; once || len(b) > 0; once = false { + select { + case p.wrTx <- b: + nw := <-p.wrRx + b = b[nw:] + n += nw + case <-p.localDone: + return n, io.ErrClosedPipe + case <-p.remoteDone: + return n, io.ErrClosedPipe + case <-p.writeDeadline.wait(): + return n, os.ErrDeadlineExceeded + } + } + return n, nil +} + +func (p *pipe) SetDeadline(t time.Time) error { + if isClosedChan(p.localDone) || 
isClosedChan(p.remoteDone) { + return io.ErrClosedPipe + } + p.readDeadline.set(t) + p.writeDeadline.set(t) + return nil +} + +func (p *pipe) SetReadDeadline(t time.Time) error { + if isClosedChan(p.localDone) || isClosedChan(p.remoteDone) { + return io.ErrClosedPipe + } + p.readDeadline.set(t) + return nil +} + +func (p *pipe) SetWriteDeadline(t time.Time) error { + if isClosedChan(p.localDone) || isClosedChan(p.remoteDone) { + return io.ErrClosedPipe + } + p.writeDeadline.set(t) + return nil +} + +func (p *pipe) Close() error { + p.once.Do(func() { close(p.localDone) }) + return nil +} diff --git a/platform/dbops/binaries/go/go/src/net/pipe_test.go b/platform/dbops/binaries/go/go/src/net/pipe_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9cc24148ca26fe0173412ae378b38c51fa47b1a8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/pipe_test.go @@ -0,0 +1,49 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net_test + +import ( + "io" + "net" + "testing" + "time" + + "golang.org/x/net/nettest" +) + +func TestPipe(t *testing.T) { + nettest.TestConn(t, func() (c1, c2 net.Conn, stop func(), err error) { + c1, c2 = net.Pipe() + stop = func() { + c1.Close() + c2.Close() + } + return + }) +} + +func TestPipeCloseError(t *testing.T) { + c1, c2 := net.Pipe() + c1.Close() + + if _, err := c1.Read(nil); err != io.ErrClosedPipe { + t.Errorf("c1.Read() = %v, want io.ErrClosedPipe", err) + } + if _, err := c1.Write(nil); err != io.ErrClosedPipe { + t.Errorf("c1.Write() = %v, want io.ErrClosedPipe", err) + } + if err := c1.SetDeadline(time.Time{}); err != io.ErrClosedPipe { + t.Errorf("c1.SetDeadline() = %v, want io.ErrClosedPipe", err) + } + if _, err := c2.Read(nil); err != io.EOF { + t.Errorf("c2.Read() = %v, want io.EOF", err) + } + if _, err := c2.Write(nil); err != io.ErrClosedPipe { + t.Errorf("c2.Write() = %v, want io.ErrClosedPipe", err) + } + if err := c2.SetDeadline(time.Time{}); err != io.ErrClosedPipe { + t.Errorf("c2.SetDeadline() = %v, want io.ErrClosedPipe", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/platform_test.go b/platform/dbops/binaries/go/go/src/net/platform_test.go new file mode 100644 index 0000000000000000000000000000000000000000..709d4a3eb7b7a789577361cb3047343ecd53046f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/platform_test.go @@ -0,0 +1,178 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "internal/testenv" + "os" + "os/exec" + "runtime" + "strconv" + "strings" + "testing" +) + +var unixEnabledOnAIX bool + +func init() { + if runtime.GOOS == "aix" { + // Unix network isn't properly working on AIX 7.2 with + // Technical Level < 2. + // The information is retrieved only once in this init() + // instead of everytime testableNetwork is called. + out, _ := exec.Command("oslevel", "-s").Output() + if len(out) >= len("7200-XX-ZZ-YYMM") { // AIX 7.2, Tech Level XX, Service Pack ZZ, date YYMM + aixVer := string(out[:4]) + tl, _ := strconv.Atoi(string(out[5:7])) + unixEnabledOnAIX = aixVer > "7200" || (aixVer == "7200" && tl >= 2) + } + } +} + +// testableNetwork reports whether network is testable on the current +// platform configuration. 
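+//
+// For example, it reports false for "unix" on windows and plan9, and
+// false for "tcp6" when the host has no IPv6 support.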
+func testableNetwork(network string) bool { + net, _, _ := strings.Cut(network, ":") + switch net { + case "ip+nopriv": + case "ip", "ip4", "ip6": + switch runtime.GOOS { + case "plan9": + return false + default: + if os.Getuid() != 0 { + return false + } + } + case "unix", "unixgram": + switch runtime.GOOS { + case "android", "ios", "plan9", "windows": + return false + case "aix": + return unixEnabledOnAIX + } + case "unixpacket": + switch runtime.GOOS { + case "aix", "android", "darwin", "ios", "plan9", "windows": + return false + } + } + switch net { + case "tcp4", "udp4", "ip4": + if !supportsIPv4() { + return false + } + case "tcp6", "udp6", "ip6": + if !supportsIPv6() { + return false + } + } + return true +} + +// testableAddress reports whether address of network is testable on +// the current platform configuration. +func testableAddress(network, address string) bool { + switch net, _, _ := strings.Cut(network, ":"); net { + case "unix", "unixgram", "unixpacket": + // Abstract unix domain sockets, a Linux-ism. + if address[0] == '@' && runtime.GOOS != "linux" { + return false + } + } + return true +} + +// testableListenArgs reports whether arguments are testable on the +// current platform configuration. +func testableListenArgs(network, address, client string) bool { + if !testableNetwork(network) || !testableAddress(network, address) { + return false + } + + var err error + var addr Addr + switch net, _, _ := strings.Cut(network, ":"); net { + case "tcp", "tcp4", "tcp6": + addr, err = ResolveTCPAddr("tcp", address) + case "udp", "udp4", "udp6": + addr, err = ResolveUDPAddr("udp", address) + case "ip", "ip4", "ip6": + addr, err = ResolveIPAddr("ip", address) + default: + return true + } + if err != nil { + return false + } + var ip IP + var wildcard bool + switch addr := addr.(type) { + case *TCPAddr: + ip = addr.IP + wildcard = addr.isWildcard() + case *UDPAddr: + ip = addr.IP + wildcard = addr.isWildcard() + case *IPAddr: + ip = addr.IP + wildcard = addr.isWildcard() + } + + // Test wildcard IP addresses. + if wildcard && !testenv.HasExternalNetwork() { + return false + } + + // Test functionality of IPv4 communication using AF_INET and + // IPv6 communication using AF_INET6 sockets. + if !supportsIPv4() && ip.To4() != nil { + return false + } + if !supportsIPv6() && ip.To16() != nil && ip.To4() == nil { + return false + } + cip := ParseIP(client) + if cip != nil { + if !supportsIPv4() && cip.To4() != nil { + return false + } + if !supportsIPv6() && cip.To16() != nil && cip.To4() == nil { + return false + } + } + + // Test functionality of IPv4 communication using AF_INET6 + // sockets. + if !supportsIPv4map() && supportsIPv4() && (network == "tcp" || network == "udp" || network == "ip") && wildcard { + // At this point, we prefer IPv4 when ip is nil. + // See favoriteAddrFamily for further information. + if ip.To16() != nil && ip.To4() == nil && cip.To4() != nil { // a pair of IPv6 server and IPv4 client + return false + } + if (ip.To4() != nil || ip == nil) && cip.To16() != nil && cip.To4() == nil { // a pair of IPv4 server and IPv6 client + return false + } + } + + return true +} + +func condFatalf(t *testing.T, network string, format string, args ...any) { + t.Helper() + // A few APIs like File and Read/WriteMsg{UDP,IP} are not + // fully implemented yet on Plan 9 and Windows. + switch runtime.GOOS { + case "windows", "js", "wasip1": + if network == "file+net" { + t.Logf(format, args...) + return + } + case "plan9": + t.Logf(format, args...) 
+ return + } + t.Fatalf(format, args...) +} diff --git a/platform/dbops/binaries/go/go/src/net/port.go b/platform/dbops/binaries/go/go/src/net/port.go new file mode 100644 index 0000000000000000000000000000000000000000..32e76286193c23ca2a196e9ddceaab33b7f0f5ac --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/port.go @@ -0,0 +1,62 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +// parsePort parses service as a decimal integer and returns the +// corresponding value as port. It is the caller's responsibility to +// parse service as a non-decimal integer when needsLookup is true. +// +// Some system resolvers will return a valid port number when given a number +// over 65536 (see https://golang.org/issues/11715). Alas, the parser +// can't bail early on numbers > 65536. Therefore reasonably large/small +// numbers are parsed in full and rejected if invalid. +func parsePort(service string) (port int, needsLookup bool) { + if service == "" { + // Lock in the legacy behavior that an empty string + // means port 0. See golang.org/issue/13610. + return 0, false + } + const ( + max = uint32(1<<32 - 1) + cutoff = uint32(1 << 30) + ) + neg := false + if service[0] == '+' { + service = service[1:] + } else if service[0] == '-' { + neg = true + service = service[1:] + } + var n uint32 + for _, d := range service { + if '0' <= d && d <= '9' { + d -= '0' + } else { + return 0, true + } + if n >= cutoff { + n = max + break + } + n *= 10 + nn := n + uint32(d) + if nn < n || nn > max { + n = max + break + } + n = nn + } + if !neg && n >= cutoff { + port = int(cutoff - 1) + } else if neg && n > cutoff { + port = int(cutoff) + } else { + port = int(n) + } + if neg { + port = -port + } + return port, false +} diff --git a/platform/dbops/binaries/go/go/src/net/port_test.go b/platform/dbops/binaries/go/go/src/net/port_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e0bdb4247d31f51485f65dc53e49101b4154e290 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/port_test.go @@ -0,0 +1,52 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
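+
+// Note that parsePort saturates rather than overflowing: for example,
+// parsePort("1073741825") returns 1<<30-1 and parsePort("-1073741825")
+// returns -(1 << 30), both with needsLookup false, as the table below
+// verifies.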
+
+package net
+
+import "testing"
+
+var parsePortTests = []struct {
+	service     string
+	port        int
+	needsLookup bool
+}{
+	{"", 0, false},
+
+	// Decimal number literals
+	{"-1073741825", -1 << 30, false},
+	{"-1073741824", -1 << 30, false},
+	{"-1073741823", -(1<<30 - 1), false},
+	{"-123456789", -123456789, false},
+	{"-1", -1, false},
+	{"-0", 0, false},
+	{"0", 0, false},
+	{"+0", 0, false},
+	{"+1", 1, false},
+	{"65535", 65535, false},
+	{"65536", 65536, false},
+	{"123456789", 123456789, false},
+	{"1073741822", 1<<30 - 2, false},
+	{"1073741823", 1<<30 - 1, false},
+	{"1073741824", 1<<30 - 1, false},
+	{"1073741825", 1<<30 - 1, false},
+
+	// Others
+	{"abc", 0, true},
+	{"9pfs", 0, true},
+	{"123badport", 0, true},
+	{"bad123port", 0, true},
+	{"badport123", 0, true},
+	{"123456789badport", 0, true},
+	{"-2147483649badport", 0, true},
+	{"2147483649badport", 0, true},
+}
+
+func TestParsePort(t *testing.T) {
+	// The following test cases are cribbed from the strconv package.
+	for _, tt := range parsePortTests {
+		if port, needsLookup := parsePort(tt.service); port != tt.port || needsLookup != tt.needsLookup {
+			t.Errorf("parsePort(%q) = %d, %t; want %d, %t", tt.service, port, needsLookup, tt.port, tt.needsLookup)
+		}
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/net/port_unix.go b/platform/dbops/binaries/go/go/src/net/port_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..df73dbabb3c25fa5c754980e4f53c5d85815629a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/net/port_unix.go
@@ -0,0 +1,57 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || js || wasip1
+
+// Read system port mappings from /etc/services
+
+package net
+
+import (
+	"internal/bytealg"
+	"sync"
+)
+
+var onceReadServices sync.Once
+
+func readServices() {
+	file, err := open("/etc/services")
+	if err != nil {
+		return
+	}
+	defer file.close()
+
+	for line, ok := file.readLine(); ok; line, ok = file.readLine() {
+		// "http 80/tcp www www-http # World Wide Web HTTP"
+		if i := bytealg.IndexByteString(line, '#'); i >= 0 {
+			line = line[:i]
+		}
+		f := getFields(line)
+		if len(f) < 2 {
+			continue
+		}
+		portnet := f[1] // "80/tcp"
+		port, j, ok := dtoi(portnet)
+		if !ok || port <= 0 || j >= len(portnet) || portnet[j] != '/' {
+			continue
+		}
+		netw := portnet[j+1:] // "tcp"
+		m, ok1 := services[netw]
+		if !ok1 {
+			m = make(map[string]int)
+			services[netw] = m
+		}
+		for i := 0; i < len(f); i++ {
+			if i != 1 { // f[1] was port/net
+				m[f[i]] = port
+			}
+		}
+	}
+}
+
+// goLookupPort is the native Go implementation of LookupPort.
+func goLookupPort(network, service string) (port int, err error) {
+	onceReadServices.Do(readServices)
+	return lookupPortMap(network, service)
+}
diff --git a/platform/dbops/binaries/go/go/src/net/protoconn_test.go b/platform/dbops/binaries/go/go/src/net/protoconn_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..a617470580d5288b517fa3753d874dfb8193952d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/net/protoconn_test.go
@@ -0,0 +1,352 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements API tests across platforms and will never have a build
+// tag.
+ +package net + +import ( + "internal/testenv" + "os" + "runtime" + "testing" + "time" +) + +// The full stack test cases for IPConn have been moved to the +// following: +// golang.org/x/net/ipv4 +// golang.org/x/net/ipv6 +// golang.org/x/net/icmp + +func TestTCPListenerSpecificMethods(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + la, err := ResolveTCPAddr("tcp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + ln, err := ListenTCP("tcp4", la) + if err != nil { + t.Fatal(err) + } + defer ln.Close() + ln.Addr() + mustSetDeadline(t, ln.SetDeadline, 30*time.Nanosecond) + + if c, err := ln.Accept(); err != nil { + if !err.(Error).Timeout() { + t.Fatal(err) + } + } else { + c.Close() + } + if c, err := ln.AcceptTCP(); err != nil { + if !err.(Error).Timeout() { + t.Fatal(err) + } + } else { + c.Close() + } + + if f, err := ln.File(); err != nil { + condFatalf(t, "file+net", "%v", err) + } else { + f.Close() + } +} + +func TestTCPConnSpecificMethods(t *testing.T) { + la, err := ResolveTCPAddr("tcp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + ln, err := ListenTCP("tcp4", la) + if err != nil { + t.Fatal(err) + } + ch := make(chan error, 1) + handler := func(ls *localServer, ln Listener) { ls.transponder(ls.Listener, ch) } + ls := (&streamListener{Listener: ln}).newLocalServer() + defer ls.teardown() + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + ra, err := ResolveTCPAddr("tcp4", ls.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + c, err := DialTCP("tcp4", nil, ra) + if err != nil { + t.Fatal(err) + } + defer c.Close() + c.SetKeepAlive(false) + c.SetKeepAlivePeriod(3 * time.Second) + c.SetLinger(0) + c.SetNoDelay(false) + c.LocalAddr() + c.RemoteAddr() + c.SetDeadline(time.Now().Add(someTimeout)) + c.SetReadDeadline(time.Now().Add(someTimeout)) + c.SetWriteDeadline(time.Now().Add(someTimeout)) + + if _, err := c.Write([]byte("TCPCONN TEST")); err != nil { + t.Fatal(err) + } + rb := make([]byte, 128) + if _, err := c.Read(rb); err != nil { + t.Fatal(err) + } + + for err := range ch { + t.Error(err) + } +} + +func TestUDPConnSpecificMethods(t *testing.T) { + la, err := ResolveUDPAddr("udp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + c, err := ListenUDP("udp4", la) + if err != nil { + t.Fatal(err) + } + defer c.Close() + c.LocalAddr() + c.RemoteAddr() + c.SetDeadline(time.Now().Add(someTimeout)) + c.SetReadDeadline(time.Now().Add(someTimeout)) + c.SetWriteDeadline(time.Now().Add(someTimeout)) + c.SetReadBuffer(2048) + c.SetWriteBuffer(2048) + + wb := []byte("UDPCONN TEST") + rb := make([]byte, 128) + if _, err := c.WriteToUDP(wb, c.LocalAddr().(*UDPAddr)); err != nil { + t.Fatal(err) + } + if _, _, err := c.ReadFromUDP(rb); err != nil { + t.Fatal(err) + } + if _, _, err := c.WriteMsgUDP(wb, nil, c.LocalAddr().(*UDPAddr)); err != nil { + condFatalf(t, c.LocalAddr().Network(), "%v", err) + } + if _, _, _, _, err := c.ReadMsgUDP(rb, nil); err != nil { + condFatalf(t, c.LocalAddr().Network(), "%v", err) + } + + if f, err := c.File(); err != nil { + condFatalf(t, "file+net", "%v", err) + } else { + f.Close() + } + + defer func() { + if p := recover(); p != nil { + t.Fatalf("panicked: %v", p) + } + }() + + c.WriteToUDP(wb, nil) + c.WriteMsgUDP(wb, nil, nil) +} + +func TestIPConnSpecificMethods(t *testing.T) { + if !testableNetwork("ip4") { + t.Skip("skipping: ip4 not supported") + } + + la, err := ResolveIPAddr("ip4", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + c, err 
:= ListenIP("ip4:icmp", la) + if testenv.SyscallIsNotSupported(err) { + // May be inside a container that disallows creating a socket or + // not running as root. + t.Skipf("skipping: %v", err) + } else if err != nil { + t.Fatal(err) + } + defer c.Close() + c.LocalAddr() + c.RemoteAddr() + c.SetDeadline(time.Now().Add(someTimeout)) + c.SetReadDeadline(time.Now().Add(someTimeout)) + c.SetWriteDeadline(time.Now().Add(someTimeout)) + c.SetReadBuffer(2048) + c.SetWriteBuffer(2048) + + if f, err := c.File(); err != nil { + condFatalf(t, "file+net", "%v", err) + } else { + f.Close() + } + + defer func() { + if p := recover(); p != nil { + t.Fatalf("panicked: %v", p) + } + }() + + wb := []byte("IPCONN TEST") + c.WriteToIP(wb, nil) + c.WriteMsgIP(wb, nil, nil) +} + +func TestUnixListenerSpecificMethods(t *testing.T) { + if !testableNetwork("unix") { + t.Skip("unix test") + } + + addr := testUnixAddr(t) + la, err := ResolveUnixAddr("unix", addr) + if err != nil { + t.Fatal(err) + } + ln, err := ListenUnix("unix", la) + if err != nil { + t.Fatal(err) + } + defer ln.Close() + defer os.Remove(addr) + ln.Addr() + mustSetDeadline(t, ln.SetDeadline, 30*time.Nanosecond) + + if c, err := ln.Accept(); err != nil { + if !err.(Error).Timeout() { + t.Fatal(err) + } + } else { + c.Close() + } + if c, err := ln.AcceptUnix(); err != nil { + if !err.(Error).Timeout() { + t.Fatal(err) + } + } else { + c.Close() + } + + if f, err := ln.File(); err != nil { + condFatalf(t, "file+net", "%v", err) + } else { + f.Close() + } +} + +func TestUnixConnSpecificMethods(t *testing.T) { + if !testableNetwork("unixgram") { + t.Skip("unixgram test") + } + + addr1, addr2, addr3 := testUnixAddr(t), testUnixAddr(t), testUnixAddr(t) + + a1, err := ResolveUnixAddr("unixgram", addr1) + if err != nil { + t.Fatal(err) + } + c1, err := DialUnix("unixgram", a1, nil) + if err != nil { + t.Fatal(err) + } + defer c1.Close() + defer os.Remove(addr1) + c1.LocalAddr() + c1.RemoteAddr() + c1.SetDeadline(time.Now().Add(someTimeout)) + c1.SetReadDeadline(time.Now().Add(someTimeout)) + c1.SetWriteDeadline(time.Now().Add(someTimeout)) + c1.SetReadBuffer(2048) + c1.SetWriteBuffer(2048) + + a2, err := ResolveUnixAddr("unixgram", addr2) + if err != nil { + t.Fatal(err) + } + c2, err := DialUnix("unixgram", a2, nil) + if err != nil { + t.Fatal(err) + } + defer c2.Close() + defer os.Remove(addr2) + c2.LocalAddr() + c2.RemoteAddr() + c2.SetDeadline(time.Now().Add(someTimeout)) + c2.SetReadDeadline(time.Now().Add(someTimeout)) + c2.SetWriteDeadline(time.Now().Add(someTimeout)) + c2.SetReadBuffer(2048) + c2.SetWriteBuffer(2048) + + a3, err := ResolveUnixAddr("unixgram", addr3) + if err != nil { + t.Fatal(err) + } + c3, err := ListenUnixgram("unixgram", a3) + if err != nil { + t.Fatal(err) + } + defer c3.Close() + defer os.Remove(addr3) + c3.LocalAddr() + c3.RemoteAddr() + c3.SetDeadline(time.Now().Add(someTimeout)) + c3.SetReadDeadline(time.Now().Add(someTimeout)) + c3.SetWriteDeadline(time.Now().Add(someTimeout)) + c3.SetReadBuffer(2048) + c3.SetWriteBuffer(2048) + + wb := []byte("UNIXCONN TEST") + rb1 := make([]byte, 128) + rb2 := make([]byte, 128) + rb3 := make([]byte, 128) + if _, _, err := c1.WriteMsgUnix(wb, nil, a2); err != nil { + t.Fatal(err) + } + if _, _, _, _, err := c2.ReadMsgUnix(rb2, nil); err != nil { + t.Fatal(err) + } + if _, err := c2.WriteToUnix(wb, a1); err != nil { + t.Fatal(err) + } + if _, _, err := c1.ReadFromUnix(rb1); err != nil { + t.Fatal(err) + } + if _, err := c3.WriteToUnix(wb, a1); err != nil { + t.Fatal(err) + } + if _, _, 
err := c1.ReadFromUnix(rb1); err != nil { + t.Fatal(err) + } + if _, err := c2.WriteToUnix(wb, a3); err != nil { + t.Fatal(err) + } + if _, _, err := c3.ReadFromUnix(rb3); err != nil { + t.Fatal(err) + } + + if f, err := c1.File(); err != nil { + condFatalf(t, "file+net", "%v", err) + } else { + f.Close() + } + + defer func() { + if p := recover(); p != nil { + t.Fatalf("panicked: %v", p) + } + }() + + c1.WriteToUnix(wb, nil) + c1.WriteMsgUnix(wb, nil, nil) + c3.WriteToUnix(wb, nil) + c3.WriteMsgUnix(wb, nil, nil) +} diff --git a/platform/dbops/binaries/go/go/src/net/rawconn.go b/platform/dbops/binaries/go/go/src/net/rawconn.go new file mode 100644 index 0000000000000000000000000000000000000000..19228e94ed9e44eb9ff50f7f778518da294be887 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/rawconn.go @@ -0,0 +1,107 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "internal/poll" + "runtime" + "syscall" +) + +// BUG(tmm1): On Windows, the Write method of syscall.RawConn +// does not integrate with the runtime's network poller. It cannot +// wait for the connection to become writeable, and does not respect +// deadlines. If the user-provided callback returns false, the Write +// method will fail immediately. + +// BUG(mikio): On JS and Plan 9, the Control, Read and Write +// methods of syscall.RawConn are not implemented. + +type rawConn struct { + fd *netFD +} + +func (c *rawConn) ok() bool { return c != nil && c.fd != nil } + +func (c *rawConn) Control(f func(uintptr)) error { + if !c.ok() { + return syscall.EINVAL + } + err := c.fd.pfd.RawControl(f) + runtime.KeepAlive(c.fd) + if err != nil { + err = &OpError{Op: "raw-control", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err} + } + return err +} + +func (c *rawConn) Read(f func(uintptr) bool) error { + if !c.ok() { + return syscall.EINVAL + } + err := c.fd.pfd.RawRead(f) + runtime.KeepAlive(c.fd) + if err != nil { + err = &OpError{Op: "raw-read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return err +} + +func (c *rawConn) Write(f func(uintptr) bool) error { + if !c.ok() { + return syscall.EINVAL + } + err := c.fd.pfd.RawWrite(f) + runtime.KeepAlive(c.fd) + if err != nil { + err = &OpError{Op: "raw-write", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return err +} + +// PollFD returns the poll.FD of the underlying connection. +// +// Other packages in std that also import [internal/poll] (such as os) +// can use a type assertion to access this extension method so that +// they can pass the *poll.FD to functions like poll.Splice. +// +// PollFD is not intended for use outside the standard library. +func (c *rawConn) PollFD() *poll.FD { + if !c.ok() { + return nil + } + return &c.fd.pfd +} + +func newRawConn(fd *netFD) *rawConn { + return &rawConn{fd: fd} +} + +// Network returns the network type of the underlying connection. +// +// Other packages in std that import internal/poll and are unable to +// import net (such as os) can use a type assertion to access this +// extension method so that they can distinguish different socket types. +// +// Network is not intended for use outside the standard library. 
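+//
+// A minimal sketch of the intended assertion (the conn variable is
+// illustrative, not part of this package):
+//
+//	if nc, ok := conn.(interface{ Network() poll.String }); ok {
+//		_ = nc.Network() // e.g. "tcp" or "unix"
+//	}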
+func (c *rawConn) Network() poll.String { + return poll.String(c.fd.net) +} + +type rawListener struct { + rawConn +} + +func (l *rawListener) Read(func(uintptr) bool) error { + return syscall.EINVAL +} + +func (l *rawListener) Write(func(uintptr) bool) error { + return syscall.EINVAL +} + +func newRawListener(fd *netFD) *rawListener { + return &rawListener{rawConn{fd: fd}} +} diff --git a/platform/dbops/binaries/go/go/src/net/rawconn_stub_test.go b/platform/dbops/binaries/go/go/src/net/rawconn_stub_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6d54f2df55be1516af9f562e8f25bd17b813155e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/rawconn_stub_test.go @@ -0,0 +1,28 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build js || plan9 || wasip1 + +package net + +import ( + "errors" + "syscall" +) + +func readRawConn(c syscall.RawConn, b []byte) (int, error) { + return 0, errors.New("not supported") +} + +func writeRawConn(c syscall.RawConn, b []byte) error { + return errors.New("not supported") +} + +func controlRawConn(c syscall.RawConn, addr Addr) error { + return errors.New("not supported") +} + +func controlOnConnSetup(network string, address string, c syscall.RawConn) error { + return nil +} diff --git a/platform/dbops/binaries/go/go/src/net/rawconn_test.go b/platform/dbops/binaries/go/go/src/net/rawconn_test.go new file mode 100644 index 0000000000000000000000000000000000000000..70b16c411534a1a7ff39288cb65159f4f894350d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/rawconn_test.go @@ -0,0 +1,209 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package net + +import ( + "bytes" + "runtime" + "testing" + "time" +) + +func TestRawConnReadWrite(t *testing.T) { + switch runtime.GOOS { + case "plan9", "js", "wasip1": + t.Skipf("not supported on %s", runtime.GOOS) + } + + t.Run("TCP", func(t *testing.T) { + handler := func(ls *localServer, ln Listener) { + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + defer c.Close() + + cc, err := ln.(*TCPListener).SyscallConn() + if err != nil { + t.Fatal(err) + } + called := false + op := func(uintptr) bool { + called = true + return true + } + err = cc.Write(op) + if err == nil { + t.Error("Write should return an error") + } + if called { + t.Error("Write shouldn't call op") + } + called = false + err = cc.Read(op) + if err == nil { + t.Error("Read should return an error") + } + if called { + t.Error("Read shouldn't call op") + } + + var b [32]byte + n, err := c.Read(b[:]) + if err != nil { + t.Error(err) + return + } + if _, err := c.Write(b[:n]); err != nil { + t.Error(err) + return + } + } + ls := newLocalServer(t, "tcp") + defer ls.teardown() + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + cc, err := c.(*TCPConn).SyscallConn() + if err != nil { + t.Fatal(err) + } + data := []byte("HELLO-R-U-THERE") + if err := writeRawConn(cc, data); err != nil { + t.Fatal(err) + } + var b [32]byte + n, err := readRawConn(cc, b[:]) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(b[:n], data) != 0 { + t.Fatalf("got %q; want %q", b[:n], data) + } + }) + t.Run("Deadline", func(t *testing.T) { + switch runtime.GOOS { + case "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + c, err := Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + cc, err := c.(*TCPConn).SyscallConn() + if err != nil { + t.Fatal(err) + } + var b [1]byte + + c.SetDeadline(noDeadline) + if err := c.SetDeadline(time.Now().Add(-1)); err != nil { + t.Fatal(err) + } + if err = writeRawConn(cc, b[:]); err == nil { + t.Fatal("Write should fail") + } + if perr := parseWriteError(err); perr != nil { + t.Error(perr) + } + if !isDeadlineExceeded(err) { + t.Errorf("got %v; want timeout", err) + } + if _, err = readRawConn(cc, b[:]); err == nil { + t.Fatal("Read should fail") + } + if perr := parseReadError(err); perr != nil { + t.Error(perr) + } + if !isDeadlineExceeded(err) { + t.Errorf("got %v; want timeout", err) + } + + c.SetReadDeadline(noDeadline) + if err := c.SetReadDeadline(time.Now().Add(-1)); err != nil { + t.Fatal(err) + } + if _, err = readRawConn(cc, b[:]); err == nil { + t.Fatal("Read should fail") + } + if perr := parseReadError(err); perr != nil { + t.Error(perr) + } + if !isDeadlineExceeded(err) { + t.Errorf("got %v; want timeout", err) + } + + c.SetWriteDeadline(noDeadline) + if err := c.SetWriteDeadline(time.Now().Add(-1)); err != nil { + t.Fatal(err) + } + if err = writeRawConn(cc, b[:]); err == nil { + t.Fatal("Write should fail") + } + if perr := parseWriteError(err); perr != nil { + t.Error(perr) + } + if !isDeadlineExceeded(err) { + t.Errorf("got %v; want timeout", err) + } + }) +} + +func TestRawConnControl(t *testing.T) { + switch runtime.GOOS { + case "plan9", "js", "wasip1": + t.Skipf("not supported on %s", runtime.GOOS) + } + + t.Run("TCP", func(t *testing.T) { + ln := newLocalListener(t, "tcp") + defer ln.Close() + + 
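+		// Control should succeed on both the listener's and the dialed
+		// connection's raw conns while they are open, and fail once
+		// each has been closed.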
cc1, err := ln.(*TCPListener).SyscallConn() + if err != nil { + t.Fatal(err) + } + if err := controlRawConn(cc1, ln.Addr()); err != nil { + t.Fatal(err) + } + + c, err := Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + cc2, err := c.(*TCPConn).SyscallConn() + if err != nil { + t.Fatal(err) + } + if err := controlRawConn(cc2, c.LocalAddr()); err != nil { + t.Fatal(err) + } + + ln.Close() + if err := controlRawConn(cc1, ln.Addr()); err == nil { + t.Fatal("Control after Close should fail") + } + c.Close() + if err := controlRawConn(cc2, c.LocalAddr()); err == nil { + t.Fatal("Control after Close should fail") + } + }) +} diff --git a/platform/dbops/binaries/go/go/src/net/rawconn_unix_test.go b/platform/dbops/binaries/go/go/src/net/rawconn_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f11119ed9ec0da04cd04e1ca48452229efa7803c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/rawconn_unix_test.go @@ -0,0 +1,115 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package net + +import ( + "errors" + "syscall" +) + +func readRawConn(c syscall.RawConn, b []byte) (int, error) { + var operr error + var n int + err := c.Read(func(s uintptr) bool { + n, operr = syscall.Read(int(s), b) + if operr == syscall.EAGAIN { + return false + } + return true + }) + if err != nil { + return n, err + } + return n, operr +} + +func writeRawConn(c syscall.RawConn, b []byte) error { + var operr error + err := c.Write(func(s uintptr) bool { + _, operr = syscall.Write(int(s), b) + if operr == syscall.EAGAIN { + return false + } + return true + }) + if err != nil { + return err + } + return operr +} + +func controlRawConn(c syscall.RawConn, addr Addr) error { + var operr error + fn := func(s uintptr) { + _, operr = syscall.GetsockoptInt(int(s), syscall.SOL_SOCKET, syscall.SO_REUSEADDR) + if operr != nil { + return + } + switch addr := addr.(type) { + case *TCPAddr: + // There's no guarantee that IP-level socket + // options work well with dual stack sockets. + // A simple solution would be to take a look + // at the bound address to the raw connection + // and to classify the address family of the + // underlying socket by the bound address: + // + // - When IP.To16() != nil and IP.To4() == nil, + // we can assume that the raw connection + // consists of an IPv6 socket using only + // IPv6 addresses. + // + // - When IP.To16() == nil and IP.To4() != nil, + // the raw connection consists of an IPv4 + // socket using only IPv4 addresses. + // + // - Otherwise, the raw connection is a dual + // stack socket, an IPv6 socket using IPv6 + // addresses including IPv4-mapped or + // IPv4-embedded IPv6 addresses. 
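+			//
+			// For example, an address parsed from "2001:db8::1"
+			// has To16() != nil and To4() == nil, so the IPv6-only
+			// branch below is taken.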
+ if addr.IP.To16() != nil && addr.IP.To4() == nil { + operr = syscall.SetsockoptInt(int(s), syscall.IPPROTO_IPV6, syscall.IPV6_UNICAST_HOPS, 1) + } else if addr.IP.To16() == nil && addr.IP.To4() != nil { + operr = syscall.SetsockoptInt(int(s), syscall.IPPROTO_IP, syscall.IP_TTL, 1) + } + } + } + if err := c.Control(fn); err != nil { + return err + } + return operr +} + +func controlOnConnSetup(network string, address string, c syscall.RawConn) error { + var operr error + var fn func(uintptr) + switch network { + case "tcp", "udp", "ip": + return errors.New("ambiguous network: " + network) + case "unix", "unixpacket", "unixgram": + fn = func(s uintptr) { + _, operr = syscall.GetsockoptInt(int(s), syscall.SOL_SOCKET, syscall.SO_ERROR) + } + default: + switch network[len(network)-1] { + case '4': + fn = func(s uintptr) { + operr = syscall.SetsockoptInt(int(s), syscall.IPPROTO_IP, syscall.IP_TTL, 1) + } + case '6': + fn = func(s uintptr) { + operr = syscall.SetsockoptInt(int(s), syscall.IPPROTO_IPV6, syscall.IPV6_UNICAST_HOPS, 1) + } + default: + return errors.New("unknown network: " + network) + } + } + if err := c.Control(fn); err != nil { + return err + } + return operr +} diff --git a/platform/dbops/binaries/go/go/src/net/rawconn_windows_test.go b/platform/dbops/binaries/go/go/src/net/rawconn_windows_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5febf08f774b1892491ff8dbc0e80725021a1c0f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/rawconn_windows_test.go @@ -0,0 +1,116 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "errors" + "syscall" + "unsafe" +) + +func readRawConn(c syscall.RawConn, b []byte) (int, error) { + var operr error + var n int + err := c.Read(func(s uintptr) bool { + var read uint32 + var flags uint32 + var buf syscall.WSABuf + buf.Buf = &b[0] + buf.Len = uint32(len(b)) + operr = syscall.WSARecv(syscall.Handle(s), &buf, 1, &read, &flags, nil, nil) + n = int(read) + return true + }) + if err != nil { + return n, err + } + return n, operr +} + +func writeRawConn(c syscall.RawConn, b []byte) error { + var operr error + err := c.Write(func(s uintptr) bool { + var written uint32 + var buf syscall.WSABuf + buf.Buf = &b[0] + buf.Len = uint32(len(b)) + operr = syscall.WSASend(syscall.Handle(s), &buf, 1, &written, 0, nil, nil) + return true + }) + if err != nil { + return err + } + return operr +} + +func controlRawConn(c syscall.RawConn, addr Addr) error { + var operr error + fn := func(s uintptr) { + var v, l int32 + l = int32(unsafe.Sizeof(v)) + operr = syscall.Getsockopt(syscall.Handle(s), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, (*byte)(unsafe.Pointer(&v)), &l) + if operr != nil { + return + } + switch addr := addr.(type) { + case *TCPAddr: + // There's no guarantee that IP-level socket + // options work well with dual stack sockets. + // A simple solution would be to take a look + // at the bound address to the raw connection + // and to classify the address family of the + // underlying socket by the bound address: + // + // - When IP.To16() != nil and IP.To4() == nil, + // we can assume that the raw connection + // consists of an IPv6 socket using only + // IPv6 addresses. + // + // - When IP.To16() == nil and IP.To4() != nil, + // the raw connection consists of an IPv4 + // socket using only IPv4 addresses. 
+ // + // - Otherwise, the raw connection is a dual + // stack socket, an IPv6 socket using IPv6 + // addresses including IPv4-mapped or + // IPv4-embedded IPv6 addresses. + if addr.IP.To16() != nil && addr.IP.To4() == nil { + operr = syscall.SetsockoptInt(syscall.Handle(s), syscall.IPPROTO_IPV6, syscall.IPV6_UNICAST_HOPS, 1) + } else if addr.IP.To16() == nil && addr.IP.To4() != nil { + operr = syscall.SetsockoptInt(syscall.Handle(s), syscall.IPPROTO_IP, syscall.IP_TTL, 1) + } + } + } + if err := c.Control(fn); err != nil { + return err + } + return operr +} + +func controlOnConnSetup(network string, address string, c syscall.RawConn) error { + var operr error + var fn func(uintptr) + switch network { + case "tcp", "udp", "ip": + return errors.New("ambiguous network: " + network) + default: + switch network[len(network)-1] { + case '4': + fn = func(s uintptr) { + operr = syscall.SetsockoptInt(syscall.Handle(s), syscall.IPPROTO_IP, syscall.IP_TTL, 1) + } + case '6': + fn = func(s uintptr) { + operr = syscall.SetsockoptInt(syscall.Handle(s), syscall.IPPROTO_IPV6, syscall.IPV6_UNICAST_HOPS, 1) + } + default: + return errors.New("unknown network: " + network) + } + } + if err := c.Control(fn); err != nil { + return err + } + return operr +} diff --git a/platform/dbops/binaries/go/go/src/net/resolverdialfunc_test.go b/platform/dbops/binaries/go/go/src/net/resolverdialfunc_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1af4199269586e4e180d7a9f946654c9ac70f465 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/resolverdialfunc_test.go @@ -0,0 +1,325 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that Resolver.Dial can be a func returning an in-memory net.Conn +// speaking DNS. + +package net + +import ( + "bytes" + "context" + "errors" + "fmt" + "reflect" + "sort" + "testing" + "time" + + "golang.org/x/net/dns/dnsmessage" +) + +func TestResolverDialFunc(t *testing.T) { + r := &Resolver{ + PreferGo: true, + Dial: newResolverDialFunc(&resolverDialHandler{ + StartDial: func(network, address string) error { + t.Logf("StartDial(%q, %q) ...", network, address) + return nil + }, + Question: func(h dnsmessage.Header, q dnsmessage.Question) { + t.Logf("Header: %+v for %q (type=%v, class=%v)", h, + q.Name.String(), q.Type, q.Class) + }, + // TODO: add test without HandleA* hooks specified at all, that Go + // doesn't issue retries; map to something terminal. + HandleA: func(w AWriter, name string) error { + w.AddIP([4]byte{1, 2, 3, 4}) + w.AddIP([4]byte{5, 6, 7, 8}) + return nil + }, + HandleAAAA: func(w AAAAWriter, name string) error { + w.AddIP([16]byte{1: 1, 15: 15}) + w.AddIP([16]byte{2: 2, 14: 14}) + return nil + }, + HandleSRV: func(w SRVWriter, name string) error { + w.AddSRV(1, 2, 80, "foo.bar.") + w.AddSRV(2, 3, 81, "bar.baz.") + return nil + }, + }), + } + ctx := context.Background() + const fakeDomain = "something-that-is-a-not-a-real-domain.fake-tld." 
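+
+	// No real resolver is consulted: the Dial hook above returns an
+	// in-memory conn whose answers come from the Handle* callbacks.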
+ + t.Run("LookupIP", func(t *testing.T) { + ips, err := r.LookupIP(ctx, "ip", fakeDomain) + if err != nil { + t.Fatal(err) + } + if got, want := sortedIPStrings(ips), []string{"0:200::e00", "1.2.3.4", "1::f", "5.6.7.8"}; !reflect.DeepEqual(got, want) { + t.Errorf("LookupIP wrong.\n got: %q\nwant: %q\n", got, want) + } + }) + + t.Run("LookupSRV", func(t *testing.T) { + _, got, err := r.LookupSRV(ctx, "some-service", "tcp", fakeDomain) + if err != nil { + t.Fatal(err) + } + want := []*SRV{ + { + Target: "foo.bar.", + Port: 80, + Priority: 1, + Weight: 2, + }, + { + Target: "bar.baz.", + Port: 81, + Priority: 2, + Weight: 3, + }, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("wrong result. got:") + for _, r := range got { + t.Logf(" - %+v", r) + } + } + }) +} + +func sortedIPStrings(ips []IP) []string { + ret := make([]string, len(ips)) + for i, ip := range ips { + ret[i] = ip.String() + } + sort.Strings(ret) + return ret +} + +func newResolverDialFunc(h *resolverDialHandler) func(ctx context.Context, network, address string) (Conn, error) { + return func(ctx context.Context, network, address string) (Conn, error) { + a := &resolverFuncConn{ + h: h, + network: network, + address: address, + ttl: 10, // 10 second default if unset + } + if h.StartDial != nil { + if err := h.StartDial(network, address); err != nil { + return nil, err + } + } + return a, nil + } +} + +type resolverDialHandler struct { + // StartDial, if non-nil, is called when Go first calls Resolver.Dial. + // Any error returned aborts the dial and is returned unwrapped. + StartDial func(network, address string) error + + Question func(dnsmessage.Header, dnsmessage.Question) + + // err may be ErrNotExist or ErrRefused; others map to SERVFAIL (RCode2). + // A nil error means success. + HandleA func(w AWriter, name string) error + HandleAAAA func(w AAAAWriter, name string) error + HandleSRV func(w SRVWriter, name string) error +} + +type ResponseWriter struct{ a *resolverFuncConn } + +func (w ResponseWriter) header() dnsmessage.ResourceHeader { + q := w.a.q + return dnsmessage.ResourceHeader{ + Name: q.Name, + Type: q.Type, + Class: q.Class, + TTL: w.a.ttl, + } +} + +// SetTTL sets the TTL for subsequent written resources. +// Once a resource has been written, SetTTL calls are no-ops. +// That is, it can only be called at most once, before anything +// else is written. +func (w ResponseWriter) SetTTL(seconds uint32) { + // ... intention is last one wins and mutates all previously + // written records too, but that's a little annoying. + // But it's also annoying if the requirement is it needs to be set + // last. + // And it's also annoying if it's possible for users to set + // different TTLs per Answer. + if w.a.wrote { + return + } + w.a.ttl = seconds + +} + +type AWriter struct{ ResponseWriter } + +func (w AWriter) AddIP(v4 [4]byte) { + w.a.wrote = true + err := w.a.builder.AResource(w.header(), dnsmessage.AResource{A: v4}) + if err != nil { + panic(err) + } +} + +type AAAAWriter struct{ ResponseWriter } + +func (w AAAAWriter) AddIP(v6 [16]byte) { + w.a.wrote = true + err := w.a.builder.AAAAResource(w.header(), dnsmessage.AAAAResource{AAAA: v6}) + if err != nil { + panic(err) + } +} + +type SRVWriter struct{ ResponseWriter } + +// AddSRV adds a SRV record. The target name must end in a period and +// be 63 bytes or fewer. 
+func (w SRVWriter) AddSRV(priority, weight, port uint16, target string) error { + targetName, err := dnsmessage.NewName(target) + if err != nil { + return err + } + w.a.wrote = true + err = w.a.builder.SRVResource(w.header(), dnsmessage.SRVResource{ + Priority: priority, + Weight: weight, + Port: port, + Target: targetName, + }) + if err != nil { + panic(err) // internal fault, not user + } + return nil +} + +var ( + ErrNotExist = errors.New("name does not exist") // maps to RCode3, NXDOMAIN + ErrRefused = errors.New("refused") // maps to RCode5, REFUSED +) + +type resolverFuncConn struct { + h *resolverDialHandler + network string + address string + builder *dnsmessage.Builder + q dnsmessage.Question + ttl uint32 + wrote bool + + rbuf bytes.Buffer +} + +func (*resolverFuncConn) Close() error { return nil } +func (*resolverFuncConn) LocalAddr() Addr { return someaddr{} } +func (*resolverFuncConn) RemoteAddr() Addr { return someaddr{} } +func (*resolverFuncConn) SetDeadline(t time.Time) error { return nil } +func (*resolverFuncConn) SetReadDeadline(t time.Time) error { return nil } +func (*resolverFuncConn) SetWriteDeadline(t time.Time) error { return nil } + +func (a *resolverFuncConn) Read(p []byte) (n int, err error) { + return a.rbuf.Read(p) +} + +func (a *resolverFuncConn) Write(packet []byte) (n int, err error) { + if len(packet) < 2 { + return 0, fmt.Errorf("short write of %d bytes; want 2+", len(packet)) + } + reqLen := int(packet[0])<<8 | int(packet[1]) + req := packet[2:] + if len(req) != reqLen { + return 0, fmt.Errorf("packet declared length %d doesn't match body length %d", reqLen, len(req)) + } + + var parser dnsmessage.Parser + h, err := parser.Start(req) + if err != nil { + // TODO: hook + return 0, err + } + q, err := parser.Question() + hadQ := (err == nil) + if err == nil && a.h.Question != nil { + a.h.Question(h, q) + } + if err != nil && err != dnsmessage.ErrSectionDone { + return 0, err + } + + resh := h + resh.Response = true + resh.Authoritative = true + if hadQ { + resh.RCode = dnsmessage.RCodeSuccess + } else { + resh.RCode = dnsmessage.RCodeNotImplemented + } + a.rbuf.Grow(514) + a.rbuf.WriteByte('X') // reserved header for beu16 length + a.rbuf.WriteByte('Y') // reserved header for beu16 length + builder := dnsmessage.NewBuilder(a.rbuf.Bytes(), resh) + a.builder = &builder + if hadQ { + a.q = q + a.builder.StartQuestions() + err := a.builder.Question(q) + if err != nil { + return 0, fmt.Errorf("Question: %w", err) + } + a.builder.StartAnswers() + switch q.Type { + case dnsmessage.TypeA: + if a.h.HandleA != nil { + resh.RCode = mapRCode(a.h.HandleA(AWriter{ResponseWriter{a}}, q.Name.String())) + } + case dnsmessage.TypeAAAA: + if a.h.HandleAAAA != nil { + resh.RCode = mapRCode(a.h.HandleAAAA(AAAAWriter{ResponseWriter{a}}, q.Name.String())) + } + case dnsmessage.TypeSRV: + if a.h.HandleSRV != nil { + resh.RCode = mapRCode(a.h.HandleSRV(SRVWriter{ResponseWriter{a}}, q.Name.String())) + } + } + } + tcpRes, err := builder.Finish() + if err != nil { + return 0, fmt.Errorf("Finish: %w", err) + } + + n = len(tcpRes) - 2 + tcpRes[0] = byte(n >> 8) + tcpRes[1] = byte(n) + a.rbuf.Write(tcpRes[2:]) + + return len(packet), nil +} + +type someaddr struct{} + +func (someaddr) Network() string { return "unused" } +func (someaddr) String() string { return "unused-someaddr" } + +func mapRCode(err error) dnsmessage.RCode { + switch err { + case nil: + return dnsmessage.RCodeSuccess + case ErrNotExist: + return dnsmessage.RCodeNameError + case ErrRefused: + return 
dnsmessage.RCodeRefused + default: + return dnsmessage.RCodeServerFailure + } +} diff --git a/platform/dbops/binaries/go/go/src/net/rlimit_js.go b/platform/dbops/binaries/go/go/src/net/rlimit_js.go new file mode 100644 index 0000000000000000000000000000000000000000..9ee5748b2156636af3a6c69985d439724c014168 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/rlimit_js.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build js + +package net + +// concurrentThreadsLimit returns the number of threads we permit to +// run concurrently doing DNS lookups. +func concurrentThreadsLimit() int { + return 500 +} diff --git a/platform/dbops/binaries/go/go/src/net/rlimit_unix.go b/platform/dbops/binaries/go/go/src/net/rlimit_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..0094756e3a0af326c8a16f78a8332b00ff37a315 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/rlimit_unix.go @@ -0,0 +1,33 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || wasip1 + +package net + +import "syscall" + +// concurrentThreadsLimit returns the number of threads we permit to +// run concurrently doing DNS lookups via cgo. A DNS lookup may use a +// file descriptor so we limit this to less than the number of +// permitted open files. On some systems, notably Darwin, if +// getaddrinfo is unable to open a file descriptor it simply returns +// EAI_NONAME rather than a useful error. Limiting the number of +// concurrent getaddrinfo calls to less than the permitted number of +// file descriptors makes that error less likely. We don't bother to +// apply the same limit to DNS lookups run directly from Go, because +// there we will return a meaningful "too many open files" error. +func concurrentThreadsLimit() int { + var rlim syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil { + return 500 + } + r := rlim.Cur + if r > 500 { + r = 500 + } else if r > 30 { + r -= 30 + } + return int(r) +} diff --git a/platform/dbops/binaries/go/go/src/net/sendfile_linux.go b/platform/dbops/binaries/go/go/src/net/sendfile_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..9a7d0058032f13dd87afcff2806075af4cc1bcf4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sendfile_linux.go @@ -0,0 +1,53 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "internal/poll" + "io" + "os" +) + +// sendFile copies the contents of r to c using the sendfile +// system call to minimize copies. +// +// if handled == true, sendFile returns the number (potentially zero) of bytes +// copied and any non-EOF error. +// +// if handled == false, sendFile performed no work. 
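
Returning to resolverFuncConn.Write earlier: the two reserved bytes and the final length fix-up implement the 2-byte big-endian length prefix that DNS over TCP requires (RFC 1035 section 4.2.2). A standalone sketch of just that framing, assuming hypothetical helpers frame and unframe:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // frame prefixes msg with its length as a big-endian uint16, the wire
    // format for DNS over TCP.
    func frame(msg []byte) []byte {
        out := make([]byte, 2+len(msg))
        binary.BigEndian.PutUint16(out, uint16(len(msg)))
        copy(out[2:], msg)
        return out
    }

    // unframe validates the prefix the same way the Write method above does.
    func unframe(pkt []byte) ([]byte, error) {
        if len(pkt) < 2 {
            return nil, fmt.Errorf("short packet: %d bytes", len(pkt))
        }
        if n := int(binary.BigEndian.Uint16(pkt)); n != len(pkt)-2 {
            return nil, fmt.Errorf("declared length %d != body length %d", n, len(pkt)-2)
        }
        return pkt[2:], nil
    }

    func main() {
        body, err := unframe(frame([]byte{0x12, 0x34})) // fake 2-byte DNS message
        fmt.Println(body, err)                          // [18 52] <nil>
    }
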
+func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) { + var remain int64 = 1<<63 - 1 // by default, copy until EOF + + lr, ok := r.(*io.LimitedReader) + if ok { + remain, r = lr.N, lr.R + if remain <= 0 { + return 0, nil, true + } + } + f, ok := r.(*os.File) + if !ok { + return 0, nil, false + } + + sc, err := f.SyscallConn() + if err != nil { + return 0, nil, false + } + + var werr error + err = sc.Read(func(fd uintptr) bool { + written, werr, handled = poll.SendFile(&c.pfd, int(fd), remain) + return true + }) + if err == nil { + err = werr + } + + if lr != nil { + lr.N = remain - written + } + return written, wrapSyscallError("sendfile", err), handled +} diff --git a/platform/dbops/binaries/go/go/src/net/sendfile_linux_test.go b/platform/dbops/binaries/go/go/src/net/sendfile_linux_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7a66d3645f2fa0c63973a95b90417c6b027c069e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sendfile_linux_test.go @@ -0,0 +1,86 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux + +package net + +import ( + "io" + "os" + "strconv" + "testing" +) + +func BenchmarkSendFile(b *testing.B) { + b.Run("file-to-tcp", func(b *testing.B) { benchmarkSendFile(b, "tcp") }) + b.Run("file-to-unix", func(b *testing.B) { benchmarkSendFile(b, "unix") }) +} + +func benchmarkSendFile(b *testing.B, proto string) { + for i := 0; i <= 10; i++ { + size := 1 << (i + 10) + bench := sendFileBench{ + proto: proto, + chunkSize: size, + } + b.Run(strconv.Itoa(size), bench.benchSendFile) + } +} + +type sendFileBench struct { + proto string + chunkSize int +} + +func (bench sendFileBench) benchSendFile(b *testing.B) { + fileSize := b.N * bench.chunkSize + f := createTempFile(b, fileSize) + + client, server := spliceTestSocketPair(b, bench.proto) + defer server.Close() + + cleanUp, err := startSpliceClient(client, "r", bench.chunkSize, fileSize) + if err != nil { + client.Close() + b.Fatal(err) + } + defer cleanUp() + + b.ReportAllocs() + b.SetBytes(int64(bench.chunkSize)) + b.ResetTimer() + + // Data go from file to socket via sendfile(2). + sent, err := io.Copy(server, f) + if err != nil { + b.Fatalf("failed to copy data with sendfile, error: %v", err) + } + if sent != int64(fileSize) { + b.Fatalf("bytes sent mismatch, got: %d, want: %d", sent, fileSize) + } +} + +func createTempFile(b *testing.B, size int) *os.File { + f, err := os.CreateTemp(b.TempDir(), "linux-sendfile-bench") + if err != nil { + b.Fatalf("failed to create temporary file: %v", err) + } + b.Cleanup(func() { + f.Close() + }) + + data := make([]byte, size) + if _, err := f.Write(data); err != nil { + b.Fatalf("failed to create and feed the file: %v", err) + } + if err := f.Sync(); err != nil { + b.Fatalf("failed to save the file: %v", err) + } + if _, err := f.Seek(0, io.SeekStart); err != nil { + b.Fatalf("failed to rewind the file: %v", err) + } + + return f +} diff --git a/platform/dbops/binaries/go/go/src/net/sendfile_stub.go b/platform/dbops/binaries/go/go/src/net/sendfile_stub.go new file mode 100644 index 0000000000000000000000000000000000000000..a4fdd99ffec22c9aaa1063184b8578e955c614c3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sendfile_stub.go @@ -0,0 +1,13 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || js || netbsd || openbsd || ios || wasip1 + +package net + +import "io" + +func sendFile(c *netFD, r io.Reader) (n int64, err error, handled bool) { + return 0, nil, false +} diff --git a/platform/dbops/binaries/go/go/src/net/sendfile_test.go b/platform/dbops/binaries/go/go/src/net/sendfile_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4cba1ed2b1620a9c041d88af2949138a59da8803 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sendfile_test.go @@ -0,0 +1,448 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "io" + "os" + "runtime" + "sync" + "testing" + "time" +) + +const ( + newton = "../testdata/Isaac.Newton-Opticks.txt" + newtonLen = 567198 + newtonSHA256 = "d4a9ac22462b35e7821a4f2706c211093da678620a8f9997989ee7cf8d507bbd" +) + +func TestSendfile(t *testing.T) { + ln := newLocalListener(t, "tcp") + defer ln.Close() + + errc := make(chan error, 1) + go func(ln Listener) { + // Wait for a connection. + conn, err := ln.Accept() + if err != nil { + errc <- err + close(errc) + return + } + + go func() { + defer close(errc) + defer conn.Close() + + f, err := os.Open(newton) + if err != nil { + errc <- err + return + } + defer f.Close() + + // Return file data using io.Copy, which should use + // sendFile if available. + sbytes, err := io.Copy(conn, f) + if err != nil { + errc <- err + return + } + + if sbytes != newtonLen { + errc <- fmt.Errorf("sent %d bytes; expected %d", sbytes, newtonLen) + return + } + }() + }(ln) + + // Connect to listener to retrieve file and verify digest matches + // expected. + c, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + h := sha256.New() + rbytes, err := io.Copy(h, c) + if err != nil { + t.Error(err) + } + + if rbytes != newtonLen { + t.Errorf("received %d bytes; expected %d", rbytes, newtonLen) + } + + if res := hex.EncodeToString(h.Sum(nil)); res != newtonSHA256 { + t.Error("retrieved data hash did not match") + } + + for err := range errc { + t.Error(err) + } +} + +func TestSendfileParts(t *testing.T) { + ln := newLocalListener(t, "tcp") + defer ln.Close() + + errc := make(chan error, 1) + go func(ln Listener) { + // Wait for a connection. + conn, err := ln.Accept() + if err != nil { + errc <- err + close(errc) + return + } + + go func() { + defer close(errc) + defer conn.Close() + + f, err := os.Open(newton) + if err != nil { + errc <- err + return + } + defer f.Close() + + for i := 0; i < 3; i++ { + // Return file data using io.CopyN, which should use + // sendFile if available. + _, err = io.CopyN(conn, f, 3) + if err != nil { + errc <- err + return + } + } + }() + }(ln) + + c, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + buf := new(bytes.Buffer) + buf.ReadFrom(c) + + if want, have := "Produced ", buf.String(); have != want { + t.Errorf("unexpected server reply %q, want %q", have, want) + } + + for err := range errc { + t.Error(err) + } +} + +func TestSendfileSeeked(t *testing.T) { + ln := newLocalListener(t, "tcp") + defer ln.Close() + + const seekTo = 65 << 10 + const sendSize = 10 << 10 + + errc := make(chan error, 1) + go func(ln Listener) { + // Wait for a connection. 
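
The tests above lean on an implementation detail worth spelling out: io.Copy prefers the destination's io.ReaderFrom, and *TCPConn.ReadFrom calls sendFile when the source is an *os.File (optionally wrapped in an *io.LimitedReader via io.CopyN). A minimal sketch of that fast path; serveFile and the commented path are hypothetical:

    package main

    import (
        "io"
        "net"
        "os"
    )

    // serveFile copies path's contents to conn. io.Copy detects that a
    // *net.TCPConn implements io.ReaderFrom; ReadFrom then reaches sendFile
    // when the source is an *os.File, so the kernel moves the bytes without
    // staging them in user-space buffers.
    func serveFile(conn net.Conn, path string) (int64, error) {
        f, err := os.Open(path)
        if err != nil {
            return 0, err
        }
        defer f.Close()
        return io.Copy(conn, f)
    }

    func main() {
        // Typical call site inside an accept loop (setup elided):
        //   n, err := serveFile(conn, "/srv/data.bin")
        _ = serveFile
    }
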
+ conn, err := ln.Accept() + if err != nil { + errc <- err + close(errc) + return + } + + go func() { + defer close(errc) + defer conn.Close() + + f, err := os.Open(newton) + if err != nil { + errc <- err + return + } + defer f.Close() + if _, err := f.Seek(seekTo, io.SeekStart); err != nil { + errc <- err + return + } + + _, err = io.CopyN(conn, f, sendSize) + if err != nil { + errc <- err + return + } + }() + }(ln) + + c, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + buf := new(bytes.Buffer) + buf.ReadFrom(c) + + if buf.Len() != sendSize { + t.Errorf("Got %d bytes; want %d", buf.Len(), sendSize) + } + + for err := range errc { + t.Error(err) + } +} + +// Test that sendfile doesn't put a pipe into blocking mode. +func TestSendfilePipe(t *testing.T) { + switch runtime.GOOS { + case "plan9", "windows", "js", "wasip1": + // These systems don't support deadlines on pipes. + t.Skipf("skipping on %s", runtime.GOOS) + } + + t.Parallel() + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer w.Close() + defer r.Close() + + copied := make(chan bool) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + // Accept a connection and copy 1 byte from the read end of + // the pipe to the connection. This will call into sendfile. + defer wg.Done() + conn, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + defer conn.Close() + _, err = io.CopyN(conn, r, 1) + if err != nil { + t.Error(err) + return + } + // Signal the main goroutine that we've copied the byte. + close(copied) + }() + + wg.Add(1) + go func() { + // Write 1 byte to the write end of the pipe. + defer wg.Done() + _, err := w.Write([]byte{'a'}) + if err != nil { + t.Error(err) + } + }() + + wg.Add(1) + go func() { + // Connect to the server started two goroutines up and + // discard any data that it writes. + defer wg.Done() + conn, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Error(err) + return + } + defer conn.Close() + io.Copy(io.Discard, conn) + }() + + // Wait for the byte to be copied, meaning that sendfile has + // been called on the pipe. + <-copied + + // Set a very short deadline on the read end of the pipe. + if err := r.SetDeadline(time.Now().Add(time.Microsecond)); err != nil { + t.Fatal(err) + } + + wg.Add(1) + go func() { + // Wait for much longer than the deadline and write a byte + // to the pipe. + defer wg.Done() + time.Sleep(50 * time.Millisecond) + w.Write([]byte{'b'}) + }() + + // If this read does not time out, the pipe was incorrectly + // put into blocking mode. + _, err = r.Read(make([]byte, 1)) + if err == nil { + t.Error("Read did not time out") + } else if !os.IsTimeout(err) { + t.Errorf("got error %v, expected a time out", err) + } + + wg.Wait() +} + +// Issue 43822: tests that returns EOF when conn write timeout. +func TestSendfileOnWriteTimeoutExceeded(t *testing.T) { + ln := newLocalListener(t, "tcp") + defer ln.Close() + + errc := make(chan error, 1) + go func(ln Listener) (retErr error) { + defer func() { + errc <- retErr + close(errc) + }() + + conn, err := ln.Accept() + if err != nil { + return err + } + defer conn.Close() + + // Set the write deadline in the past(1h ago). It makes + // sure that it is always write timeout. 
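
The deadline trick used in this test, a write deadline set an hour in the past, guarantees that every subsequent write fails with a timeout rather than blocking. A self-contained sketch of the same behavior over net.Pipe, which also honors deadlines:

    package main

    import (
        "errors"
        "fmt"
        "net"
        "os"
        "time"
    )

    func main() {
        c1, c2 := net.Pipe()
        defer c1.Close()
        defer c2.Close()

        // A deadline an hour in the past makes the very next Write time
        // out instead of blocking on the unread pipe.
        c1.SetWriteDeadline(time.Now().Add(-1 * time.Hour))
        _, err := c1.Write([]byte("x"))
        fmt.Println(errors.Is(err, os.ErrDeadlineExceeded)) // true
    }
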
+ if err := conn.SetWriteDeadline(time.Now().Add(-1 * time.Hour)); err != nil { + return err + } + + f, err := os.Open(newton) + if err != nil { + return err + } + defer f.Close() + + _, err = io.Copy(conn, f) + if errors.Is(err, os.ErrDeadlineExceeded) { + return nil + } + + if err == nil { + err = fmt.Errorf("expected ErrDeadlineExceeded, but got nil") + } + return err + }(ln) + + conn, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + n, err := io.Copy(io.Discard, conn) + if err != nil { + t.Fatalf("expected nil error, but got %v", err) + } + if n != 0 { + t.Fatalf("expected receive zero, but got %d byte(s)", n) + } + + if err := <-errc; err != nil { + t.Fatal(err) + } +} + +func BenchmarkSendfileZeroBytes(b *testing.B) { + var ( + wg sync.WaitGroup + ctx, cancel = context.WithCancel(context.Background()) + ) + + defer wg.Wait() + + ln := newLocalListener(b, "tcp") + defer ln.Close() + + tempFile, err := os.CreateTemp(b.TempDir(), "test.txt") + if err != nil { + b.Fatalf("failed to create temp file: %v", err) + } + defer tempFile.Close() + + fileName := tempFile.Name() + + dataSize := b.N + wg.Add(1) + go func(f *os.File) { + defer wg.Done() + + for i := 0; i < dataSize; i++ { + if _, err := f.Write([]byte{1}); err != nil { + b.Errorf("failed to write: %v", err) + return + } + if i%1000 == 0 { + f.Sync() + } + } + }(tempFile) + + b.ResetTimer() + b.ReportAllocs() + + wg.Add(1) + go func(ln Listener, fileName string) { + defer wg.Done() + + conn, err := ln.Accept() + if err != nil { + b.Errorf("failed to accept: %v", err) + return + } + defer conn.Close() + + f, err := os.OpenFile(fileName, os.O_RDONLY, 0660) + if err != nil { + b.Errorf("failed to open file: %v", err) + return + } + defer f.Close() + + for { + if ctx.Err() != nil { + return + } + + if _, err := io.Copy(conn, f); err != nil { + b.Errorf("failed to copy: %v", err) + return + } + } + }(ln, fileName) + + conn, err := Dial("tcp", ln.Addr().String()) + if err != nil { + b.Fatalf("failed to dial: %v", err) + } + defer conn.Close() + + n, err := io.CopyN(io.Discard, conn, int64(dataSize)) + if err != nil { + b.Fatalf("failed to copy: %v", err) + } + if n != int64(dataSize) { + b.Fatalf("expected %d copied bytes, but got %d", dataSize, n) + } + + cancel() +} diff --git a/platform/dbops/binaries/go/go/src/net/sendfile_unix_alt.go b/platform/dbops/binaries/go/go/src/net/sendfile_unix_alt.go new file mode 100644 index 0000000000000000000000000000000000000000..5cb65ee7670c49f6c4ae1bab2ca5cda209042156 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sendfile_unix_alt.go @@ -0,0 +1,85 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (darwin && !ios) || dragonfly || freebsd || solaris + +package net + +import ( + "internal/poll" + "io" + "os" +) + +// sendFile copies the contents of r to c using the sendfile +// system call to minimize copies. +// +// if handled == true, sendFile returns the number (potentially zero) of bytes +// copied and any non-EOF error. +// +// if handled == false, sendFile performed no work. +func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) { + // Darwin, FreeBSD, DragonFly and Solaris use 0 as the "until EOF" value. + // If you pass in more bytes than the file contains, it will + // loop back to the beginning ad nauseam until it's sent + // exactly the number of bytes told to. 
As such, we need to + // know exactly how many bytes to send. + var remain int64 = 0 + + lr, ok := r.(*io.LimitedReader) + if ok { + remain, r = lr.N, lr.R + if remain <= 0 { + return 0, nil, true + } + } + f, ok := r.(*os.File) + if !ok { + return 0, nil, false + } + + if remain == 0 { + fi, err := f.Stat() + if err != nil { + return 0, err, false + } + + remain = fi.Size() + } + + // The other quirk with Darwin/FreeBSD/DragonFly/Solaris's sendfile + // implementation is that it doesn't use the current position + // of the file -- if you pass it offset 0, it starts from + // offset 0. There's no way to tell it "start from current + // position", so we have to manage that explicitly. + pos, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return 0, err, false + } + + sc, err := f.SyscallConn() + if err != nil { + return 0, nil, false + } + + var werr error + err = sc.Read(func(fd uintptr) bool { + written, werr, handled = poll.SendFile(&c.pfd, int(fd), pos, remain) + return true + }) + if err == nil { + err = werr + } + + if lr != nil { + lr.N = remain - written + } + + _, err1 := f.Seek(written, io.SeekCurrent) + if err1 != nil && err == nil { + return written, err1, handled + } + + return written, wrapSyscallError("sendfile", err), handled +} diff --git a/platform/dbops/binaries/go/go/src/net/sendfile_windows.go b/platform/dbops/binaries/go/go/src/net/sendfile_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..59b1b0d5c1dd85b0cd540f463184ed7295d3ddc8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sendfile_windows.go @@ -0,0 +1,47 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "internal/poll" + "io" + "os" + "syscall" +) + +// sendFile copies the contents of r to c using the TransmitFile +// system call to minimize copies. +// +// if handled == true, sendFile returns the number of bytes copied and any +// non-EOF error. +// +// if handled == false, sendFile performed no work. +func sendFile(fd *netFD, r io.Reader) (written int64, err error, handled bool) { + var n int64 = 0 // by default, copy until EOF. + + lr, ok := r.(*io.LimitedReader) + if ok { + n, r = lr.N, lr.R + if n <= 0 { + return 0, nil, true + } + } + + f, ok := r.(*os.File) + if !ok { + return 0, nil, false + } + + written, err = poll.SendFile(&fd.pfd, syscall.Handle(f.Fd()), n) + if err != nil { + err = wrapSyscallError("transmitfile", err) + } + + // If any byte was copied, regardless of any error + // encountered mid-way, handled must be set to true. + handled = written > 0 + + return +} diff --git a/platform/dbops/binaries/go/go/src/net/server_test.go b/platform/dbops/binaries/go/go/src/net/server_test.go new file mode 100644 index 0000000000000000000000000000000000000000..eb6b111f1f5ffbe0a6b057e373e2def571427ddf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/server_test.go @@ -0,0 +1,412 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
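
Before the server tests below, one note on sendfile_unix_alt.go above: because those kernels ignore the file's current offset, the Go code must read the offset itself, size the transfer from Stat, and advance the offset afterwards. A standalone sketch of that bookkeeping, with an ordinary temp file standing in for the socket transfer (no sendfile call is made):

    package main

    import (
        "fmt"
        "io"
        "log"
        "os"
    )

    func main() {
        f, err := os.CreateTemp("", "offset-demo")
        if err != nil {
            log.Fatal(err)
        }
        defer os.Remove(f.Name())
        defer f.Close()

        f.WriteString("hello world")
        f.Seek(6, io.SeekStart) // pretend earlier reads consumed "hello "

        pos, _ := f.Seek(0, io.SeekCurrent) // 1: remember the current offset
        fi, _ := f.Stat()
        remain := fi.Size() - pos // 2: bytes left to send from pos
        fmt.Println(pos, remain)  // 6 5

        // 3: after `written` bytes go out starting at pos, advance the file
        // offset so later reads resume after the transferred data.
        written := remain
        f.Seek(pos+written, io.SeekStart)
    }
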
+ +package net + +import ( + "fmt" + "os" + "testing" +) + +var tcpServerTests = []struct { + snet, saddr string // server endpoint + tnet, taddr string // target endpoint for client +}{ + {snet: "tcp", saddr: ":0", tnet: "tcp", taddr: "127.0.0.1"}, + {snet: "tcp", saddr: "0.0.0.0:0", tnet: "tcp", taddr: "127.0.0.1"}, + {snet: "tcp", saddr: "[::ffff:0.0.0.0]:0", tnet: "tcp", taddr: "127.0.0.1"}, + {snet: "tcp", saddr: "[::]:0", tnet: "tcp", taddr: "::1"}, + + {snet: "tcp", saddr: ":0", tnet: "tcp", taddr: "::1"}, + {snet: "tcp", saddr: "0.0.0.0:0", tnet: "tcp", taddr: "::1"}, + {snet: "tcp", saddr: "[::ffff:0.0.0.0]:0", tnet: "tcp", taddr: "::1"}, + {snet: "tcp", saddr: "[::]:0", tnet: "tcp", taddr: "127.0.0.1"}, + + {snet: "tcp", saddr: ":0", tnet: "tcp4", taddr: "127.0.0.1"}, + {snet: "tcp", saddr: "0.0.0.0:0", tnet: "tcp4", taddr: "127.0.0.1"}, + {snet: "tcp", saddr: "[::ffff:0.0.0.0]:0", tnet: "tcp4", taddr: "127.0.0.1"}, + {snet: "tcp", saddr: "[::]:0", tnet: "tcp6", taddr: "::1"}, + + {snet: "tcp", saddr: ":0", tnet: "tcp6", taddr: "::1"}, + {snet: "tcp", saddr: "0.0.0.0:0", tnet: "tcp6", taddr: "::1"}, + {snet: "tcp", saddr: "[::ffff:0.0.0.0]:0", tnet: "tcp6", taddr: "::1"}, + {snet: "tcp", saddr: "[::]:0", tnet: "tcp4", taddr: "127.0.0.1"}, + + {snet: "tcp", saddr: "127.0.0.1:0", tnet: "tcp", taddr: "127.0.0.1"}, + {snet: "tcp", saddr: "[::ffff:127.0.0.1]:0", tnet: "tcp", taddr: "127.0.0.1"}, + {snet: "tcp", saddr: "[::1]:0", tnet: "tcp", taddr: "::1"}, + + {snet: "tcp4", saddr: ":0", tnet: "tcp4", taddr: "127.0.0.1"}, + {snet: "tcp4", saddr: "0.0.0.0:0", tnet: "tcp4", taddr: "127.0.0.1"}, + {snet: "tcp4", saddr: "[::ffff:0.0.0.0]:0", tnet: "tcp4", taddr: "127.0.0.1"}, + + {snet: "tcp4", saddr: "127.0.0.1:0", tnet: "tcp4", taddr: "127.0.0.1"}, + + {snet: "tcp6", saddr: ":0", tnet: "tcp6", taddr: "::1"}, + {snet: "tcp6", saddr: "[::]:0", tnet: "tcp6", taddr: "::1"}, + + {snet: "tcp6", saddr: "[::1]:0", tnet: "tcp6", taddr: "::1"}, +} + +// TestTCPServer tests concurrent accept-read-write servers. 
+func TestTCPServer(t *testing.T) { + const N = 3 + + for i, tt := range tcpServerTests { + t.Run(tt.snet+" "+tt.saddr+"<-"+tt.taddr, func(t *testing.T) { + if !testableListenArgs(tt.snet, tt.saddr, tt.taddr) { + t.Skip("not testable") + } + + ln, err := Listen(tt.snet, tt.saddr) + if err != nil { + if perr := parseDialError(err); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + + var lss []*localServer + var tpchs []chan error + defer func() { + for _, ls := range lss { + ls.teardown() + } + }() + for i := 0; i < N; i++ { + ls := (&streamListener{Listener: ln}).newLocalServer() + lss = append(lss, ls) + tpchs = append(tpchs, make(chan error, 1)) + } + for i := 0; i < N; i++ { + ch := tpchs[i] + handler := func(ls *localServer, ln Listener) { ls.transponder(ln, ch) } + if err := lss[i].buildup(handler); err != nil { + t.Fatal(err) + } + } + + var trchs []chan error + for i := 0; i < N; i++ { + _, port, err := SplitHostPort(lss[i].Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + d := Dialer{Timeout: someTimeout} + c, err := d.Dial(tt.tnet, JoinHostPort(tt.taddr, port)) + if err != nil { + if perr := parseDialError(err); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + defer c.Close() + trchs = append(trchs, make(chan error, 1)) + go transceiver(c, []byte("TCP SERVER TEST"), trchs[i]) + } + + for _, ch := range trchs { + for err := range ch { + t.Errorf("#%d: %v", i, err) + } + } + for _, ch := range tpchs { + for err := range ch { + t.Errorf("#%d: %v", i, err) + } + } + }) + } +} + +// TestUnixAndUnixpacketServer tests concurrent accept-read-write +// servers +func TestUnixAndUnixpacketServer(t *testing.T) { + var unixAndUnixpacketServerTests = []struct { + network, address string + }{ + {"unix", testUnixAddr(t)}, + {"unix", "@nettest/go/unix"}, + + {"unixpacket", testUnixAddr(t)}, + {"unixpacket", "@nettest/go/unixpacket"}, + } + + const N = 3 + + for i, tt := range unixAndUnixpacketServerTests { + if !testableListenArgs(tt.network, tt.address, "") { + t.Logf("skipping %s test", tt.network+" "+tt.address) + continue + } + + ln, err := Listen(tt.network, tt.address) + if err != nil { + if perr := parseDialError(err); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + + var lss []*localServer + var tpchs []chan error + defer func() { + for _, ls := range lss { + ls.teardown() + } + }() + for i := 0; i < N; i++ { + ls := (&streamListener{Listener: ln}).newLocalServer() + lss = append(lss, ls) + tpchs = append(tpchs, make(chan error, 1)) + } + for i := 0; i < N; i++ { + ch := tpchs[i] + handler := func(ls *localServer, ln Listener) { ls.transponder(ln, ch) } + if err := lss[i].buildup(handler); err != nil { + t.Fatal(err) + } + } + + var trchs []chan error + for i := 0; i < N; i++ { + d := Dialer{Timeout: someTimeout} + c, err := d.Dial(lss[i].Listener.Addr().Network(), lss[i].Listener.Addr().String()) + if err != nil { + if perr := parseDialError(err); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + + if addr := c.LocalAddr(); addr != nil { + t.Logf("connected %s->%s", addr, lss[i].Listener.Addr()) + } + + defer c.Close() + trchs = append(trchs, make(chan error, 1)) + go transceiver(c, []byte("UNIX AND UNIXPACKET SERVER TEST"), trchs[i]) + } + + for _, ch := range trchs { + for err := range ch { + t.Errorf("#%d: %v", i, err) + } + } + for _, ch := range tpchs { + for err := range ch { + t.Errorf("#%d: %v", i, err) + } + } + } +} + +var udpServerTests = []struct { + snet, saddr string // server endpoint + tnet, taddr string // target endpoint for client + 
dial bool // test with Dial +}{ + {snet: "udp", saddr: ":0", tnet: "udp", taddr: "127.0.0.1"}, + {snet: "udp", saddr: "0.0.0.0:0", tnet: "udp", taddr: "127.0.0.1"}, + {snet: "udp", saddr: "[::ffff:0.0.0.0]:0", tnet: "udp", taddr: "127.0.0.1"}, + {snet: "udp", saddr: "[::]:0", tnet: "udp", taddr: "::1"}, + + {snet: "udp", saddr: ":0", tnet: "udp", taddr: "::1"}, + {snet: "udp", saddr: "0.0.0.0:0", tnet: "udp", taddr: "::1"}, + {snet: "udp", saddr: "[::ffff:0.0.0.0]:0", tnet: "udp", taddr: "::1"}, + {snet: "udp", saddr: "[::]:0", tnet: "udp", taddr: "127.0.0.1"}, + + {snet: "udp", saddr: ":0", tnet: "udp4", taddr: "127.0.0.1"}, + {snet: "udp", saddr: "0.0.0.0:0", tnet: "udp4", taddr: "127.0.0.1"}, + {snet: "udp", saddr: "[::ffff:0.0.0.0]:0", tnet: "udp4", taddr: "127.0.0.1"}, + {snet: "udp", saddr: "[::]:0", tnet: "udp6", taddr: "::1"}, + + {snet: "udp", saddr: ":0", tnet: "udp6", taddr: "::1"}, + {snet: "udp", saddr: "0.0.0.0:0", tnet: "udp6", taddr: "::1"}, + {snet: "udp", saddr: "[::ffff:0.0.0.0]:0", tnet: "udp6", taddr: "::1"}, + {snet: "udp", saddr: "[::]:0", tnet: "udp4", taddr: "127.0.0.1"}, + + {snet: "udp", saddr: "127.0.0.1:0", tnet: "udp", taddr: "127.0.0.1"}, + {snet: "udp", saddr: "[::ffff:127.0.0.1]:0", tnet: "udp", taddr: "127.0.0.1"}, + {snet: "udp", saddr: "[::1]:0", tnet: "udp", taddr: "::1"}, + + {snet: "udp4", saddr: ":0", tnet: "udp4", taddr: "127.0.0.1"}, + {snet: "udp4", saddr: "0.0.0.0:0", tnet: "udp4", taddr: "127.0.0.1"}, + {snet: "udp4", saddr: "[::ffff:0.0.0.0]:0", tnet: "udp4", taddr: "127.0.0.1"}, + + {snet: "udp4", saddr: "127.0.0.1:0", tnet: "udp4", taddr: "127.0.0.1"}, + + {snet: "udp6", saddr: ":0", tnet: "udp6", taddr: "::1"}, + {snet: "udp6", saddr: "[::]:0", tnet: "udp6", taddr: "::1"}, + + {snet: "udp6", saddr: "[::1]:0", tnet: "udp6", taddr: "::1"}, + + {snet: "udp", saddr: "127.0.0.1:0", tnet: "udp", taddr: "127.0.0.1", dial: true}, + + {snet: "udp", saddr: "[::1]:0", tnet: "udp", taddr: "::1", dial: true}, +} + +func TestUDPServer(t *testing.T) { + for i, tt := range udpServerTests { + i, tt := i, tt + t.Run(fmt.Sprint(i), func(t *testing.T) { + if !testableListenArgs(tt.snet, tt.saddr, tt.taddr) { + t.Skipf("skipping %s %s<-%s test", tt.snet, tt.saddr, tt.taddr) + } + t.Logf("%s %s<-%s", tt.snet, tt.saddr, tt.taddr) + + c1, err := ListenPacket(tt.snet, tt.saddr) + if err != nil { + if perr := parseDialError(err); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + + ls := (&packetListener{PacketConn: c1}).newLocalServer() + defer ls.teardown() + tpch := make(chan error, 1) + handler := func(ls *localPacketServer, c PacketConn) { packetTransponder(c, tpch) } + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + trch := make(chan error, 1) + _, port, err := SplitHostPort(ls.PacketConn.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + if tt.dial { + d := Dialer{Timeout: someTimeout} + c2, err := d.Dial(tt.tnet, JoinHostPort(tt.taddr, port)) + if err != nil { + if perr := parseDialError(err); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + defer c2.Close() + go transceiver(c2, []byte("UDP SERVER TEST"), trch) + } else { + c2, err := ListenPacket(tt.tnet, JoinHostPort(tt.taddr, "0")) + if err != nil { + if perr := parseDialError(err); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + defer c2.Close() + dst, err := ResolveUDPAddr(tt.tnet, JoinHostPort(tt.taddr, port)) + if err != nil { + t.Fatal(err) + } + go packetTransceiver(c2, []byte("UDP SERVER TEST"), dst, trch) + } + + for trch != nil || tpch != nil { + 
select { + case err, ok := <-trch: + if !ok { + trch = nil + } + if err != nil { + t.Errorf("client: %v", err) + } + case err, ok := <-tpch: + if !ok { + tpch = nil + } + if err != nil { + t.Errorf("server: %v", err) + } + } + } + }) + } +} + +func TestUnixgramServer(t *testing.T) { + var unixgramServerTests = []struct { + saddr string // server endpoint + caddr string // client endpoint + dial bool // test with Dial + }{ + {saddr: testUnixAddr(t), caddr: testUnixAddr(t)}, + {saddr: testUnixAddr(t), caddr: testUnixAddr(t), dial: true}, + + {saddr: "@nettest/go/unixgram/server", caddr: "@nettest/go/unixgram/client"}, + } + + for i, tt := range unixgramServerTests { + i, tt := i, tt + t.Run(fmt.Sprint(i), func(t *testing.T) { + if !testableListenArgs("unixgram", tt.saddr, "") { + t.Skipf("skipping unixgram %s<-%s test", tt.saddr, tt.caddr) + } + t.Logf("unixgram %s<-%s", tt.saddr, tt.caddr) + + c1, err := ListenPacket("unixgram", tt.saddr) + if err != nil { + if perr := parseDialError(err); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + + ls := (&packetListener{PacketConn: c1}).newLocalServer() + defer ls.teardown() + tpch := make(chan error, 1) + handler := func(ls *localPacketServer, c PacketConn) { packetTransponder(c, tpch) } + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + trch := make(chan error, 1) + if tt.dial { + d := Dialer{Timeout: someTimeout, LocalAddr: &UnixAddr{Net: "unixgram", Name: tt.caddr}} + c2, err := d.Dial("unixgram", ls.PacketConn.LocalAddr().String()) + if err != nil { + if perr := parseDialError(err); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + defer os.Remove(c2.LocalAddr().String()) + defer c2.Close() + go transceiver(c2, []byte(c2.LocalAddr().String()), trch) + } else { + c2, err := ListenPacket("unixgram", tt.caddr) + if err != nil { + if perr := parseDialError(err); perr != nil { + t.Error(perr) + } + t.Fatal(err) + } + defer os.Remove(c2.LocalAddr().String()) + defer c2.Close() + go packetTransceiver(c2, []byte("UNIXGRAM SERVER TEST"), ls.PacketConn.LocalAddr(), trch) + } + + for trch != nil || tpch != nil { + select { + case err, ok := <-trch: + if !ok { + trch = nil + } + if err != nil { + t.Errorf("client: %v", err) + } + case err, ok := <-tpch: + if !ok { + tpch = nil + } + if err != nil { + t.Errorf("server: %v", err) + } + } + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/sock_bsd.go b/platform/dbops/binaries/go/go/src/net/sock_bsd.go new file mode 100644 index 0000000000000000000000000000000000000000..27daf722b53466318304097a043ae9e93d1431f4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sock_bsd.go @@ -0,0 +1,39 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || netbsd || openbsd + +package net + +import ( + "runtime" + "syscall" +) + +func maxListenerBacklog() int { + var ( + n uint32 + err error + ) + switch runtime.GOOS { + case "darwin", "ios": + n, err = syscall.SysctlUint32("kern.ipc.somaxconn") + case "freebsd": + n, err = syscall.SysctlUint32("kern.ipc.soacceptqueue") + case "netbsd": + // NOTE: NetBSD has no somaxconn-like kernel state so far + case "openbsd": + n, err = syscall.SysctlUint32("kern.somaxconn") + } + if n == 0 || err != nil { + return syscall.SOMAXCONN + } + // FreeBSD stores the backlog in a uint16, as does Linux. + // Assume the other BSDs do too. Truncate number to avoid wrapping. 
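
A side note on the error-draining loops in the UDP and unixgram tests above: receiving from a nil channel blocks forever, so setting a drained channel to nil disables its case, and the select loop exits once every channel has been closed and drained. A minimal sketch of the idiom:

    package main

    import "fmt"

    func main() {
        client := make(chan error, 1)
        server := make(chan error, 1)
        client <- fmt.Errorf("transceiver: checksum mismatch")
        close(client)
        close(server)

        for client != nil || server != nil {
            select {
            case err, ok := <-client:
                if !ok {
                    client = nil // drained: a nil channel never fires
                    continue
                }
                fmt.Println("client:", err)
            case err, ok := <-server:
                if !ok {
                    server = nil
                    continue
                }
                fmt.Println("server:", err)
            }
        }
    }
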
+ // See issue 5030. + if n > 1<<16-1 { + n = 1<<16 - 1 + } + return int(n) +} diff --git a/platform/dbops/binaries/go/go/src/net/sock_cloexec.go b/platform/dbops/binaries/go/go/src/net/sock_cloexec.go new file mode 100644 index 0000000000000000000000000000000000000000..9eeb89746b9ef1d3ef6ecd67e3bc95cd65c197c8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sock_cloexec.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements sysSocket for platforms that provide a fast path for +// setting SetNonblock and CloseOnExec. + +//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris + +package net + +import ( + "internal/poll" + "os" + "syscall" +) + +// Wrapper around the socket system call that marks the returned file +// descriptor as nonblocking and close-on-exec. +func sysSocket(family, sotype, proto int) (int, error) { + s, err := socketFunc(family, sotype|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, proto) + // TODO: We can remove the fallback on Linux and *BSD, + // as currently supported versions all support accept4 + // with SOCK_CLOEXEC, but Solaris does not. See issue #59359. + switch err { + case nil: + return s, nil + default: + return -1, os.NewSyscallError("socket", err) + case syscall.EPROTONOSUPPORT, syscall.EINVAL: + } + + // See ../syscall/exec_unix.go for description of ForkLock. + syscall.ForkLock.RLock() + s, err = socketFunc(family, sotype, proto) + if err == nil { + syscall.CloseOnExec(s) + } + syscall.ForkLock.RUnlock() + if err != nil { + return -1, os.NewSyscallError("socket", err) + } + if err = syscall.SetNonblock(s, true); err != nil { + poll.CloseFunc(s) + return -1, os.NewSyscallError("setnonblock", err) + } + return s, nil +} diff --git a/platform/dbops/binaries/go/go/src/net/sock_linux.go b/platform/dbops/binaries/go/go/src/net/sock_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..cffe9a236fc7f13da6d0085923dc58102582aa7b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sock_linux.go @@ -0,0 +1,54 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "internal/syscall/unix" + "syscall" +) + +// Linux stores the backlog as: +// +// - uint16 in kernel version < 4.1, +// - uint32 in kernel version >= 4.1 +// +// Truncate number to avoid wrapping. +// +// See issue 5030 and 41470. 
+func maxAckBacklog(n int) int { + major, minor := unix.KernelVersion() + size := 16 + if major > 4 || (major == 4 && minor >= 1) { + size = 32 + } + + var max uint = 1<<size - 1 + if uint(n) > max { + n = int(max) + } + return n + } + + func maxListenerBacklog() int { + fd, err := open("/proc/sys/net/core/somaxconn") + if err != nil { + return syscall.SOMAXCONN + } + defer fd.close() + l, ok := fd.readLine() + if !ok { + return syscall.SOMAXCONN + } + f := getFields(l) + n, _, ok := dtoi(f[0]) + if n == 0 || !ok { + return syscall.SOMAXCONN + } + + if n > 1<<16-1 { + return maxAckBacklog(n) + } + return n + } diff --git a/platform/dbops/binaries/go/go/src/net/sock_linux_test.go b/platform/dbops/binaries/go/go/src/net/sock_linux_test.go new file mode 100644 index 0000000000000000000000000000000000000000..11303cfff1e64670c098cc48e060a152dcd1bfa3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sock_linux_test.go @@ -0,0 +1,23 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "internal/syscall/unix" + "testing" +) + +func TestMaxAckBacklog(t *testing.T) { + n := 196602 + major, minor := unix.KernelVersion() + backlog := maxAckBacklog(n) + expected := 1<<16 - 1 + if major > 4 || (major == 4 && minor >= 1) { + expected = n + } + if backlog != expected { + t.Fatalf(`Kernel version: "%d.%d", sk_max_ack_backlog mismatch, got %d, want %d`, major, minor, backlog, expected) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/sock_plan9.go b/platform/dbops/binaries/go/go/src/net/sock_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..9367ad8fa7843b499b9e17449de4b96051a6843b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sock_plan9.go @@ -0,0 +1,10 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +func maxListenerBacklog() int { + // /sys/include/ape/sys/socket.h:/SOMAXCONN + return 5 +} diff --git a/platform/dbops/binaries/go/go/src/net/sock_posix.go b/platform/dbops/binaries/go/go/src/net/sock_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..d04c26e7ef449c1dcc9b0a04575708bcc069e335 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sock_posix.go @@ -0,0 +1,226 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || windows + +package net + +import ( + "context" + "internal/poll" + "os" + "syscall" +) + +// socket returns a network file descriptor that is ready for +// asynchronous I/O using the network poller.
+func socket(ctx context.Context, net string, family, sotype, proto int, ipv6only bool, laddr, raddr sockaddr, ctrlCtxFn func(context.Context, string, string, syscall.RawConn) error) (fd *netFD, err error) { + s, err := sysSocket(family, sotype, proto) + if err != nil { + return nil, err + } + if err = setDefaultSockopts(s, family, sotype, ipv6only); err != nil { + poll.CloseFunc(s) + return nil, err + } + if fd, err = newFD(s, family, sotype, net); err != nil { + poll.CloseFunc(s) + return nil, err + } + + // This function makes a network file descriptor for the + // following applications: + // + // - An endpoint holder that opens a passive stream + // connection, known as a stream listener + // + // - An endpoint holder that opens a destination-unspecific + // datagram connection, known as a datagram listener + // + // - An endpoint holder that opens an active stream or a + // destination-specific datagram connection, known as a + // dialer + // + // - An endpoint holder that opens the other connection, such + // as talking to the protocol stack inside the kernel + // + // For stream and datagram listeners, they will only require + // named sockets, so we can assume that it's just a request + // from stream or datagram listeners when laddr is not nil but + // raddr is nil. Otherwise we assume it's just for dialers or + // the other connection holders. + + if laddr != nil && raddr == nil { + switch sotype { + case syscall.SOCK_STREAM, syscall.SOCK_SEQPACKET: + if err := fd.listenStream(ctx, laddr, listenerBacklog(), ctrlCtxFn); err != nil { + fd.Close() + return nil, err + } + return fd, nil + case syscall.SOCK_DGRAM: + if err := fd.listenDatagram(ctx, laddr, ctrlCtxFn); err != nil { + fd.Close() + return nil, err + } + return fd, nil + } + } + if err := fd.dial(ctx, laddr, raddr, ctrlCtxFn); err != nil { + fd.Close() + return nil, err + } + return fd, nil +} + +func (fd *netFD) ctrlNetwork() string { + switch fd.net { + case "unix", "unixgram", "unixpacket": + return fd.net + } + switch fd.net[len(fd.net)-1] { + case '4', '6': + return fd.net + } + if fd.family == syscall.AF_INET { + return fd.net + "4" + } + return fd.net + "6" +} + +func (fd *netFD) dial(ctx context.Context, laddr, raddr sockaddr, ctrlCtxFn func(context.Context, string, string, syscall.RawConn) error) error { + var c *rawConn + if ctrlCtxFn != nil { + c = newRawConn(fd) + var ctrlAddr string + if raddr != nil { + ctrlAddr = raddr.String() + } else if laddr != nil { + ctrlAddr = laddr.String() + } + if err := ctrlCtxFn(ctx, fd.ctrlNetwork(), ctrlAddr, c); err != nil { + return err + } + } + + var lsa syscall.Sockaddr + var err error + if laddr != nil { + if lsa, err = laddr.sockaddr(fd.family); err != nil { + return err + } else if lsa != nil { + if err = syscall.Bind(fd.pfd.Sysfd, lsa); err != nil { + return os.NewSyscallError("bind", err) + } + } + } + var rsa syscall.Sockaddr // remote address from the user + var crsa syscall.Sockaddr // remote address we actually connected to + if raddr != nil { + if rsa, err = raddr.sockaddr(fd.family); err != nil { + return err + } + if crsa, err = fd.connect(ctx, lsa, rsa); err != nil { + return err + } + fd.isConnected = true + } else { + if err := fd.init(); err != nil { + return err + } + } + // Record the local and remote addresses from the actual socket. + // Get the local address by calling Getsockname. 
+ // For the remote address, use + // 1) the one returned by the connect method, if any; or + // 2) the one from Getpeername, if it succeeds; or + // 3) the one passed to us as the raddr parameter. + lsa, _ = syscall.Getsockname(fd.pfd.Sysfd) + if crsa != nil { + fd.setAddr(fd.addrFunc()(lsa), fd.addrFunc()(crsa)) + } else if rsa, _ = syscall.Getpeername(fd.pfd.Sysfd); rsa != nil { + fd.setAddr(fd.addrFunc()(lsa), fd.addrFunc()(rsa)) + } else { + fd.setAddr(fd.addrFunc()(lsa), raddr) + } + return nil +} + +func (fd *netFD) listenStream(ctx context.Context, laddr sockaddr, backlog int, ctrlCtxFn func(context.Context, string, string, syscall.RawConn) error) error { + var err error + if err = setDefaultListenerSockopts(fd.pfd.Sysfd); err != nil { + return err + } + var lsa syscall.Sockaddr + if lsa, err = laddr.sockaddr(fd.family); err != nil { + return err + } + + if ctrlCtxFn != nil { + c := newRawConn(fd) + if err := ctrlCtxFn(ctx, fd.ctrlNetwork(), laddr.String(), c); err != nil { + return err + } + } + + if err = syscall.Bind(fd.pfd.Sysfd, lsa); err != nil { + return os.NewSyscallError("bind", err) + } + if err = listenFunc(fd.pfd.Sysfd, backlog); err != nil { + return os.NewSyscallError("listen", err) + } + if err = fd.init(); err != nil { + return err + } + lsa, _ = syscall.Getsockname(fd.pfd.Sysfd) + fd.setAddr(fd.addrFunc()(lsa), nil) + return nil +} + +func (fd *netFD) listenDatagram(ctx context.Context, laddr sockaddr, ctrlCtxFn func(context.Context, string, string, syscall.RawConn) error) error { + switch addr := laddr.(type) { + case *UDPAddr: + // We provide a socket that listens to a wildcard + // address with reusable UDP port when the given laddr + // is an appropriate UDP multicast address prefix. + // This makes it possible for a single UDP listener to + // join multiple different group addresses, for + // multiple UDP listeners that listen on the same UDP + // port to join the same group address. + if addr.IP != nil && addr.IP.IsMulticast() { + if err := setDefaultMulticastSockopts(fd.pfd.Sysfd); err != nil { + return err + } + addr := *addr + switch fd.family { + case syscall.AF_INET: + addr.IP = IPv4zero + case syscall.AF_INET6: + addr.IP = IPv6unspecified + } + laddr = &addr + } + } + var err error + var lsa syscall.Sockaddr + if lsa, err = laddr.sockaddr(fd.family); err != nil { + return err + } + + if ctrlCtxFn != nil { + c := newRawConn(fd) + if err := ctrlCtxFn(ctx, fd.ctrlNetwork(), laddr.String(), c); err != nil { + return err + } + } + if err = syscall.Bind(fd.pfd.Sysfd, lsa); err != nil { + return os.NewSyscallError("bind", err) + } + if err = fd.init(); err != nil { + return err + } + lsa, _ = syscall.Getsockname(fd.pfd.Sysfd) + fd.setAddr(fd.addrFunc()(lsa), nil) + return nil +} diff --git a/platform/dbops/binaries/go/go/src/net/sock_stub.go b/platform/dbops/binaries/go/go/src/net/sock_stub.go new file mode 100644 index 0000000000000000000000000000000000000000..fd86fa92dc81586d4092f3786b50560f969bfe0b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sock_stub.go @@ -0,0 +1,15 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || js || solaris || wasip1 + +package net + +import "syscall" + +func maxListenerBacklog() int { + // TODO: Implement this + // NOTE: Never return a number bigger than 1<<16 - 1. See issue 5030. 
+ return syscall.SOMAXCONN +} diff --git a/platform/dbops/binaries/go/go/src/net/sock_windows.go b/platform/dbops/binaries/go/go/src/net/sock_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..a519909bb045c2251583fdb060ba065fce334599 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sock_windows.go @@ -0,0 +1,27 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "internal/syscall/windows" + "os" + "syscall" +) + +func maxListenerBacklog() int { + // When the socket backlog is SOMAXCONN, Windows will set the backlog to + // "a reasonable maximum value". + // See: https://learn.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-listen + return syscall.SOMAXCONN +} + +func sysSocket(family, sotype, proto int) (syscall.Handle, error) { + s, err := wsaSocketFunc(int32(family), int32(sotype), int32(proto), + nil, 0, windows.WSA_FLAG_OVERLAPPED|windows.WSA_FLAG_NO_HANDLE_INHERIT) + if err != nil { + return syscall.InvalidHandle, os.NewSyscallError("socket", err) + } + return s, nil +} diff --git a/platform/dbops/binaries/go/go/src/net/sockaddr_posix.go b/platform/dbops/binaries/go/go/src/net/sockaddr_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..c5604fca355a45322d8cccb06b450a1c10653f25 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sockaddr_posix.go @@ -0,0 +1,58 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || js || wasip1 || windows + +package net + +import ( + "syscall" +) + +// A sockaddr represents a TCP, UDP, IP or Unix network endpoint +// address that can be converted into a syscall.Sockaddr. +type sockaddr interface { + Addr + + // family returns the platform-dependent address family + // identifier. + family() int + + // isWildcard reports whether the address is a wildcard + // address. + isWildcard() bool + + // sockaddr returns the address converted into a syscall + // sockaddr type that implements syscall.Sockaddr + // interface. It returns a nil interface when the address is + // nil. + sockaddr(family int) (syscall.Sockaddr, error) + + // toLocal maps the zero address to a local system address (127.0.0.1 or ::1) + toLocal(net string) sockaddr +} + +func (fd *netFD) addrFunc() func(syscall.Sockaddr) Addr { + switch fd.family { + case syscall.AF_INET, syscall.AF_INET6: + switch fd.sotype { + case syscall.SOCK_STREAM: + return sockaddrToTCP + case syscall.SOCK_DGRAM: + return sockaddrToUDP + case syscall.SOCK_RAW: + return sockaddrToIP + } + case syscall.AF_UNIX: + switch fd.sotype { + case syscall.SOCK_STREAM: + return sockaddrToUnix + case syscall.SOCK_DGRAM: + return sockaddrToUnixgram + case syscall.SOCK_SEQPACKET: + return sockaddrToUnixpacket + } + } + return func(syscall.Sockaddr) Addr { return nil } +} diff --git a/platform/dbops/binaries/go/go/src/net/sockopt_aix.go b/platform/dbops/binaries/go/go/src/net/sockopt_aix.go new file mode 100644 index 0000000000000000000000000000000000000000..7729a4470b6d4f1311f543752f99c1692bf1913f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sockopt_aix.go @@ -0,0 +1,39 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package net + +import ( + "os" + "syscall" +) + +func setDefaultSockopts(s, family, sotype int, ipv6only bool) error { + if family == syscall.AF_INET6 && sotype != syscall.SOCK_RAW { + // Allow both IP versions even if the OS default + // is otherwise. Note that some operating systems + // never admit this option. + syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, boolint(ipv6only)) + } + if (sotype == syscall.SOCK_DGRAM || sotype == syscall.SOCK_RAW) && family != syscall.AF_UNIX { + // Allow broadcast. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)) + } + return nil +} + +func setDefaultListenerSockopts(s int) error { + // Allow reuse of recently-used addresses. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)) +} + +func setDefaultMulticastSockopts(s int) error { + // Allow multicast UDP and raw IP datagram sockets to listen + // concurrently across multiple listeners. + if err := syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1); err != nil { + return os.NewSyscallError("setsockopt", err) + } + // Allow reuse of recently-used ports. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEPORT, 1)) +} diff --git a/platform/dbops/binaries/go/go/src/net/sockopt_bsd.go b/platform/dbops/binaries/go/go/src/net/sockopt_bsd.go new file mode 100644 index 0000000000000000000000000000000000000000..ff998119808e48d67a2bfbdd8116f65768133d9f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sockopt_bsd.go @@ -0,0 +1,57 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || netbsd || openbsd + +package net + +import ( + "os" + "runtime" + "syscall" +) + +func setDefaultSockopts(s, family, sotype int, ipv6only bool) error { + if runtime.GOOS == "dragonfly" && sotype != syscall.SOCK_RAW { + // On DragonFly BSD, we adjust the ephemeral port + // range because unlike other BSD systems its default + // port range doesn't conform to IANA recommendation + // as described in RFC 6056 and is pretty narrow. + switch family { + case syscall.AF_INET: + syscall.SetsockoptInt(s, syscall.IPPROTO_IP, syscall.IP_PORTRANGE, syscall.IP_PORTRANGE_HIGH) + case syscall.AF_INET6: + syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_PORTRANGE, syscall.IPV6_PORTRANGE_HIGH) + } + } + if family == syscall.AF_INET6 && sotype != syscall.SOCK_RAW && supportsIPv4map() { + // Allow both IP versions even if the OS default + // is otherwise. Note that some operating systems + // never admit this option. + syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, boolint(ipv6only)) + } + if (sotype == syscall.SOCK_DGRAM || sotype == syscall.SOCK_RAW) && family != syscall.AF_UNIX { + // Allow broadcast. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)) + } + return nil +} + +func setDefaultListenerSockopts(s int) error { + // Allow reuse of recently-used addresses. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)) +} + +func setDefaultMulticastSockopts(s int) error { + // Allow multicast UDP and raw IP datagram sockets to listen + // concurrently across multiple listeners. 
+ if err := syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1); err != nil { + return os.NewSyscallError("setsockopt", err) + } + // Allow reuse of recently-used ports. + // This option is supported only in descendants of 4.4BSD, + // to make an effective multicast application that requires + // quick draw possible. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEPORT, 1)) +} diff --git a/platform/dbops/binaries/go/go/src/net/sockopt_fake.go b/platform/dbops/binaries/go/go/src/net/sockopt_fake.go new file mode 100644 index 0000000000000000000000000000000000000000..9d9f7ea951cc4839d851216caf764541182e882f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sockopt_fake.go @@ -0,0 +1,46 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build js || wasip1 + +package net + +import "syscall" + +func setDefaultSockopts(s, family, sotype int, ipv6only bool) error { + return nil +} + +func setDefaultListenerSockopts(s int) error { + return nil +} + +func setDefaultMulticastSockopts(s int) error { + return nil +} + +func setReadBuffer(fd *netFD, bytes int) error { + if fd.fakeNetFD != nil { + return fd.fakeNetFD.setReadBuffer(bytes) + } + return syscall.ENOPROTOOPT +} + +func setWriteBuffer(fd *netFD, bytes int) error { + if fd.fakeNetFD != nil { + return fd.fakeNetFD.setWriteBuffer(bytes) + } + return syscall.ENOPROTOOPT +} + +func setKeepAlive(fd *netFD, keepalive bool) error { + return syscall.ENOPROTOOPT +} + +func setLinger(fd *netFD, sec int) error { + if fd.fakeNetFD != nil { + return fd.fakeNetFD.setLinger(sec) + } + return syscall.ENOPROTOOPT +} diff --git a/platform/dbops/binaries/go/go/src/net/sockopt_linux.go b/platform/dbops/binaries/go/go/src/net/sockopt_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..3d544299acf8c133229a1e7b770c4b8d1b4f9fde --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sockopt_linux.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "os" + "syscall" +) + +func setDefaultSockopts(s, family, sotype int, ipv6only bool) error { + if family == syscall.AF_INET6 && sotype != syscall.SOCK_RAW { + // Allow both IP versions even if the OS default + // is otherwise. Note that some operating systems + // never admit this option. + syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, boolint(ipv6only)) + } + if (sotype == syscall.SOCK_DGRAM || sotype == syscall.SOCK_RAW) && family != syscall.AF_UNIX { + // Allow broadcast. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)) + } + return nil +} + +func setDefaultListenerSockopts(s int) error { + // Allow reuse of recently-used addresses. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)) +} + +func setDefaultMulticastSockopts(s int) error { + // Allow multicast UDP and raw IP datagram sockets to listen + // concurrently across multiple listeners. 
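+	// (On Linux, SO_REUSEADDR alone permits concurrent multicast binds;
+	// the BSD variant additionally needs SO_REUSEPORT.)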
+ return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)) +} diff --git a/platform/dbops/binaries/go/go/src/net/sockopt_plan9.go b/platform/dbops/binaries/go/go/src/net/sockopt_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..02468cda970f367087b2024fe7bc885373dab866 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sockopt_plan9.go @@ -0,0 +1,19 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import "syscall" + +func setKeepAlive(fd *netFD, keepalive bool) error { + if keepalive { + _, e := fd.ctl.WriteAt([]byte("keepalive"), 0) + return e + } + return nil +} + +func setLinger(fd *netFD, sec int) error { + return syscall.EPLAN9 +} diff --git a/platform/dbops/binaries/go/go/src/net/sockopt_posix.go b/platform/dbops/binaries/go/go/src/net/sockopt_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..a380c7719b484c224f161f4aa776972e32fa1851 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sockopt_posix.go @@ -0,0 +1,105 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || windows + +package net + +import ( + "internal/bytealg" + "runtime" + "syscall" +) + +// Boolean to int. +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func interfaceToIPv4Addr(ifi *Interface) (IP, error) { + if ifi == nil { + return IPv4zero, nil + } + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch v := ifa.(type) { + case *IPAddr: + if v.IP.To4() != nil { + return v.IP, nil + } + case *IPNet: + if v.IP.To4() != nil { + return v.IP, nil + } + } + } + return nil, errNoSuchInterface +} + +func setIPv4MreqToInterface(mreq *syscall.IPMreq, ifi *Interface) error { + if ifi == nil { + return nil + } + ifat, err := ifi.Addrs() + if err != nil { + return err + } + for _, ifa := range ifat { + switch v := ifa.(type) { + case *IPAddr: + if a := v.IP.To4(); a != nil { + copy(mreq.Interface[:], a) + goto done + } + case *IPNet: + if a := v.IP.To4(); a != nil { + copy(mreq.Interface[:], a) + goto done + } + } + } +done: + if bytealg.Equal(mreq.Multiaddr[:], IPv4zero.To4()) { + return errNoSuchMulticastInterface + } + return nil +} + +func setReadBuffer(fd *netFD, bytes int) error { + err := fd.pfd.SetsockoptInt(syscall.SOL_SOCKET, syscall.SO_RCVBUF, bytes) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} + +func setWriteBuffer(fd *netFD, bytes int) error { + err := fd.pfd.SetsockoptInt(syscall.SOL_SOCKET, syscall.SO_SNDBUF, bytes) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} + +func setKeepAlive(fd *netFD, keepalive bool) error { + err := fd.pfd.SetsockoptInt(syscall.SOL_SOCKET, syscall.SO_KEEPALIVE, boolint(keepalive)) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} + +func setLinger(fd *netFD, sec int) error { + var l syscall.Linger + if sec >= 0 { + l.Onoff = 1 + l.Linger = int32(sec) + } else { + l.Onoff = 0 + l.Linger = 0 + } + err := fd.pfd.SetsockoptLinger(syscall.SOL_SOCKET, syscall.SO_LINGER, &l) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} diff --git a/platform/dbops/binaries/go/go/src/net/sockopt_solaris.go 
b/platform/dbops/binaries/go/go/src/net/sockopt_solaris.go new file mode 100644 index 0000000000000000000000000000000000000000..3d544299acf8c133229a1e7b770c4b8d1b4f9fde --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sockopt_solaris.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "os" + "syscall" +) + +func setDefaultSockopts(s, family, sotype int, ipv6only bool) error { + if family == syscall.AF_INET6 && sotype != syscall.SOCK_RAW { + // Allow both IP versions even if the OS default + // is otherwise. Note that some operating systems + // never admit this option. + syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, boolint(ipv6only)) + } + if (sotype == syscall.SOCK_DGRAM || sotype == syscall.SOCK_RAW) && family != syscall.AF_UNIX { + // Allow broadcast. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)) + } + return nil +} + +func setDefaultListenerSockopts(s int) error { + // Allow reuse of recently-used addresses. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)) +} + +func setDefaultMulticastSockopts(s int) error { + // Allow multicast UDP and raw IP datagram sockets to listen + // concurrently across multiple listeners. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)) +} diff --git a/platform/dbops/binaries/go/go/src/net/sockopt_windows.go b/platform/dbops/binaries/go/go/src/net/sockopt_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..8afaf34514d89211ea1303f3541ffddf3a62df42 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sockopt_windows.go @@ -0,0 +1,40 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "os" + "syscall" +) + +func setDefaultSockopts(s syscall.Handle, family, sotype int, ipv6only bool) error { + if family == syscall.AF_INET6 && sotype != syscall.SOCK_RAW { + // Allow both IP versions even if the OS default + // is otherwise. Note that some operating systems + // never admit this option. + syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, boolint(ipv6only)) + } + if (sotype == syscall.SOCK_DGRAM || sotype == syscall.SOCK_RAW) && family != syscall.AF_UNIX && family != syscall.AF_INET6 { + // Allow broadcast. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)) + } + return nil +} + +func setDefaultListenerSockopts(s syscall.Handle) error { + // Windows will reuse recently-used addresses by default. + // SO_REUSEADDR should not be used here, as it allows + // a socket to forcibly bind to a port in use by another socket. + // This could lead to a non-deterministic behavior, where + // connection requests over the port cannot be guaranteed + // to be handled by the correct socket. + return nil +} + +func setDefaultMulticastSockopts(s syscall.Handle) error { + // Allow multicast UDP and raw IP datagram sockets to listen + // concurrently across multiple listeners. 
+ return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)) +} diff --git a/platform/dbops/binaries/go/go/src/net/sockoptip_bsdvar.go b/platform/dbops/binaries/go/go/src/net/sockoptip_bsdvar.go new file mode 100644 index 0000000000000000000000000000000000000000..3e9ba1ee7867e75746d460ab926055d1e668e35d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sockoptip_bsdvar.go @@ -0,0 +1,30 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd || solaris + +package net + +import ( + "runtime" + "syscall" +) + +func setIPv4MulticastInterface(fd *netFD, ifi *Interface) error { + ip, err := interfaceToIPv4Addr(ifi) + if err != nil { + return wrapSyscallError("setsockopt", err) + } + var a [4]byte + copy(a[:], ip.To4()) + err = fd.pfd.SetsockoptInet4Addr(syscall.IPPROTO_IP, syscall.IP_MULTICAST_IF, a) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} + +func setIPv4MulticastLoopback(fd *netFD, v bool) error { + err := fd.pfd.SetsockoptByte(syscall.IPPROTO_IP, syscall.IP_MULTICAST_LOOP, byte(boolint(v))) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} diff --git a/platform/dbops/binaries/go/go/src/net/sockoptip_linux.go b/platform/dbops/binaries/go/go/src/net/sockoptip_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..bd7d8344258e2418fb8ac8c36463deb19fcec96e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sockoptip_linux.go @@ -0,0 +1,27 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "runtime" + "syscall" +) + +func setIPv4MulticastInterface(fd *netFD, ifi *Interface) error { + var v int32 + if ifi != nil { + v = int32(ifi.Index) + } + mreq := &syscall.IPMreqn{Ifindex: v} + err := fd.pfd.SetsockoptIPMreqn(syscall.IPPROTO_IP, syscall.IP_MULTICAST_IF, mreq) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} + +func setIPv4MulticastLoopback(fd *netFD, v bool) error { + err := fd.pfd.SetsockoptInt(syscall.IPPROTO_IP, syscall.IP_MULTICAST_LOOP, boolint(v)) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} diff --git a/platform/dbops/binaries/go/go/src/net/sockoptip_posix.go b/platform/dbops/binaries/go/go/src/net/sockoptip_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..572ea455c09586f02d45d3b23716cf31d59240a9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sockoptip_posix.go @@ -0,0 +1,49 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
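The per-platform sockopt helpers in the files above run inside the package while a socket is being set up. The public escape hatch for the same kind of tuning is ListenConfig.Control, which the package invokes after creating the socket and before binding it. A minimal sketch, assuming a Unix-like platform (error handling abbreviated):

package main

import (
	"context"
	"log"
	"net"
	"syscall"
)

func main() {
	lc := net.ListenConfig{
		Control: func(network, address string, c syscall.RawConn) error {
			var serr error
			err := c.Control(func(fd uintptr) {
				// The same option setDefaultListenerSockopts applies:
				// allow reuse of recently-used addresses.
				serr = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
			})
			if err != nil {
				return err
			}
			return serr
		},
	}
	ln, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	log.Println("listening on", ln.Addr())
}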
+ +//go:build unix || windows + +package net + +import ( + "runtime" + "syscall" +) + +func joinIPv4Group(fd *netFD, ifi *Interface, ip IP) error { + mreq := &syscall.IPMreq{Multiaddr: [4]byte{ip[0], ip[1], ip[2], ip[3]}} + if err := setIPv4MreqToInterface(mreq, ifi); err != nil { + return err + } + err := fd.pfd.SetsockoptIPMreq(syscall.IPPROTO_IP, syscall.IP_ADD_MEMBERSHIP, mreq) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} + +func setIPv6MulticastInterface(fd *netFD, ifi *Interface) error { + var v int + if ifi != nil { + v = ifi.Index + } + err := fd.pfd.SetsockoptInt(syscall.IPPROTO_IPV6, syscall.IPV6_MULTICAST_IF, v) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} + +func setIPv6MulticastLoopback(fd *netFD, v bool) error { + err := fd.pfd.SetsockoptInt(syscall.IPPROTO_IPV6, syscall.IPV6_MULTICAST_LOOP, boolint(v)) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} + +func joinIPv6Group(fd *netFD, ifi *Interface, ip IP) error { + mreq := &syscall.IPv6Mreq{} + copy(mreq.Multiaddr[:], ip) + if ifi != nil { + mreq.Interface = uint32(ifi.Index) + } + err := fd.pfd.SetsockoptIPv6Mreq(syscall.IPPROTO_IPV6, syscall.IPV6_JOIN_GROUP, mreq) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} diff --git a/platform/dbops/binaries/go/go/src/net/sockoptip_stub.go b/platform/dbops/binaries/go/go/src/net/sockoptip_stub.go new file mode 100644 index 0000000000000000000000000000000000000000..23891a865f555a2d564f15198d0deb89401ac5b3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sockoptip_stub.go @@ -0,0 +1,33 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build js || wasip1 + +package net + +import "syscall" + +func setIPv4MulticastInterface(fd *netFD, ifi *Interface) error { + return syscall.ENOPROTOOPT +} + +func setIPv4MulticastLoopback(fd *netFD, v bool) error { + return syscall.ENOPROTOOPT +} + +func joinIPv4Group(fd *netFD, ifi *Interface, ip IP) error { + return syscall.ENOPROTOOPT +} + +func setIPv6MulticastInterface(fd *netFD, ifi *Interface) error { + return syscall.ENOPROTOOPT +} + +func setIPv6MulticastLoopback(fd *netFD, v bool) error { + return syscall.ENOPROTOOPT +} + +func joinIPv6Group(fd *netFD, ifi *Interface, ip IP) error { + return syscall.ENOPROTOOPT +} diff --git a/platform/dbops/binaries/go/go/src/net/sockoptip_windows.go b/platform/dbops/binaries/go/go/src/net/sockoptip_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..9dfa37c51e974c97811f857301c9bdf7bf227f79 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sockoptip_windows.go @@ -0,0 +1,29 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
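joinIPv4Group and joinIPv6Group above are reached from the public ListenMulticastUDP entry point, which selects the interface, disables multicast loopback, and joins the group in one call. A sketch of the caller side (the interface name and group address are illustrative):

package main

import (
	"log"
	"net"
)

func main() {
	ifi, err := net.InterfaceByName("eth0") // hypothetical interface
	if err != nil {
		log.Fatal(err)
	}
	group := &net.UDPAddr{IP: net.IPv4(239, 0, 0, 1), Port: 9999}
	c, err := net.ListenMulticastUDP("udp4", ifi, group)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	buf := make([]byte, 1500)
	n, src, err := c.ReadFromUDP(buf)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d bytes from %v", n, src)
}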
+ +package net + +import ( + "os" + "runtime" + "syscall" +) + +func setIPv4MulticastInterface(fd *netFD, ifi *Interface) error { + ip, err := interfaceToIPv4Addr(ifi) + if err != nil { + return os.NewSyscallError("setsockopt", err) + } + var a [4]byte + copy(a[:], ip.To4()) + err = fd.pfd.SetsockoptInet4Addr(syscall.IPPROTO_IP, syscall.IP_MULTICAST_IF, a) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} + +func setIPv4MulticastLoopback(fd *netFD, v bool) error { + err := fd.pfd.SetsockoptInt(syscall.IPPROTO_IP, syscall.IP_MULTICAST_LOOP, boolint(v)) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} diff --git a/platform/dbops/binaries/go/go/src/net/splice_linux.go b/platform/dbops/binaries/go/go/src/net/splice_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..bdafcb59ab84802ec6e703ed625ea130b15a42f6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/splice_linux.go @@ -0,0 +1,62 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "internal/poll" + "io" +) + +// spliceFrom transfers data from r to c using the splice system call to minimize +// copies from and to userspace. c must be a TCP connection. +// Currently, spliceFrom is only enabled if r is a TCP or a stream-oriented Unix connection. +// +// If spliceFrom returns handled == false, it has performed no work. +func spliceFrom(c *netFD, r io.Reader) (written int64, err error, handled bool) { + var remain int64 = 1<<63 - 1 // by default, copy until EOF + lr, ok := r.(*io.LimitedReader) + if ok { + remain, r = lr.N, lr.R + if remain <= 0 { + return 0, nil, true + } + } + + var s *netFD + switch v := r.(type) { + case *TCPConn: + s = v.fd + case tcpConnWithoutWriteTo: + s = v.fd + case *UnixConn: + if v.fd.net != "unix" { + return 0, nil, false + } + s = v.fd + default: + return 0, nil, false + } + + written, handled, sc, err := poll.Splice(&c.pfd, &s.pfd, remain) + if lr != nil { + lr.N -= written + } + return written, wrapSyscallError(sc, err), handled +} + +// spliceTo transfers data from c to w using the splice system call to minimize +// copies from and to userspace. c must be a TCP connection. +// Currently, spliceTo is only enabled if w is a stream-oriented Unix connection. +// +// If spliceTo returns handled == false, it has performed no work. +func spliceTo(w io.Writer, c *netFD) (written int64, err error, handled bool) { + uc, ok := w.(*UnixConn) + if !ok || uc.fd.net != "unix" { + return + } + + written, handled, sc, err := poll.Splice(&uc.fd.pfd, &c.pfd, 1<<63-1) + return written, wrapSyscallError(sc, err), handled +} diff --git a/platform/dbops/binaries/go/go/src/net/splice_stub.go b/platform/dbops/binaries/go/go/src/net/splice_stub.go new file mode 100644 index 0000000000000000000000000000000000000000..239227ff88397db25b2f8033947b8948d62539be --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/splice_stub.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
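spliceFrom above is reached through (*TCPConn).ReadFrom, which io.Copy uses whenever the destination is a *TCPConn. A sketch of a byte-capped relay that keeps the zero-copy path: spliceFrom unwraps the *io.LimitedReader, splices at most N bytes, and decrements N by the amount written (the package and function names are illustrative):

package example

import (
	"io"
	"net"
)

// relay copies at most max bytes from src to dst. On Linux, io.Copy turns
// this into dst.ReadFrom(lr) and hence spliceFrom; on other platforms it
// falls back to the generic read/write loop.
func relay(dst, src *net.TCPConn, max int64) (int64, error) {
	lr := &io.LimitedReader{R: src, N: max}
	return io.Copy(dst, lr)
}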
+ +//go:build !linux + +package net + +import "io" + +func spliceFrom(_ *netFD, _ io.Reader) (int64, error, bool) { + return 0, nil, false +} + +func spliceTo(_ io.Writer, _ *netFD) (int64, error, bool) { + return 0, nil, false +} diff --git a/platform/dbops/binaries/go/go/src/net/splice_test.go b/platform/dbops/binaries/go/go/src/net/splice_test.go new file mode 100644 index 0000000000000000000000000000000000000000..227ddebff402cea3a8ece694d44b68e91cea02f0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/splice_test.go @@ -0,0 +1,541 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux + +package net + +import ( + "io" + "log" + "os" + "os/exec" + "strconv" + "sync" + "testing" + "time" +) + +func TestSplice(t *testing.T) { + t.Run("tcp-to-tcp", func(t *testing.T) { testSplice(t, "tcp", "tcp") }) + if !testableNetwork("unixgram") { + t.Skip("skipping unix-to-tcp tests") + } + t.Run("unix-to-tcp", func(t *testing.T) { testSplice(t, "unix", "tcp") }) + t.Run("tcp-to-unix", func(t *testing.T) { testSplice(t, "tcp", "unix") }) + t.Run("tcp-to-file", func(t *testing.T) { testSpliceToFile(t, "tcp", "file") }) + t.Run("unix-to-file", func(t *testing.T) { testSpliceToFile(t, "unix", "file") }) + t.Run("no-unixpacket", testSpliceNoUnixpacket) + t.Run("no-unixgram", testSpliceNoUnixgram) +} + +func testSpliceToFile(t *testing.T, upNet, downNet string) { + t.Run("simple", spliceTestCase{upNet, downNet, 128, 128, 0}.testFile) + t.Run("multipleWrite", spliceTestCase{upNet, downNet, 4096, 1 << 20, 0}.testFile) + t.Run("big", spliceTestCase{upNet, downNet, 5 << 20, 1 << 30, 0}.testFile) + t.Run("honorsLimitedReader", spliceTestCase{upNet, downNet, 4096, 1 << 20, 1 << 10}.testFile) + t.Run("updatesLimitedReaderN", spliceTestCase{upNet, downNet, 1024, 4096, 4096 + 100}.testFile) + t.Run("limitedReaderAtLimit", spliceTestCase{upNet, downNet, 32, 128, 128}.testFile) +} + +func testSplice(t *testing.T, upNet, downNet string) { + t.Run("simple", spliceTestCase{upNet, downNet, 128, 128, 0}.test) + t.Run("multipleWrite", spliceTestCase{upNet, downNet, 4096, 1 << 20, 0}.test) + t.Run("big", spliceTestCase{upNet, downNet, 5 << 20, 1 << 30, 0}.test) + t.Run("honorsLimitedReader", spliceTestCase{upNet, downNet, 4096, 1 << 20, 1 << 10}.test) + t.Run("updatesLimitedReaderN", spliceTestCase{upNet, downNet, 1024, 4096, 4096 + 100}.test) + t.Run("limitedReaderAtLimit", spliceTestCase{upNet, downNet, 32, 128, 128}.test) + t.Run("readerAtEOF", func(t *testing.T) { testSpliceReaderAtEOF(t, upNet, downNet) }) + t.Run("issue25985", func(t *testing.T) { testSpliceIssue25985(t, upNet, downNet) }) +} + +type spliceTestCase struct { + upNet, downNet string + + chunkSize, totalSize int + limitReadSize int +} + +func (tc spliceTestCase) test(t *testing.T) { + clientUp, serverUp := spliceTestSocketPair(t, tc.upNet) + defer serverUp.Close() + cleanup, err := startSpliceClient(clientUp, "w", tc.chunkSize, tc.totalSize) + if err != nil { + t.Fatal(err) + } + defer cleanup() + clientDown, serverDown := spliceTestSocketPair(t, tc.downNet) + defer serverDown.Close() + cleanup, err = startSpliceClient(clientDown, "r", tc.chunkSize, tc.totalSize) + if err != nil { + t.Fatal(err) + } + defer cleanup() + var ( + r io.Reader = serverUp + size = tc.totalSize + ) + if tc.limitReadSize > 0 { + if tc.limitReadSize < size { + size = tc.limitReadSize + } + + r = &io.LimitedReader{ + N: int64(tc.limitReadSize), 
+ R: serverUp, + } + defer serverUp.Close() + } + n, err := io.Copy(serverDown, r) + serverDown.Close() + if err != nil { + t.Fatal(err) + } + if want := int64(size); want != n { + t.Errorf("want %d bytes spliced, got %d", want, n) + } + + if tc.limitReadSize > 0 { + wantN := 0 + if tc.limitReadSize > size { + wantN = tc.limitReadSize - size + } + + if n := r.(*io.LimitedReader).N; n != int64(wantN) { + t.Errorf("r.N = %d, want %d", n, wantN) + } + } +} + +func (tc spliceTestCase) testFile(t *testing.T) { + f, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + client, server := spliceTestSocketPair(t, tc.upNet) + defer server.Close() + + cleanup, err := startSpliceClient(client, "w", tc.chunkSize, tc.totalSize) + if err != nil { + client.Close() + t.Fatal("failed to start splice client:", err) + } + defer cleanup() + + var ( + r io.Reader = server + actualSize = tc.totalSize + ) + if tc.limitReadSize > 0 { + if tc.limitReadSize < actualSize { + actualSize = tc.limitReadSize + } + + r = &io.LimitedReader{ + N: int64(tc.limitReadSize), + R: r, + } + } + + got, err := io.Copy(f, r) + if err != nil { + t.Fatalf("failed to ReadFrom with error: %v", err) + } + if want := int64(actualSize); got != want { + t.Errorf("got %d bytes, want %d", got, want) + } + if tc.limitReadSize > 0 { + wantN := 0 + if tc.limitReadSize > actualSize { + wantN = tc.limitReadSize - actualSize + } + + if gotN := r.(*io.LimitedReader).N; gotN != int64(wantN) { + t.Errorf("r.N = %d, want %d", gotN, wantN) + } + } +} + +func testSpliceReaderAtEOF(t *testing.T, upNet, downNet string) { + // UnixConn doesn't implement io.ReaderFrom, which will fail + // the following test in asserting a UnixConn to be an io.ReaderFrom, + // so skip this test. + if upNet == "unix" || downNet == "unix" { + t.Skip("skipping test on unix socket") + } + + clientUp, serverUp := spliceTestSocketPair(t, upNet) + defer clientUp.Close() + clientDown, serverDown := spliceTestSocketPair(t, downNet) + defer clientDown.Close() + + serverUp.Close() + + // We'd like to call net.spliceFrom here and check the handled return + // value, but we disable splice on old Linux kernels. + // + // In that case, poll.Splice and net.spliceFrom return a non-nil error + // and handled == false. We'd ideally like to see handled == true + // because the source reader is at EOF, but if we're running on an old + // kernel, and splice is disabled, we won't see EOF from net.spliceFrom, + // because we won't touch the reader at all. + // + // Trying to untangle the errors from net.spliceFrom and match them + // against the errors created by the poll package would be brittle, + // so this is a higher level test. + // + // The following ReadFrom should return immediately, regardless of + // whether splice is disabled or not. The other side should then + // get a goodbye signal. Test for the goodbye signal. 
+ msg := "bye" + go func() { + serverDown.(io.ReaderFrom).ReadFrom(serverUp) + io.WriteString(serverDown, msg) + serverDown.Close() + }() + + buf := make([]byte, 3) + _, err := io.ReadFull(clientDown, buf) + if err != nil { + t.Errorf("clientDown: %v", err) + } + if string(buf) != msg { + t.Errorf("clientDown got %q, want %q", buf, msg) + } +} + +func testSpliceIssue25985(t *testing.T, upNet, downNet string) { + front := newLocalListener(t, upNet) + defer front.Close() + back := newLocalListener(t, downNet) + defer back.Close() + + var wg sync.WaitGroup + wg.Add(2) + + proxy := func() { + src, err := front.Accept() + if err != nil { + return + } + dst, err := Dial(downNet, back.Addr().String()) + if err != nil { + return + } + defer dst.Close() + defer src.Close() + go func() { + io.Copy(src, dst) + wg.Done() + }() + go func() { + io.Copy(dst, src) + wg.Done() + }() + } + + go proxy() + + toFront, err := Dial(upNet, front.Addr().String()) + if err != nil { + t.Fatal(err) + } + + io.WriteString(toFront, "foo") + toFront.Close() + + fromProxy, err := back.Accept() + if err != nil { + t.Fatal(err) + } + defer fromProxy.Close() + + _, err = io.ReadAll(fromProxy) + if err != nil { + t.Fatal(err) + } + + wg.Wait() +} + +func testSpliceNoUnixpacket(t *testing.T) { + clientUp, serverUp := spliceTestSocketPair(t, "unixpacket") + defer clientUp.Close() + defer serverUp.Close() + clientDown, serverDown := spliceTestSocketPair(t, "tcp") + defer clientDown.Close() + defer serverDown.Close() + // If splice called poll.Splice here, we'd get err == syscall.EINVAL + // and handled == false. If poll.Splice gets an EINVAL on the first + // try, it assumes the kernel it's running on doesn't support splice + // for unix sockets and returns handled == false. This works for our + // purposes by somewhat of an accident, but is not entirely correct. + // + // What we want is err == nil and handled == false, i.e. we never + // called poll.Splice, because we know the unix socket's network. + _, err, handled := spliceFrom(serverDown.(*TCPConn).fd, serverUp) + if err != nil || handled != false { + t.Fatalf("got err = %v, handled = %t, want nil error, handled == false", err, handled) + } +} + +func testSpliceNoUnixgram(t *testing.T) { + addr, err := ResolveUnixAddr("unixgram", testUnixAddr(t)) + if err != nil { + t.Fatal(err) + } + defer os.Remove(addr.Name) + up, err := ListenUnixgram("unixgram", addr) + if err != nil { + t.Fatal(err) + } + defer up.Close() + clientDown, serverDown := spliceTestSocketPair(t, "tcp") + defer clientDown.Close() + defer serverDown.Close() + // Analogous to testSpliceNoUnixpacket. + _, err, handled := spliceFrom(serverDown.(*TCPConn).fd, up) + if err != nil || handled != false { + t.Fatalf("got err = %v, handled = %t, want nil error, handled == false", err, handled) + } +} + +func BenchmarkSplice(b *testing.B) { + testHookUninstaller.Do(uninstallTestHooks) + + b.Run("tcp-to-tcp", func(b *testing.B) { benchSplice(b, "tcp", "tcp") }) + b.Run("unix-to-tcp", func(b *testing.B) { benchSplice(b, "unix", "tcp") }) + b.Run("tcp-to-unix", func(b *testing.B) { benchSplice(b, "tcp", "unix") }) +} + +func benchSplice(b *testing.B, upNet, downNet string) { + for i := 0; i <= 10; i++ { + chunkSize := 1 << uint(i+10) + tc := spliceTestCase{ + upNet: upNet, + downNet: downNet, + chunkSize: chunkSize, + } + + b.Run(strconv.Itoa(chunkSize), tc.bench) + } +} + +func (tc spliceTestCase) bench(b *testing.B) { + // To benchmark the genericReadFrom code path, set this to false. 
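+	// (With useSplice == false, wrapping serverUp in onlyReader below hides
+	// its concrete *TCPConn type, so serverDown.ReadFrom matches neither
+	// spliceFrom nor sendFile and io.Copy lands in genericReadFrom.)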
+ useSplice := true + + clientUp, serverUp := spliceTestSocketPair(b, tc.upNet) + defer serverUp.Close() + + cleanup, err := startSpliceClient(clientUp, "w", tc.chunkSize, tc.chunkSize*b.N) + if err != nil { + b.Fatal(err) + } + defer cleanup() + + clientDown, serverDown := spliceTestSocketPair(b, tc.downNet) + defer serverDown.Close() + + cleanup, err = startSpliceClient(clientDown, "r", tc.chunkSize, tc.chunkSize*b.N) + if err != nil { + b.Fatal(err) + } + defer cleanup() + + b.SetBytes(int64(tc.chunkSize)) + b.ResetTimer() + + if useSplice { + _, err := io.Copy(serverDown, serverUp) + if err != nil { + b.Fatal(err) + } + } else { + type onlyReader struct { + io.Reader + } + _, err := io.Copy(serverDown, onlyReader{serverUp}) + if err != nil { + b.Fatal(err) + } + } +} + +func spliceTestSocketPair(t testing.TB, net string) (client, server Conn) { + t.Helper() + ln := newLocalListener(t, net) + defer ln.Close() + var cerr, serr error + acceptDone := make(chan struct{}) + go func() { + server, serr = ln.Accept() + acceptDone <- struct{}{} + }() + client, cerr = Dial(ln.Addr().Network(), ln.Addr().String()) + <-acceptDone + if cerr != nil { + if server != nil { + server.Close() + } + t.Fatal(cerr) + } + if serr != nil { + if client != nil { + client.Close() + } + t.Fatal(serr) + } + return client, server +} + +func startSpliceClient(conn Conn, op string, chunkSize, totalSize int) (func(), error) { + f, err := conn.(interface{ File() (*os.File, error) }).File() + if err != nil { + return nil, err + } + + cmd := exec.Command(os.Args[0], os.Args[1:]...) + cmd.Env = []string{ + "GO_NET_TEST_SPLICE=1", + "GO_NET_TEST_SPLICE_OP=" + op, + "GO_NET_TEST_SPLICE_CHUNK_SIZE=" + strconv.Itoa(chunkSize), + "GO_NET_TEST_SPLICE_TOTAL_SIZE=" + strconv.Itoa(totalSize), + "TMPDIR=" + os.Getenv("TMPDIR"), + } + cmd.ExtraFiles = append(cmd.ExtraFiles, f) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Start(); err != nil { + return nil, err + } + + donec := make(chan struct{}) + go func() { + cmd.Wait() + conn.Close() + f.Close() + close(donec) + }() + + return func() { + select { + case <-donec: + case <-time.After(5 * time.Second): + log.Printf("killing splice client after 5 second shutdown timeout") + cmd.Process.Kill() + select { + case <-donec: + case <-time.After(5 * time.Second): + log.Printf("splice client didn't die after 10 seconds") + } + } + }, nil +} + +func init() { + if os.Getenv("GO_NET_TEST_SPLICE") == "" { + return + } + defer os.Exit(0) + + f := os.NewFile(uintptr(3), "splice-test-conn") + defer f.Close() + + conn, err := FileConn(f) + if err != nil { + log.Fatal(err) + } + + var chunkSize int + if chunkSize, err = strconv.Atoi(os.Getenv("GO_NET_TEST_SPLICE_CHUNK_SIZE")); err != nil { + log.Fatal(err) + } + buf := make([]byte, chunkSize) + + var totalSize int + if totalSize, err = strconv.Atoi(os.Getenv("GO_NET_TEST_SPLICE_TOTAL_SIZE")); err != nil { + log.Fatal(err) + } + + var fn func([]byte) (int, error) + switch op := os.Getenv("GO_NET_TEST_SPLICE_OP"); op { + case "r": + fn = conn.Read + case "w": + defer conn.Close() + + fn = conn.Write + default: + log.Fatalf("unknown op %q", op) + } + + var n int + for count := 0; count < totalSize; count += n { + if count+chunkSize > totalSize { + buf = buf[:totalSize-count] + } + + var err error + if n, err = fn(buf); err != nil { + return + } + } +} + +func BenchmarkSpliceFile(b *testing.B) { + b.Run("tcp-to-file", func(b *testing.B) { benchmarkSpliceFile(b, "tcp") }) + b.Run("unix-to-file", func(b *testing.B) { 
benchmarkSpliceFile(b, "unix") }) +} + +func benchmarkSpliceFile(b *testing.B, proto string) { + for i := 0; i <= 10; i++ { + size := 1 << (i + 10) + bench := spliceFileBench{ + proto: proto, + chunkSize: size, + } + b.Run(strconv.Itoa(size), bench.benchSpliceFile) + } +} + +type spliceFileBench struct { + proto string + chunkSize int +} + +func (bench spliceFileBench) benchSpliceFile(b *testing.B) { + f, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0) + if err != nil { + b.Fatal(err) + } + defer f.Close() + + totalSize := b.N * bench.chunkSize + + client, server := spliceTestSocketPair(b, bench.proto) + defer server.Close() + + cleanup, err := startSpliceClient(client, "w", bench.chunkSize, totalSize) + if err != nil { + client.Close() + b.Fatalf("failed to start splice client: %v", err) + } + defer cleanup() + + b.ReportAllocs() + b.SetBytes(int64(bench.chunkSize)) + b.ResetTimer() + + got, err := io.Copy(f, server) + if err != nil { + b.Fatalf("failed to ReadFrom with error: %v", err) + } + if want := int64(totalSize); got != want { + b.Errorf("bytes sent mismatch, got: %d, want: %d", got, want) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/sys_cloexec.go b/platform/dbops/binaries/go/go/src/net/sys_cloexec.go new file mode 100644 index 0000000000000000000000000000000000000000..6e61d40c1925314751e531999cd45a53b9ee9ebf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/sys_cloexec.go @@ -0,0 +1,36 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements sysSocket for platforms that do not provide a fast path +// for setting SetNonblock and CloseOnExec. + +//go:build aix || darwin + +package net + +import ( + "internal/poll" + "os" + "syscall" +) + +// Wrapper around the socket system call that marks the returned file +// descriptor as nonblocking and close-on-exec. +func sysSocket(family, sotype, proto int) (int, error) { + // See ../syscall/exec_unix.go for description of ForkLock. + syscall.ForkLock.RLock() + s, err := socketFunc(family, sotype, proto) + if err == nil { + syscall.CloseOnExec(s) + } + syscall.ForkLock.RUnlock() + if err != nil { + return -1, os.NewSyscallError("socket", err) + } + if err = syscall.SetNonblock(s, true); err != nil { + poll.CloseFunc(s) + return -1, os.NewSyscallError("setnonblock", err) + } + return s, nil +} diff --git a/platform/dbops/binaries/go/go/src/net/tcpsock.go b/platform/dbops/binaries/go/go/src/net/tcpsock.go new file mode 100644 index 0000000000000000000000000000000000000000..590516bff13034c8eb9f960cfb6e43d74664c5ee --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/tcpsock.go @@ -0,0 +1,407 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "internal/itoa" + "io" + "net/netip" + "os" + "syscall" + "time" +) + +// BUG(mikio): On JS and Windows, the File method of TCPConn and +// TCPListener is not implemented. + +// TCPAddr represents the address of a TCP end point. +type TCPAddr struct { + IP IP + Port int + Zone string // IPv6 scoped addressing zone +} + +// AddrPort returns the [TCPAddr] a as a [netip.AddrPort]. +// +// If a.Port does not fit in a uint16, it's silently truncated. +// +// If a is nil, a zero value is returned. 
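A quick round-trip through the netip view described above (a sketch; the addresses are illustrative, and TCPAddrFromAddrPort is defined a little further down):

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	a := &net.TCPAddr{IP: net.ParseIP("2001:db8::1"), Port: 8080, Zone: "eth0"}
	ap := a.AddrPort()
	fmt.Println(ap)                          // [2001:db8::1%eth0]:8080
	fmt.Println(net.TCPAddrFromAddrPort(ap)) // converts back to a *TCPAddr
	var nilAddr *net.TCPAddr
	fmt.Println(nilAddr.AddrPort() == (netip.AddrPort{})) // true: nil yields the zero value
}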
+func (a *TCPAddr) AddrPort() netip.AddrPort { + if a == nil { + return netip.AddrPort{} + } + na, _ := netip.AddrFromSlice(a.IP) + na = na.WithZone(a.Zone) + return netip.AddrPortFrom(na, uint16(a.Port)) +} + +// Network returns the address's network name, "tcp". +func (a *TCPAddr) Network() string { return "tcp" } + +func (a *TCPAddr) String() string { + if a == nil { + return "" + } + ip := ipEmptyString(a.IP) + if a.Zone != "" { + return JoinHostPort(ip+"%"+a.Zone, itoa.Itoa(a.Port)) + } + return JoinHostPort(ip, itoa.Itoa(a.Port)) +} + +func (a *TCPAddr) isWildcard() bool { + if a == nil || a.IP == nil { + return true + } + return a.IP.IsUnspecified() +} + +func (a *TCPAddr) opAddr() Addr { + if a == nil { + return nil + } + return a +} + +// ResolveTCPAddr returns an address of TCP end point. +// +// The network must be a TCP network name. +// +// If the host in the address parameter is not a literal IP address or +// the port is not a literal port number, ResolveTCPAddr resolves the +// address to an address of TCP end point. +// Otherwise, it parses the address as a pair of literal IP address +// and port number. +// The address parameter can use a host name, but this is not +// recommended, because it will return at most one of the host name's +// IP addresses. +// +// See func [Dial] for a description of the network and address +// parameters. +func ResolveTCPAddr(network, address string) (*TCPAddr, error) { + switch network { + case "tcp", "tcp4", "tcp6": + case "": // a hint wildcard for Go 1.0 undocumented behavior + network = "tcp" + default: + return nil, UnknownNetworkError(network) + } + addrs, err := DefaultResolver.internetAddrList(context.Background(), network, address) + if err != nil { + return nil, err + } + return addrs.forResolve(network, address).(*TCPAddr), nil +} + +// TCPAddrFromAddrPort returns addr as a [TCPAddr]. If addr.IsValid() is false, +// then the returned TCPAddr will contain a nil IP field, indicating an +// address family-agnostic unspecified address. +func TCPAddrFromAddrPort(addr netip.AddrPort) *TCPAddr { + return &TCPAddr{ + IP: addr.Addr().AsSlice(), + Zone: addr.Addr().Zone(), + Port: int(addr.Port()), + } +} + +// TCPConn is an implementation of the [Conn] interface for TCP network +// connections. +type TCPConn struct { + conn +} + +// SyscallConn returns a raw network connection. +// This implements the [syscall.Conn] interface. +func (c *TCPConn) SyscallConn() (syscall.RawConn, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + return newRawConn(c.fd), nil +} + +// ReadFrom implements the [io.ReaderFrom] ReadFrom method. +func (c *TCPConn) ReadFrom(r io.Reader) (int64, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + n, err := c.readFrom(r) + if err != nil && err != io.EOF { + err = &OpError{Op: "readfrom", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return n, err +} + +// WriteTo implements the io.WriterTo WriteTo method. +func (c *TCPConn) WriteTo(w io.Writer) (int64, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + n, err := c.writeTo(w) + if err != nil && err != io.EOF { + err = &OpError{Op: "writeto", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return n, err +} + +// CloseRead shuts down the reading side of the TCP connection. +// Most callers should just use Close. 
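CloseRead and CloseWrite give TCP half-close semantics; the classic use is signaling end-of-request with a FIN while still reading the reply. A runnable sketch against a local echo server:

package main

import (
	"fmt"
	"io"
	"log"
	"net"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	go func() { // tiny echo server
		c, err := ln.Accept()
		if err != nil {
			return
		}
		defer c.Close()
		io.Copy(c, c) // echoes until it sees our FIN
	}()

	c, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		log.Fatal(err)
	}
	tc := c.(*net.TCPConn)
	tc.Write([]byte("ping"))
	// Half-close: send a FIN so the echo server's io.Copy returns, while
	// our read side stays open for the reply.
	tc.CloseWrite()
	reply, _ := io.ReadAll(tc)
	fmt.Printf("%s\n", reply) // ping
	tc.Close()
}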
+func (c *TCPConn) CloseRead() error { + if !c.ok() { + return syscall.EINVAL + } + if err := c.fd.closeRead(); err != nil { + return &OpError{Op: "close", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return nil +} + +// CloseWrite shuts down the writing side of the TCP connection. +// Most callers should just use Close. +func (c *TCPConn) CloseWrite() error { + if !c.ok() { + return syscall.EINVAL + } + if err := c.fd.closeWrite(); err != nil { + return &OpError{Op: "close", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return nil +} + +// SetLinger sets the behavior of Close on a connection which still +// has data waiting to be sent or to be acknowledged. +// +// If sec < 0 (the default), the operating system finishes sending the +// data in the background. +// +// If sec == 0, the operating system discards any unsent or +// unacknowledged data. +// +// If sec > 0, the data is sent in the background as with sec < 0. +// On some operating systems including Linux, this may cause Close to block +// until all data has been sent or discarded. +// On some operating systems after sec seconds have elapsed any remaining +// unsent data may be discarded. +func (c *TCPConn) SetLinger(sec int) error { + if !c.ok() { + return syscall.EINVAL + } + if err := setLinger(c.fd, sec); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return nil +} + +// SetKeepAlive sets whether the operating system should send +// keep-alive messages on the connection. +func (c *TCPConn) SetKeepAlive(keepalive bool) error { + if !c.ok() { + return syscall.EINVAL + } + if err := setKeepAlive(c.fd, keepalive); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return nil +} + +// SetKeepAlivePeriod sets period between keep-alives. +func (c *TCPConn) SetKeepAlivePeriod(d time.Duration) error { + if !c.ok() { + return syscall.EINVAL + } + if err := setKeepAlivePeriod(c.fd, d); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return nil +} + +// SetNoDelay controls whether the operating system should delay +// packet transmission in hopes of sending fewer packets (Nagle's +// algorithm). The default is true (no delay), meaning that data is +// sent as soon as possible after a Write. +func (c *TCPConn) SetNoDelay(noDelay bool) error { + if !c.ok() { + return syscall.EINVAL + } + if err := setNoDelay(c.fd, noDelay); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return nil +} + +// MultipathTCP reports whether the ongoing connection is using MPTCP. +// +// If Multipath TCP is not supported by the host, by the other peer or +// intentionally / accidentally filtered out by a device in between, a +// fallback to TCP will be done. This method does its best to check if +// MPTCP is still being used or not. +// +// On Linux, more conditions are verified on kernels >= v5.16, improving +// the results. 
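The SetLinger contract above has one widely used corner: sec == 0 discards unsent data, which on common stacks makes Close reset the connection with a RST rather than a graceful FIN. A sketch (the package and function names are illustrative):

package linger

import "net"

// abortiveClose drops any unsent data and resets the connection.
func abortiveClose(c *net.TCPConn) error {
	if err := c.SetLinger(0); err != nil {
		return err
	}
	return c.Close()
}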
+func (c *TCPConn) MultipathTCP() (bool, error) { + if !c.ok() { + return false, syscall.EINVAL + } + return isUsingMultipathTCP(c.fd), nil +} + +func newTCPConn(fd *netFD, keepAlive time.Duration, keepAliveHook func(time.Duration)) *TCPConn { + setNoDelay(fd, true) + if keepAlive == 0 { + keepAlive = defaultTCPKeepAlive + } + if keepAlive > 0 { + setKeepAlive(fd, true) + setKeepAlivePeriod(fd, keepAlive) + if keepAliveHook != nil { + keepAliveHook(keepAlive) + } + } + return &TCPConn{conn{fd}} +} + +// DialTCP acts like [Dial] for TCP networks. +// +// The network must be a TCP network name; see func Dial for details. +// +// If laddr is nil, a local address is automatically chosen. +// If the IP field of raddr is nil or an unspecified IP address, the +// local system is assumed. +func DialTCP(network string, laddr, raddr *TCPAddr) (*TCPConn, error) { + switch network { + case "tcp", "tcp4", "tcp6": + default: + return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(network)} + } + if raddr == nil { + return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress} + } + sd := &sysDialer{network: network, address: raddr.String()} + c, err := sd.dialTCP(context.Background(), laddr, raddr) + if err != nil { + return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err} + } + return c, nil +} + +// TCPListener is a TCP network listener. Clients should typically +// use variables of type [Listener] instead of assuming TCP. +type TCPListener struct { + fd *netFD + lc ListenConfig +} + +// SyscallConn returns a raw network connection. +// This implements the [syscall.Conn] interface. +// +// The returned RawConn only supports calling Control. Read and +// Write return an error. +func (l *TCPListener) SyscallConn() (syscall.RawConn, error) { + if !l.ok() { + return nil, syscall.EINVAL + } + return newRawListener(l.fd), nil +} + +// AcceptTCP accepts the next incoming call and returns the new +// connection. +func (l *TCPListener) AcceptTCP() (*TCPConn, error) { + if !l.ok() { + return nil, syscall.EINVAL + } + c, err := l.accept() + if err != nil { + return nil, &OpError{Op: "accept", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err} + } + return c, nil +} + +// Accept implements the Accept method in the [Listener] interface; it +// waits for the next call and returns a generic [Conn]. +func (l *TCPListener) Accept() (Conn, error) { + if !l.ok() { + return nil, syscall.EINVAL + } + c, err := l.accept() + if err != nil { + return nil, &OpError{Op: "accept", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err} + } + return c, nil +} + +// Close stops listening on the TCP address. +// Already Accepted connections are not closed. +func (l *TCPListener) Close() error { + if !l.ok() { + return syscall.EINVAL + } + if err := l.close(); err != nil { + return &OpError{Op: "close", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err} + } + return nil +} + +// Addr returns the listener's network address, a [*TCPAddr]. +// The Addr returned is shared by all invocations of Addr, so +// do not modify it. +func (l *TCPListener) Addr() Addr { return l.fd.laddr } + +// SetDeadline sets the deadline associated with the listener. +// A zero time value disables the deadline. +func (l *TCPListener) SetDeadline(t time.Time) error { + if !l.ok() { + return syscall.EINVAL + } + return l.fd.SetDeadline(t) +} + +// File returns a copy of the underlying [os.File]. 
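The listener surface above (ListenTCP, AcceptTCP, SetDeadline) composes into the usual bounded accept loop; a sketch:

package main

import (
	"log"
	"net"
	"time"
)

func main() {
	// A zero Port asks the kernel for a free port, per the ListenTCP docs below.
	ln, err := net.ListenTCP("tcp", &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1)})
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	for {
		// Bound each Accept wait so the loop can observe shutdown.
		ln.SetDeadline(time.Now().Add(time.Second))
		c, err := ln.AcceptTCP()
		if err != nil {
			if ne, ok := err.(net.Error); ok && ne.Timeout() {
				continue // deadline fired; re-arm and keep accepting
			}
			log.Print(err)
			return
		}
		go func() {
			defer c.Close()
			// newTCPConn has already enabled TCP_NODELAY and the
			// default keep-alive for us.
			// ... serve the connection ...
		}()
	}
}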
+// It is the caller's responsibility to close f when finished. +// Closing l does not affect f, and closing f does not affect l. +// +// The returned os.File's file descriptor is different from the +// connection's. Attempting to change properties of the original +// using this duplicate may or may not have the desired effect. +func (l *TCPListener) File() (f *os.File, err error) { + if !l.ok() { + return nil, syscall.EINVAL + } + f, err = l.file() + if err != nil { + return nil, &OpError{Op: "file", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err} + } + return +} + +// ListenTCP acts like [Listen] for TCP networks. +// +// The network must be a TCP network name; see func Dial for details. +// +// If the IP field of laddr is nil or an unspecified IP address, +// ListenTCP listens on all available unicast and anycast IP addresses +// of the local system. +// If the Port field of laddr is 0, a port number is automatically +// chosen. +func ListenTCP(network string, laddr *TCPAddr) (*TCPListener, error) { + switch network { + case "tcp", "tcp4", "tcp6": + default: + return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(network)} + } + if laddr == nil { + laddr = &TCPAddr{} + } + sl := &sysListener{network: network, address: laddr.String()} + ln, err := sl.listenTCP(context.Background(), laddr) + if err != nil { + return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: err} + } + return ln, nil +} + +// roundDurationUp rounds d to the next multiple of to. +func roundDurationUp(d time.Duration, to time.Duration) time.Duration { + return (d + to - 1) / to +} diff --git a/platform/dbops/binaries/go/go/src/net/tcpsock_plan9.go b/platform/dbops/binaries/go/go/src/net/tcpsock_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..463dedcf44cdedf424edbf8c88c6ff3fbbed21ba --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/tcpsock_plan9.go @@ -0,0 +1,90 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "io" + "os" +) + +func (c *TCPConn) readFrom(r io.Reader) (int64, error) { + return genericReadFrom(c, r) +} + +func (c *TCPConn) writeTo(w io.Writer) (int64, error) { + return genericWriteTo(c, w) +} + +func (sd *sysDialer) dialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) { + if h := sd.testHookDialTCP; h != nil { + return h(ctx, sd.network, laddr, raddr) + } + if h := testHookDialTCP; h != nil { + return h(ctx, sd.network, laddr, raddr) + } + return sd.doDialTCP(ctx, laddr, raddr) +} + +func (sd *sysDialer) doDialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) { + switch sd.network { + case "tcp4": + // Plan 9 doesn't complain about [::]:0->127.0.0.1, so it's up to us. 
+ if laddr != nil && len(laddr.IP) != 0 && laddr.IP.To4() == nil { + return nil, &AddrError{Err: "non-IPv4 local address", Addr: laddr.String()} + } + case "tcp", "tcp6": + default: + return nil, UnknownNetworkError(sd.network) + } + if raddr == nil { + return nil, errMissingAddress + } + fd, err := dialPlan9(ctx, sd.network, laddr, raddr) + if err != nil { + return nil, err + } + return newTCPConn(fd, sd.Dialer.KeepAlive, testHookSetKeepAlive), nil +} + +func (ln *TCPListener) ok() bool { return ln != nil && ln.fd != nil && ln.fd.ctl != nil } + +func (ln *TCPListener) accept() (*TCPConn, error) { + fd, err := ln.fd.acceptPlan9() + if err != nil { + return nil, err + } + return newTCPConn(fd, ln.lc.KeepAlive, nil), nil +} + +func (ln *TCPListener) close() error { + if err := ln.fd.pfd.Close(); err != nil { + return err + } + if _, err := ln.fd.ctl.WriteString("hangup"); err != nil { + ln.fd.ctl.Close() + return err + } + if err := ln.fd.ctl.Close(); err != nil { + return err + } + return nil +} + +func (ln *TCPListener) file() (*os.File, error) { + f, err := ln.dup() + if err != nil { + return nil, err + } + return f, nil +} + +func (sl *sysListener) listenTCP(ctx context.Context, laddr *TCPAddr) (*TCPListener, error) { + fd, err := listenPlan9(ctx, sl.network, laddr) + if err != nil { + return nil, err + } + return &TCPListener{fd: fd, lc: sl.ListenConfig}, nil +} diff --git a/platform/dbops/binaries/go/go/src/net/tcpsock_posix.go b/platform/dbops/binaries/go/go/src/net/tcpsock_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..01b5ec9ed0564243952ea36a444962ec250e0e9e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/tcpsock_posix.go @@ -0,0 +1,194 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build unix || js || wasip1 || windows + +package net + +import ( + "context" + "io" + "os" + "syscall" +) + +func sockaddrToTCP(sa syscall.Sockaddr) Addr { + switch sa := sa.(type) { + case *syscall.SockaddrInet4: + return &TCPAddr{IP: sa.Addr[0:], Port: sa.Port} + case *syscall.SockaddrInet6: + return &TCPAddr{IP: sa.Addr[0:], Port: sa.Port, Zone: zoneCache.name(int(sa.ZoneId))} + } + return nil +} + +func (a *TCPAddr) family() int { + if a == nil || len(a.IP) <= IPv4len { + return syscall.AF_INET + } + if a.IP.To4() != nil { + return syscall.AF_INET + } + return syscall.AF_INET6 +} + +func (a *TCPAddr) sockaddr(family int) (syscall.Sockaddr, error) { + if a == nil { + return nil, nil + } + return ipToSockaddr(family, a.IP, a.Port, a.Zone) +} + +func (a *TCPAddr) toLocal(net string) sockaddr { + return &TCPAddr{loopbackIP(net), a.Port, a.Zone} +} + +func (c *TCPConn) readFrom(r io.Reader) (int64, error) { + if n, err, handled := spliceFrom(c.fd, r); handled { + return n, err + } + if n, err, handled := sendFile(c.fd, r); handled { + return n, err + } + return genericReadFrom(c, r) +} + +func (c *TCPConn) writeTo(w io.Writer) (int64, error) { + if n, err, handled := spliceTo(w, c.fd); handled { + return n, err + } + return genericWriteTo(c, w) +} + +func (sd *sysDialer) dialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) { + if h := sd.testHookDialTCP; h != nil { + return h(ctx, sd.network, laddr, raddr) + } + if h := testHookDialTCP; h != nil { + return h(ctx, sd.network, laddr, raddr) + } + return sd.doDialTCP(ctx, laddr, raddr) +} + +func (sd *sysDialer) doDialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) { + return sd.doDialTCPProto(ctx, laddr, raddr, 0) +} + +func (sd *sysDialer) doDialTCPProto(ctx context.Context, laddr, raddr *TCPAddr, proto int) (*TCPConn, error) { + ctrlCtxFn := sd.Dialer.ControlContext + if ctrlCtxFn == nil && sd.Dialer.Control != nil { + ctrlCtxFn = func(cxt context.Context, network, address string, c syscall.RawConn) error { + return sd.Dialer.Control(network, address, c) + } + } + fd, err := internetSocket(ctx, sd.network, laddr, raddr, syscall.SOCK_STREAM, proto, "dial", ctrlCtxFn) + + // TCP has a rarely used mechanism called a 'simultaneous connection' in + // which Dial("tcp", addr1, addr2) run on the machine at addr1 can + // connect to a simultaneous Dial("tcp", addr2, addr1) run on the machine + // at addr2, without either machine executing Listen. If laddr == nil, + // it means we want the kernel to pick an appropriate originating local + // address. Some Linux kernels cycle blindly through a fixed range of + // local ports, regardless of destination port. If a kernel happens to + // pick local port 50001 as the source for a Dial("tcp", "", "localhost:50001"), + // then the Dial will succeed, having simultaneously connected to itself. + // This can only happen when we are letting the kernel pick a port (laddr == nil) + // and when there is no listener for the destination address. + // It's hard to argue this is anything other than a kernel bug. If we + // see this happen, rather than expose the buggy effect to users, we + // close the fd and try again. If it happens twice more, we relent and + // use the result. See also: + // https://golang.org/issue/2690 + // https://stackoverflow.com/questions/4949858/ + // + // The opposite can also happen: if we ask the kernel to pick an appropriate + // originating local address, sometimes it picks one that is already in use. 
+ // So if the error is EADDRNOTAVAIL, we have to try again too, just for + // a different reason. + // + // The kernel socket code is no doubt enjoying watching us squirm. + for i := 0; i < 2 && (laddr == nil || laddr.Port == 0) && (selfConnect(fd, err) || spuriousENOTAVAIL(err)); i++ { + if err == nil { + fd.Close() + } + fd, err = internetSocket(ctx, sd.network, laddr, raddr, syscall.SOCK_STREAM, proto, "dial", ctrlCtxFn) + } + + if err != nil { + return nil, err + } + return newTCPConn(fd, sd.Dialer.KeepAlive, testHookSetKeepAlive), nil +} + +func selfConnect(fd *netFD, err error) bool { + // If the connect failed, we clearly didn't connect to ourselves. + if err != nil { + return false + } + + // The socket constructor can return an fd with raddr nil under certain + // unknown conditions. The errors in the calls there to Getpeername + // are discarded, but we can't catch the problem there because those + // calls are sometimes legally erroneous with a "socket not connected". + // Since this code (selfConnect) is already trying to work around + // a problem, we make sure if this happens we recognize trouble and + // ask the DialTCP routine to try again. + // TODO: try to understand what's really going on. + if fd.laddr == nil || fd.raddr == nil { + return true + } + l := fd.laddr.(*TCPAddr) + r := fd.raddr.(*TCPAddr) + return l.Port == r.Port && l.IP.Equal(r.IP) +} + +func spuriousENOTAVAIL(err error) bool { + if op, ok := err.(*OpError); ok { + err = op.Err + } + if sys, ok := err.(*os.SyscallError); ok { + err = sys.Err + } + return err == syscall.EADDRNOTAVAIL +} + +func (ln *TCPListener) ok() bool { return ln != nil && ln.fd != nil } + +func (ln *TCPListener) accept() (*TCPConn, error) { + fd, err := ln.fd.accept() + if err != nil { + return nil, err + } + return newTCPConn(fd, ln.lc.KeepAlive, nil), nil +} + +func (ln *TCPListener) close() error { + return ln.fd.Close() +} + +func (ln *TCPListener) file() (*os.File, error) { + f, err := ln.fd.dup() + if err != nil { + return nil, err + } + return f, nil +} + +func (sl *sysListener) listenTCP(ctx context.Context, laddr *TCPAddr) (*TCPListener, error) { + return sl.listenTCPProto(ctx, laddr, 0) +} + +func (sl *sysListener) listenTCPProto(ctx context.Context, laddr *TCPAddr, proto int) (*TCPListener, error) { + var ctrlCtxFn func(cxt context.Context, network, address string, c syscall.RawConn) error + if sl.ListenConfig.Control != nil { + ctrlCtxFn = func(cxt context.Context, network, address string, c syscall.RawConn) error { + return sl.ListenConfig.Control(network, address, c) + } + } + fd, err := internetSocket(ctx, sl.network, laddr, nil, syscall.SOCK_STREAM, proto, "listen", ctrlCtxFn) + if err != nil { + return nil, err + } + return &TCPListener{fd: fd, lc: sl.ListenConfig}, nil +} diff --git a/platform/dbops/binaries/go/go/src/net/tcpsock_test.go b/platform/dbops/binaries/go/go/src/net/tcpsock_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b37e936ff82e103f000910afbb8243cb8773b383 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/tcpsock_test.go @@ -0,0 +1,835 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
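selfConnect above guards against the simultaneous-connect artifact by comparing the local and remote (IP, port) pairs. The same check is easy to reproduce from user code when debugging; a sketch (the port is illustrative, echoing the 50001 example in the comment above):

package main

import (
	"fmt"
	"net"
)

func isSelfConnected(c *net.TCPConn) bool {
	l, lok := c.LocalAddr().(*net.TCPAddr)
	r, rok := c.RemoteAddr().(*net.TCPAddr)
	return lok && rok && l.Port == r.Port && l.IP.Equal(r.IP)
}

func main() {
	// With no listener on the target port this normally fails with
	// "connection refused"; on rare kernels and timing it can self-connect.
	c, err := net.Dial("tcp", "127.0.0.1:50001")
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer c.Close()
	fmt.Println("self-connected:", isSelfConnected(c.(*net.TCPConn)))
}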
+ +package net + +import ( + "context" + "errors" + "fmt" + "internal/testenv" + "io" + "os" + "reflect" + "runtime" + "sync" + "testing" + "time" +) + +func BenchmarkTCP4OneShot(b *testing.B) { + benchmarkTCP(b, false, false, "127.0.0.1:0") +} + +func BenchmarkTCP4OneShotTimeout(b *testing.B) { + benchmarkTCP(b, false, true, "127.0.0.1:0") +} + +func BenchmarkTCP4Persistent(b *testing.B) { + benchmarkTCP(b, true, false, "127.0.0.1:0") +} + +func BenchmarkTCP4PersistentTimeout(b *testing.B) { + benchmarkTCP(b, true, true, "127.0.0.1:0") +} + +func BenchmarkTCP6OneShot(b *testing.B) { + if !supportsIPv6() { + b.Skip("ipv6 is not supported") + } + benchmarkTCP(b, false, false, "[::1]:0") +} + +func BenchmarkTCP6OneShotTimeout(b *testing.B) { + if !supportsIPv6() { + b.Skip("ipv6 is not supported") + } + benchmarkTCP(b, false, true, "[::1]:0") +} + +func BenchmarkTCP6Persistent(b *testing.B) { + if !supportsIPv6() { + b.Skip("ipv6 is not supported") + } + benchmarkTCP(b, true, false, "[::1]:0") +} + +func BenchmarkTCP6PersistentTimeout(b *testing.B) { + if !supportsIPv6() { + b.Skip("ipv6 is not supported") + } + benchmarkTCP(b, true, true, "[::1]:0") +} + +func benchmarkTCP(b *testing.B, persistent, timeout bool, laddr string) { + testHookUninstaller.Do(uninstallTestHooks) + + const msgLen = 512 + conns := b.N + numConcurrent := runtime.GOMAXPROCS(-1) * 2 + msgs := 1 + if persistent { + conns = numConcurrent + msgs = b.N / conns + if msgs == 0 { + msgs = 1 + } + if conns > b.N { + conns = b.N + } + } + sendMsg := func(c Conn, buf []byte) bool { + n, err := c.Write(buf) + if n != len(buf) || err != nil { + b.Log(err) + return false + } + return true + } + recvMsg := func(c Conn, buf []byte) bool { + for read := 0; read != len(buf); { + n, err := c.Read(buf) + read += n + if err != nil { + b.Log(err) + return false + } + } + return true + } + ln, err := Listen("tcp", laddr) + if err != nil { + b.Fatal(err) + } + defer ln.Close() + serverSem := make(chan bool, numConcurrent) + // Acceptor. + go func() { + for { + c, err := ln.Accept() + if err != nil { + break + } + serverSem <- true + // Server connection. + go func(c Conn) { + defer func() { + c.Close() + <-serverSem + }() + if timeout { + c.SetDeadline(time.Now().Add(time.Hour)) // Not intended to fire. + } + var buf [msgLen]byte + for m := 0; m < msgs; m++ { + if !recvMsg(c, buf[:]) || !sendMsg(c, buf[:]) { + break + } + } + }(c) + } + }() + clientSem := make(chan bool, numConcurrent) + for i := 0; i < conns; i++ { + clientSem <- true + // Client connection. + go func() { + defer func() { + <-clientSem + }() + c, err := Dial("tcp", ln.Addr().String()) + if err != nil { + b.Log(err) + return + } + defer c.Close() + if timeout { + c.SetDeadline(time.Now().Add(time.Hour)) // Not intended to fire. + } + var buf [msgLen]byte + for m := 0; m < msgs; m++ { + if !sendMsg(c, buf[:]) || !recvMsg(c, buf[:]) { + break + } + } + }() + } + for i := 0; i < numConcurrent; i++ { + clientSem <- true + serverSem <- true + } +} + +func BenchmarkTCP4ConcurrentReadWrite(b *testing.B) { + benchmarkTCPConcurrentReadWrite(b, "127.0.0.1:0") +} + +func BenchmarkTCP6ConcurrentReadWrite(b *testing.B) { + if !supportsIPv6() { + b.Skip("ipv6 is not supported") + } + benchmarkTCPConcurrentReadWrite(b, "[::1]:0") +} + +func benchmarkTCPConcurrentReadWrite(b *testing.B, laddr string) { + testHookUninstaller.Do(uninstallTestHooks) + + // The benchmark creates GOMAXPROCS client/server pairs. + // Each pair creates 4 goroutines: client reader/writer and server reader/writer. 
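+	// A buffered channel pipes each byte from the server reader to the
+	// server writer.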
+ // The benchmark stresses concurrent reading and writing to the same connection. + // Such pattern is used in net/http and net/rpc. + + b.StopTimer() + + P := runtime.GOMAXPROCS(0) + N := b.N / P + W := 1000 + + // Setup P client/server connections. + clients := make([]Conn, P) + servers := make([]Conn, P) + ln, err := Listen("tcp", laddr) + if err != nil { + b.Fatal(err) + } + defer ln.Close() + done := make(chan bool) + go func() { + for p := 0; p < P; p++ { + s, err := ln.Accept() + if err != nil { + b.Error(err) + return + } + servers[p] = s + } + done <- true + }() + for p := 0; p < P; p++ { + c, err := Dial("tcp", ln.Addr().String()) + if err != nil { + b.Fatal(err) + } + clients[p] = c + } + <-done + + b.StartTimer() + + var wg sync.WaitGroup + wg.Add(4 * P) + for p := 0; p < P; p++ { + // Client writer. + go func(c Conn) { + defer wg.Done() + var buf [1]byte + for i := 0; i < N; i++ { + v := byte(i) + for w := 0; w < W; w++ { + v *= v + } + buf[0] = v + _, err := c.Write(buf[:]) + if err != nil { + b.Error(err) + return + } + } + }(clients[p]) + + // Pipe between server reader and server writer. + pipe := make(chan byte, 128) + + // Server reader. + go func(s Conn) { + defer wg.Done() + var buf [1]byte + for i := 0; i < N; i++ { + _, err := s.Read(buf[:]) + if err != nil { + b.Error(err) + return + } + pipe <- buf[0] + } + }(servers[p]) + + // Server writer. + go func(s Conn) { + defer wg.Done() + var buf [1]byte + for i := 0; i < N; i++ { + v := <-pipe + for w := 0; w < W; w++ { + v *= v + } + buf[0] = v + _, err := s.Write(buf[:]) + if err != nil { + b.Error(err) + return + } + } + s.Close() + }(servers[p]) + + // Client reader. + go func(c Conn) { + defer wg.Done() + var buf [1]byte + for i := 0; i < N; i++ { + _, err := c.Read(buf[:]) + if err != nil { + b.Error(err) + return + } + } + c.Close() + }(clients[p]) + } + wg.Wait() +} + +type resolveTCPAddrTest struct { + network string + litAddrOrName string + addr *TCPAddr + err error +} + +var resolveTCPAddrTests = []resolveTCPAddrTest{ + {"tcp", "127.0.0.1:0", &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 0}, nil}, + {"tcp4", "127.0.0.1:65535", &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 65535}, nil}, + + {"tcp", "[::1]:0", &TCPAddr{IP: ParseIP("::1"), Port: 0}, nil}, + {"tcp6", "[::1]:65535", &TCPAddr{IP: ParseIP("::1"), Port: 65535}, nil}, + + {"tcp", "[::1%en0]:1", &TCPAddr{IP: ParseIP("::1"), Port: 1, Zone: "en0"}, nil}, + {"tcp6", "[::1%911]:2", &TCPAddr{IP: ParseIP("::1"), Port: 2, Zone: "911"}, nil}, + + {"", "127.0.0.1:0", &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 0}, nil}, // Go 1.0 behavior + {"", "[::1]:0", &TCPAddr{IP: ParseIP("::1"), Port: 0}, nil}, // Go 1.0 behavior + + {"tcp", ":12345", &TCPAddr{Port: 12345}, nil}, + + {"http", "127.0.0.1:0", nil, UnknownNetworkError("http")}, + + {"tcp", "127.0.0.1:http", &TCPAddr{IP: ParseIP("127.0.0.1"), Port: 80}, nil}, + {"tcp", "[::ffff:127.0.0.1]:http", &TCPAddr{IP: ParseIP("::ffff:127.0.0.1"), Port: 80}, nil}, + {"tcp", "[2001:db8::1]:http", &TCPAddr{IP: ParseIP("2001:db8::1"), Port: 80}, nil}, + {"tcp4", "127.0.0.1:http", &TCPAddr{IP: ParseIP("127.0.0.1"), Port: 80}, nil}, + {"tcp4", "[::ffff:127.0.0.1]:http", &TCPAddr{IP: ParseIP("127.0.0.1"), Port: 80}, nil}, + {"tcp6", "[2001:db8::1]:http", &TCPAddr{IP: ParseIP("2001:db8::1"), Port: 80}, nil}, + + {"tcp4", "[2001:db8::1]:http", nil, &AddrError{Err: errNoSuitableAddress.Error(), Addr: "2001:db8::1"}}, + {"tcp6", "127.0.0.1:http", nil, &AddrError{Err: errNoSuitableAddress.Error(), Addr: "127.0.0.1"}}, + {"tcp6", 
"[::ffff:127.0.0.1]:http", nil, &AddrError{Err: errNoSuitableAddress.Error(), Addr: "::ffff:127.0.0.1"}}, +} + +func TestResolveTCPAddr(t *testing.T) { + origTestHookLookupIP := testHookLookupIP + defer func() { testHookLookupIP = origTestHookLookupIP }() + testHookLookupIP = lookupLocalhost + + for _, tt := range resolveTCPAddrTests { + addr, err := ResolveTCPAddr(tt.network, tt.litAddrOrName) + if !reflect.DeepEqual(addr, tt.addr) || !reflect.DeepEqual(err, tt.err) { + t.Errorf("ResolveTCPAddr(%q, %q) = %#v, %v, want %#v, %v", tt.network, tt.litAddrOrName, addr, err, tt.addr, tt.err) + continue + } + if err == nil { + addr2, err := ResolveTCPAddr(addr.Network(), addr.String()) + if !reflect.DeepEqual(addr2, tt.addr) || err != tt.err { + t.Errorf("(%q, %q): ResolveTCPAddr(%q, %q) = %#v, %v, want %#v, %v", tt.network, tt.litAddrOrName, addr.Network(), addr.String(), addr2, err, tt.addr, tt.err) + } + } + } +} + +var tcpListenerNameTests = []struct { + net string + laddr *TCPAddr +}{ + {"tcp4", &TCPAddr{IP: IPv4(127, 0, 0, 1)}}, + {"tcp4", &TCPAddr{}}, + {"tcp4", nil}, +} + +func TestTCPListenerName(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + for _, tt := range tcpListenerNameTests { + ln, err := ListenTCP(tt.net, tt.laddr) + if err != nil { + t.Fatal(err) + } + defer ln.Close() + la := ln.Addr() + if a, ok := la.(*TCPAddr); !ok || a.Port == 0 { + t.Fatalf("got %v; expected a proper address with non-zero port number", la) + } + } +} + +func TestIPv6LinkLocalUnicastTCP(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + if !supportsIPv6() { + t.Skip("IPv6 is not supported") + } + + for i, tt := range ipv6LinkLocalUnicastTCPTests { + ln, err := Listen(tt.network, tt.address) + if err != nil { + // It might return "LookupHost returned no + // suitable address" error on some platforms. 
+ t.Log(err) + continue + } + ls := (&streamListener{Listener: ln}).newLocalServer() + defer ls.teardown() + ch := make(chan error, 1) + handler := func(ls *localServer, ln Listener) { ls.transponder(ln, ch) } + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + if la, ok := ln.Addr().(*TCPAddr); !ok || !tt.nameLookup && la.Zone == "" { + t.Fatalf("got %v; expected a proper address with zone identifier", la) + } + + c, err := Dial(tt.network, ls.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + if la, ok := c.LocalAddr().(*TCPAddr); !ok || !tt.nameLookup && la.Zone == "" { + t.Fatalf("got %v; expected a proper address with zone identifier", la) + } + if ra, ok := c.RemoteAddr().(*TCPAddr); !ok || !tt.nameLookup && ra.Zone == "" { + t.Fatalf("got %v; expected a proper address with zone identifier", ra) + } + + if _, err := c.Write([]byte("TCP OVER IPV6 LINKLOCAL TEST")); err != nil { + t.Fatal(err) + } + b := make([]byte, 32) + if _, err := c.Read(b); err != nil { + t.Fatal(err) + } + + for err := range ch { + t.Errorf("#%d: %v", i, err) + } + } +} + +func TestTCPConcurrentAccept(t *testing.T) { + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + ln, err := Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + const N = 10 + var wg sync.WaitGroup + wg.Add(N) + for i := 0; i < N; i++ { + go func() { + for { + c, err := ln.Accept() + if err != nil { + break + } + c.Close() + } + wg.Done() + }() + } + attempts := 10 * N + fails := 0 + d := &Dialer{Timeout: 200 * time.Millisecond} + for i := 0; i < attempts; i++ { + c, err := d.Dial("tcp", ln.Addr().String()) + if err != nil { + fails++ + } else { + c.Close() + } + } + ln.Close() + wg.Wait() + if fails > attempts/9 { // see issues 7400 and 7541 + t.Fatalf("too many Dial failed: %v", fails) + } + if fails > 0 { + t.Logf("# of failed Dials: %v", fails) + } +} + +func TestTCPReadWriteAllocs(t *testing.T) { + switch runtime.GOOS { + case "plan9": + // The implementation of asynchronous cancelable + // I/O on Plan 9 allocates memory. + // See net/fd_io_plan9.go. 
+ t.Skipf("not supported on %s", runtime.GOOS) + } + + ln, err := Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + var server Conn + errc := make(chan error, 1) + go func() { + var err error + server, err = ln.Accept() + errc <- err + }() + client, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer client.Close() + if err := <-errc; err != nil { + t.Fatal(err) + } + defer server.Close() + + var buf [128]byte + allocs := testing.AllocsPerRun(1000, func() { + _, err := server.Write(buf[:]) + if err != nil { + t.Fatal(err) + } + _, err = io.ReadFull(client, buf[:]) + if err != nil { + t.Fatal(err) + } + }) + if allocs > 0 { + t.Fatalf("got %v; want 0", allocs) + } + + var bufwrt [128]byte + ch := make(chan bool) + defer close(ch) + go func() { + for <-ch { + _, err := server.Write(bufwrt[:]) + errc <- err + } + }() + allocs = testing.AllocsPerRun(1000, func() { + ch <- true + if _, err = io.ReadFull(client, buf[:]); err != nil { + t.Fatal(err) + } + if err := <-errc; err != nil { + t.Fatal(err) + } + }) + if allocs > 0 { + t.Fatalf("got %v; want 0", allocs) + } +} + +func TestTCPStress(t *testing.T) { + const conns = 2 + const msgLen = 512 + msgs := int(1e4) + if testing.Short() { + msgs = 1e2 + } + + sendMsg := func(c Conn, buf []byte) bool { + n, err := c.Write(buf) + if n != len(buf) || err != nil { + t.Log(err) + return false + } + return true + } + recvMsg := func(c Conn, buf []byte) bool { + for read := 0; read != len(buf); { + n, err := c.Read(buf) + read += n + if err != nil { + t.Log(err) + return false + } + } + return true + } + + ln, err := Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + done := make(chan bool) + // Acceptor. + go func() { + defer func() { + done <- true + }() + for { + c, err := ln.Accept() + if err != nil { + break + } + // Server connection. + go func(c Conn) { + defer c.Close() + var buf [msgLen]byte + for m := 0; m < msgs; m++ { + if !recvMsg(c, buf[:]) || !sendMsg(c, buf[:]) { + break + } + } + }(c) + } + }() + for i := 0; i < conns; i++ { + // Client connection. + go func() { + defer func() { + done <- true + }() + c, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Log(err) + return + } + defer c.Close() + var buf [msgLen]byte + for m := 0; m < msgs; m++ { + if !sendMsg(c, buf[:]) || !recvMsg(c, buf[:]) { + break + } + } + }() + } + for i := 0; i < conns; i++ { + <-done + } + ln.Close() + <-done +} + +// Test that >32-bit reads work on 64-bit systems. +// On 32-bit systems this tests that maxint reads work. 
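+// The write side is exercised both through a plain Write and through
+// Buffers.WriteTo (the writev path), via the writev=false/true subtests.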
+func TestTCPBig(t *testing.T) { + if !*testTCPBig { + t.Skip("test disabled; use -tcpbig to enable") + } + + for _, writev := range []bool{false, true} { + t.Run(fmt.Sprintf("writev=%v", writev), func(t *testing.T) { + ln := newLocalListener(t, "tcp") + defer ln.Close() + + x := int(1 << 30) + x = x*5 + 1<<20 // just over 5 GB on 64-bit, just over 1GB on 32-bit + done := make(chan int) + go func() { + defer close(done) + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + buf := make([]byte, x) + var n int + if writev { + var n64 int64 + n64, err = (&Buffers{buf}).WriteTo(c) + n = int(n64) + } else { + n, err = c.Write(buf) + } + if n != len(buf) || err != nil { + t.Errorf("Write(buf) = %d, %v, want %d, nil", n, err, x) + } + c.Close() + }() + + c, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + buf := make([]byte, x) + n, err := io.ReadFull(c, buf) + if n != len(buf) || err != nil { + t.Errorf("Read(buf) = %d, %v, want %d, nil", n, err, x) + } + c.Close() + <-done + }) + } +} + +func TestCopyPipeIntoTCP(t *testing.T) { + switch runtime.GOOS { + case "js", "wasip1": + t.Skipf("skipping: os.Pipe not supported on %s", runtime.GOOS) + } + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + errc := make(chan error, 1) + defer func() { + if err := <-errc; err != nil { + t.Error(err) + } + }() + go func() { + c, err := ln.Accept() + if err != nil { + errc <- err + return + } + defer c.Close() + + buf := make([]byte, 100) + n, err := io.ReadFull(c, buf) + if err != io.ErrUnexpectedEOF || n != 2 { + errc <- fmt.Errorf("got err=%q n=%v; want err=%q n=2", err, n, io.ErrUnexpectedEOF) + return + } + + errc <- nil + }() + + c, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + + errc2 := make(chan error, 1) + defer func() { + if err := <-errc2; err != nil { + t.Error(err) + } + }() + + defer w.Close() + + go func() { + _, err := io.Copy(c, r) + errc2 <- err + }() + + // Split write into 2 packets. That makes Windows TransmitFile + // drop second packet. 
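+	// (The 100ms sleep between the two one-byte writes below is what
+	// separates the payload into two packets.)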
+ packet := make([]byte, 1) + _, err = w.Write(packet) + if err != nil { + t.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + _, err = w.Write(packet) + if err != nil { + t.Fatal(err) + } +} + +func BenchmarkSetReadDeadline(b *testing.B) { + ln := newLocalListener(b, "tcp") + defer ln.Close() + var serv Conn + done := make(chan error) + go func() { + var err error + serv, err = ln.Accept() + done <- err + }() + c, err := Dial("tcp", ln.Addr().String()) + if err != nil { + b.Fatal(err) + } + defer c.Close() + if err := <-done; err != nil { + b.Fatal(err) + } + defer serv.Close() + c.SetWriteDeadline(time.Now().Add(2 * time.Hour)) + deadline := time.Now().Add(time.Hour) + b.ResetTimer() + for i := 0; i < b.N; i++ { + c.SetReadDeadline(deadline) + deadline = deadline.Add(1) + } +} + +func TestDialTCPDefaultKeepAlive(t *testing.T) { + ln := newLocalListener(t, "tcp") + defer ln.Close() + + got := time.Duration(-1) + testHookSetKeepAlive = func(d time.Duration) { got = d } + defer func() { testHookSetKeepAlive = func(time.Duration) {} }() + + c, err := DialTCP("tcp", nil, ln.Addr().(*TCPAddr)) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if got != defaultTCPKeepAlive { + t.Errorf("got keepalive %v; want %v", got, defaultTCPKeepAlive) + } +} + +func TestTCPListenAfterClose(t *testing.T) { + // Regression test for https://go.dev/issue/50216: + // after calling Close on a Listener, the fake net implementation would + // erroneously Accept a connection dialed before the call to Close. + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + var wg sync.WaitGroup + ctx, cancel := context.WithCancel(context.Background()) + + d := &Dialer{} + for n := 2; n > 0; n-- { + wg.Add(1) + go func() { + defer wg.Done() + + c, err := d.DialContext(ctx, ln.Addr().Network(), ln.Addr().String()) + if err == nil { + <-ctx.Done() + c.Close() + } + }() + } + + c, err := ln.Accept() + if err == nil { + c.Close() + } else { + t.Error(err) + } + time.Sleep(10 * time.Millisecond) + cancel() + wg.Wait() + ln.Close() + + c, err = ln.Accept() + if !errors.Is(err, ErrClosed) { + if err == nil { + c.Close() + } + t.Errorf("after l.Close(), l.Accept() = _, %v\nwant %v", err, ErrClosed) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/tcpsock_unix_test.go b/platform/dbops/binaries/go/go/src/net/tcpsock_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..df810a21d86824e5a2124d0961d86382acc98571 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/tcpsock_unix_test.go @@ -0,0 +1,112 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !plan9 && !windows + +package net + +import ( + "context" + "math/rand" + "runtime" + "sync" + "syscall" + "testing" + "time" +) + +// See golang.org/issue/14548. 
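+// The test races a storm of short-deadline dials against a closing
+// listener and checks that any Dial that reports success produced a
+// usable connection: a following Write must not fail with ENOTCONN.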
+func TestTCPSpuriousConnSetupCompletion(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + + ln := newLocalListener(t, "tcp") + var wg sync.WaitGroup + wg.Add(1) + go func(ln Listener) { + defer wg.Done() + for { + c, err := ln.Accept() + if err != nil { + return + } + wg.Add(1) + go func(c Conn) { + var b [1]byte + c.Read(b[:]) + c.Close() + wg.Done() + }(c) + } + }(ln) + + attempts := int(1e4) // larger is better + wg.Add(attempts) + throttle := make(chan struct{}, runtime.GOMAXPROCS(-1)*2) + for i := 0; i < attempts; i++ { + throttle <- struct{}{} + go func(i int) { + defer func() { + <-throttle + wg.Done() + }() + d := Dialer{Timeout: 50 * time.Millisecond} + c, err := d.Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + if perr := parseDialError(err); perr != nil { + t.Errorf("#%d: %v (original error: %v)", i, perr, err) + } + return + } + var b [1]byte + if _, err := c.Write(b[:]); err != nil { + if perr := parseWriteError(err); perr != nil { + t.Errorf("#%d: %v", i, err) + } + if samePlatformError(err, syscall.ENOTCONN) { + t.Errorf("#%d: %v", i, err) + } + } + c.Close() + }(i) + } + + ln.Close() + wg.Wait() +} + +// Issue 19289. +// Test that a canceled Dial does not cause a subsequent Dial to succeed. +func TestTCPSpuriousConnSetupCompletionWithCancel(t *testing.T) { + mustHaveExternalNetwork(t) + + defer dnsWaitGroup.Wait() + t.Parallel() + const tries = 10000 + var wg sync.WaitGroup + wg.Add(tries * 2) + sem := make(chan bool, 5) + for i := 0; i < tries; i++ { + sem <- true + ctx, cancel := context.WithCancel(context.Background()) + go func() { + defer wg.Done() + time.Sleep(time.Duration(rand.Int63n(int64(5 * time.Millisecond)))) + cancel() + }() + go func(i int) { + defer wg.Done() + var dialer Dialer + // Try to connect to a real host on a port + // that it is not listening on. + _, err := dialer.DialContext(ctx, "tcp", "golang.org:3") + if err == nil { + t.Errorf("Dial to unbound port succeeded on attempt %d", i) + } + <-sem + }(i) + } + wg.Wait() +} diff --git a/platform/dbops/binaries/go/go/src/net/tcpsockopt_darwin.go b/platform/dbops/binaries/go/go/src/net/tcpsockopt_darwin.go new file mode 100644 index 0000000000000000000000000000000000000000..53c6756e33e00b3f107d3012fc11109f88219228 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/tcpsockopt_darwin.go @@ -0,0 +1,25 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "runtime" + "syscall" + "time" +) + +// syscall.TCP_KEEPINTVL is missing on some darwin architectures. +const sysTCP_KEEPINTVL = 0x101 + +func setKeepAlivePeriod(fd *netFD, d time.Duration) error { + // The kernel expects seconds so round to next highest second. 
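+	// Rounding up also keeps sub-second periods from truncating to
+	// zero (e.g. 500ms becomes 1s, not 0s).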
+ secs := int(roundDurationUp(d, time.Second)) + if err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, sysTCP_KEEPINTVL, secs); err != nil { + return wrapSyscallError("setsockopt", err) + } + err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPALIVE, secs) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} diff --git a/platform/dbops/binaries/go/go/src/net/tcpsockopt_dragonfly.go b/platform/dbops/binaries/go/go/src/net/tcpsockopt_dragonfly.go new file mode 100644 index 0000000000000000000000000000000000000000..b473c02b6867db4a5afb4fcb4c0b2d947ad64aab --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/tcpsockopt_dragonfly.go @@ -0,0 +1,23 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "runtime" + "syscall" + "time" +) + +func setKeepAlivePeriod(fd *netFD, d time.Duration) error { + // The kernel expects milliseconds so round to next highest + // millisecond. + msecs := int(roundDurationUp(d, time.Millisecond)) + if err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, msecs); err != nil { + return wrapSyscallError("setsockopt", err) + } + err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, msecs) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} diff --git a/platform/dbops/binaries/go/go/src/net/tcpsockopt_openbsd.go b/platform/dbops/binaries/go/go/src/net/tcpsockopt_openbsd.go new file mode 100644 index 0000000000000000000000000000000000000000..10e1bef3e5aaed12b5a05cdcd43f611f8e1a2254 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/tcpsockopt_openbsd.go @@ -0,0 +1,16 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "syscall" + "time" +) + +func setKeepAlivePeriod(fd *netFD, d time.Duration) error { + // OpenBSD has no user-settable per-socket TCP keepalive + // options. + return syscall.ENOPROTOOPT +} diff --git a/platform/dbops/binaries/go/go/src/net/tcpsockopt_plan9.go b/platform/dbops/binaries/go/go/src/net/tcpsockopt_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..264359dcf3daf15bd14c8d82eadafeffcf566dbb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/tcpsockopt_plan9.go @@ -0,0 +1,24 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TCP socket options for plan9 + +package net + +import ( + "internal/itoa" + "syscall" + "time" +) + +func setNoDelay(fd *netFD, noDelay bool) error { + return syscall.EPLAN9 +} + +// Set keep alive period. +func setKeepAlivePeriod(fd *netFD, d time.Duration) error { + cmd := "keepalive " + itoa.Itoa(int(d/time.Millisecond)) + _, e := fd.ctl.WriteAt([]byte(cmd), 0) + return e +} diff --git a/platform/dbops/binaries/go/go/src/net/tcpsockopt_posix.go b/platform/dbops/binaries/go/go/src/net/tcpsockopt_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..d708f04875897697bb494cc637670979440d37eb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/tcpsockopt_posix.go @@ -0,0 +1,18 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build unix || windows + +package net + +import ( + "runtime" + "syscall" +) + +func setNoDelay(fd *netFD, noDelay bool) error { + err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_NODELAY, boolint(noDelay)) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} diff --git a/platform/dbops/binaries/go/go/src/net/tcpsockopt_solaris.go b/platform/dbops/binaries/go/go/src/net/tcpsockopt_solaris.go new file mode 100644 index 0000000000000000000000000000000000000000..f15e589dc058488850533a8ff510a9901cde149b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/tcpsockopt_solaris.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "runtime" + "syscall" + "time" +) + +func setKeepAlivePeriod(fd *netFD, d time.Duration) error { + // The kernel expects milliseconds so round to next highest + // millisecond. + msecs := int(roundDurationUp(d, time.Millisecond)) + + // Normally we'd do + // syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, secs) + // here, but we can't because Solaris does not have TCP_KEEPINTVL. + // Solaris has TCP_KEEPALIVE_ABORT_THRESHOLD, but it's not the same + // thing, it refers to the total time until aborting (not between + // probes), and it uses an exponential backoff algorithm instead of + // waiting the same time between probes. We can't hope for the best + // and do it anyway, like on Darwin, because Solaris might eventually + // allocate a constant with a different meaning for the value of + // TCP_KEEPINTVL on illumos. + + err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPALIVE_THRESHOLD, msecs) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} diff --git a/platform/dbops/binaries/go/go/src/net/tcpsockopt_stub.go b/platform/dbops/binaries/go/go/src/net/tcpsockopt_stub.go new file mode 100644 index 0000000000000000000000000000000000000000..cef07cd6484e51b7c761da575d0a5ad2a923693a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/tcpsockopt_stub.go @@ -0,0 +1,20 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build js || wasip1 + +package net + +import ( + "syscall" + "time" +) + +func setNoDelay(fd *netFD, noDelay bool) error { + return syscall.ENOPROTOOPT +} + +func setKeepAlivePeriod(fd *netFD, d time.Duration) error { + return syscall.ENOPROTOOPT +} diff --git a/platform/dbops/binaries/go/go/src/net/tcpsockopt_unix.go b/platform/dbops/binaries/go/go/src/net/tcpsockopt_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..bdcdc4023983944dbe39b8483786df7555418f8c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/tcpsockopt_unix.go @@ -0,0 +1,24 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || freebsd || linux || netbsd + +package net + +import ( + "runtime" + "syscall" + "time" +) + +func setKeepAlivePeriod(fd *netFD, d time.Duration) error { + // The kernel expects seconds so round to next highest second. 
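+	// (The same rounded value is applied to both TCP_KEEPINTVL and
+	// TCP_KEEPIDLE below.)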
+ secs := int(roundDurationUp(d, time.Second)) + if err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, secs); err != nil { + return wrapSyscallError("setsockopt", err) + } + err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, secs) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} diff --git a/platform/dbops/binaries/go/go/src/net/tcpsockopt_windows.go b/platform/dbops/binaries/go/go/src/net/tcpsockopt_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..4a0b09465eea839d19a7baa91bcf0842bd311ca2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/tcpsockopt_windows.go @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "os" + "runtime" + "syscall" + "time" + "unsafe" +) + +func setKeepAlivePeriod(fd *netFD, d time.Duration) error { + // The kernel expects milliseconds so round to next highest + // millisecond. + msecs := uint32(roundDurationUp(d, time.Millisecond)) + ka := syscall.TCPKeepalive{ + OnOff: 1, + Time: msecs, + Interval: msecs, + } + ret := uint32(0) + size := uint32(unsafe.Sizeof(ka)) + err := fd.pfd.WSAIoctl(syscall.SIO_KEEPALIVE_VALS, (*byte)(unsafe.Pointer(&ka)), size, nil, 0, &ret, nil, 0) + runtime.KeepAlive(fd) + return os.NewSyscallError("wsaioctl", err) +} diff --git a/platform/dbops/binaries/go/go/src/net/timeout_test.go b/platform/dbops/binaries/go/go/src/net/timeout_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ca86f31ef2f98231be6bf967fe8f94ec71e24369 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/timeout_test.go @@ -0,0 +1,1201 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "errors" + "fmt" + "internal/testenv" + "io" + "os" + "runtime" + "sync" + "testing" + "time" +) + +func init() { + // Install a hook to ensure that a 1ns timeout will always + // be exceeded by the time Dial gets to the relevant system call. + // + // Without this, systems with a very large timer granularity — such as + // Windows — may be able to accept connections without measurably exceeding + // even an implausibly short deadline. + testHookStepTime = func() { + now := time.Now() + for time.Since(now) == 0 { + time.Sleep(1 * time.Nanosecond) + } + } +} + +var dialTimeoutTests = []struct { + initialTimeout time.Duration + initialDelta time.Duration // for deadline +}{ + // Tests that dial timeouts, deadlines in the past work. 
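+	// (Negative values put the deadline in the past before Dial starts;
+	// -1 << 63 probes the most negative duration in particular.)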
+ {-5 * time.Second, 0}, + {0, -5 * time.Second}, + {-5 * time.Second, 5 * time.Second}, // timeout over deadline + {-1 << 63, 0}, + {0, -1 << 63}, + + {1 * time.Millisecond, 0}, + {0, 1 * time.Millisecond}, + {1 * time.Millisecond, 5 * time.Second}, // timeout over deadline +} + +func TestDialTimeout(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + t.Parallel() + + ln := newLocalListener(t, "tcp") + defer func() { + if err := ln.Close(); err != nil { + t.Error(err) + } + }() + + for _, tt := range dialTimeoutTests { + t.Run(fmt.Sprintf("%v/%v", tt.initialTimeout, tt.initialDelta), func(t *testing.T) { + // We don't run these subtests in parallel because we don't know how big + // the kernel's accept queue is, and we don't want to accidentally saturate + // it with concurrent calls. (That could cause the Dial to fail with + // ECONNREFUSED or ECONNRESET instead of a timeout error.) + d := Dialer{Timeout: tt.initialTimeout} + delta := tt.initialDelta + + var ( + beforeDial time.Time + afterDial time.Time + err error + ) + for { + if delta != 0 { + d.Deadline = time.Now().Add(delta) + } + + beforeDial = time.Now() + + var c Conn + c, err = d.Dial(ln.Addr().Network(), ln.Addr().String()) + afterDial = time.Now() + + if err != nil { + break + } + + // Even though we're not calling Accept on the Listener, the kernel may + // spuriously accept connections on its behalf. If that happens, we will + // close the connection (to try to get it out of the kernel's accept + // queue) and try a shorter timeout. + // + // We assume that we will reach a point where the call actually does + // time out, although in theory (since this socket is on a loopback + // address) a sufficiently clever kernel could notice that no Accept + // call is pending and bypass both the queue and the timeout to return + // another error immediately. + t.Logf("closing spurious connection from Dial") + c.Close() + + if delta <= 1 && d.Timeout <= 1 { + t.Fatalf("can't reduce Timeout or Deadline") + } + if delta > 1 { + delta /= 2 + t.Logf("reducing Deadline delta to %v", delta) + } + if d.Timeout > 1 { + d.Timeout /= 2 + t.Logf("reducing Timeout to %v", d.Timeout) + } + } + + if d.Deadline.IsZero() || afterDial.Before(d.Deadline) { + delay := afterDial.Sub(beforeDial) + if delay < d.Timeout { + t.Errorf("Dial returned after %v; want ≥%v", delay, d.Timeout) + } + } + + if perr := parseDialError(err); perr != nil { + t.Errorf("unexpected error from Dial: %v", perr) + } + if nerr, ok := err.(Error); !ok || !nerr.Timeout() { + t.Errorf("Dial: %v, want timeout", err) + } + }) + } +} + +func TestDialTimeoutMaxDuration(t *testing.T) { + ln := newLocalListener(t, "tcp") + defer func() { + if err := ln.Close(); err != nil { + t.Error(err) + } + }() + + for _, tt := range []struct { + timeout time.Duration + delta time.Duration // for deadline + }{ + // Large timeouts that will overflow an int64 unix nanos. 
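+		// (Adding either value to time.Now would overflow; the Dial is
+		// still expected to succeed, so the implementation has to clamp
+		// the deadline rather than let it wrap into the past.)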
+ {1<<63 - 1, 0}, + {0, 1<<63 - 1}, + } { + t.Run(fmt.Sprintf("timeout=%s/delta=%s", tt.timeout, tt.delta), func(t *testing.T) { + d := Dialer{Timeout: tt.timeout} + if tt.delta != 0 { + d.Deadline = time.Now().Add(tt.delta) + } + c, err := d.Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + if err := c.Close(); err != nil { + t.Error(err) + } + }) + } +} + +var acceptTimeoutTests = []struct { + timeout time.Duration + xerrs [2]error // expected errors in transition +}{ + // Tests that accept deadlines in the past work, even if + // there's incoming connections available. + {-5 * time.Second, [2]error{os.ErrDeadlineExceeded, os.ErrDeadlineExceeded}}, + + {50 * time.Millisecond, [2]error{nil, os.ErrDeadlineExceeded}}, +} + +func TestAcceptTimeout(t *testing.T) { + testenv.SkipFlaky(t, 17948) + t.Parallel() + + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + var wg sync.WaitGroup + for i, tt := range acceptTimeoutTests { + if tt.timeout < 0 { + wg.Add(1) + go func() { + defer wg.Done() + d := Dialer{Timeout: 100 * time.Millisecond} + c, err := d.Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Error(err) + return + } + c.Close() + }() + } + + if err := ln.(*TCPListener).SetDeadline(time.Now().Add(tt.timeout)); err != nil { + t.Fatalf("$%d: %v", i, err) + } + for j, xerr := range tt.xerrs { + for { + c, err := ln.Accept() + if xerr != nil { + if perr := parseAcceptError(err); perr != nil { + t.Errorf("#%d/%d: %v", i, j, perr) + } + if !isDeadlineExceeded(err) { + t.Fatalf("#%d/%d: %v", i, j, err) + } + } + if err == nil { + c.Close() + time.Sleep(10 * time.Millisecond) + continue + } + break + } + } + } + wg.Wait() +} + +func TestAcceptTimeoutMustReturn(t *testing.T) { + t.Parallel() + + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + if err := ln.(*TCPListener).SetDeadline(noDeadline); err != nil { + t.Error(err) + } + if err := ln.(*TCPListener).SetDeadline(time.Now().Add(10 * time.Millisecond)); err != nil { + t.Error(err) + } + c, err := ln.Accept() + if err == nil { + c.Close() + } + + if perr := parseAcceptError(err); perr != nil { + t.Error(perr) + } + if !isDeadlineExceeded(err) { + t.Fatal(err) + } +} + +func TestAcceptTimeoutMustNotReturn(t *testing.T) { + t.Parallel() + + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + maxch := make(chan *time.Timer) + ch := make(chan error) + go func() { + if err := ln.(*TCPListener).SetDeadline(time.Now().Add(-5 * time.Second)); err != nil { + t.Error(err) + } + if err := ln.(*TCPListener).SetDeadline(noDeadline); err != nil { + t.Error(err) + } + maxch <- time.NewTimer(100 * time.Millisecond) + _, err := ln.Accept() + ch <- err + }() + + max := <-maxch + defer max.Stop() + + select { + case err := <-ch: + if perr := parseAcceptError(err); perr != nil { + t.Error(perr) + } + t.Fatalf("expected Accept to not return, but it returned with %v", err) + case <-max.C: + ln.Close() + <-ch // wait for tester goroutine to stop + } +} + +var readTimeoutTests = []struct { + timeout time.Duration + xerrs [2]error // expected errors in transition +}{ + // Tests that read deadlines work, even if there's data ready + // to be read. 
+ {-5 * time.Second, [2]error{os.ErrDeadlineExceeded, os.ErrDeadlineExceeded}}, + + {50 * time.Millisecond, [2]error{nil, os.ErrDeadlineExceeded}}, +} + +// There is a very similar copy of this in os/timeout_test.go. +func TestReadTimeout(t *testing.T) { + handler := func(ls *localServer, ln Listener) { + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + c.Write([]byte("READ TIMEOUT TEST")) + defer c.Close() + } + ls := newLocalServer(t, "tcp") + defer ls.teardown() + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + for i, tt := range readTimeoutTests { + if err := c.SetReadDeadline(time.Now().Add(tt.timeout)); err != nil { + t.Fatalf("#%d: %v", i, err) + } + var b [1]byte + for j, xerr := range tt.xerrs { + for { + n, err := c.Read(b[:]) + if xerr != nil { + if perr := parseReadError(err); perr != nil { + t.Errorf("#%d/%d: %v", i, j, perr) + } + if !isDeadlineExceeded(err) { + t.Fatalf("#%d/%d: %v", i, j, err) + } + } + if err == nil { + time.Sleep(tt.timeout / 3) + continue + } + if n != 0 { + t.Fatalf("#%d/%d: read %d; want 0", i, j, n) + } + break + } + } + } +} + +// There is a very similar copy of this in os/timeout_test.go. +func TestReadTimeoutMustNotReturn(t *testing.T) { + t.Parallel() + + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + c, err := Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + maxch := make(chan *time.Timer) + ch := make(chan error) + go func() { + if err := c.SetDeadline(time.Now().Add(-5 * time.Second)); err != nil { + t.Error(err) + } + if err := c.SetWriteDeadline(time.Now().Add(-5 * time.Second)); err != nil { + t.Error(err) + } + if err := c.SetReadDeadline(noDeadline); err != nil { + t.Error(err) + } + maxch <- time.NewTimer(100 * time.Millisecond) + var b [1]byte + _, err := c.Read(b[:]) + ch <- err + }() + + max := <-maxch + defer max.Stop() + + select { + case err := <-ch: + if perr := parseReadError(err); perr != nil { + t.Error(perr) + } + t.Fatalf("expected Read to not return, but it returned with %v", err) + case <-max.C: + c.Close() + err := <-ch // wait for tester goroutine to stop + if perr := parseReadError(err); perr != nil { + t.Error(perr) + } + if nerr, ok := err.(Error); !ok || nerr.Timeout() || nerr.Temporary() { + t.Fatal(err) + } + } +} + +var readFromTimeoutTests = []struct { + timeout time.Duration + xerrs [2]error // expected errors in transition +}{ + // Tests that read deadlines work, even if there's data ready + // to be read. 
+ {-5 * time.Second, [2]error{os.ErrDeadlineExceeded, os.ErrDeadlineExceeded}}, + + {50 * time.Millisecond, [2]error{nil, os.ErrDeadlineExceeded}}, +} + +func TestReadFromTimeout(t *testing.T) { + ch := make(chan Addr) + defer close(ch) + handler := func(ls *localPacketServer, c PacketConn) { + if dst, ok := <-ch; ok { + c.WriteTo([]byte("READFROM TIMEOUT TEST"), dst) + } + } + ls := newLocalPacketServer(t, "udp") + defer ls.teardown() + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + host, _, err := SplitHostPort(ls.PacketConn.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + c, err := ListenPacket(ls.PacketConn.LocalAddr().Network(), JoinHostPort(host, "0")) + if err != nil { + t.Fatal(err) + } + defer c.Close() + ch <- c.LocalAddr() + + for i, tt := range readFromTimeoutTests { + if err := c.SetReadDeadline(time.Now().Add(tt.timeout)); err != nil { + t.Fatalf("#%d: %v", i, err) + } + var b [1]byte + for j, xerr := range tt.xerrs { + for { + n, _, err := c.ReadFrom(b[:]) + if xerr != nil { + if perr := parseReadError(err); perr != nil { + t.Errorf("#%d/%d: %v", i, j, perr) + } + if !isDeadlineExceeded(err) { + t.Fatalf("#%d/%d: %v", i, j, err) + } + } + if err == nil { + time.Sleep(tt.timeout / 3) + continue + } + if nerr, ok := err.(Error); ok && nerr.Timeout() && n != 0 { + t.Fatalf("#%d/%d: read %d; want 0", i, j, n) + } + break + } + } + } +} + +var writeTimeoutTests = []struct { + timeout time.Duration + xerrs [2]error // expected errors in transition +}{ + // Tests that write deadlines work, even if there's buffer + // space available to write. + {-5 * time.Second, [2]error{os.ErrDeadlineExceeded, os.ErrDeadlineExceeded}}, + + {10 * time.Millisecond, [2]error{nil, os.ErrDeadlineExceeded}}, +} + +// There is a very similar copy of this in os/timeout_test.go. +func TestWriteTimeout(t *testing.T) { + t.Parallel() + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + for i, tt := range writeTimeoutTests { + c, err := Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if err := c.SetWriteDeadline(time.Now().Add(tt.timeout)); err != nil { + t.Fatalf("#%d: %v", i, err) + } + for j, xerr := range tt.xerrs { + for { + n, err := c.Write([]byte("WRITE TIMEOUT TEST")) + if xerr != nil { + if perr := parseWriteError(err); perr != nil { + t.Errorf("#%d/%d: %v", i, j, perr) + } + if !isDeadlineExceeded(err) { + t.Fatalf("#%d/%d: %v", i, j, err) + } + } + if err == nil { + time.Sleep(tt.timeout / 3) + continue + } + if n != 0 { + t.Fatalf("#%d/%d: wrote %d; want 0", i, j, n) + } + break + } + } + } +} + +// There is a very similar copy of this in os/timeout_test.go. 
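+// TestWriteTimeoutMustNotReturn checks that clearing the write deadline
+// with SetWriteDeadline(noDeadline), after deadlines were set in the
+// past, leaves Write blocking until the connection is closed.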
+func TestWriteTimeoutMustNotReturn(t *testing.T) { + t.Parallel() + + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + c, err := Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + maxch := make(chan *time.Timer) + ch := make(chan error) + go func() { + if err := c.SetDeadline(time.Now().Add(-5 * time.Second)); err != nil { + t.Error(err) + } + if err := c.SetReadDeadline(time.Now().Add(-5 * time.Second)); err != nil { + t.Error(err) + } + if err := c.SetWriteDeadline(noDeadline); err != nil { + t.Error(err) + } + maxch <- time.NewTimer(100 * time.Millisecond) + var b [1024]byte + for { + if _, err := c.Write(b[:]); err != nil { + ch <- err + break + } + } + }() + + max := <-maxch + defer max.Stop() + + select { + case err := <-ch: + if perr := parseWriteError(err); perr != nil { + t.Error(perr) + } + t.Fatalf("expected Write to not return, but it returned with %v", err) + case <-max.C: + c.Close() + err := <-ch // wait for tester goroutine to stop + if perr := parseWriteError(err); perr != nil { + t.Error(perr) + } + if nerr, ok := err.(Error); !ok || nerr.Timeout() || nerr.Temporary() { + t.Fatal(err) + } + } +} + +func TestWriteToTimeout(t *testing.T) { + t.Parallel() + + c1 := newLocalPacketListener(t, "udp") + defer c1.Close() + + host, _, err := SplitHostPort(c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + + timeouts := []time.Duration{ + -5 * time.Second, + 10 * time.Millisecond, + } + + for _, timeout := range timeouts { + t.Run(fmt.Sprint(timeout), func(t *testing.T) { + c2, err := ListenPacket(c1.LocalAddr().Network(), JoinHostPort(host, "0")) + if err != nil { + t.Fatal(err) + } + defer c2.Close() + + if err := c2.SetWriteDeadline(time.Now().Add(timeout)); err != nil { + t.Fatalf("SetWriteDeadline: %v", err) + } + backoff := 1 * time.Millisecond + nDeadlineExceeded := 0 + for j := 0; nDeadlineExceeded < 2; j++ { + n, err := c2.WriteTo([]byte("WRITETO TIMEOUT TEST"), c1.LocalAddr()) + t.Logf("#%d: WriteTo: %d, %v", j, n, err) + if err == nil && timeout >= 0 && nDeadlineExceeded == 0 { + // If the timeout is nonnegative, some number of WriteTo calls may + // succeed before the timeout takes effect. + t.Logf("WriteTo succeeded; sleeping %v", timeout/3) + time.Sleep(timeout / 3) + continue + } + if isENOBUFS(err) { + t.Logf("WriteTo: %v", err) + // We're looking for a deadline exceeded error, but if the kernel's + // network buffers are saturated we may see ENOBUFS instead (see + // https://go.dev/issue/49930). Give it some time to unsaturate. + time.Sleep(backoff) + backoff *= 2 + continue + } + if perr := parseWriteError(err); perr != nil { + t.Errorf("failed to parse error: %v", perr) + } + if !isDeadlineExceeded(err) { + t.Errorf("error is not 'deadline exceeded'") + } + if n != 0 { + t.Errorf("unexpectedly wrote %d bytes", n) + } + if !t.Failed() { + t.Logf("WriteTo timed out as expected") + } + nDeadlineExceeded++ + } + }) + } +} + +const ( + // minDynamicTimeout is the minimum timeout to attempt for + // tests that automatically increase timeouts until success. + // + // Lower values may allow tests to succeed more quickly if the value is close + // to the true minimum, but may require more iterations (and waste more time + // and CPU power on failed attempts) if the timeout is too low. 
+ minDynamicTimeout = 1 * time.Millisecond + + // maxDynamicTimeout is the maximum timeout to attempt for + // tests that automatically increase timeouts until success. + // + // This should be a strict upper bound on the latency required to hit a + // timeout accurately, even on a slow or heavily-loaded machine. If a test + // would increase the timeout beyond this value, the test fails. + maxDynamicTimeout = 4 * time.Second +) + +// timeoutUpperBound returns the maximum time that we expect a timeout of +// duration d to take to return the caller. +func timeoutUpperBound(d time.Duration) time.Duration { + switch runtime.GOOS { + case "openbsd", "netbsd": + // NetBSD and OpenBSD seem to be unable to reliably hit deadlines even when + // the absolute durations are long. + // In https://build.golang.org/log/c34f8685d020b98377dd4988cd38f0c5bd72267e, + // we observed that an openbsd-amd64-68 builder took 4.090948779s for a + // 2.983020682s timeout (37.1% overhead). + // (See https://go.dev/issue/50189 for further detail.) + // Give them lots of slop to compensate. + return d * 3 / 2 + } + // Other platforms seem to hit their deadlines more reliably, + // at least when they are long enough to cover scheduling jitter. + return d * 11 / 10 +} + +// nextTimeout returns the next timeout to try after an operation took the given +// actual duration with a timeout shorter than that duration. +func nextTimeout(actual time.Duration) (next time.Duration, ok bool) { + if actual >= maxDynamicTimeout { + return maxDynamicTimeout, false + } + // Since the previous attempt took actual, we can't expect to beat that + // duration by any significant margin. Try the next attempt with an arbitrary + // factor above that, so that our growth curve is at least exponential. + next = actual * 5 / 4 + if next > maxDynamicTimeout { + return maxDynamicTimeout, true + } + return next, true +} + +// There is a very similar copy of this in os/timeout_test.go. +func TestReadTimeoutFluctuation(t *testing.T) { + ln := newLocalListener(t, "tcp") + defer ln.Close() + + c, err := Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + d := minDynamicTimeout + b := make([]byte, 256) + for { + t.Logf("SetReadDeadline(+%v)", d) + t0 := time.Now() + deadline := t0.Add(d) + if err = c.SetReadDeadline(deadline); err != nil { + t.Fatalf("SetReadDeadline(%v): %v", deadline, err) + } + var n int + n, err = c.Read(b) + t1 := time.Now() + + if n != 0 || err == nil || !err.(Error).Timeout() { + t.Errorf("Read did not return (0, timeout): (%d, %v)", n, err) + } + if perr := parseReadError(err); perr != nil { + t.Error(perr) + } + if !isDeadlineExceeded(err) { + t.Errorf("Read error is not DeadlineExceeded: %v", err) + } + + actual := t1.Sub(t0) + if t1.Before(deadline) { + t.Errorf("Read took %s; expected at least %s", actual, d) + } + if t.Failed() { + return + } + if want := timeoutUpperBound(d); actual > want { + next, ok := nextTimeout(actual) + if !ok { + t.Fatalf("Read took %s; expected at most %v", actual, want) + } + // Maybe this machine is too slow to reliably schedule goroutines within + // the requested duration. Increase the timeout and try again. + t.Logf("Read took %s (expected %s); trying with longer timeout", actual, d) + d = next + continue + } + + break + } +} + +// There is a very similar copy of this in os/timeout_test.go. 
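+// Like TestReadTimeoutFluctuation above, this starts at minDynamicTimeout
+// and, whenever an attempt overshoots timeoutUpperBound, retries with
+// nextTimeout of the observed duration (a 1ms deadline that takes 20ms
+// to fire is retried with a 25ms deadline).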
+func TestReadFromTimeoutFluctuation(t *testing.T) { + c1 := newLocalPacketListener(t, "udp") + defer c1.Close() + + c2, err := Dial(c1.LocalAddr().Network(), c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer c2.Close() + + d := minDynamicTimeout + b := make([]byte, 256) + for { + t.Logf("SetReadDeadline(+%v)", d) + t0 := time.Now() + deadline := t0.Add(d) + if err = c2.SetReadDeadline(deadline); err != nil { + t.Fatalf("SetReadDeadline(%v): %v", deadline, err) + } + var n int + n, _, err = c2.(PacketConn).ReadFrom(b) + t1 := time.Now() + + if n != 0 || err == nil || !err.(Error).Timeout() { + t.Errorf("ReadFrom did not return (0, timeout): (%d, %v)", n, err) + } + if perr := parseReadError(err); perr != nil { + t.Error(perr) + } + if !isDeadlineExceeded(err) { + t.Errorf("ReadFrom error is not DeadlineExceeded: %v", err) + } + + actual := t1.Sub(t0) + if t1.Before(deadline) { + t.Errorf("ReadFrom took %s; expected at least %s", actual, d) + } + if t.Failed() { + return + } + if want := timeoutUpperBound(d); actual > want { + next, ok := nextTimeout(actual) + if !ok { + t.Fatalf("ReadFrom took %s; expected at most %s", actual, want) + } + // Maybe this machine is too slow to reliably schedule goroutines within + // the requested duration. Increase the timeout and try again. + t.Logf("ReadFrom took %s (expected %s); trying with longer timeout", actual, d) + d = next + continue + } + + break + } +} + +func TestWriteTimeoutFluctuation(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + c, err := Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + d := minDynamicTimeout + for { + t.Logf("SetWriteDeadline(+%v)", d) + t0 := time.Now() + deadline := t0.Add(d) + if err := c.SetWriteDeadline(deadline); err != nil { + t.Fatalf("SetWriteDeadline(%v): %v", deadline, err) + } + var n int64 + var err error + for { + var dn int + dn, err = c.Write([]byte("TIMEOUT TRANSMITTER")) + n += int64(dn) + if err != nil { + break + } + } + t1 := time.Now() + // Inv: err != nil + if !err.(Error).Timeout() { + t.Fatalf("Write did not return (any, timeout): (%d, %v)", n, err) + } + if perr := parseWriteError(err); perr != nil { + t.Error(perr) + } + if !isDeadlineExceeded(err) { + t.Errorf("Write error is not DeadlineExceeded: %v", err) + } + + actual := t1.Sub(t0) + if t1.Before(deadline) { + t.Errorf("Write took %s; expected at least %s", actual, d) + } + if t.Failed() { + return + } + if want := timeoutUpperBound(d); actual > want { + if n > 0 { + // SetWriteDeadline specifies a time “after which I/O operations fail + // instead of blocking”. However, the kernel's send buffer is not yet + // full, we may be able to write some arbitrary (but finite) number of + // bytes to it without blocking. + t.Logf("Wrote %d bytes into send buffer; retrying until buffer is full", n) + if d <= maxDynamicTimeout/2 { + // We don't know how long the actual write loop would have taken if + // the buffer were full, so just guess and double the duration so that + // the next attempt can make twice as much progress toward filling it. + d *= 2 + } + } else if next, ok := nextTimeout(actual); !ok { + t.Fatalf("Write took %s; expected at most %s", actual, want) + } else { + // Maybe this machine is too slow to reliably schedule goroutines within + // the requested duration. Increase the timeout and try again. 
+ t.Logf("Write took %s (expected %s); trying with longer timeout", actual, d) + d = next + } + continue + } + + break + } +} + +// There is a very similar copy of this in os/timeout_test.go. +func TestVariousDeadlines(t *testing.T) { + t.Parallel() + testVariousDeadlines(t) +} + +// There is a very similar copy of this in os/timeout_test.go. +func TestVariousDeadlines1Proc(t *testing.T) { + // Cannot use t.Parallel - modifies global GOMAXPROCS. + if testing.Short() { + t.Skip("skipping in short mode") + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) + testVariousDeadlines(t) +} + +// There is a very similar copy of this in os/timeout_test.go. +func TestVariousDeadlines4Proc(t *testing.T) { + // Cannot use t.Parallel - modifies global GOMAXPROCS. + if testing.Short() { + t.Skip("skipping in short mode") + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + testVariousDeadlines(t) +} + +func testVariousDeadlines(t *testing.T) { + handler := func(ls *localServer, ln Listener) { + for { + c, err := ln.Accept() + if err != nil { + break + } + c.Read(make([]byte, 1)) // wait for client to close connection + c.Close() + } + } + ls := newLocalServer(t, "tcp") + defer ls.teardown() + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + for _, timeout := range []time.Duration{ + 1 * time.Nanosecond, + 2 * time.Nanosecond, + 5 * time.Nanosecond, + 50 * time.Nanosecond, + 100 * time.Nanosecond, + 200 * time.Nanosecond, + 500 * time.Nanosecond, + 750 * time.Nanosecond, + 1 * time.Microsecond, + 5 * time.Microsecond, + 25 * time.Microsecond, + 250 * time.Microsecond, + 500 * time.Microsecond, + 1 * time.Millisecond, + 5 * time.Millisecond, + 100 * time.Millisecond, + 250 * time.Millisecond, + 500 * time.Millisecond, + 1 * time.Second, + } { + numRuns := 3 + if testing.Short() { + numRuns = 1 + if timeout > 500*time.Microsecond { + continue + } + } + for run := 0; run < numRuns; run++ { + name := fmt.Sprintf("%v %d/%d", timeout, run, numRuns) + t.Log(name) + + c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + + t0 := time.Now() + if err := c.SetDeadline(t0.Add(timeout)); err != nil { + t.Error(err) + } + n, err := io.Copy(io.Discard, c) + dt := time.Since(t0) + c.Close() + + if nerr, ok := err.(Error); ok && nerr.Timeout() { + t.Logf("%v: good timeout after %v; %d bytes", name, dt, n) + } else { + t.Fatalf("%v: Copy = %d, %v; want timeout", name, n, err) + } + } + } +} + +// TestReadWriteProlongedTimeout tests concurrent deadline +// modification. Known to cause data races in the past. 
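+// The server-side goroutines push their read and write deadlines an hour
+// into the future before every Read and Write while the client drives
+// 1000 round trips, constantly resetting the deadline timers mid-I/O.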
+func TestReadWriteProlongedTimeout(t *testing.T) { + t.Parallel() + + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + handler := func(ls *localServer, ln Listener) { + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + defer c.Close() + + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + var b [1]byte + for { + if err := c.SetReadDeadline(time.Now().Add(time.Hour)); err != nil { + if perr := parseCommonError(err); perr != nil { + t.Error(perr) + } + t.Error(err) + return + } + if _, err := c.Read(b[:]); err != nil { + if perr := parseReadError(err); perr != nil { + t.Error(perr) + } + return + } + } + }() + go func() { + defer wg.Done() + var b [1]byte + for { + if err := c.SetWriteDeadline(time.Now().Add(time.Hour)); err != nil { + if perr := parseCommonError(err); perr != nil { + t.Error(perr) + } + t.Error(err) + return + } + if _, err := c.Write(b[:]); err != nil { + if perr := parseWriteError(err); perr != nil { + t.Error(perr) + } + return + } + } + }() + wg.Wait() + } + ls := newLocalServer(t, "tcp") + defer ls.teardown() + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + var b [1]byte + for i := 0; i < 1000; i++ { + c.Write(b[:]) + c.Read(b[:]) + } +} + +// There is a very similar copy of this in os/timeout_test.go. +func TestReadWriteDeadlineRace(t *testing.T) { + t.Parallel() + + N := 1000 + if testing.Short() { + N = 50 + } + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + c, err := Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + var wg sync.WaitGroup + wg.Add(3) + go func() { + defer wg.Done() + tic := time.NewTicker(2 * time.Microsecond) + defer tic.Stop() + for i := 0; i < N; i++ { + if err := c.SetReadDeadline(time.Now().Add(2 * time.Microsecond)); err != nil { + if perr := parseCommonError(err); perr != nil { + t.Error(perr) + } + break + } + if err := c.SetWriteDeadline(time.Now().Add(2 * time.Microsecond)); err != nil { + if perr := parseCommonError(err); perr != nil { + t.Error(perr) + } + break + } + <-tic.C + } + }() + go func() { + defer wg.Done() + var b [1]byte + for i := 0; i < N; i++ { + c.Read(b[:]) // ignore possible timeout errors + } + }() + go func() { + defer wg.Done() + var b [1]byte + for i := 0; i < N; i++ { + c.Write(b[:]) // ignore possible timeout errors + } + }() + wg.Wait() // wait for tester goroutine to stop +} + +// Issue 35367. +func TestConcurrentSetDeadline(t *testing.T) { + ln := newLocalListener(t, "tcp") + defer ln.Close() + + const goroutines = 8 + const conns = 10 + const tries = 100 + + var c [conns]Conn + for i := 0; i < conns; i++ { + var err error + c[i], err = Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c[i].Close() + } + + var wg sync.WaitGroup + wg.Add(goroutines) + now := time.Now() + for i := 0; i < goroutines; i++ { + go func(i int) { + defer wg.Done() + // Make the deadlines steadily earlier, + // to trigger runtime adjusttimers calls. + for j := tries; j > 0; j-- { + for k := 0; k < conns; k++ { + c[k].SetReadDeadline(now.Add(2*time.Hour + time.Duration(i*j*k)*time.Second)) + c[k].SetWriteDeadline(now.Add(1*time.Hour + time.Duration(i*j*k)*time.Second)) + } + } + }(i) + } + wg.Wait() +} + +// isDeadlineExceeded reports whether err is or wraps os.ErrDeadlineExceeded. 
+// We also check that the error implements net.Error, and that the +// Timeout method returns true. +func isDeadlineExceeded(err error) bool { + nerr, ok := err.(Error) + if !ok { + return false + } + if !nerr.Timeout() { + return false + } + if !errors.Is(err, os.ErrDeadlineExceeded) { + return false + } + return true +} diff --git a/platform/dbops/binaries/go/go/src/net/udpsock.go b/platform/dbops/binaries/go/go/src/net/udpsock.go new file mode 100644 index 0000000000000000000000000000000000000000..4f8acb7fc890de72ff6505dc2a805f5e7876ddfc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/udpsock.go @@ -0,0 +1,368 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "internal/itoa" + "net/netip" + "syscall" +) + +// BUG(mikio): On Plan 9, the ReadMsgUDP and +// WriteMsgUDP methods of UDPConn are not implemented. + +// BUG(mikio): On Windows, the File method of UDPConn is not +// implemented. + +// BUG(mikio): On JS, methods and functions related to UDPConn are not +// implemented. + +// UDPAddr represents the address of a UDP end point. +type UDPAddr struct { + IP IP + Port int + Zone string // IPv6 scoped addressing zone +} + +// AddrPort returns the UDPAddr a as a netip.AddrPort. +// +// If a.Port does not fit in a uint16, it's silently truncated. +// +// If a is nil, a zero value is returned. +func (a *UDPAddr) AddrPort() netip.AddrPort { + if a == nil { + return netip.AddrPort{} + } + na, _ := netip.AddrFromSlice(a.IP) + na = na.WithZone(a.Zone) + return netip.AddrPortFrom(na, uint16(a.Port)) +} + +// Network returns the address's network name, "udp". +func (a *UDPAddr) Network() string { return "udp" } + +func (a *UDPAddr) String() string { + if a == nil { + return "" + } + ip := ipEmptyString(a.IP) + if a.Zone != "" { + return JoinHostPort(ip+"%"+a.Zone, itoa.Itoa(a.Port)) + } + return JoinHostPort(ip, itoa.Itoa(a.Port)) +} + +func (a *UDPAddr) isWildcard() bool { + if a == nil || a.IP == nil { + return true + } + return a.IP.IsUnspecified() +} + +func (a *UDPAddr) opAddr() Addr { + if a == nil { + return nil + } + return a +} + +// ResolveUDPAddr returns an address of UDP end point. +// +// The network must be a UDP network name. +// +// If the host in the address parameter is not a literal IP address or +// the port is not a literal port number, ResolveUDPAddr resolves the +// address to an address of UDP end point. +// Otherwise, it parses the address as a pair of literal IP address +// and port number. +// The address parameter can use a host name, but this is not +// recommended, because it will return at most one of the host name's +// IP addresses. +// +// See func Dial for a description of the network and address +// parameters. +func ResolveUDPAddr(network, address string) (*UDPAddr, error) { + switch network { + case "udp", "udp4", "udp6": + case "": // a hint wildcard for Go 1.0 undocumented behavior + network = "udp" + default: + return nil, UnknownNetworkError(network) + } + addrs, err := DefaultResolver.internetAddrList(context.Background(), network, address) + if err != nil { + return nil, err + } + return addrs.forResolve(network, address).(*UDPAddr), nil +} + +// UDPAddrFromAddrPort returns addr as a UDPAddr. If addr.IsValid() is false, +// then the returned UDPAddr will contain a nil IP field, indicating an +// address family-agnostic unspecified address. 
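// A sketch of the round trip between the two address types (values are
// illustrative). Note that a 16-byte IPv4-mapped slice, such as net.IPv4
// returns, surfaces as an IPv4-mapped IPv6 netip.Addr; netip.Addr.Unmap
// recovers the 4-byte form:
//
//	ua := &net.UDPAddr{IP: net.IP{192, 0, 2, 1}, Port: 4242} // 4-byte IP
//	ap := ua.AddrPort()                 // netip.AddrPort "192.0.2.1:4242"
//	back := net.UDPAddrFromAddrPort(ap) // IP, Port and Zone all survive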
+func UDPAddrFromAddrPort(addr netip.AddrPort) *UDPAddr { + return &UDPAddr{ + IP: addr.Addr().AsSlice(), + Zone: addr.Addr().Zone(), + Port: int(addr.Port()), + } +} + +// An addrPortUDPAddr is a netip.AddrPort-based UDP address that satisfies the Addr interface. +type addrPortUDPAddr struct { + netip.AddrPort +} + +func (addrPortUDPAddr) Network() string { return "udp" } + +// UDPConn is the implementation of the Conn and PacketConn interfaces +// for UDP network connections. +type UDPConn struct { + conn +} + +// SyscallConn returns a raw network connection. +// This implements the syscall.Conn interface. +func (c *UDPConn) SyscallConn() (syscall.RawConn, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + return newRawConn(c.fd), nil +} + +// ReadFromUDP acts like ReadFrom but returns a UDPAddr. +func (c *UDPConn) ReadFromUDP(b []byte) (n int, addr *UDPAddr, err error) { + // This function is designed to allow the caller to control the lifetime + // of the returned *UDPAddr and thereby prevent an allocation. + // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/. + // The real work is done by readFromUDP, below. + return c.readFromUDP(b, &UDPAddr{}) +} + +// readFromUDP implements ReadFromUDP. +func (c *UDPConn) readFromUDP(b []byte, addr *UDPAddr) (int, *UDPAddr, error) { + if !c.ok() { + return 0, nil, syscall.EINVAL + } + n, addr, err := c.readFrom(b, addr) + if err != nil { + err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return n, addr, err +} + +// ReadFrom implements the PacketConn ReadFrom method. +func (c *UDPConn) ReadFrom(b []byte) (int, Addr, error) { + n, addr, err := c.readFromUDP(b, &UDPAddr{}) + if addr == nil { + // Return Addr(nil), not Addr(*UDPAddr(nil)). + return n, nil, err + } + return n, addr, err +} + +// ReadFromUDPAddrPort acts like ReadFrom but returns a netip.AddrPort. +// +// If c is bound to an unspecified address, the returned +// netip.AddrPort's address might be an IPv4-mapped IPv6 address. +// Use netip.Addr.Unmap to get the address without the IPv6 prefix. +func (c *UDPConn) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) { + if !c.ok() { + return 0, netip.AddrPort{}, syscall.EINVAL + } + n, addr, err = c.readFromAddrPort(b) + if err != nil { + err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return n, addr, err +} + +// ReadMsgUDP reads a message from c, copying the payload into b and +// the associated out-of-band data into oob. It returns the number of +// bytes copied into b, the number of bytes copied into oob, the flags +// that were set on the message and the source address of the message. +// +// The packages golang.org/x/net/ipv4 and golang.org/x/net/ipv6 can be +// used to manipulate IP-level socket options in oob. +func (c *UDPConn) ReadMsgUDP(b, oob []byte) (n, oobn, flags int, addr *UDPAddr, err error) { + var ap netip.AddrPort + n, oobn, flags, ap, err = c.ReadMsgUDPAddrPort(b, oob) + if ap.IsValid() { + addr = UDPAddrFromAddrPort(ap) + } + return +} + +// ReadMsgUDPAddrPort is like ReadMsgUDP but returns a netip.AddrPort instead of a UDPAddr. 
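// These netip-based variants exist for the same reason as the readFromUDP
// indirection above: netip.AddrPort is a value type, so on the optimized
// platforms a receive loop can run without per-packet address allocations
// (TestAllocs in udpsock_test.go asserts this). A sketch, where conn is an
// already-bound *net.UDPConn and handle is a hypothetical callback:
//
//	buf := make([]byte, 1500)
//	for {
//		n, ap, err := conn.ReadFromUDPAddrPort(buf)
//		if err != nil {
//			break
//		}
//		handle(buf[:n], ap)
//	}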
+func (c *UDPConn) ReadMsgUDPAddrPort(b, oob []byte) (n, oobn, flags int, addr netip.AddrPort, err error) { + if !c.ok() { + return 0, 0, 0, netip.AddrPort{}, syscall.EINVAL + } + n, oobn, flags, addr, err = c.readMsg(b, oob) + if err != nil { + err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return +} + +// WriteToUDP acts like WriteTo but takes a UDPAddr. +func (c *UDPConn) WriteToUDP(b []byte, addr *UDPAddr) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + n, err := c.writeTo(b, addr) + if err != nil { + err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr.opAddr(), Err: err} + } + return n, err +} + +// WriteToUDPAddrPort acts like WriteTo but takes a netip.AddrPort. +func (c *UDPConn) WriteToUDPAddrPort(b []byte, addr netip.AddrPort) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + n, err := c.writeToAddrPort(b, addr) + if err != nil { + err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addrPortUDPAddr{addr}, Err: err} + } + return n, err +} + +// WriteTo implements the PacketConn WriteTo method. +func (c *UDPConn) WriteTo(b []byte, addr Addr) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + a, ok := addr.(*UDPAddr) + if !ok { + return 0, &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr, Err: syscall.EINVAL} + } + n, err := c.writeTo(b, a) + if err != nil { + err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: a.opAddr(), Err: err} + } + return n, err +} + +// WriteMsgUDP writes a message to addr via c if c isn't connected, or +// to c's remote address if c is connected (in which case addr must be +// nil). The payload is copied from b and the associated out-of-band +// data is copied from oob. It returns the number of payload and +// out-of-band bytes written. +// +// The packages golang.org/x/net/ipv4 and golang.org/x/net/ipv6 can be +// used to manipulate IP-level socket options in oob. +func (c *UDPConn) WriteMsgUDP(b, oob []byte, addr *UDPAddr) (n, oobn int, err error) { + if !c.ok() { + return 0, 0, syscall.EINVAL + } + n, oobn, err = c.writeMsg(b, oob, addr) + if err != nil { + err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr.opAddr(), Err: err} + } + return +} + +// WriteMsgUDPAddrPort is like WriteMsgUDP but takes a netip.AddrPort instead of a UDPAddr. +func (c *UDPConn) WriteMsgUDPAddrPort(b, oob []byte, addr netip.AddrPort) (n, oobn int, err error) { + if !c.ok() { + return 0, 0, syscall.EINVAL + } + n, oobn, err = c.writeMsgAddrPort(b, oob, addr) + if err != nil { + err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addrPortUDPAddr{addr}, Err: err} + } + return +} + +func newUDPConn(fd *netFD) *UDPConn { return &UDPConn{conn{fd}} } + +// DialUDP acts like Dial for UDP networks. +// +// The network must be a UDP network name; see func Dial for details. +// +// If laddr is nil, a local address is automatically chosen. +// If the IP field of raddr is nil or an unspecified IP address, the +// local system is assumed. 
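// A sketch of connected-mode use (the loopback address and port are
// illustrative). Once connected, plain Write is the send path; addressed
// sends are rejected, as testWriteToConn in udpsock_test.go exercises:
//
//	raddr := &net.UDPAddr{IP: net.IP{127, 0, 0, 1}, Port: 5353}
//	c, err := net.DialUDP("udp4", nil, raddr)
//	if err != nil {
//		// handle error
//	}
//	defer c.Close()
//	c.Write([]byte("ping"))                   // connected-mode send
//	_, err = c.WriteToUDP([]byte("x"), raddr) // *OpError wrapping ErrWriteToConnected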
+func DialUDP(network string, laddr, raddr *UDPAddr) (*UDPConn, error) { + switch network { + case "udp", "udp4", "udp6": + default: + return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(network)} + } + if raddr == nil { + return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress} + } + sd := &sysDialer{network: network, address: raddr.String()} + c, err := sd.dialUDP(context.Background(), laddr, raddr) + if err != nil { + return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err} + } + return c, nil +} + +// ListenUDP acts like ListenPacket for UDP networks. +// +// The network must be a UDP network name; see func Dial for details. +// +// If the IP field of laddr is nil or an unspecified IP address, +// ListenUDP listens on all available IP addresses of the local system +// except multicast IP addresses. +// If the Port field of laddr is 0, a port number is automatically +// chosen. +func ListenUDP(network string, laddr *UDPAddr) (*UDPConn, error) { + switch network { + case "udp", "udp4", "udp6": + default: + return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(network)} + } + if laddr == nil { + laddr = &UDPAddr{} + } + sl := &sysListener{network: network, address: laddr.String()} + c, err := sl.listenUDP(context.Background(), laddr) + if err != nil { + return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: err} + } + return c, nil +} + +// ListenMulticastUDP acts like ListenPacket for UDP networks but +// takes a group address on a specific network interface. +// +// The network must be a UDP network name; see func Dial for details. +// +// ListenMulticastUDP listens on all available IP addresses of the +// local system including the group, multicast IP address. +// If ifi is nil, ListenMulticastUDP uses the system-assigned +// multicast interface, although this is not recommended because the +// assignment depends on platforms and sometimes it might require +// routing configuration. +// If the Port field of gaddr is 0, a port number is automatically +// chosen. +// +// ListenMulticastUDP is just for convenience of simple, small +// applications. There are golang.org/x/net/ipv4 and +// golang.org/x/net/ipv6 packages for general purpose uses. +// +// Note that ListenMulticastUDP will set the IP_MULTICAST_LOOP socket option +// to 0 under IPPROTO_IP, to disable loopback of multicast packets. 
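// A sketch of joining a group and receiving (224.0.0.251:5353, the mDNS
// group, is used purely as an example; a nil interface leaves the choice to
// the system, with the caveat described above):
//
//	gaddr := &net.UDPAddr{IP: net.IPv4(224, 0, 0, 251), Port: 5353}
//	c, err := net.ListenMulticastUDP("udp4", nil, gaddr)
//	if err != nil {
//		// handle error
//	}
//	defer c.Close()
//	b := make([]byte, 1500)
//	n, src, err := c.ReadFromUDP(b) // src is the sender, not the group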
+func ListenMulticastUDP(network string, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error) { + switch network { + case "udp", "udp4", "udp6": + default: + return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: gaddr.opAddr(), Err: UnknownNetworkError(network)} + } + if gaddr == nil || gaddr.IP == nil { + return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: gaddr.opAddr(), Err: errMissingAddress} + } + sl := &sysListener{network: network, address: gaddr.String()} + c, err := sl.listenMulticastUDP(context.Background(), ifi, gaddr) + if err != nil { + return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: gaddr.opAddr(), Err: err} + } + return c, nil +} diff --git a/platform/dbops/binaries/go/go/src/net/udpsock_plan9.go b/platform/dbops/binaries/go/go/src/net/udpsock_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..732a3b07eec0b42792f5155226f967e072c25464 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/udpsock_plan9.go @@ -0,0 +1,182 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "errors" + "net/netip" + "os" + "syscall" +) + +func (c *UDPConn) readFrom(b []byte, addr *UDPAddr) (int, *UDPAddr, error) { + buf := make([]byte, udpHeaderSize+len(b)) + m, err := c.fd.Read(buf) + if err != nil { + return 0, nil, err + } + if m < udpHeaderSize { + return 0, nil, errors.New("short read reading UDP header") + } + buf = buf[:m] + + h, buf := unmarshalUDPHeader(buf) + n := copy(b, buf) + *addr = UDPAddr{IP: h.raddr, Port: int(h.rport)} + return n, addr, nil +} + +func (c *UDPConn) readFromAddrPort(b []byte) (int, netip.AddrPort, error) { + // TODO: optimize. The equivalent code on posix is alloc-free. 
+ buf := make([]byte, udpHeaderSize+len(b)) + m, err := c.fd.Read(buf) + if err != nil { + return 0, netip.AddrPort{}, err + } + if m < udpHeaderSize { + return 0, netip.AddrPort{}, errors.New("short read reading UDP header") + } + buf = buf[:m] + + h, buf := unmarshalUDPHeader(buf) + n := copy(b, buf) + ip, _ := netip.AddrFromSlice(h.raddr) + addr := netip.AddrPortFrom(ip, h.rport) + return n, addr, nil +} + +func (c *UDPConn) readMsg(b, oob []byte) (n, oobn, flags int, addr netip.AddrPort, err error) { + return 0, 0, 0, netip.AddrPort{}, syscall.EPLAN9 +} + +func (c *UDPConn) writeTo(b []byte, addr *UDPAddr) (int, error) { + if addr == nil { + return 0, errMissingAddress + } + h := new(udpHeader) + h.raddr = addr.IP.To16() + h.laddr = c.fd.laddr.(*UDPAddr).IP.To16() + h.ifcaddr = IPv6zero // ignored (receive only) + h.rport = uint16(addr.Port) + h.lport = uint16(c.fd.laddr.(*UDPAddr).Port) + + buf := make([]byte, udpHeaderSize+len(b)) + i := copy(buf, h.Bytes()) + copy(buf[i:], b) + if _, err := c.fd.Write(buf); err != nil { + return 0, err + } + return len(b), nil +} + +func (c *UDPConn) writeToAddrPort(b []byte, addr netip.AddrPort) (int, error) { + return c.writeTo(b, UDPAddrFromAddrPort(addr)) // TODO: optimize instead of allocating +} + +func (c *UDPConn) writeMsg(b, oob []byte, addr *UDPAddr) (n, oobn int, err error) { + return 0, 0, syscall.EPLAN9 +} + +func (c *UDPConn) writeMsgAddrPort(b, oob []byte, addr netip.AddrPort) (n, oobn int, err error) { + return 0, 0, syscall.EPLAN9 +} + +func (sd *sysDialer) dialUDP(ctx context.Context, laddr, raddr *UDPAddr) (*UDPConn, error) { + fd, err := dialPlan9(ctx, sd.network, laddr, raddr) + if err != nil { + return nil, err + } + return newUDPConn(fd), nil +} + +const udpHeaderSize = 16*3 + 2*2 + +type udpHeader struct { + raddr, laddr, ifcaddr IP + rport, lport uint16 +} + +func (h *udpHeader) Bytes() []byte { + b := make([]byte, udpHeaderSize) + i := 0 + i += copy(b[i:i+16], h.raddr) + i += copy(b[i:i+16], h.laddr) + i += copy(b[i:i+16], h.ifcaddr) + b[i], b[i+1], i = byte(h.rport>>8), byte(h.rport), i+2 + b[i], b[i+1], i = byte(h.lport>>8), byte(h.lport), i+2 + return b +} + +func unmarshalUDPHeader(b []byte) (*udpHeader, []byte) { + h := new(udpHeader) + h.raddr, b = IP(b[:16]), b[16:] + h.laddr, b = IP(b[:16]), b[16:] + h.ifcaddr, b = IP(b[:16]), b[16:] + h.rport, b = uint16(b[0])<<8|uint16(b[1]), b[2:] + h.lport, b = uint16(b[0])<<8|uint16(b[1]), b[2:] + return h, b +} + +func (sl *sysListener) listenUDP(ctx context.Context, laddr *UDPAddr) (*UDPConn, error) { + l, err := listenPlan9(ctx, sl.network, laddr) + if err != nil { + return nil, err + } + _, err = l.ctl.WriteString("headers") + if err != nil { + return nil, err + } + l.data, err = os.OpenFile(l.dir+"/data", os.O_RDWR, 0) + if err != nil { + return nil, err + } + fd, err := l.netFD() + return newUDPConn(fd), err +} + +func (sl *sysListener) listenMulticastUDP(ctx context.Context, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error) { + // Plan 9 does not like announce command with a multicast address, + // so do not specify an IP address when listening. 
+ l, err := listenPlan9(ctx, sl.network, &UDPAddr{IP: nil, Port: gaddr.Port, Zone: gaddr.Zone}) + if err != nil { + return nil, err + } + _, err = l.ctl.WriteString("headers") + if err != nil { + return nil, err + } + var addrs []Addr + if ifi != nil { + addrs, err = ifi.Addrs() + if err != nil { + return nil, err + } + } else { + addrs, err = InterfaceAddrs() + if err != nil { + return nil, err + } + } + + have4 := gaddr.IP.To4() != nil + for _, addr := range addrs { + if ipnet, ok := addr.(*IPNet); ok && (ipnet.IP.To4() != nil) == have4 { + _, err = l.ctl.WriteString("addmulti " + ipnet.IP.String() + " " + gaddr.IP.String()) + if err != nil { + return nil, &OpError{Op: "addmulti", Net: "", Source: nil, Addr: ipnet, Err: err} + } + } + } + l.data, err = os.OpenFile(l.dir+"/data", os.O_RDWR, 0) + if err != nil { + return nil, err + } + fd, err := l.netFD() + if err != nil { + return nil, err + } + return newUDPConn(fd), nil +} diff --git a/platform/dbops/binaries/go/go/src/net/udpsock_plan9_test.go b/platform/dbops/binaries/go/go/src/net/udpsock_plan9_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3febfcc07414142b3f769aec0e6300c53948b8b2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/udpsock_plan9_test.go @@ -0,0 +1,66 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "internal/testenv" + "runtime" + "testing" +) + +func TestListenMulticastUDP(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + ifcs, err := Interfaces() + if err != nil { + t.Skip(err.Error()) + } + if len(ifcs) == 0 { + t.Skip("no network interfaces found") + } + + var mifc *Interface + for _, ifc := range ifcs { + if ifc.Flags&(FlagUp|FlagMulticast) != FlagUp|FlagMulticast { + continue + } + mifc = &ifc + break + } + + if mifc == nil { + t.Skip("no multicast interfaces found") + } + + c1, err := ListenMulticastUDP("udp4", mifc, &UDPAddr{IP: ParseIP("224.0.0.254")}) + if err != nil { + t.Fatalf("multicast not working on %s: %v", runtime.GOOS, err) + } + c1addr := c1.LocalAddr().(*UDPAddr) + defer c1.Close() + + c2, err := ListenUDP("udp4", &UDPAddr{IP: IPv4zero, Port: 0}) + if err != nil { + t.Fatal(err) + } + c2addr := c2.LocalAddr().(*UDPAddr) + defer c2.Close() + + n, err := c2.WriteToUDP([]byte("data"), c1addr) + if err != nil { + t.Fatal(err) + } + if n != 4 { + t.Fatalf("got %d; want 4", n) + } + + n, err = c1.WriteToUDP([]byte("data"), c2addr) + if err != nil { + t.Fatal(err) + } + if n != 4 { + t.Fatalf("got %d; want 4", n) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/udpsock_posix.go b/platform/dbops/binaries/go/go/src/net/udpsock_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..50350598317eb97b12772c40c7d46b3955e01aff --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/udpsock_posix.go @@ -0,0 +1,287 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
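// Looking back at udpsock_plan9.go above: with "headers" mode enabled, every
// datagram on the data file carries a fixed 52-byte header (udpHeaderSize =
// 16*3 + 2*2): three 16-byte IPs (raddr, laddr, ifcaddr) followed by two
// big-endian uint16 ports. A sketch of extracting the ports by offset from a
// raw header hdr, mirroring unmarshalUDPHeader:
//
//	rport := uint16(hdr[48])<<8 | uint16(hdr[49]) // bytes 48-49
//	lport := uint16(hdr[50])<<8 | uint16(hdr[51]) // bytes 50-51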
+ +//go:build unix || js || wasip1 || windows + +package net + +import ( + "context" + "net/netip" + "syscall" +) + +func sockaddrToUDP(sa syscall.Sockaddr) Addr { + switch sa := sa.(type) { + case *syscall.SockaddrInet4: + return &UDPAddr{IP: sa.Addr[0:], Port: sa.Port} + case *syscall.SockaddrInet6: + return &UDPAddr{IP: sa.Addr[0:], Port: sa.Port, Zone: zoneCache.name(int(sa.ZoneId))} + } + return nil +} + +func (a *UDPAddr) family() int { + if a == nil || len(a.IP) <= IPv4len { + return syscall.AF_INET + } + if a.IP.To4() != nil { + return syscall.AF_INET + } + return syscall.AF_INET6 +} + +func (a *UDPAddr) sockaddr(family int) (syscall.Sockaddr, error) { + if a == nil { + return nil, nil + } + return ipToSockaddr(family, a.IP, a.Port, a.Zone) +} + +func (a *UDPAddr) toLocal(net string) sockaddr { + return &UDPAddr{loopbackIP(net), a.Port, a.Zone} +} + +func (c *UDPConn) readFrom(b []byte, addr *UDPAddr) (int, *UDPAddr, error) { + var n int + var err error + switch c.fd.family { + case syscall.AF_INET: + var from syscall.SockaddrInet4 + n, err = c.fd.readFromInet4(b, &from) + if err == nil { + ip := from.Addr // copy from.Addr; ip escapes, so this line allocates 4 bytes + *addr = UDPAddr{IP: ip[:], Port: from.Port} + } + case syscall.AF_INET6: + var from syscall.SockaddrInet6 + n, err = c.fd.readFromInet6(b, &from) + if err == nil { + ip := from.Addr // copy from.Addr; ip escapes, so this line allocates 16 bytes + *addr = UDPAddr{IP: ip[:], Port: from.Port, Zone: zoneCache.name(int(from.ZoneId))} + } + } + if err != nil { + // No sockaddr, so don't return UDPAddr. + addr = nil + } + return n, addr, err +} + +func (c *UDPConn) readFromAddrPort(b []byte) (n int, addr netip.AddrPort, err error) { + var ip netip.Addr + var port int + switch c.fd.family { + case syscall.AF_INET: + var from syscall.SockaddrInet4 + n, err = c.fd.readFromInet4(b, &from) + if err == nil { + ip = netip.AddrFrom4(from.Addr) + port = from.Port + } + case syscall.AF_INET6: + var from syscall.SockaddrInet6 + n, err = c.fd.readFromInet6(b, &from) + if err == nil { + ip = netip.AddrFrom16(from.Addr).WithZone(zoneCache.name(int(from.ZoneId))) + port = from.Port + } + } + if err == nil { + addr = netip.AddrPortFrom(ip, uint16(port)) + } + return n, addr, err +} + +func (c *UDPConn) readMsg(b, oob []byte) (n, oobn, flags int, addr netip.AddrPort, err error) { + switch c.fd.family { + case syscall.AF_INET: + var sa syscall.SockaddrInet4 + n, oobn, flags, err = c.fd.readMsgInet4(b, oob, 0, &sa) + ip := netip.AddrFrom4(sa.Addr) + addr = netip.AddrPortFrom(ip, uint16(sa.Port)) + case syscall.AF_INET6: + var sa syscall.SockaddrInet6 + n, oobn, flags, err = c.fd.readMsgInet6(b, oob, 0, &sa) + ip := netip.AddrFrom16(sa.Addr).WithZone(zoneCache.name(int(sa.ZoneId))) + addr = netip.AddrPortFrom(ip, uint16(sa.Port)) + } + return +} + +func (c *UDPConn) writeTo(b []byte, addr *UDPAddr) (int, error) { + if c.fd.isConnected { + return 0, ErrWriteToConnected + } + if addr == nil { + return 0, errMissingAddress + } + + switch c.fd.family { + case syscall.AF_INET: + sa, err := ipToSockaddrInet4(addr.IP, addr.Port) + if err != nil { + return 0, err + } + return c.fd.writeToInet4(b, &sa) + case syscall.AF_INET6: + sa, err := ipToSockaddrInet6(addr.IP, addr.Port, addr.Zone) + if err != nil { + return 0, err + } + return c.fd.writeToInet6(b, &sa) + default: + return 0, &AddrError{Err: "invalid address family", Addr: addr.IP.String()} + } +} + +func (c *UDPConn) writeToAddrPort(b []byte, addr netip.AddrPort) (int, error) { + if 
c.fd.isConnected { + return 0, ErrWriteToConnected + } + if !addr.IsValid() { + return 0, errMissingAddress + } + + switch c.fd.family { + case syscall.AF_INET: + sa, err := addrPortToSockaddrInet4(addr) + if err != nil { + return 0, err + } + return c.fd.writeToInet4(b, &sa) + case syscall.AF_INET6: + sa, err := addrPortToSockaddrInet6(addr) + if err != nil { + return 0, err + } + return c.fd.writeToInet6(b, &sa) + default: + return 0, &AddrError{Err: "invalid address family", Addr: addr.Addr().String()} + } +} + +func (c *UDPConn) writeMsg(b, oob []byte, addr *UDPAddr) (n, oobn int, err error) { + if c.fd.isConnected && addr != nil { + return 0, 0, ErrWriteToConnected + } + if !c.fd.isConnected && addr == nil { + return 0, 0, errMissingAddress + } + sa, err := addr.sockaddr(c.fd.family) + if err != nil { + return 0, 0, err + } + return c.fd.writeMsg(b, oob, sa) +} + +func (c *UDPConn) writeMsgAddrPort(b, oob []byte, addr netip.AddrPort) (n, oobn int, err error) { + if c.fd.isConnected && addr.IsValid() { + return 0, 0, ErrWriteToConnected + } + if !c.fd.isConnected && !addr.IsValid() { + return 0, 0, errMissingAddress + } + + switch c.fd.family { + case syscall.AF_INET: + sa, err := addrPortToSockaddrInet4(addr) + if err != nil { + return 0, 0, err + } + return c.fd.writeMsgInet4(b, oob, &sa) + case syscall.AF_INET6: + sa, err := addrPortToSockaddrInet6(addr) + if err != nil { + return 0, 0, err + } + return c.fd.writeMsgInet6(b, oob, &sa) + default: + return 0, 0, &AddrError{Err: "invalid address family", Addr: addr.Addr().String()} + } +} + +func (sd *sysDialer) dialUDP(ctx context.Context, laddr, raddr *UDPAddr) (*UDPConn, error) { + ctrlCtxFn := sd.Dialer.ControlContext + if ctrlCtxFn == nil && sd.Dialer.Control != nil { + ctrlCtxFn = func(cxt context.Context, network, address string, c syscall.RawConn) error { + return sd.Dialer.Control(network, address, c) + } + } + fd, err := internetSocket(ctx, sd.network, laddr, raddr, syscall.SOCK_DGRAM, 0, "dial", ctrlCtxFn) + if err != nil { + return nil, err + } + return newUDPConn(fd), nil +} + +func (sl *sysListener) listenUDP(ctx context.Context, laddr *UDPAddr) (*UDPConn, error) { + var ctrlCtxFn func(cxt context.Context, network, address string, c syscall.RawConn) error + if sl.ListenConfig.Control != nil { + ctrlCtxFn = func(cxt context.Context, network, address string, c syscall.RawConn) error { + return sl.ListenConfig.Control(network, address, c) + } + } + fd, err := internetSocket(ctx, sl.network, laddr, nil, syscall.SOCK_DGRAM, 0, "listen", ctrlCtxFn) + if err != nil { + return nil, err + } + return newUDPConn(fd), nil +} + +func (sl *sysListener) listenMulticastUDP(ctx context.Context, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error) { + var ctrlCtxFn func(cxt context.Context, network, address string, c syscall.RawConn) error + if sl.ListenConfig.Control != nil { + ctrlCtxFn = func(cxt context.Context, network, address string, c syscall.RawConn) error { + return sl.ListenConfig.Control(network, address, c) + } + } + fd, err := internetSocket(ctx, sl.network, gaddr, nil, syscall.SOCK_DGRAM, 0, "listen", ctrlCtxFn) + if err != nil { + return nil, err + } + c := newUDPConn(fd) + if ip4 := gaddr.IP.To4(); ip4 != nil { + if err := listenIPv4MulticastUDP(c, ifi, ip4); err != nil { + c.Close() + return nil, err + } + } else { + if err := listenIPv6MulticastUDP(c, ifi, gaddr.IP); err != nil { + c.Close() + return nil, err + } + } + return c, nil +} + +func listenIPv4MulticastUDP(c *UDPConn, ifi *Interface, ip IP) error { + if ifi != nil 
{ + if err := setIPv4MulticastInterface(c.fd, ifi); err != nil { + return err + } + } + if err := setIPv4MulticastLoopback(c.fd, false); err != nil { + return err + } + if err := joinIPv4Group(c.fd, ifi, ip); err != nil { + return err + } + return nil +} + +func listenIPv6MulticastUDP(c *UDPConn, ifi *Interface, ip IP) error { + if ifi != nil { + if err := setIPv6MulticastInterface(c.fd, ifi); err != nil { + return err + } + } + if err := setIPv6MulticastLoopback(c.fd, false); err != nil { + return err + } + if err := joinIPv6Group(c.fd, ifi, ip); err != nil { + return err + } + return nil +} diff --git a/platform/dbops/binaries/go/go/src/net/udpsock_test.go b/platform/dbops/binaries/go/go/src/net/udpsock_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8a21aa737055ede2556a3725e95db8804ca4976b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/udpsock_test.go @@ -0,0 +1,703 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "errors" + "fmt" + "internal/testenv" + "net/netip" + "os" + "reflect" + "runtime" + "testing" + "time" +) + +func BenchmarkUDP6LinkLocalUnicast(b *testing.B) { + testHookUninstaller.Do(uninstallTestHooks) + + if !supportsIPv6() { + b.Skip("IPv6 is not supported") + } + ifi := loopbackInterface() + if ifi == nil { + b.Skip("loopback interface not found") + } + lla := ipv6LinkLocalUnicastAddr(ifi) + if lla == "" { + b.Skip("IPv6 link-local unicast address not found") + } + + c1, err := ListenPacket("udp6", JoinHostPort(lla+"%"+ifi.Name, "0")) + if err != nil { + b.Fatal(err) + } + defer c1.Close() + c2, err := ListenPacket("udp6", JoinHostPort(lla+"%"+ifi.Name, "0")) + if err != nil { + b.Fatal(err) + } + defer c2.Close() + + var buf [1]byte + for i := 0; i < b.N; i++ { + if _, err := c1.WriteTo(buf[:], c2.LocalAddr()); err != nil { + b.Fatal(err) + } + if _, _, err := c2.ReadFrom(buf[:]); err != nil { + b.Fatal(err) + } + } +} + +type resolveUDPAddrTest struct { + network string + litAddrOrName string + addr *UDPAddr + err error +} + +var resolveUDPAddrTests = []resolveUDPAddrTest{ + {"udp", "127.0.0.1:0", &UDPAddr{IP: IPv4(127, 0, 0, 1), Port: 0}, nil}, + {"udp4", "127.0.0.1:65535", &UDPAddr{IP: IPv4(127, 0, 0, 1), Port: 65535}, nil}, + + {"udp", "[::1]:0", &UDPAddr{IP: ParseIP("::1"), Port: 0}, nil}, + {"udp6", "[::1]:65535", &UDPAddr{IP: ParseIP("::1"), Port: 65535}, nil}, + + {"udp", "[::1%en0]:1", &UDPAddr{IP: ParseIP("::1"), Port: 1, Zone: "en0"}, nil}, + {"udp6", "[::1%911]:2", &UDPAddr{IP: ParseIP("::1"), Port: 2, Zone: "911"}, nil}, + + {"", "127.0.0.1:0", &UDPAddr{IP: IPv4(127, 0, 0, 1), Port: 0}, nil}, // Go 1.0 behavior + {"", "[::1]:0", &UDPAddr{IP: ParseIP("::1"), Port: 0}, nil}, // Go 1.0 behavior + + {"udp", ":12345", &UDPAddr{Port: 12345}, nil}, + + {"http", "127.0.0.1:0", nil, UnknownNetworkError("http")}, + + {"udp", "127.0.0.1:domain", &UDPAddr{IP: ParseIP("127.0.0.1"), Port: 53}, nil}, + {"udp", "[::ffff:127.0.0.1]:domain", &UDPAddr{IP: ParseIP("::ffff:127.0.0.1"), Port: 53}, nil}, + {"udp", "[2001:db8::1]:domain", &UDPAddr{IP: ParseIP("2001:db8::1"), Port: 53}, nil}, + {"udp4", "127.0.0.1:domain", &UDPAddr{IP: ParseIP("127.0.0.1"), Port: 53}, nil}, + {"udp4", "[::ffff:127.0.0.1]:domain", &UDPAddr{IP: ParseIP("127.0.0.1"), Port: 53}, nil}, + {"udp6", "[2001:db8::1]:domain", &UDPAddr{IP: ParseIP("2001:db8::1"), Port: 53}, nil}, + + {"udp4", "[2001:db8::1]:domain", nil, 
&AddrError{Err: errNoSuitableAddress.Error(), Addr: "2001:db8::1"}}, + {"udp6", "127.0.0.1:domain", nil, &AddrError{Err: errNoSuitableAddress.Error(), Addr: "127.0.0.1"}}, + {"udp6", "[::ffff:127.0.0.1]:domain", nil, &AddrError{Err: errNoSuitableAddress.Error(), Addr: "::ffff:127.0.0.1"}}, +} + +func TestResolveUDPAddr(t *testing.T) { + origTestHookLookupIP := testHookLookupIP + defer func() { testHookLookupIP = origTestHookLookupIP }() + testHookLookupIP = lookupLocalhost + + for _, tt := range resolveUDPAddrTests { + addr, err := ResolveUDPAddr(tt.network, tt.litAddrOrName) + if !reflect.DeepEqual(addr, tt.addr) || !reflect.DeepEqual(err, tt.err) { + t.Errorf("ResolveUDPAddr(%q, %q) = %#v, %v, want %#v, %v", tt.network, tt.litAddrOrName, addr, err, tt.addr, tt.err) + continue + } + if err == nil { + addr2, err := ResolveUDPAddr(addr.Network(), addr.String()) + if !reflect.DeepEqual(addr2, tt.addr) || err != tt.err { + t.Errorf("(%q, %q): ResolveUDPAddr(%q, %q) = %#v, %v, want %#v, %v", tt.network, tt.litAddrOrName, addr.Network(), addr.String(), addr2, err, tt.addr, tt.err) + } + } + } +} + +func TestWriteToUDP(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + + if !testableNetwork("udp") { + t.Skipf("skipping: udp not supported") + } + + c, err := ListenPacket("udp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testWriteToConn(t, c.LocalAddr().String()) + testWriteToPacketConn(t, c.LocalAddr().String()) +} + +func testWriteToConn(t *testing.T, raddr string) { + c, err := Dial("udp", raddr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + ra, err := ResolveUDPAddr("udp", raddr) + if err != nil { + t.Fatal(err) + } + + b := []byte("CONNECTED-MODE SOCKET") + _, err = c.(*UDPConn).WriteToUDP(b, ra) + if err == nil { + t.Fatal("should fail") + } + if err != nil && err.(*OpError).Err != ErrWriteToConnected { + t.Fatalf("should fail as ErrWriteToConnected: %v", err) + } + _, err = c.(*UDPConn).WriteTo(b, ra) + if err == nil { + t.Fatal("should fail") + } + if err != nil && err.(*OpError).Err != ErrWriteToConnected { + t.Fatalf("should fail as ErrWriteToConnected: %v", err) + } + _, err = c.Write(b) + if err != nil { + t.Fatal(err) + } + _, _, err = c.(*UDPConn).WriteMsgUDP(b, nil, ra) + if err == nil { + t.Fatal("should fail") + } + if err != nil && err.(*OpError).Err != ErrWriteToConnected { + t.Fatalf("should fail as ErrWriteToConnected: %v", err) + } + _, _, err = c.(*UDPConn).WriteMsgUDP(b, nil, nil) + if err != nil { + t.Fatal(err) + } +} + +func testWriteToPacketConn(t *testing.T, raddr string) { + c, err := ListenPacket("udp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + ra, err := ResolveUDPAddr("udp", raddr) + if err != nil { + t.Fatal(err) + } + + b := []byte("UNCONNECTED-MODE SOCKET") + _, err = c.(*UDPConn).WriteToUDP(b, ra) + if err != nil { + t.Fatal(err) + } + _, err = c.WriteTo(b, ra) + if err != nil { + t.Fatal(err) + } + _, err = c.(*UDPConn).Write(b) + if err == nil { + t.Fatal("should fail") + } + _, _, err = c.(*UDPConn).WriteMsgUDP(b, nil, nil) + if err == nil { + t.Fatal("should fail") + } + if err != nil && err.(*OpError).Err != errMissingAddress { + t.Fatalf("should fail as errMissingAddress: %v", err) + } + _, _, err = c.(*UDPConn).WriteMsgUDP(b, nil, ra) + if err != nil { + t.Fatal(err) + } +} + +var udpConnLocalNameTests = []struct { + net string + laddr *UDPAddr +}{ + {"udp4", &UDPAddr{IP: IPv4(127, 0, 0, 1)}}, + {"udp4", &UDPAddr{}}, + 
{"udp4", nil}, +} + +func TestUDPConnLocalName(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + for _, tt := range udpConnLocalNameTests { + t.Run(fmt.Sprint(tt.laddr), func(t *testing.T) { + if !testableNetwork(tt.net) { + t.Skipf("skipping: %s not available", tt.net) + } + + c, err := ListenUDP(tt.net, tt.laddr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + la := c.LocalAddr() + if a, ok := la.(*UDPAddr); !ok || a.Port == 0 { + t.Fatalf("got %v; expected a proper address with non-zero port number", la) + } + }) + } +} + +func TestUDPConnLocalAndRemoteNames(t *testing.T) { + if !testableNetwork("udp") { + t.Skipf("skipping: udp not available") + } + + for _, laddr := range []string{"", "127.0.0.1:0"} { + c1, err := ListenPacket("udp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer c1.Close() + + var la *UDPAddr + if laddr != "" { + var err error + if la, err = ResolveUDPAddr("udp", laddr); err != nil { + t.Fatal(err) + } + } + c2, err := DialUDP("udp", la, c1.LocalAddr().(*UDPAddr)) + if err != nil { + t.Fatal(err) + } + defer c2.Close() + + var connAddrs = [4]struct { + got Addr + ok bool + }{ + {c1.LocalAddr(), true}, + {c1.(*UDPConn).RemoteAddr(), false}, + {c2.LocalAddr(), true}, + {c2.RemoteAddr(), true}, + } + for _, ca := range connAddrs { + if a, ok := ca.got.(*UDPAddr); ok != ca.ok || ok && a.Port == 0 { + t.Fatalf("got %v; expected a proper address with non-zero port number", ca.got) + } + } + } +} + +func TestIPv6LinkLocalUnicastUDP(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + if !supportsIPv6() { + t.Skip("IPv6 is not supported") + } + + for i, tt := range ipv6LinkLocalUnicastUDPTests { + c1, err := ListenPacket(tt.network, tt.address) + if err != nil { + // It might return "LookupHost returned no + // suitable address" error on some platforms. 
+ t.Log(err) + continue + } + ls := (&packetListener{PacketConn: c1}).newLocalServer() + defer ls.teardown() + ch := make(chan error, 1) + handler := func(ls *localPacketServer, c PacketConn) { packetTransponder(c, ch) } + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + if la, ok := c1.LocalAddr().(*UDPAddr); !ok || !tt.nameLookup && la.Zone == "" { + t.Fatalf("got %v; expected a proper address with zone identifier", la) + } + + c2, err := Dial(tt.network, ls.PacketConn.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer c2.Close() + if la, ok := c2.LocalAddr().(*UDPAddr); !ok || !tt.nameLookup && la.Zone == "" { + t.Fatalf("got %v; expected a proper address with zone identifier", la) + } + if ra, ok := c2.RemoteAddr().(*UDPAddr); !ok || !tt.nameLookup && ra.Zone == "" { + t.Fatalf("got %v; expected a proper address with zone identifier", ra) + } + + if _, err := c2.Write([]byte("UDP OVER IPV6 LINKLOCAL TEST")); err != nil { + t.Fatal(err) + } + b := make([]byte, 32) + if _, err := c2.Read(b); err != nil { + t.Fatal(err) + } + + for err := range ch { + t.Errorf("#%d: %v", i, err) + } + } +} + +func TestUDPZeroBytePayload(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + case "darwin", "ios": + testenv.SkipFlaky(t, 29225) + } + if !testableNetwork("udp") { + t.Skipf("skipping: udp not available") + } + + c := newLocalPacketListener(t, "udp") + defer c.Close() + + for _, genericRead := range []bool{false, true} { + n, err := c.WriteTo(nil, c.LocalAddr()) + if err != nil { + t.Fatal(err) + } + if n != 0 { + t.Errorf("got %d; want 0", n) + } + c.SetReadDeadline(time.Now().Add(30 * time.Second)) + var b [1]byte + var name string + if genericRead { + _, err = c.(Conn).Read(b[:]) + name = "Read" + } else { + _, _, err = c.ReadFrom(b[:]) + name = "ReadFrom" + } + if err != nil { + t.Errorf("%s of zero byte packet failed: %v", name, err) + } + } +} + +func TestUDPZeroByteBuffer(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !testableNetwork("udp") { + t.Skipf("skipping: udp not available") + } + + c := newLocalPacketListener(t, "udp") + defer c.Close() + + b := []byte("UDP ZERO BYTE BUFFER TEST") + for _, genericRead := range []bool{false, true} { + n, err := c.WriteTo(b, c.LocalAddr()) + if err != nil { + t.Fatal(err) + } + if n != len(b) { + t.Errorf("got %d; want %d", n, len(b)) + } + c.SetReadDeadline(time.Now().Add(100 * time.Millisecond)) + if genericRead { + _, err = c.(Conn).Read(nil) + } else { + _, _, err = c.ReadFrom(nil) + } + switch err { + case nil: // ReadFrom succeeds + default: // Read may timeout, it depends on the platform + if nerr, ok := err.(Error); (!ok || !nerr.Timeout()) && runtime.GOOS != "windows" { // Windows returns WSAEMSGSIZE + t.Fatal(err) + } + } + } +} + +func TestUDPReadSizeError(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !testableNetwork("udp") { + t.Skipf("skipping: udp not available") + } + + c1 := newLocalPacketListener(t, "udp") + defer c1.Close() + + c2, err := Dial("udp", c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer c2.Close() + + b1 := []byte("READ SIZE ERROR TEST") + for _, genericRead := range []bool{false, true} { + n, err := c2.Write(b1) + if err != nil { + t.Fatal(err) + } + if n != len(b1) { + t.Errorf("got %d; want %d", n, len(b1)) + } + b2 := make([]byte, len(b1)-1) + if genericRead { + n, err = 
c1.(Conn).Read(b2) + } else { + n, _, err = c1.ReadFrom(b2) + } + if err != nil && runtime.GOOS != "windows" { // Windows returns WSAEMSGSIZE + t.Fatal(err) + } + if n != len(b1)-1 { + t.Fatalf("got %d; want %d", n, len(b1)-1) + } + } +} + +// TestUDPReadTimeout verifies that ReadFromUDP with timeout returns an error +// without data or an address. +func TestUDPReadTimeout(t *testing.T) { + if !testableNetwork("udp4") { + t.Skipf("skipping: udp4 not available") + } + + la, err := ResolveUDPAddr("udp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + c, err := ListenUDP("udp4", la) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + c.SetDeadline(time.Now()) + b := make([]byte, 1) + n, addr, err := c.ReadFromUDP(b) + if !errors.Is(err, os.ErrDeadlineExceeded) { + t.Errorf("ReadFromUDP got err %v want os.ErrDeadlineExceeded", err) + } + if n != 0 { + t.Errorf("ReadFromUDP got n %d want 0", n) + } + if addr != nil { + t.Errorf("ReadFromUDP got addr %+#v want nil", addr) + } +} + +func TestAllocs(t *testing.T) { + switch runtime.GOOS { + case "plan9", "js", "wasip1": + // These implementations have not been optimized. + t.Skipf("skipping on %v", runtime.GOOS) + } + if !testableNetwork("udp4") { + t.Skipf("skipping: udp4 not available") + } + + // Optimizations are required to remove the allocs. + testenv.SkipIfOptimizationOff(t) + + conn, err := ListenUDP("udp4", &UDPAddr{IP: IPv4(127, 0, 0, 1)}) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + addr := conn.LocalAddr() + addrPort := addr.(*UDPAddr).AddrPort() + buf := make([]byte, 8) + + allocs := testing.AllocsPerRun(1000, func() { + _, _, err := conn.WriteMsgUDPAddrPort(buf, nil, addrPort) + if err != nil { + t.Fatal(err) + } + _, _, _, _, err = conn.ReadMsgUDPAddrPort(buf, nil) + if err != nil { + t.Fatal(err) + } + }) + if got := int(allocs); got != 0 { + t.Errorf("WriteMsgUDPAddrPort/ReadMsgUDPAddrPort allocated %d objects", got) + } + + allocs = testing.AllocsPerRun(1000, func() { + _, err := conn.WriteToUDPAddrPort(buf, addrPort) + if err != nil { + t.Fatal(err) + } + _, _, err = conn.ReadFromUDPAddrPort(buf) + if err != nil { + t.Fatal(err) + } + }) + if got := int(allocs); got != 0 { + t.Errorf("WriteToUDPAddrPort/ReadFromUDPAddrPort allocated %d objects", got) + } + + allocs = testing.AllocsPerRun(1000, func() { + _, err := conn.WriteTo(buf, addr) + if err != nil { + t.Fatal(err) + } + _, _, err = conn.ReadFromUDP(buf) + if err != nil { + t.Fatal(err) + } + }) + if got := int(allocs); got != 1 { + t.Errorf("WriteTo/ReadFromUDP allocated %d objects", got) + } +} + +func BenchmarkReadWriteMsgUDPAddrPort(b *testing.B) { + conn, err := ListenUDP("udp4", &UDPAddr{IP: IPv4(127, 0, 0, 1)}) + if err != nil { + b.Fatal(err) + } + defer conn.Close() + addr := conn.LocalAddr().(*UDPAddr).AddrPort() + buf := make([]byte, 8) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, _, err := conn.WriteMsgUDPAddrPort(buf, nil, addr) + if err != nil { + b.Fatal(err) + } + _, _, _, _, err = conn.ReadMsgUDPAddrPort(buf, nil) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkWriteToReadFromUDP(b *testing.B) { + conn, err := ListenUDP("udp4", &UDPAddr{IP: IPv4(127, 0, 0, 1)}) + if err != nil { + b.Fatal(err) + } + defer conn.Close() + addr := conn.LocalAddr() + buf := make([]byte, 8) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, err := conn.WriteTo(buf, addr) + if err != nil { + b.Fatal(err) + } + _, _, err = conn.ReadFromUDP(buf) + if err != nil { + b.Fatal(err) + } + } +} + +func 
BenchmarkWriteToReadFromUDPAddrPort(b *testing.B) { + conn, err := ListenUDP("udp4", &UDPAddr{IP: IPv4(127, 0, 0, 1)}) + if err != nil { + b.Fatal(err) + } + defer conn.Close() + addr := conn.LocalAddr().(*UDPAddr).AddrPort() + buf := make([]byte, 8) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, err := conn.WriteToUDPAddrPort(buf, addr) + if err != nil { + b.Fatal(err) + } + _, _, err = conn.ReadFromUDPAddrPort(buf) + if err != nil { + b.Fatal(err) + } + } +} + +func TestUDPIPVersionReadMsg(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("skipping on %v", runtime.GOOS) + } + if !testableNetwork("udp4") { + t.Skipf("skipping: udp4 not available") + } + + conn, err := ListenUDP("udp4", &UDPAddr{IP: IPv4(127, 0, 0, 1)}) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + daddr := conn.LocalAddr().(*UDPAddr).AddrPort() + buf := make([]byte, 8) + _, err = conn.WriteToUDPAddrPort(buf, daddr) + if err != nil { + t.Fatal(err) + } + _, _, _, saddr, err := conn.ReadMsgUDPAddrPort(buf, nil) + if err != nil { + t.Fatal(err) + } + if !saddr.Addr().Is4() { + t.Error("returned AddrPort is not IPv4") + } + _, err = conn.WriteToUDPAddrPort(buf, daddr) + if err != nil { + t.Fatal(err) + } + _, _, _, soldaddr, err := conn.ReadMsgUDP(buf, nil) + if err != nil { + t.Fatal(err) + } + if len(soldaddr.IP) != 4 { + t.Error("returned UDPAddr is not IPv4") + } +} + +// TestIPv6WriteMsgUDPAddrPortTargetAddrIPVersion verifies that +// WriteMsgUDPAddrPort accepts IPv4, IPv4-mapped IPv6, and IPv6 target addresses +// on a UDPConn listening on "::". +func TestIPv6WriteMsgUDPAddrPortTargetAddrIPVersion(t *testing.T) { + if !testableNetwork("udp4") { + t.Skipf("skipping: udp4 not available") + } + if !testableNetwork("udp6") { + t.Skipf("skipping: udp6 not available") + } + + switch runtime.GOOS { + case "dragonfly", "openbsd": + // DragonflyBSD's IPv6 sockets are always IPv6-only, according to the man page: + // https://www.dragonflybsd.org/cgi/web-man?command=ip6 (search for IPV6_V6ONLY). + // OpenBSD's IPv6 sockets are always IPv6-only, according to the man page: + // https://man.openbsd.org/ip6#IPV6_V6ONLY + t.Skipf("skipping on %v", runtime.GOOS) + } + + conn, err := ListenUDP("udp", nil) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + daddr4 := netip.AddrPortFrom(netip.MustParseAddr("127.0.0.1"), 12345) + daddr4in6 := netip.AddrPortFrom(netip.MustParseAddr("::ffff:127.0.0.1"), 12345) + daddr6 := netip.AddrPortFrom(netip.MustParseAddr("::1"), 12345) + buf := make([]byte, 8) + + _, _, err = conn.WriteMsgUDPAddrPort(buf, nil, daddr4) + if err != nil { + t.Fatal(err) + } + + _, _, err = conn.WriteMsgUDPAddrPort(buf, nil, daddr4in6) + if err != nil { + t.Fatal(err) + } + + _, _, err = conn.WriteMsgUDPAddrPort(buf, nil, daddr6) + if err != nil { + t.Fatal(err) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/unixsock.go b/platform/dbops/binaries/go/go/src/net/unixsock.go new file mode 100644 index 0000000000000000000000000000000000000000..821be7bf741972ac50d840a07d4e5962371724a6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/unixsock.go @@ -0,0 +1,349 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "os" + "sync" + "syscall" + "time" +) + +// BUG(mikio): On JS, WASIP1 and Plan 9, methods and functions related +// to UnixConn and UnixListener are not implemented. 
+ +// BUG(mikio): On Windows, methods and functions related to UnixConn +// and UnixListener don't work for "unixgram" and "unixpacket". + +// UnixAddr represents the address of a Unix domain socket end point. +type UnixAddr struct { + Name string + Net string +} + +// Network returns the address's network name, "unix", "unixgram" or +// "unixpacket". +func (a *UnixAddr) Network() string { + return a.Net +} + +func (a *UnixAddr) String() string { + if a == nil { + return "" + } + return a.Name +} + +func (a *UnixAddr) isWildcard() bool { + return a == nil || a.Name == "" +} + +func (a *UnixAddr) opAddr() Addr { + if a == nil { + return nil + } + return a +} + +// ResolveUnixAddr returns an address of Unix domain socket end point. +// +// The network must be a Unix network name. +// +// See func [Dial] for a description of the network and address +// parameters. +func ResolveUnixAddr(network, address string) (*UnixAddr, error) { + switch network { + case "unix", "unixgram", "unixpacket": + return &UnixAddr{Name: address, Net: network}, nil + default: + return nil, UnknownNetworkError(network) + } +} + +// UnixConn is an implementation of the [Conn] interface for connections +// to Unix domain sockets. +type UnixConn struct { + conn +} + +// SyscallConn returns a raw network connection. +// This implements the [syscall.Conn] interface. +func (c *UnixConn) SyscallConn() (syscall.RawConn, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + return newRawConn(c.fd), nil +} + +// CloseRead shuts down the reading side of the Unix domain connection. +// Most callers should just use Close. +func (c *UnixConn) CloseRead() error { + if !c.ok() { + return syscall.EINVAL + } + if err := c.fd.closeRead(); err != nil { + return &OpError{Op: "close", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return nil +} + +// CloseWrite shuts down the writing side of the Unix domain connection. +// Most callers should just use Close. +func (c *UnixConn) CloseWrite() error { + if !c.ok() { + return syscall.EINVAL + } + if err := c.fd.closeWrite(); err != nil { + return &OpError{Op: "close", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return nil +} + +// ReadFromUnix acts like [UnixConn.ReadFrom] but returns a [UnixAddr]. +func (c *UnixConn) ReadFromUnix(b []byte) (int, *UnixAddr, error) { + if !c.ok() { + return 0, nil, syscall.EINVAL + } + n, addr, err := c.readFrom(b) + if err != nil { + err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return n, addr, err +} + +// ReadFrom implements the [PacketConn] ReadFrom method. +func (c *UnixConn) ReadFrom(b []byte) (int, Addr, error) { + if !c.ok() { + return 0, nil, syscall.EINVAL + } + n, addr, err := c.readFrom(b) + if err != nil { + err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + if addr == nil { + return n, nil, err + } + return n, addr, err +} + +// ReadMsgUnix reads a message from c, copying the payload into b and +// the associated out-of-band data into oob. It returns the number of +// bytes copied into b, the number of bytes copied into oob, the flags +// that were set on the message and the source address of the message. +// +// Note that if len(b) == 0 and len(oob) > 0, this function will still +// read (and discard) 1 byte from the connection. 
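// The usual content of oob on a Unix socket is SCM_RIGHTS file-descriptor
// passing. A sketch (Unix-only; uc is a connected *net.UnixConn and f an
// open *os.File, both assumed; error handling mostly elided):
//
//	rights := syscall.UnixRights(int(f.Fd()))
//	if _, _, err := uc.WriteMsgUnix(nil, rights, nil); err != nil { // still writes 1 data byte
//		// handle error
//	}
//	oob := make([]byte, 128)
//	_, oobn, _, _, err := uc.ReadMsgUnix(nil, oob) // still reads 1 data byte
//	msgs, _ := syscall.ParseSocketControlMessage(oob[:oobn])
//	fds, _ := syscall.ParseUnixRights(&msgs[0]) // fds holds the received descriptors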
+func (c *UnixConn) ReadMsgUnix(b, oob []byte) (n, oobn, flags int, addr *UnixAddr, err error) { + if !c.ok() { + return 0, 0, 0, nil, syscall.EINVAL + } + n, oobn, flags, addr, err = c.readMsg(b, oob) + if err != nil { + err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return +} + +// WriteToUnix acts like [UnixConn.WriteTo] but takes a [UnixAddr]. +func (c *UnixConn) WriteToUnix(b []byte, addr *UnixAddr) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + n, err := c.writeTo(b, addr) + if err != nil { + err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr.opAddr(), Err: err} + } + return n, err +} + +// WriteTo implements the [PacketConn] WriteTo method. +func (c *UnixConn) WriteTo(b []byte, addr Addr) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + a, ok := addr.(*UnixAddr) + if !ok { + return 0, &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr, Err: syscall.EINVAL} + } + n, err := c.writeTo(b, a) + if err != nil { + err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: a.opAddr(), Err: err} + } + return n, err +} + +// WriteMsgUnix writes a message to addr via c, copying the payload +// from b and the associated out-of-band data from oob. It returns the +// number of payload and out-of-band bytes written. +// +// Note that if len(b) == 0 and len(oob) > 0, this function will still +// write 1 byte to the connection. +func (c *UnixConn) WriteMsgUnix(b, oob []byte, addr *UnixAddr) (n, oobn int, err error) { + if !c.ok() { + return 0, 0, syscall.EINVAL + } + n, oobn, err = c.writeMsg(b, oob, addr) + if err != nil { + err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr.opAddr(), Err: err} + } + return +} + +func newUnixConn(fd *netFD) *UnixConn { return &UnixConn{conn{fd}} } + +// DialUnix acts like [Dial] for Unix networks. +// +// The network must be a Unix network name; see func Dial for details. +// +// If laddr is non-nil, it is used as the local address for the +// connection. +func DialUnix(network string, laddr, raddr *UnixAddr) (*UnixConn, error) { + switch network { + case "unix", "unixgram", "unixpacket": + default: + return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(network)} + } + sd := &sysDialer{network: network, address: raddr.String()} + c, err := sd.dialUnix(context.Background(), laddr, raddr) + if err != nil { + return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err} + } + return c, nil +} + +// UnixListener is a Unix domain socket listener. Clients should +// typically use variables of type [Listener] instead of assuming Unix +// domain sockets. +type UnixListener struct { + fd *netFD + path string + unlink bool + unlinkOnce sync.Once +} + +func (ln *UnixListener) ok() bool { return ln != nil && ln.fd != nil } + +// SyscallConn returns a raw network connection. +// This implements the [syscall.Conn] interface. +// +// The returned RawConn only supports calling Control. Read and +// Write return an error. +func (l *UnixListener) SyscallConn() (syscall.RawConn, error) { + if !l.ok() { + return nil, syscall.EINVAL + } + return newRawListener(l.fd), nil +} + +// AcceptUnix accepts the next incoming call and returns the new +// connection. 
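// A sketch of the usual accept loop on a stream Unix socket (the socket path
// is illustrative; io.Copy(c, c) turns every connection into an echo):
//
//	ln, err := net.ListenUnix("unix", &net.UnixAddr{Name: "/tmp/echo.sock", Net: "unix"})
//	if err != nil {
//		// handle error
//	}
//	defer ln.Close()
//	for {
//		c, err := ln.AcceptUnix()
//		if err != nil {
//			break
//		}
//		go func() { defer c.Close(); io.Copy(c, c) }()
//	}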
+func (l *UnixListener) AcceptUnix() (*UnixConn, error) { + if !l.ok() { + return nil, syscall.EINVAL + } + c, err := l.accept() + if err != nil { + return nil, &OpError{Op: "accept", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err} + } + return c, nil +} + +// Accept implements the Accept method in the [Listener] interface. +// Returned connections will be of type [*UnixConn]. +func (l *UnixListener) Accept() (Conn, error) { + if !l.ok() { + return nil, syscall.EINVAL + } + c, err := l.accept() + if err != nil { + return nil, &OpError{Op: "accept", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err} + } + return c, nil +} + +// Close stops listening on the Unix address. Already accepted +// connections are not closed. +func (l *UnixListener) Close() error { + if !l.ok() { + return syscall.EINVAL + } + if err := l.close(); err != nil { + return &OpError{Op: "close", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err} + } + return nil +} + +// Addr returns the listener's network address. +// The Addr returned is shared by all invocations of Addr, so +// do not modify it. +func (l *UnixListener) Addr() Addr { return l.fd.laddr } + +// SetDeadline sets the deadline associated with the listener. +// A zero time value disables the deadline. +func (l *UnixListener) SetDeadline(t time.Time) error { + if !l.ok() { + return syscall.EINVAL + } + return l.fd.SetDeadline(t) +} + +// File returns a copy of the underlying [os.File]. +// It is the caller's responsibility to close f when finished. +// Closing l does not affect f, and closing f does not affect l. +// +// The returned os.File's file descriptor is different from the +// connection's. Attempting to change properties of the original +// using this duplicate may or may not have the desired effect. +func (l *UnixListener) File() (f *os.File, err error) { + if !l.ok() { + return nil, syscall.EINVAL + } + f, err = l.file() + if err != nil { + err = &OpError{Op: "file", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err} + } + return +} + +// ListenUnix acts like [Listen] for Unix networks. +// +// The network must be "unix" or "unixpacket". +func ListenUnix(network string, laddr *UnixAddr) (*UnixListener, error) { + switch network { + case "unix", "unixpacket": + default: + return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(network)} + } + if laddr == nil { + return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: errMissingAddress} + } + sl := &sysListener{network: network, address: laddr.String()} + ln, err := sl.listenUnix(context.Background(), laddr) + if err != nil { + return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: err} + } + return ln, nil +} + +// ListenUnixgram acts like [ListenPacket] for Unix networks. +// +// The network must be "unixgram". 
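// A sketch of a datagram pair (the socket path is illustrative). Unlike
// "unix" there is no accept step; each read reports its sender:
//
//	path := "/tmp/dg.sock"
//	srv, err := net.ListenUnixgram("unixgram", &net.UnixAddr{Name: path, Net: "unixgram"})
//	if err != nil {
//		// handle error
//	}
//	defer srv.Close()
//	cli, _ := net.DialUnix("unixgram", nil, &net.UnixAddr{Name: path, Net: "unixgram"})
//	cli.Write([]byte("hello"))
//	b := make([]byte, 64)
//	n, from, _ := srv.ReadFromUnix(b) // from is nil: the dialer never bound a name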
+func ListenUnixgram(network string, laddr *UnixAddr) (*UnixConn, error) { + switch network { + case "unixgram": + default: + return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(network)} + } + if laddr == nil { + return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: nil, Err: errMissingAddress} + } + sl := &sysListener{network: network, address: laddr.String()} + c, err := sl.listenUnixgram(context.Background(), laddr) + if err != nil { + return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: err} + } + return c, nil +} diff --git a/platform/dbops/binaries/go/go/src/net/unixsock_linux_test.go b/platform/dbops/binaries/go/go/src/net/unixsock_linux_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d04007cef38b2c43f820bc7d26e4483b87ed4bf5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/unixsock_linux_test.go @@ -0,0 +1,104 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "bytes" + "reflect" + "syscall" + "testing" + "time" +) + +func TestUnixgramAutobind(t *testing.T) { + laddr := &UnixAddr{Name: "", Net: "unixgram"} + c1, err := ListenUnixgram("unixgram", laddr) + if err != nil { + t.Fatal(err) + } + defer c1.Close() + + // retrieve the autobind address + autoAddr := c1.LocalAddr().(*UnixAddr) + if len(autoAddr.Name) <= 1 { + t.Fatalf("invalid autobind address: %v", autoAddr) + } + if autoAddr.Name[0] != '@' { + t.Fatalf("invalid autobind address: %v", autoAddr) + } + + c2, err := DialUnix("unixgram", nil, autoAddr) + if err != nil { + t.Fatal(err) + } + defer c2.Close() + + if !reflect.DeepEqual(c1.LocalAddr(), c2.RemoteAddr()) { + t.Fatalf("expected autobind address %v, got %v", c1.LocalAddr(), c2.RemoteAddr()) + } +} + +func TestUnixAutobindClose(t *testing.T) { + laddr := &UnixAddr{Name: "", Net: "unix"} + ln, err := ListenUnix("unix", laddr) + if err != nil { + t.Fatal(err) + } + ln.Close() +} + +func TestUnixgramLinuxAbstractLongName(t *testing.T) { + if !testableNetwork("unixgram") { + t.Skip("skipping: unixgram not supported") + } + + // Create an abstract socket name whose length is exactly + // the maximum RawSockaddrUnix Path len + rsu := syscall.RawSockaddrUnix{} + addrBytes := make([]byte, len(rsu.Path)) + copy(addrBytes, "@abstract_test") + addr := string(addrBytes) + + la, err := ResolveUnixAddr("unixgram", addr) + if err != nil { + t.Fatal(err) + } + c, err := ListenUnixgram("unixgram", la) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + off := make(chan bool) + data := [5]byte{1, 2, 3, 4, 5} + go func() { + defer func() { off <- true }() + s, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_DGRAM, 0) + if err != nil { + t.Error(err) + return + } + defer syscall.Close(s) + rsa := &syscall.SockaddrUnix{Name: addr} + if err := syscall.Sendto(s, data[:], 0, rsa); err != nil { + t.Error(err) + return + } + }() + + <-off + b := make([]byte, 64) + c.SetReadDeadline(time.Now().Add(100 * time.Millisecond)) + n, from, err := c.ReadFrom(b) + if err != nil { + t.Fatal(err) + } + if from != nil { + t.Fatalf("unexpected peer address: %v", from) + } + if !bytes.Equal(b[:n], data[:]) { + t.Fatalf("got %v; want %v", b[:n], data[:]) + } +} diff --git a/platform/dbops/binaries/go/go/src/net/unixsock_plan9.go b/platform/dbops/binaries/go/go/src/net/unixsock_plan9.go new file mode 100644 index 
0000000000000000000000000000000000000000..6ebd4d7d3b46ca3d74bbd619375e54286b7ad1a6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/unixsock_plan9.go @@ -0,0 +1,51 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "context" + "os" + "syscall" +) + +func (c *UnixConn) readFrom(b []byte) (int, *UnixAddr, error) { + return 0, nil, syscall.EPLAN9 +} + +func (c *UnixConn) readMsg(b, oob []byte) (n, oobn, flags int, addr *UnixAddr, err error) { + return 0, 0, 0, nil, syscall.EPLAN9 +} + +func (c *UnixConn) writeTo(b []byte, addr *UnixAddr) (int, error) { + return 0, syscall.EPLAN9 +} + +func (c *UnixConn) writeMsg(b, oob []byte, addr *UnixAddr) (n, oobn int, err error) { + return 0, 0, syscall.EPLAN9 +} + +func (sd *sysDialer) dialUnix(ctx context.Context, laddr, raddr *UnixAddr) (*UnixConn, error) { + return nil, syscall.EPLAN9 +} + +func (ln *UnixListener) accept() (*UnixConn, error) { + return nil, syscall.EPLAN9 +} + +func (ln *UnixListener) close() error { + return syscall.EPLAN9 +} + +func (ln *UnixListener) file() (*os.File, error) { + return nil, syscall.EPLAN9 +} + +func (sl *sysListener) listenUnix(ctx context.Context, laddr *UnixAddr) (*UnixListener, error) { + return nil, syscall.EPLAN9 +} + +func (sl *sysListener) listenUnixgram(ctx context.Context, laddr *UnixAddr) (*UnixConn, error) { + return nil, syscall.EPLAN9 +} diff --git a/platform/dbops/binaries/go/go/src/net/unixsock_posix.go b/platform/dbops/binaries/go/go/src/net/unixsock_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..f6c8e8f0b0e74baa975ea1bbf40ba33e32144159 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/unixsock_posix.go @@ -0,0 +1,245 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build unix || js || wasip1 || windows + +package net + +import ( + "context" + "errors" + "os" + "syscall" +) + +func unixSocket(ctx context.Context, net string, laddr, raddr sockaddr, mode string, ctxCtrlFn func(context.Context, string, string, syscall.RawConn) error) (*netFD, error) { + var sotype int + switch net { + case "unix": + sotype = syscall.SOCK_STREAM + case "unixgram": + sotype = syscall.SOCK_DGRAM + case "unixpacket": + sotype = syscall.SOCK_SEQPACKET + default: + return nil, UnknownNetworkError(net) + } + + switch mode { + case "dial": + if laddr != nil && laddr.isWildcard() { + laddr = nil + } + if raddr != nil && raddr.isWildcard() { + raddr = nil + } + if raddr == nil && (sotype != syscall.SOCK_DGRAM || laddr == nil) { + return nil, errMissingAddress + } + case "listen": + default: + return nil, errors.New("unknown mode: " + mode) + } + + fd, err := socket(ctx, net, syscall.AF_UNIX, sotype, 0, false, laddr, raddr, ctxCtrlFn) + if err != nil { + return nil, err + } + return fd, nil +} + +func sockaddrToUnix(sa syscall.Sockaddr) Addr { + if s, ok := sa.(*syscall.SockaddrUnix); ok { + return &UnixAddr{Name: s.Name, Net: "unix"} + } + return nil +} + +func sockaddrToUnixgram(sa syscall.Sockaddr) Addr { + if s, ok := sa.(*syscall.SockaddrUnix); ok { + return &UnixAddr{Name: s.Name, Net: "unixgram"} + } + return nil +} + +func sockaddrToUnixpacket(sa syscall.Sockaddr) Addr { + if s, ok := sa.(*syscall.SockaddrUnix); ok { + return &UnixAddr{Name: s.Name, Net: "unixpacket"} + } + return nil +} + +func sotypeToNet(sotype int) string { + switch sotype { + case syscall.SOCK_STREAM: + return "unix" + case syscall.SOCK_DGRAM: + return "unixgram" + case syscall.SOCK_SEQPACKET: + return "unixpacket" + default: + panic("sotypeToNet unknown socket type") + } +} + +func (a *UnixAddr) family() int { + return syscall.AF_UNIX +} + +func (a *UnixAddr) sockaddr(family int) (syscall.Sockaddr, error) { + if a == nil { + return nil, nil + } + return &syscall.SockaddrUnix{Name: a.Name}, nil +} + +func (a *UnixAddr) toLocal(net string) sockaddr { + return a +} + +func (c *UnixConn) readFrom(b []byte) (int, *UnixAddr, error) { + var addr *UnixAddr + n, sa, err := c.fd.readFrom(b) + switch sa := sa.(type) { + case *syscall.SockaddrUnix: + if sa.Name != "" { + addr = &UnixAddr{Name: sa.Name, Net: sotypeToNet(c.fd.sotype)} + } + } + return n, addr, err +} + +func (c *UnixConn) readMsg(b, oob []byte) (n, oobn, flags int, addr *UnixAddr, err error) { + var sa syscall.Sockaddr + n, oobn, flags, sa, err = c.fd.readMsg(b, oob, readMsgFlags) + if readMsgFlags == 0 && err == nil && oobn > 0 { + setReadMsgCloseOnExec(oob[:oobn]) + } + + switch sa := sa.(type) { + case *syscall.SockaddrUnix: + if sa.Name != "" { + addr = &UnixAddr{Name: sa.Name, Net: sotypeToNet(c.fd.sotype)} + } + } + return +} + +func (c *UnixConn) writeTo(b []byte, addr *UnixAddr) (int, error) { + if c.fd.isConnected { + return 0, ErrWriteToConnected + } + if addr == nil { + return 0, errMissingAddress + } + if addr.Net != sotypeToNet(c.fd.sotype) { + return 0, syscall.EAFNOSUPPORT + } + sa := &syscall.SockaddrUnix{Name: addr.Name} + return c.fd.writeTo(b, sa) +} + +func (c *UnixConn) writeMsg(b, oob []byte, addr *UnixAddr) (n, oobn int, err error) { + if c.fd.sotype == syscall.SOCK_DGRAM && c.fd.isConnected { + return 0, 0, ErrWriteToConnected + } + var sa syscall.Sockaddr + if addr != nil { + if addr.Net != sotypeToNet(c.fd.sotype) { + return 0, 0, syscall.EAFNOSUPPORT + } + sa = &syscall.SockaddrUnix{Name: addr.Name} + } + return 
c.fd.writeMsg(b, oob, sa)
+}
+
+func (sd *sysDialer) dialUnix(ctx context.Context, laddr, raddr *UnixAddr) (*UnixConn, error) {
+	ctrlCtxFn := sd.Dialer.ControlContext
+	if ctrlCtxFn == nil && sd.Dialer.Control != nil {
+		ctrlCtxFn = func(cxt context.Context, network, address string, c syscall.RawConn) error {
+			return sd.Dialer.Control(network, address, c)
+		}
+	}
+	fd, err := unixSocket(ctx, sd.network, laddr, raddr, "dial", ctrlCtxFn)
+	if err != nil {
+		return nil, err
+	}
+	return newUnixConn(fd), nil
+}
+
+func (ln *UnixListener) accept() (*UnixConn, error) {
+	fd, err := ln.fd.accept()
+	if err != nil {
+		return nil, err
+	}
+	return newUnixConn(fd), nil
+}
+
+func (ln *UnixListener) close() error {
+	// The operating system doesn't clean up
+	// the socket file that announcing created, so
+	// we have to clean it up ourselves.
+	// There's a race here--we can't know for
+	// sure whether someone else has come along
+	// and replaced our socket name already--
+	// but this sequence (remove then close)
+	// is at least compatible with the auto-remove
+	// sequence in ListenUnix. It's only non-Go
+	// programs that can mess us up.
+	// Even if there are racy calls to Close, we want to unlink only for the first one.
+	ln.unlinkOnce.Do(func() {
+		if ln.path[0] != '@' && ln.unlink {
+			syscall.Unlink(ln.path)
+		}
+	})
+	return ln.fd.Close()
+}
+
+func (ln *UnixListener) file() (*os.File, error) {
+	f, err := ln.fd.dup()
+	if err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// SetUnlinkOnClose sets whether the underlying socket file should be removed
+// from the file system when the listener is closed.
+//
+// The default behavior is to unlink the socket file only when package net created it.
+// That is, when the listener and the underlying socket file were created by a call to
+// Listen or ListenUnix, then by default closing the listener will remove the socket file.
+// But if the listener was created by a call to FileListener to use an already existing
+// socket file, then by default closing the listener will not remove the socket file.
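+//
+// For example, to have a listener obtained from [FileListener] remove the
+// socket file on Close (illustrative sketch; f is assumed to wrap an
+// existing listening unix socket):
+//
+//	l, err := net.FileListener(f)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	ul := l.(*net.UnixListener)
+//	ul.SetUnlinkOnClose(true) // unlink on Close even though package net did not create the file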
+func (l *UnixListener) SetUnlinkOnClose(unlink bool) { + l.unlink = unlink +} + +func (sl *sysListener) listenUnix(ctx context.Context, laddr *UnixAddr) (*UnixListener, error) { + var ctrlCtxFn func(cxt context.Context, network, address string, c syscall.RawConn) error + if sl.ListenConfig.Control != nil { + ctrlCtxFn = func(cxt context.Context, network, address string, c syscall.RawConn) error { + return sl.ListenConfig.Control(network, address, c) + } + } + fd, err := unixSocket(ctx, sl.network, laddr, nil, "listen", ctrlCtxFn) + if err != nil { + return nil, err + } + return &UnixListener{fd: fd, path: fd.laddr.String(), unlink: true}, nil +} + +func (sl *sysListener) listenUnixgram(ctx context.Context, laddr *UnixAddr) (*UnixConn, error) { + var ctrlCtxFn func(cxt context.Context, network, address string, c syscall.RawConn) error + if sl.ListenConfig.Control != nil { + ctrlCtxFn = func(cxt context.Context, network, address string, c syscall.RawConn) error { + return sl.ListenConfig.Control(network, address, c) + } + } + fd, err := unixSocket(ctx, sl.network, laddr, nil, "listen", ctrlCtxFn) + if err != nil { + return nil, err + } + return newUnixConn(fd), nil +} diff --git a/platform/dbops/binaries/go/go/src/net/unixsock_readmsg_cloexec.go b/platform/dbops/binaries/go/go/src/net/unixsock_readmsg_cloexec.go new file mode 100644 index 0000000000000000000000000000000000000000..fa4fd7d9331f422bdad34e3efbb24786df5739f3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/unixsock_readmsg_cloexec.go @@ -0,0 +1,30 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || darwin || freebsd || solaris + +package net + +import "syscall" + +const readMsgFlags = 0 + +func setReadMsgCloseOnExec(oob []byte) { + scms, err := syscall.ParseSocketControlMessage(oob) + if err != nil { + return + } + + for _, scm := range scms { + if scm.Header.Level == syscall.SOL_SOCKET && scm.Header.Type == syscall.SCM_RIGHTS { + fds, err := syscall.ParseUnixRights(&scm) + if err != nil { + continue + } + for _, fd := range fds { + syscall.CloseOnExec(fd) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/net/unixsock_readmsg_cmsg_cloexec.go b/platform/dbops/binaries/go/go/src/net/unixsock_readmsg_cmsg_cloexec.go new file mode 100644 index 0000000000000000000000000000000000000000..6b0de875ad5c3f1cca2d50c84c7af60c5f23087f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/unixsock_readmsg_cmsg_cloexec.go @@ -0,0 +1,13 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build dragonfly || linux || netbsd || openbsd + +package net + +import "syscall" + +const readMsgFlags = syscall.MSG_CMSG_CLOEXEC + +func setReadMsgCloseOnExec(oob []byte) {} diff --git a/platform/dbops/binaries/go/go/src/net/unixsock_readmsg_other.go b/platform/dbops/binaries/go/go/src/net/unixsock_readmsg_other.go new file mode 100644 index 0000000000000000000000000000000000000000..4bef3ee71d527c76d500e0b0714b26d1a7981298 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/unixsock_readmsg_other.go @@ -0,0 +1,11 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+//go:build js || wasip1 || windows
+
+package net
+
+const readMsgFlags = 0
+
+func setReadMsgCloseOnExec(oob []byte) {}
diff --git a/platform/dbops/binaries/go/go/src/net/unixsock_readmsg_test.go b/platform/dbops/binaries/go/go/src/net/unixsock_readmsg_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..2d89dc4936a12904183ac85dbaa0ff560bfb9975
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/net/unixsock_readmsg_test.go
@@ -0,0 +1,105 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package net
+
+import (
+	"internal/syscall/unix"
+	"os"
+	"syscall"
+	"testing"
+	"time"
+)
+
+func TestUnixConnReadMsgUnixSCMRightsCloseOnExec(t *testing.T) {
+	if !testableNetwork("unix") {
+		t.Skip("not unix system")
+	}
+
+	scmFile, err := os.Open(os.DevNull)
+	if err != nil {
+		t.Fatalf("file open: %v", err)
+	}
+	defer scmFile.Close()
+
+	rights := syscall.UnixRights(int(scmFile.Fd()))
+	fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0)
+	if err != nil {
+		t.Fatalf("Socketpair: %v", err)
+	}
+
+	writeFile := os.NewFile(uintptr(fds[0]), "write-socket")
+	defer writeFile.Close()
+	readFile := os.NewFile(uintptr(fds[1]), "read-socket")
+	defer readFile.Close()
+
+	cw, err := FileConn(writeFile)
+	if err != nil {
+		t.Fatalf("FileConn: %v", err)
+	}
+	defer cw.Close()
+	cr, err := FileConn(readFile)
+	if err != nil {
+		t.Fatalf("FileConn: %v", err)
+	}
+	defer cr.Close()
+
+	ucw, ok := cw.(*UnixConn)
+	if !ok {
+		t.Fatalf("got %T; want UnixConn", cw)
+	}
+	ucr, ok := cr.(*UnixConn)
+	if !ok {
+		t.Fatalf("got %T; want UnixConn", cr)
+	}
+
+	oob := make([]byte, syscall.CmsgSpace(4))
+	err = ucw.SetWriteDeadline(time.Now().Add(5 * time.Second))
+	if err != nil {
+		t.Fatalf("Can't set unix connection timeout: %v", err)
+	}
+	_, _, err = ucw.WriteMsgUnix(nil, rights, nil)
+	if err != nil {
+		t.Fatalf("UnixConn WriteMsgUnix: %v", err)
+	}
+	err = ucr.SetReadDeadline(time.Now().Add(5 * time.Second))
+	if err != nil {
+		t.Fatalf("Can't set unix connection timeout: %v", err)
+	}
+	_, oobn, _, _, err := ucr.ReadMsgUnix(nil, oob)
+	if err != nil {
+		t.Fatalf("UnixConn ReadMsgUnix: %v", err)
+	}
+
+	scms, err := syscall.ParseSocketControlMessage(oob[:oobn])
+	if err != nil {
+		t.Fatalf("ParseSocketControlMessage: %v", err)
+	}
+	if len(scms) != 1 {
+		t.Fatalf("got scms = %#v; expected 1 SocketControlMessage", scms)
+	}
+	scm := scms[0]
+	gotFDs, err := syscall.ParseUnixRights(&scm)
+	if err != nil {
+		t.Fatalf("syscall.ParseUnixRights: %v", err)
+	}
+	if len(gotFDs) != 1 {
+		t.Fatalf("got FDs %#v: wanted only 1 fd", gotFDs)
+	}
+	defer func() {
+		if err := syscall.Close(gotFDs[0]); err != nil {
+			t.Fatalf("failed to close gotFDs: %v", err)
+		}
+	}()
+
+	flags, err := unix.Fcntl(gotFDs[0], syscall.F_GETFD, 0)
+	if err != nil {
+		t.Fatalf("Can't get flags of fd:%#v, with err:%v", gotFDs[0], err)
+	}
+	if flags&syscall.FD_CLOEXEC == 0 {
+		t.Fatalf("got flags %#x, want %#x (FD_CLOEXEC) set", flags, syscall.FD_CLOEXEC)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/net/unixsock_test.go b/platform/dbops/binaries/go/go/src/net/unixsock_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..6906ecc0466a9d784eb431cbe76c0189e330063a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/net/unixsock_test.go
@@ -0,0 +1,472 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !plan9 && !windows + +package net + +import ( + "bytes" + "internal/testenv" + "os" + "reflect" + "runtime" + "syscall" + "testing" + "time" +) + +func TestReadUnixgramWithUnnamedSocket(t *testing.T) { + if !testableNetwork("unixgram") { + t.Skip("unixgram test") + } + switch runtime.GOOS { + case "js", "wasip1": + t.Skipf("skipping: syscall.Socket not implemented on %s", runtime.GOOS) + } + if runtime.GOOS == "openbsd" { + testenv.SkipFlaky(t, 15157) + } + + addr := testUnixAddr(t) + la, err := ResolveUnixAddr("unixgram", addr) + if err != nil { + t.Fatal(err) + } + c, err := ListenUnixgram("unixgram", la) + if err != nil { + t.Fatal(err) + } + defer func() { + c.Close() + os.Remove(addr) + }() + + off := make(chan bool) + data := [5]byte{1, 2, 3, 4, 5} + go func() { + defer func() { off <- true }() + s, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_DGRAM, 0) + if err != nil { + t.Error(err) + return + } + defer syscall.Close(s) + rsa := &syscall.SockaddrUnix{Name: addr} + if err := syscall.Sendto(s, data[:], 0, rsa); err != nil { + t.Error(err) + return + } + }() + + <-off + b := make([]byte, 64) + c.SetReadDeadline(time.Now().Add(100 * time.Millisecond)) + n, from, err := c.ReadFrom(b) + if err != nil { + t.Fatal(err) + } + if from != nil { + t.Fatalf("unexpected peer address: %v", from) + } + if !bytes.Equal(b[:n], data[:]) { + t.Fatalf("got %v; want %v", b[:n], data[:]) + } +} + +func TestUnixgramZeroBytePayload(t *testing.T) { + if !testableNetwork("unixgram") { + t.Skip("unixgram test") + } + + c1 := newLocalPacketListener(t, "unixgram") + defer os.Remove(c1.LocalAddr().String()) + defer c1.Close() + + c2, err := Dial("unixgram", c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer os.Remove(c2.LocalAddr().String()) + defer c2.Close() + + for _, genericRead := range []bool{false, true} { + n, err := c2.Write(nil) + if err != nil { + t.Fatal(err) + } + if n != 0 { + t.Errorf("got %d; want 0", n) + } + c1.SetReadDeadline(time.Now().Add(100 * time.Millisecond)) + var b [1]byte + var peer Addr + if genericRead { + _, err = c1.(Conn).Read(b[:]) + } else { + _, peer, err = c1.ReadFrom(b[:]) + } + switch err { + case nil: // ReadFrom succeeds + if peer != nil { // peer is connected-mode + t.Fatalf("unexpected peer address: %v", peer) + } + default: // Read may timeout, it depends on the platform + if !isDeadlineExceeded(err) { + t.Fatal(err) + } + } + } +} + +func TestUnixgramZeroByteBuffer(t *testing.T) { + if !testableNetwork("unixgram") { + t.Skip("unixgram test") + } + // issue 4352: Recvfrom failed with "address family not + // supported by protocol family" if zero-length buffer provided + + c1 := newLocalPacketListener(t, "unixgram") + defer os.Remove(c1.LocalAddr().String()) + defer c1.Close() + + c2, err := Dial("unixgram", c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer os.Remove(c2.LocalAddr().String()) + defer c2.Close() + + b := []byte("UNIXGRAM ZERO BYTE BUFFER TEST") + for _, genericRead := range []bool{false, true} { + n, err := c2.Write(b) + if err != nil { + t.Fatal(err) + } + if n != len(b) { + t.Errorf("got %d; want %d", n, len(b)) + } + c1.SetReadDeadline(time.Now().Add(100 * time.Millisecond)) + var peer Addr + if genericRead { + _, err = c1.(Conn).Read(nil) + } else { + _, peer, err = c1.ReadFrom(nil) + } + switch err { + case nil: // ReadFrom succeeds + if peer != nil { // peer is connected-mode + 
t.Fatalf("unexpected peer address: %v", peer) + } + default: // Read may timeout, it depends on the platform + if !isDeadlineExceeded(err) { + t.Fatal(err) + } + } + } +} + +func TestUnixgramWrite(t *testing.T) { + if !testableNetwork("unixgram") { + t.Skip("unixgram test") + } + + addr := testUnixAddr(t) + laddr, err := ResolveUnixAddr("unixgram", addr) + if err != nil { + t.Fatal(err) + } + c, err := ListenPacket("unixgram", addr) + if err != nil { + t.Fatal(err) + } + defer os.Remove(addr) + defer c.Close() + + testUnixgramWriteConn(t, laddr) + testUnixgramWritePacketConn(t, laddr) +} + +func testUnixgramWriteConn(t *testing.T, raddr *UnixAddr) { + c, err := Dial("unixgram", raddr.String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + b := []byte("CONNECTED-MODE SOCKET") + if _, err := c.(*UnixConn).WriteToUnix(b, raddr); err == nil { + t.Fatal("should fail") + } else if err.(*OpError).Err != ErrWriteToConnected { + t.Fatalf("should fail as ErrWriteToConnected: %v", err) + } + if _, err = c.(*UnixConn).WriteTo(b, raddr); err == nil { + t.Fatal("should fail") + } else if err.(*OpError).Err != ErrWriteToConnected { + t.Fatalf("should fail as ErrWriteToConnected: %v", err) + } + if _, _, err = c.(*UnixConn).WriteMsgUnix(b, nil, raddr); err == nil { + t.Fatal("should fail") + } else if err.(*OpError).Err != ErrWriteToConnected { + t.Fatalf("should fail as ErrWriteToConnected: %v", err) + } + if _, err := c.Write(b); err != nil { + t.Fatal(err) + } +} + +func testUnixgramWritePacketConn(t *testing.T, raddr *UnixAddr) { + addr := testUnixAddr(t) + c, err := ListenPacket("unixgram", addr) + if err != nil { + t.Fatal(err) + } + defer os.Remove(addr) + defer c.Close() + + b := []byte("UNCONNECTED-MODE SOCKET") + if _, err := c.(*UnixConn).WriteToUnix(b, raddr); err != nil { + t.Fatal(err) + } + if _, err := c.WriteTo(b, raddr); err != nil { + t.Fatal(err) + } + if _, _, err := c.(*UnixConn).WriteMsgUnix(b, nil, raddr); err != nil { + t.Fatal(err) + } + if _, err := c.(*UnixConn).Write(b); err == nil { + t.Fatal("should fail") + } +} + +func TestUnixConnLocalAndRemoteNames(t *testing.T) { + if !testableNetwork("unix") { + t.Skip("unix test") + } + + handler := func(ls *localServer, ln Listener) {} + for _, laddr := range []string{"", testUnixAddr(t)} { + laddr := laddr + taddr := testUnixAddr(t) + ta, err := ResolveUnixAddr("unix", taddr) + if err != nil { + t.Fatal(err) + } + ln, err := ListenUnix("unix", ta) + if err != nil { + t.Fatal(err) + } + ls := (&streamListener{Listener: ln}).newLocalServer() + defer ls.teardown() + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + la, err := ResolveUnixAddr("unix", laddr) + if err != nil { + t.Fatal(err) + } + c, err := DialUnix("unix", la, ta) + if err != nil { + t.Fatal(err) + } + defer func() { + c.Close() + if la != nil { + defer os.Remove(laddr) + } + }() + if _, err := c.Write([]byte("UNIXCONN LOCAL AND REMOTE NAME TEST")); err != nil { + t.Fatal(err) + } + + switch runtime.GOOS { + case "android", "linux": + if laddr == "" { + laddr = "@" // autobind feature + } + } + var connAddrs = [3]struct{ got, want Addr }{ + {ln.Addr(), ta}, + {c.LocalAddr(), &UnixAddr{Name: laddr, Net: "unix"}}, + {c.RemoteAddr(), ta}, + } + for _, ca := range connAddrs { + if !reflect.DeepEqual(ca.got, ca.want) { + t.Fatalf("got %#v, expected %#v", ca.got, ca.want) + } + } + } +} + +func TestUnixgramConnLocalAndRemoteNames(t *testing.T) { + if !testableNetwork("unixgram") { + t.Skip("unixgram test") + } + + for _, laddr := range 
[]string{"", testUnixAddr(t)} { + laddr := laddr + taddr := testUnixAddr(t) + ta, err := ResolveUnixAddr("unixgram", taddr) + if err != nil { + t.Fatal(err) + } + c1, err := ListenUnixgram("unixgram", ta) + if err != nil { + t.Fatal(err) + } + defer func() { + c1.Close() + os.Remove(taddr) + }() + + var la *UnixAddr + if laddr != "" { + if la, err = ResolveUnixAddr("unixgram", laddr); err != nil { + t.Fatal(err) + } + } + c2, err := DialUnix("unixgram", la, ta) + if err != nil { + t.Fatal(err) + } + defer func() { + c2.Close() + if la != nil { + defer os.Remove(laddr) + } + }() + + switch runtime.GOOS { + case "android", "linux": + if laddr == "" { + laddr = "@" // autobind feature + } + } + + var connAddrs = [4]struct{ got, want Addr }{ + {c1.LocalAddr(), ta}, + {c1.RemoteAddr(), nil}, + {c2.LocalAddr(), &UnixAddr{Name: laddr, Net: "unixgram"}}, + {c2.RemoteAddr(), ta}, + } + for _, ca := range connAddrs { + if !reflect.DeepEqual(ca.got, ca.want) { + t.Fatalf("got %#v; want %#v", ca.got, ca.want) + } + } + } +} + +func TestUnixUnlink(t *testing.T) { + if !testableNetwork("unix") { + t.Skip("unix test") + } + switch runtime.GOOS { + case "js", "wasip1": + t.Skipf("skipping: %s does not support Unlink", runtime.GOOS) + } + + name := testUnixAddr(t) + + listen := func(t *testing.T) *UnixListener { + l, err := Listen("unix", name) + if err != nil { + t.Fatal(err) + } + return l.(*UnixListener) + } + checkExists := func(t *testing.T, desc string) { + if _, err := os.Stat(name); err != nil { + t.Fatalf("unix socket does not exist %s: %v", desc, err) + } + } + checkNotExists := func(t *testing.T, desc string) { + if _, err := os.Stat(name); err == nil { + t.Fatalf("unix socket does exist %s: %v", desc, err) + } + } + + // Listener should remove on close. + t.Run("Listen", func(t *testing.T) { + l := listen(t) + checkExists(t, "after Listen") + l.Close() + checkNotExists(t, "after Listener close") + }) + + // FileListener should not. + t.Run("FileListener", func(t *testing.T) { + l := listen(t) + f, _ := l.File() + l1, _ := FileListener(f) + checkExists(t, "after FileListener") + f.Close() + checkExists(t, "after File close") + l1.Close() + checkExists(t, "after FileListener close") + l.Close() + checkNotExists(t, "after Listener close") + }) + + // Only first call to l.Close should remove. + t.Run("SecondClose", func(t *testing.T) { + l := listen(t) + checkExists(t, "after Listen") + l.Close() + checkNotExists(t, "after Listener close") + if err := os.WriteFile(name, []byte("hello world"), 0666); err != nil { + t.Fatalf("cannot recreate socket file: %v", err) + } + checkExists(t, "after writing temp file") + l.Close() + checkExists(t, "after second Listener close") + os.Remove(name) + }) + + // SetUnlinkOnClose should do what it says. 
+ + t.Run("Listen/SetUnlinkOnClose(true)", func(t *testing.T) { + l := listen(t) + checkExists(t, "after Listen") + l.SetUnlinkOnClose(true) + l.Close() + checkNotExists(t, "after Listener close") + }) + + t.Run("Listen/SetUnlinkOnClose(false)", func(t *testing.T) { + l := listen(t) + checkExists(t, "after Listen") + l.SetUnlinkOnClose(false) + l.Close() + checkExists(t, "after Listener close") + os.Remove(name) + }) + + t.Run("FileListener/SetUnlinkOnClose(true)", func(t *testing.T) { + l := listen(t) + f, _ := l.File() + l1, _ := FileListener(f) + checkExists(t, "after FileListener") + l1.(*UnixListener).SetUnlinkOnClose(true) + f.Close() + checkExists(t, "after File close") + l1.Close() + checkNotExists(t, "after FileListener close") + l.Close() + }) + + t.Run("FileListener/SetUnlinkOnClose(false)", func(t *testing.T) { + l := listen(t) + f, _ := l.File() + l1, _ := FileListener(f) + checkExists(t, "after FileListener") + l1.(*UnixListener).SetUnlinkOnClose(false) + f.Close() + checkExists(t, "after File close") + l1.Close() + checkExists(t, "after FileListener close") + l.Close() + }) +} diff --git a/platform/dbops/binaries/go/go/src/net/unixsock_windows_test.go b/platform/dbops/binaries/go/go/src/net/unixsock_windows_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d541d89f78c1e875dca9c1b90578aeca985968e6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/unixsock_windows_test.go @@ -0,0 +1,97 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package net + +import ( + "internal/syscall/windows/registry" + "os" + "reflect" + "runtime" + "strconv" + "testing" +) + +func isBuild17063() bool { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.READ) + if err != nil { + return false + } + defer k.Close() + + s, _, err := k.GetStringValue("CurrentBuild") + if err != nil { + return false + } + ver, err := strconv.Atoi(s) + if err != nil { + return false + } + return ver >= 17063 +} + +func TestUnixConnLocalWindows(t *testing.T) { + switch runtime.GOARCH { + case "386": + t.Skip("not supported on windows/386, see golang.org/issue/27943") + case "arm": + t.Skip("not supported on windows/arm, see golang.org/issue/28061") + } + if !isBuild17063() { + t.Skip("unix test") + } + + handler := func(ls *localServer, ln Listener) {} + for _, laddr := range []string{"", testUnixAddr(t)} { + laddr := laddr + taddr := testUnixAddr(t) + ta, err := ResolveUnixAddr("unix", taddr) + if err != nil { + t.Fatal(err) + } + ln, err := ListenUnix("unix", ta) + if err != nil { + t.Fatal(err) + } + ls := (&streamListener{Listener: ln}).newLocalServer() + defer ls.teardown() + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + la, err := ResolveUnixAddr("unix", laddr) + if err != nil { + t.Fatal(err) + } + c, err := DialUnix("unix", la, ta) + if err != nil { + t.Fatal(err) + } + defer func() { + c.Close() + if la != nil { + defer os.Remove(laddr) + } + }() + if _, err := c.Write([]byte("UNIXCONN LOCAL AND REMOTE NAME TEST")); err != nil { + t.Fatal(err) + } + + if laddr == "" { + laddr = "@" + } + var connAddrs = [3]struct{ got, want Addr }{ + {ln.Addr(), ta}, + {c.LocalAddr(), &UnixAddr{Name: laddr, Net: "unix"}}, + {c.RemoteAddr(), ta}, + } + for _, ca := range connAddrs { + if !reflect.DeepEqual(ca.got, ca.want) { + t.Fatalf("got %#v, expected %#v", ca.got, ca.want) + } 
+ } + } +} diff --git a/platform/dbops/binaries/go/go/src/net/write_unix_test.go b/platform/dbops/binaries/go/go/src/net/write_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..23e8befa92dc9ece8265eab762a9033f815c7771 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/write_unix_test.go @@ -0,0 +1,66 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris + +package net + +import ( + "bytes" + "syscall" + "testing" + "time" +) + +// Test that a client can't trigger an endless loop of write system +// calls on the server by shutting down the write side on the client. +// Possibility raised in the discussion of https://golang.org/cl/71973. +func TestEndlessWrite(t *testing.T) { + t.Parallel() + c := make(chan bool) + server := func(cs *TCPConn) error { + cs.CloseWrite() + <-c + return nil + } + client := func(ss *TCPConn) error { + // Tell the server to return when we return. + defer close(c) + + // Loop writing to the server. The server is not reading + // anything, so this will eventually block, and then time out. + b := bytes.Repeat([]byte{'a'}, 8192) + cagain := 0 + for { + n, err := ss.conn.fd.pfd.WriteOnce(b) + if n > 0 { + cagain = 0 + } + switch err { + case nil: + case syscall.EAGAIN: + if cagain == 0 { + // We've written enough data to + // start blocking. Set a deadline + // so that we will stop. + ss.SetWriteDeadline(time.Now().Add(5 * time.Millisecond)) + } + cagain++ + if cagain > 20 { + t.Error("looping on EAGAIN") + return nil + } + if err = ss.conn.fd.pfd.WaitWrite(); err != nil { + t.Logf("client WaitWrite: %v", err) + return nil + } + default: + // We expect to eventually get an error. + t.Logf("client WriteOnce: %v", err) + return nil + } + } + } + withTCPConnPair(t, client, server) +} diff --git a/platform/dbops/binaries/go/go/src/net/writev_test.go b/platform/dbops/binaries/go/go/src/net/writev_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e4e88c4fac0354296d1607b6b45915478df5d4ea --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/writev_test.go @@ -0,0 +1,228 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "bytes" + "fmt" + "internal/poll" + "io" + "reflect" + "runtime" + "sync" + "testing" +) + +func TestBuffers_read(t *testing.T) { + const story = "once upon a time in Gopherland ... " + buffers := Buffers{ + []byte("once "), + []byte("upon "), + []byte("a "), + []byte("time "), + []byte("in "), + []byte("Gopherland ... 
"), + } + got, err := io.ReadAll(&buffers) + if err != nil { + t.Fatal(err) + } + if string(got) != story { + t.Errorf("read %q; want %q", got, story) + } + if len(buffers) != 0 { + t.Errorf("len(buffers) = %d; want 0", len(buffers)) + } +} + +func TestBuffers_consume(t *testing.T) { + tests := []struct { + in Buffers + consume int64 + want Buffers + }{ + { + in: Buffers{[]byte("foo"), []byte("bar")}, + consume: 0, + want: Buffers{[]byte("foo"), []byte("bar")}, + }, + { + in: Buffers{[]byte("foo"), []byte("bar")}, + consume: 2, + want: Buffers{[]byte("o"), []byte("bar")}, + }, + { + in: Buffers{[]byte("foo"), []byte("bar")}, + consume: 3, + want: Buffers{[]byte("bar")}, + }, + { + in: Buffers{[]byte("foo"), []byte("bar")}, + consume: 4, + want: Buffers{[]byte("ar")}, + }, + { + in: Buffers{nil, nil, nil, []byte("bar")}, + consume: 1, + want: Buffers{[]byte("ar")}, + }, + { + in: Buffers{nil, nil, nil, []byte("foo")}, + consume: 0, + want: Buffers{[]byte("foo")}, + }, + { + in: Buffers{nil, nil, nil}, + consume: 0, + want: Buffers{}, + }, + } + for i, tt := range tests { + in := tt.in + in.consume(tt.consume) + if !reflect.DeepEqual(in, tt.want) { + t.Errorf("%d. after consume(%d) = %+v, want %+v", i, tt.consume, in, tt.want) + } + } +} + +func TestBuffers_WriteTo(t *testing.T) { + for _, name := range []string{"WriteTo", "Copy"} { + for _, size := range []int{0, 10, 1023, 1024, 1025} { + t.Run(fmt.Sprintf("%s/%d", name, size), func(t *testing.T) { + testBuffer_writeTo(t, size, name == "Copy") + }) + } + } +} + +func testBuffer_writeTo(t *testing.T, chunks int, useCopy bool) { + oldHook := poll.TestHookDidWritev + defer func() { poll.TestHookDidWritev = oldHook }() + var writeLog struct { + sync.Mutex + log []int + } + poll.TestHookDidWritev = func(size int) { + writeLog.Lock() + writeLog.log = append(writeLog.log, size) + writeLog.Unlock() + } + var want bytes.Buffer + for i := 0; i < chunks; i++ { + want.WriteByte(byte(i)) + } + + withTCPConnPair(t, func(c *TCPConn) error { + buffers := make(Buffers, chunks) + for i := range buffers { + buffers[i] = want.Bytes()[i : i+1] + } + var n int64 + var err error + if useCopy { + n, err = io.Copy(c, &buffers) + } else { + n, err = buffers.WriteTo(c) + } + if err != nil { + return err + } + if len(buffers) != 0 { + return fmt.Errorf("len(buffers) = %d; want 0", len(buffers)) + } + if n != int64(want.Len()) { + return fmt.Errorf("Buffers.WriteTo returned %d; want %d", n, want.Len()) + } + return nil + }, func(c *TCPConn) error { + all, err := io.ReadAll(c) + if !bytes.Equal(all, want.Bytes()) || err != nil { + return fmt.Errorf("client read %q, %v; want %q, nil", all, err, want.Bytes()) + } + + writeLog.Lock() // no need to unlock + var gotSum int + for _, v := range writeLog.log { + gotSum += v + } + + var wantSum int + switch runtime.GOOS { + case "aix", "android", "darwin", "ios", "dragonfly", "freebsd", "illumos", "linux", "netbsd", "openbsd", "solaris": + var wantMinCalls int + wantSum = want.Len() + v := chunks + for v > 0 { + wantMinCalls++ + v -= 1024 + } + if len(writeLog.log) < wantMinCalls { + t.Errorf("write calls = %v < wanted min %v", len(writeLog.log), wantMinCalls) + } + case "windows": + var wantCalls int + wantSum = want.Len() + if wantSum > 0 { + wantCalls = 1 // windows will always do 1 syscall, unless sending empty buffer + } + if len(writeLog.log) != wantCalls { + t.Errorf("write calls = %v; want %v", len(writeLog.log), wantCalls) + } + } + if gotSum != wantSum { + t.Errorf("writev call sum = %v; want %v", gotSum, wantSum) + } 
+ return nil + }) +} + +func TestWritevError(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skipf("skipping the test: windows does not have problem sending large chunks of data") + } + + ln := newLocalListener(t, "tcp") + + ch := make(chan Conn, 1) + defer func() { + ln.Close() + for c := range ch { + c.Close() + } + }() + + go func() { + defer close(ch) + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + ch <- c + }() + c1, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c1.Close() + c2 := <-ch + if c2 == nil { + t.Fatal("no server side connection") + } + c2.Close() + + // 1 GB of data should be enough to notice the connection is gone. + // Just a few bytes is not enough. + // Arrange to reuse the same 1 MB buffer so that we don't allocate much. + buf := make([]byte, 1<<20) + buffers := make(Buffers, 1<<10) + for i := range buffers { + buffers[i] = buf + } + if _, err := buffers.WriteTo(c1); err == nil { + t.Fatal("Buffers.WriteTo(closed conn) succeeded, want error") + } +} diff --git a/platform/dbops/binaries/go/go/src/net/writev_unix.go b/platform/dbops/binaries/go/go/src/net/writev_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..3b0325bf640359696516e80ae84b2aae1ec4803f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/net/writev_unix.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package net + +import ( + "runtime" + "syscall" +) + +func (c *conn) writeBuffers(v *Buffers) (int64, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + n, err := c.fd.writeBuffers(v) + if err != nil { + return n, &OpError{Op: "writev", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return n, nil +} + +func (fd *netFD) writeBuffers(v *Buffers) (n int64, err error) { + n, err = fd.pfd.Writev((*[][]byte)(v)) + runtime.KeepAlive(fd) + return n, wrapSyscallError("writev", err) +} diff --git a/platform/dbops/binaries/go/go/src/os/dir.go b/platform/dbops/binaries/go/go/src/os/dir.go new file mode 100644 index 0000000000000000000000000000000000000000..5306bcb3ba7c2b1e8f51313805263965f06bdb07 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/dir.go @@ -0,0 +1,125 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "io/fs" + "sort" +) + +type readdirMode int + +const ( + readdirName readdirMode = iota + readdirDirEntry + readdirFileInfo +) + +// Readdir reads the contents of the directory associated with file and +// returns a slice of up to n FileInfo values, as would be returned +// by Lstat, in directory order. Subsequent calls on the same file will yield +// further FileInfos. +// +// If n > 0, Readdir returns at most n FileInfo structures. In this case, if +// Readdir returns an empty slice, it will return a non-nil error +// explaining why. At the end of a directory, the error is io.EOF. +// +// If n <= 0, Readdir returns all the FileInfo from the directory in +// a single slice. In this case, if Readdir succeeds (reads all +// the way to the end of the directory), it returns the slice and a +// nil error. If it encounters an error before the end of the +// directory, Readdir returns the FileInfo read until that point +// and a non-nil error. 
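+//
+// For example, to read a directory in batches of 100 entries
+// (an illustrative sketch; f is an open directory *File):
+//
+//	for {
+//		infos, err := f.Readdir(100)
+//		for _, info := range infos {
+//			fmt.Println(info.Name())
+//		}
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//	}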
+// +// Most clients are better served by the more efficient ReadDir method. +func (f *File) Readdir(n int) ([]FileInfo, error) { + if f == nil { + return nil, ErrInvalid + } + _, _, infos, err := f.readdir(n, readdirFileInfo) + if infos == nil { + // Readdir has historically always returned a non-nil empty slice, never nil, + // even on error (except misuse with nil receiver above). + // Keep it that way to avoid breaking overly sensitive callers. + infos = []FileInfo{} + } + return infos, err +} + +// Readdirnames reads the contents of the directory associated with file +// and returns a slice of up to n names of files in the directory, +// in directory order. Subsequent calls on the same file will yield +// further names. +// +// If n > 0, Readdirnames returns at most n names. In this case, if +// Readdirnames returns an empty slice, it will return a non-nil error +// explaining why. At the end of a directory, the error is io.EOF. +// +// If n <= 0, Readdirnames returns all the names from the directory in +// a single slice. In this case, if Readdirnames succeeds (reads all +// the way to the end of the directory), it returns the slice and a +// nil error. If it encounters an error before the end of the +// directory, Readdirnames returns the names read until that point and +// a non-nil error. +func (f *File) Readdirnames(n int) (names []string, err error) { + if f == nil { + return nil, ErrInvalid + } + names, _, _, err = f.readdir(n, readdirName) + if names == nil { + // Readdirnames has historically always returned a non-nil empty slice, never nil, + // even on error (except misuse with nil receiver above). + // Keep it that way to avoid breaking overly sensitive callers. + names = []string{} + } + return names, err +} + +// A DirEntry is an entry read from a directory +// (using the ReadDir function or a File's ReadDir method). +type DirEntry = fs.DirEntry + +// ReadDir reads the contents of the directory associated with the file f +// and returns a slice of DirEntry values in directory order. +// Subsequent calls on the same file will yield later DirEntry records in the directory. +// +// If n > 0, ReadDir returns at most n DirEntry records. +// In this case, if ReadDir returns an empty slice, it will return an error explaining why. +// At the end of a directory, the error is io.EOF. +// +// If n <= 0, ReadDir returns all the DirEntry records remaining in the directory. +// When it succeeds, it returns a nil error (not io.EOF). +func (f *File) ReadDir(n int) ([]DirEntry, error) { + if f == nil { + return nil, ErrInvalid + } + _, dirents, _, err := f.readdir(n, readdirDirEntry) + if dirents == nil { + // Match Readdir and Readdirnames: don't return nil slices. + dirents = []DirEntry{} + } + return dirents, err +} + +// testingForceReadDirLstat forces ReadDir to call Lstat, for testing that code path. +// This can be difficult to provoke on some Unix systems otherwise. +var testingForceReadDirLstat bool + +// ReadDir reads the named directory, +// returning all its directory entries sorted by filename. +// If an error occurs reading the directory, +// ReadDir returns the entries it was able to read before the error, +// along with the error. 
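+//
+// For example (illustrative; prints the sorted entry names of the
+// current directory):
+//
+//	entries, err := os.ReadDir(".")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, entry := range entries {
+//		fmt.Println(entry.Name())
+//	}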
+func ReadDir(name string) ([]DirEntry, error) { + f, err := Open(name) + if err != nil { + return nil, err + } + defer f.Close() + + dirs, err := f.ReadDir(-1) + sort.Slice(dirs, func(i, j int) bool { return dirs[i].Name() < dirs[j].Name() }) + return dirs, err +} diff --git a/platform/dbops/binaries/go/go/src/os/dir_darwin.go b/platform/dbops/binaries/go/go/src/os/dir_darwin.go new file mode 100644 index 0000000000000000000000000000000000000000..e6d5bda24bdd490c2744c5cf5e4782fd72f1a709 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/dir_darwin.go @@ -0,0 +1,140 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "io" + "runtime" + "syscall" + "unsafe" +) + +// Auxiliary information if the File describes a directory +type dirInfo struct { + dir uintptr // Pointer to DIR structure from dirent.h +} + +func (d *dirInfo) close() { + if d.dir == 0 { + return + } + closedir(d.dir) + d.dir = 0 +} + +func (f *File) readdir(n int, mode readdirMode) (names []string, dirents []DirEntry, infos []FileInfo, err error) { + if f.dirinfo == nil { + dir, call, errno := f.pfd.OpenDir() + if errno != nil { + return nil, nil, nil, &PathError{Op: call, Path: f.name, Err: errno} + } + f.dirinfo = &dirInfo{ + dir: dir, + } + } + d := f.dirinfo + + size := n + if size <= 0 { + size = 100 + n = -1 + } + + var dirent syscall.Dirent + var entptr *syscall.Dirent + for len(names)+len(dirents)+len(infos) < size || n == -1 { + if errno := readdir_r(d.dir, &dirent, &entptr); errno != 0 { + if errno == syscall.EINTR { + continue + } + return names, dirents, infos, &PathError{Op: "readdir", Path: f.name, Err: errno} + } + if entptr == nil { // EOF + break + } + // Darwin may return a zero inode when a directory entry has been + // deleted but not yet removed from the directory. The man page for + // getdirentries(2) states that programs are responsible for skipping + // those entries: + // + // Users of getdirentries() should skip entries with d_fileno = 0, + // as such entries represent files which have been deleted but not + // yet removed from the directory entry. + // + if dirent.Ino == 0 { + continue + } + name := (*[len(syscall.Dirent{}.Name)]byte)(unsafe.Pointer(&dirent.Name))[:] + for i, c := range name { + if c == 0 { + name = name[:i] + break + } + } + // Check for useless names before allocating a string. + if string(name) == "." || string(name) == ".." { + continue + } + if mode == readdirName { + names = append(names, string(name)) + } else if mode == readdirDirEntry { + de, err := newUnixDirent(f.name, string(name), dtToType(dirent.Type)) + if IsNotExist(err) { + // File disappeared between readdir and stat. + // Treat as if it didn't exist. + continue + } + if err != nil { + return nil, dirents, nil, err + } + dirents = append(dirents, de) + } else { + info, err := lstat(f.name + "/" + string(name)) + if IsNotExist(err) { + // File disappeared between readdir + stat. + // Treat as if it didn't exist. 
+ continue + } + if err != nil { + return nil, nil, infos, err + } + infos = append(infos, info) + } + runtime.KeepAlive(f) + } + + if n > 0 && len(names)+len(dirents)+len(infos) == 0 { + return nil, nil, nil, io.EOF + } + return names, dirents, infos, nil +} + +func dtToType(typ uint8) FileMode { + switch typ { + case syscall.DT_BLK: + return ModeDevice + case syscall.DT_CHR: + return ModeDevice | ModeCharDevice + case syscall.DT_DIR: + return ModeDir + case syscall.DT_FIFO: + return ModeNamedPipe + case syscall.DT_LNK: + return ModeSymlink + case syscall.DT_REG: + return 0 + case syscall.DT_SOCK: + return ModeSocket + } + return ^FileMode(0) +} + +// Implemented in syscall/syscall_darwin.go. + +//go:linkname closedir syscall.closedir +func closedir(dir uintptr) (err error) + +//go:linkname readdir_r syscall.readdir_r +func readdir_r(dir uintptr, entry *syscall.Dirent, result **syscall.Dirent) (res syscall.Errno) diff --git a/platform/dbops/binaries/go/go/src/os/dir_plan9.go b/platform/dbops/binaries/go/go/src/os/dir_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..6ea5940e71df9fb58b6ce61fba086663cd33e60d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/dir_plan9.go @@ -0,0 +1,86 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "io" + "io/fs" + "syscall" +) + +func (file *File) readdir(n int, mode readdirMode) (names []string, dirents []DirEntry, infos []FileInfo, err error) { + // If this file has no dirinfo, create one. + if file.dirinfo == nil { + file.dirinfo = new(dirInfo) + } + d := file.dirinfo + size := n + if size <= 0 { + size = 100 + n = -1 + } + for n != 0 { + // Refill the buffer if necessary. + if d.bufp >= d.nbuf { + nb, err := file.Read(d.buf[:]) + + // Update the buffer state before checking for errors. + d.bufp, d.nbuf = 0, nb + + if err != nil { + if err == io.EOF { + break + } + return names, dirents, infos, &PathError{Op: "readdir", Path: file.name, Err: err} + } + if nb < syscall.STATFIXLEN { + return names, dirents, infos, &PathError{Op: "readdir", Path: file.name, Err: syscall.ErrShortStat} + } + } + + // Get a record from the buffer. 
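+		// Each record begins with a 2-byte little-endian size field
+		// (which does not count itself), followed by that many bytes
+		// of marshaled stat data; the full record is size+2 bytes.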
+ b := d.buf[d.bufp:] + m := int(uint16(b[0])|uint16(b[1])<<8) + 2 + if m < syscall.STATFIXLEN { + return names, dirents, infos, &PathError{Op: "readdir", Path: file.name, Err: syscall.ErrShortStat} + } + + dir, err := syscall.UnmarshalDir(b[:m]) + if err != nil { + return names, dirents, infos, &PathError{Op: "readdir", Path: file.name, Err: err} + } + + if mode == readdirName { + names = append(names, dir.Name) + } else { + f := fileInfoFromStat(dir) + if mode == readdirDirEntry { + dirents = append(dirents, dirEntry{f}) + } else { + infos = append(infos, f) + } + } + d.bufp += m + n-- + } + + if n > 0 && len(names)+len(dirents)+len(infos) == 0 { + return nil, nil, nil, io.EOF + } + return names, dirents, infos, nil +} + +type dirEntry struct { + fs *fileStat +} + +func (de dirEntry) Name() string { return de.fs.Name() } +func (de dirEntry) IsDir() bool { return de.fs.IsDir() } +func (de dirEntry) Type() FileMode { return de.fs.Mode().Type() } +func (de dirEntry) Info() (FileInfo, error) { return de.fs, nil } + +func (de dirEntry) String() string { + return fs.FormatDirEntry(de) +} diff --git a/platform/dbops/binaries/go/go/src/os/dir_unix.go b/platform/dbops/binaries/go/go/src/os/dir_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..266a78acafce5ec9d7ed06aa3f4e99f9f43ce62a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/dir_unix.go @@ -0,0 +1,198 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || dragonfly || freebsd || (js && wasm) || wasip1 || linux || netbsd || openbsd || solaris + +package os + +import ( + "io" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Auxiliary information if the File describes a directory +type dirInfo struct { + buf *[]byte // buffer for directory I/O + nbuf int // length of buf; return value from Getdirentries + bufp int // location of next record in buf. +} + +const ( + // More than 5760 to work around https://golang.org/issue/24015. + blockSize = 8192 +) + +var dirBufPool = sync.Pool{ + New: func() any { + // The buffer must be at least a block long. + buf := make([]byte, blockSize) + return &buf + }, +} + +func (d *dirInfo) close() { + if d.buf != nil { + dirBufPool.Put(d.buf) + d.buf = nil + } +} + +func (f *File) readdir(n int, mode readdirMode) (names []string, dirents []DirEntry, infos []FileInfo, err error) { + // If this file has no dirinfo, create one. + if f.dirinfo == nil { + f.dirinfo = new(dirInfo) + f.dirinfo.buf = dirBufPool.Get().(*[]byte) + } + d := f.dirinfo + + // Change the meaning of n for the implementation below. + // + // The n above was for the public interface of "if n <= 0, + // Readdir returns all the FileInfo from the directory in a + // single slice". + // + // But below, we use only negative to mean looping until the + // end and positive to mean bounded, with positive + // terminating at 0. 
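+	//
+	// So from here on, n < 0 means "read to EOF" (callers passing 0
+	// or any negative value end up there), and n > 0 counts down
+	// toward 0 as entries are consumed.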
+ if n == 0 { + n = -1 + } + + for n != 0 { + // Refill the buffer if necessary + if d.bufp >= d.nbuf { + d.bufp = 0 + var errno error + d.nbuf, errno = f.pfd.ReadDirent(*d.buf) + runtime.KeepAlive(f) + if errno != nil { + return names, dirents, infos, &PathError{Op: "readdirent", Path: f.name, Err: errno} + } + if d.nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + buf := (*d.buf)[d.bufp:d.nbuf] + reclen, ok := direntReclen(buf) + if !ok || reclen > uint64(len(buf)) { + break + } + rec := buf[:reclen] + d.bufp += int(reclen) + ino, ok := direntIno(rec) + if !ok { + break + } + // When building to wasip1, the host runtime might be running on Windows + // or might expose a remote file system which does not have the concept + // of inodes. Therefore, we cannot make the assumption that it is safe + // to skip entries with zero inodes. + if ino == 0 && runtime.GOOS != "wasip1" { + continue + } + const namoff = uint64(unsafe.Offsetof(syscall.Dirent{}.Name)) + namlen, ok := direntNamlen(rec) + if !ok || namoff+namlen > uint64(len(rec)) { + break + } + name := rec[namoff : namoff+namlen] + for i, c := range name { + if c == 0 { + name = name[:i] + break + } + } + // Check for useless names before allocating a string. + if string(name) == "." || string(name) == ".." { + continue + } + if n > 0 { // see 'n == 0' comment above + n-- + } + if mode == readdirName { + names = append(names, string(name)) + } else if mode == readdirDirEntry { + de, err := newUnixDirent(f.name, string(name), direntType(rec)) + if IsNotExist(err) { + // File disappeared between readdir and stat. + // Treat as if it didn't exist. + continue + } + if err != nil { + return nil, dirents, nil, err + } + dirents = append(dirents, de) + } else { + info, err := lstat(f.name + "/" + string(name)) + if IsNotExist(err) { + // File disappeared between readdir + stat. + // Treat as if it didn't exist. + continue + } + if err != nil { + return nil, nil, infos, err + } + infos = append(infos, info) + } + } + + if n > 0 && len(names)+len(dirents)+len(infos) == 0 { + return nil, nil, nil, io.EOF + } + return names, dirents, infos, nil +} + +// readInt returns the size-bytes unsigned integer in native byte order at offset off. 
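+//
+// For example, on a little-endian machine (illustrative):
+//
+//	b := []byte{0x01, 0x02, 0x03}
+//	u, ok := readInt(b, 1, 2) // u == 0x0302, ok == true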
+func readInt(b []byte, off, size uintptr) (u uint64, ok bool) { + if len(b) < int(off+size) { + return 0, false + } + if isBigEndian { + return readIntBE(b[off:], size), true + } + return readIntLE(b[off:], size), true +} + +func readIntBE(b []byte, size uintptr) uint64 { + switch size { + case 1: + return uint64(b[0]) + case 2: + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[1]) | uint64(b[0])<<8 + case 4: + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24 + case 8: + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 + default: + panic("syscall: readInt with unsupported size") + } +} + +func readIntLE(b []byte, size uintptr) uint64 { + switch size { + case 1: + return uint64(b[0]) + case 2: + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 + case 4: + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 + case 8: + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + default: + panic("syscall: readInt with unsupported size") + } +} diff --git a/platform/dbops/binaries/go/go/src/os/dir_windows.go b/platform/dbops/binaries/go/go/src/os/dir_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..4485dffdb184e164db696a34f57b93918c92ac67 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/dir_windows.go @@ -0,0 +1,204 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "internal/syscall/windows" + "io" + "io/fs" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Auxiliary information if the File describes a directory +type dirInfo struct { + // buf is a slice pointer so the slice header + // does not escape to the heap when returning + // buf to dirBufPool. + buf *[]byte // buffer for directory I/O + bufp int // location of next record in buf + vol uint32 + class uint32 // type of entries in buf + path string // absolute directory path, empty if the file system supports FILE_ID_BOTH_DIR_INFO +} + +const ( + // dirBufSize is the size of the dirInfo buffer. + // The buffer must be big enough to hold at least a single entry. + // The filename alone can be 512 bytes (MAX_PATH*2), and the fixed part of + // the FILE_ID_BOTH_DIR_INFO structure is 105 bytes, so dirBufSize + // should not be set below 1024 bytes (512+105+safety buffer). + // Windows 8.1 and earlier only works with buffer sizes up to 64 kB. + dirBufSize = 64 * 1024 // 64kB +) + +var dirBufPool = sync.Pool{ + New: func() any { + // The buffer must be at least a block long. + buf := make([]byte, dirBufSize) + return &buf + }, +} + +func (d *dirInfo) close() { + if d.buf != nil { + dirBufPool.Put(d.buf) + d.buf = nil + } +} + +// allowReadDirFileID indicates whether File.readdir should try to use FILE_ID_BOTH_DIR_INFO +// if the underlying file system supports it. +// Useful for testing purposes. 
+var allowReadDirFileID = true + +func (file *File) readdir(n int, mode readdirMode) (names []string, dirents []DirEntry, infos []FileInfo, err error) { + // If this file has no dirinfo, create one. + if file.dirinfo == nil { + // vol is used by os.SameFile. + // It is safe to query it once and reuse the value. + // Hard links are not allowed to reference files in other volumes. + // Junctions and symbolic links can reference files and directories in other volumes, + // but the reparse point should still live in the parent volume. + var vol, flags uint32 + err = windows.GetVolumeInformationByHandle(file.pfd.Sysfd, nil, 0, &vol, nil, &flags, nil, 0) + runtime.KeepAlive(file) + if err != nil { + err = &PathError{Op: "readdir", Path: file.name, Err: err} + return + } + file.dirinfo = new(dirInfo) + file.dirinfo.buf = dirBufPool.Get().(*[]byte) + file.dirinfo.vol = vol + if allowReadDirFileID && flags&windows.FILE_SUPPORTS_OPEN_BY_FILE_ID != 0 { + file.dirinfo.class = windows.FileIdBothDirectoryRestartInfo + } else { + file.dirinfo.class = windows.FileFullDirectoryRestartInfo + // Set the directory path for use by os.SameFile, as it is possible that + // the file system supports retrieving the file ID using GetFileInformationByHandle. + file.dirinfo.path = file.name + if !isAbs(file.dirinfo.path) { + // If the path is relative, we need to convert it to an absolute path + // in case the current directory changes between this call and a + // call to os.SameFile. + file.dirinfo.path, err = syscall.FullPath(file.dirinfo.path) + if err != nil { + err = &PathError{Op: "readdir", Path: file.name, Err: err} + return + } + } + } + } + d := file.dirinfo + wantAll := n <= 0 + if wantAll { + n = -1 + } + for n != 0 { + // Refill the buffer if necessary + if d.bufp == 0 { + err = windows.GetFileInformationByHandleEx(file.pfd.Sysfd, d.class, (*byte)(unsafe.Pointer(&(*d.buf)[0])), uint32(len(*d.buf))) + runtime.KeepAlive(file) + if err != nil { + if err == syscall.ERROR_NO_MORE_FILES { + break + } + if err == syscall.ERROR_FILE_NOT_FOUND && + (d.class == windows.FileIdBothDirectoryRestartInfo || d.class == windows.FileFullDirectoryRestartInfo) { + // GetFileInformationByHandleEx doesn't document the return error codes when the info class is FileIdBothDirectoryRestartInfo, + // but MS-FSA 2.1.5.6.3 [1] specifies that the underlying file system driver should return STATUS_NO_SUCH_FILE when + // reading an empty root directory, which is mapped to ERROR_FILE_NOT_FOUND by Windows. + // Note that some file system drivers may never return this error code, as the spec allows to return the "." and ".." + // entries in such cases, making the directory appear non-empty. + // The chances of false positive are very low, as we know that the directory exists, else GetVolumeInformationByHandle + // would have failed, and that the handle is still valid, as we haven't closed it. + // See go.dev/issue/61159. 
+ // [1] https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-fsa/fa8194e0-53ec-413b-8315-e8fa85396fd8 + break + } + if s, _ := file.Stat(); s != nil && !s.IsDir() { + err = &PathError{Op: "readdir", Path: file.name, Err: syscall.ENOTDIR} + } else { + err = &PathError{Op: "GetFileInformationByHandleEx", Path: file.name, Err: err} + } + return + } + if d.class == windows.FileIdBothDirectoryRestartInfo { + d.class = windows.FileIdBothDirectoryInfo + } else if d.class == windows.FileFullDirectoryRestartInfo { + d.class = windows.FileFullDirectoryInfo + } + } + // Drain the buffer + var islast bool + for n != 0 && !islast { + var nextEntryOffset uint32 + var nameslice []uint16 + entry := unsafe.Pointer(&(*d.buf)[d.bufp]) + if d.class == windows.FileIdBothDirectoryInfo { + info := (*windows.FILE_ID_BOTH_DIR_INFO)(entry) + nextEntryOffset = info.NextEntryOffset + nameslice = unsafe.Slice(&info.FileName[0], info.FileNameLength/2) + } else { + info := (*windows.FILE_FULL_DIR_INFO)(entry) + nextEntryOffset = info.NextEntryOffset + nameslice = unsafe.Slice(&info.FileName[0], info.FileNameLength/2) + } + d.bufp += int(nextEntryOffset) + islast = nextEntryOffset == 0 + if islast { + d.bufp = 0 + } + if (len(nameslice) == 1 && nameslice[0] == '.') || + (len(nameslice) == 2 && nameslice[0] == '.' && nameslice[1] == '.') { + // Ignore "." and ".." and avoid allocating a string for them. + continue + } + name := syscall.UTF16ToString(nameslice) + if mode == readdirName { + names = append(names, name) + } else { + var f *fileStat + if d.class == windows.FileIdBothDirectoryInfo { + f = newFileStatFromFileIDBothDirInfo((*windows.FILE_ID_BOTH_DIR_INFO)(entry)) + } else { + f = newFileStatFromFileFullDirInfo((*windows.FILE_FULL_DIR_INFO)(entry)) + // Defer appending the entry name to the parent directory path until + // it is really needed, to avoid allocating a string that may not be used. + // It is currently only used in os.SameFile. + f.appendNameToPath = true + f.path = d.path + } + f.name = name + f.vol = d.vol + if mode == readdirDirEntry { + dirents = append(dirents, dirEntry{f}) + } else { + infos = append(infos, f) + } + } + n-- + } + } + if !wantAll && len(names)+len(dirents)+len(infos) == 0 { + return nil, nil, nil, io.EOF + } + return names, dirents, infos, nil +} + +type dirEntry struct { + fs *fileStat +} + +func (de dirEntry) Name() string { return de.fs.Name() } +func (de dirEntry) IsDir() bool { return de.fs.IsDir() } +func (de dirEntry) Type() FileMode { return de.fs.Mode().Type() } +func (de dirEntry) Info() (FileInfo, error) { return de.fs, nil } + +func (de dirEntry) String() string { + return fs.FormatDirEntry(de) +} diff --git a/platform/dbops/binaries/go/go/src/os/dirent_aix.go b/platform/dbops/binaries/go/go/src/os/dirent_aix.go new file mode 100644 index 0000000000000000000000000000000000000000..5597b8af20e3d82b5894e61f91786179f58d5391 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/dirent_aix.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
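+//
+// The dirent* helpers below feed the portable readdir code, which walks a raw
+// buffer of records, using direntReclen to advance and direntNamlen to slice
+// out each name. A rough sketch of that consuming loop (hypothetical local
+// names; the real loop lives in the Unix readdir implementation):
+//
+//	for len(buf) > 0 {
+//		reclen, ok := direntReclen(buf)
+//		if !ok || reclen > uint64(len(buf)) {
+//			break
+//		}
+//		rec := buf[:reclen]
+//		buf = buf[reclen:]
+//		// decode rec with direntIno, direntNamlen and direntType
+//	}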
+ +package os + +import ( + "syscall" + "unsafe" +) + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Ino), unsafe.Sizeof(syscall.Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Reclen), unsafe.Sizeof(syscall.Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false + } + return reclen - uint64(unsafe.Offsetof(syscall.Dirent{}.Name)), true +} + +func direntType(buf []byte) FileMode { + return ^FileMode(0) // unknown +} diff --git a/platform/dbops/binaries/go/go/src/os/dirent_dragonfly.go b/platform/dbops/binaries/go/go/src/os/dirent_dragonfly.go new file mode 100644 index 0000000000000000000000000000000000000000..38cbd61ed33ae57c0c740a4a7f33b5dc40176d1f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/dirent_dragonfly.go @@ -0,0 +1,55 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "unsafe" +) + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Fileno), unsafe.Sizeof(syscall.Dirent{}.Fileno)) +} + +func direntReclen(buf []byte) (uint64, bool) { + namlen, ok := direntNamlen(buf) + if !ok { + return 0, false + } + return (16 + namlen + 1 + 7) &^ 7, true +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Namlen), unsafe.Sizeof(syscall.Dirent{}.Namlen)) +} + +func direntType(buf []byte) FileMode { + off := unsafe.Offsetof(syscall.Dirent{}.Type) + if off >= uintptr(len(buf)) { + return ^FileMode(0) // unknown + } + typ := buf[off] + switch typ { + case syscall.DT_BLK: + return ModeDevice + case syscall.DT_CHR: + return ModeDevice | ModeCharDevice + case syscall.DT_DBF: + // DT_DBF is "database record file". + // fillFileStatFromSys treats as regular file. + return 0 + case syscall.DT_DIR: + return ModeDir + case syscall.DT_FIFO: + return ModeNamedPipe + case syscall.DT_LNK: + return ModeSymlink + case syscall.DT_REG: + return 0 + case syscall.DT_SOCK: + return ModeSocket + } + return ^FileMode(0) // unknown +} diff --git a/platform/dbops/binaries/go/go/src/os/dirent_freebsd.go b/platform/dbops/binaries/go/go/src/os/dirent_freebsd.go new file mode 100644 index 0000000000000000000000000000000000000000..d600837ebb18a962409e05535ce54d38ad2a12eb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/dirent_freebsd.go @@ -0,0 +1,47 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package os + +import ( + "syscall" + "unsafe" +) + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Fileno), unsafe.Sizeof(syscall.Dirent{}.Fileno)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Reclen), unsafe.Sizeof(syscall.Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Namlen), unsafe.Sizeof(syscall.Dirent{}.Namlen)) +} + +func direntType(buf []byte) FileMode { + off := unsafe.Offsetof(syscall.Dirent{}.Type) + if off >= uintptr(len(buf)) { + return ^FileMode(0) // unknown + } + typ := buf[off] + switch typ { + case syscall.DT_BLK: + return ModeDevice + case syscall.DT_CHR: + return ModeDevice | ModeCharDevice + case syscall.DT_DIR: + return ModeDir + case syscall.DT_FIFO: + return ModeNamedPipe + case syscall.DT_LNK: + return ModeSymlink + case syscall.DT_REG: + return 0 + case syscall.DT_SOCK: + return ModeSocket + } + return ^FileMode(0) // unknown +} diff --git a/platform/dbops/binaries/go/go/src/os/dirent_js.go b/platform/dbops/binaries/go/go/src/os/dirent_js.go new file mode 100644 index 0000000000000000000000000000000000000000..31778c2ad8547e0098950730a1c978f1ca754dd6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/dirent_js.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "unsafe" +) + +func direntIno(buf []byte) (uint64, bool) { + return 1, true +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Reclen), unsafe.Sizeof(syscall.Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false + } + return reclen - uint64(unsafe.Offsetof(syscall.Dirent{}.Name)), true +} + +func direntType(buf []byte) FileMode { + return ^FileMode(0) // unknown +} diff --git a/platform/dbops/binaries/go/go/src/os/dirent_linux.go b/platform/dbops/binaries/go/go/src/os/dirent_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..74a34311212351e5e559d3da62671e6171053c98 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/dirent_linux.go @@ -0,0 +1,51 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package os + +import ( + "syscall" + "unsafe" +) + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Ino), unsafe.Sizeof(syscall.Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Reclen), unsafe.Sizeof(syscall.Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false + } + return reclen - uint64(unsafe.Offsetof(syscall.Dirent{}.Name)), true +} + +func direntType(buf []byte) FileMode { + off := unsafe.Offsetof(syscall.Dirent{}.Type) + if off >= uintptr(len(buf)) { + return ^FileMode(0) // unknown + } + typ := buf[off] + switch typ { + case syscall.DT_BLK: + return ModeDevice + case syscall.DT_CHR: + return ModeDevice | ModeCharDevice + case syscall.DT_DIR: + return ModeDir + case syscall.DT_FIFO: + return ModeNamedPipe + case syscall.DT_LNK: + return ModeSymlink + case syscall.DT_REG: + return 0 + case syscall.DT_SOCK: + return ModeSocket + } + return ^FileMode(0) // unknown +} diff --git a/platform/dbops/binaries/go/go/src/os/dirent_netbsd.go b/platform/dbops/binaries/go/go/src/os/dirent_netbsd.go new file mode 100644 index 0000000000000000000000000000000000000000..d600837ebb18a962409e05535ce54d38ad2a12eb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/dirent_netbsd.go @@ -0,0 +1,47 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "unsafe" +) + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Fileno), unsafe.Sizeof(syscall.Dirent{}.Fileno)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Reclen), unsafe.Sizeof(syscall.Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Namlen), unsafe.Sizeof(syscall.Dirent{}.Namlen)) +} + +func direntType(buf []byte) FileMode { + off := unsafe.Offsetof(syscall.Dirent{}.Type) + if off >= uintptr(len(buf)) { + return ^FileMode(0) // unknown + } + typ := buf[off] + switch typ { + case syscall.DT_BLK: + return ModeDevice + case syscall.DT_CHR: + return ModeDevice | ModeCharDevice + case syscall.DT_DIR: + return ModeDir + case syscall.DT_FIFO: + return ModeNamedPipe + case syscall.DT_LNK: + return ModeSymlink + case syscall.DT_REG: + return 0 + case syscall.DT_SOCK: + return ModeSocket + } + return ^FileMode(0) // unknown +} diff --git a/platform/dbops/binaries/go/go/src/os/dirent_openbsd.go b/platform/dbops/binaries/go/go/src/os/dirent_openbsd.go new file mode 100644 index 0000000000000000000000000000000000000000..d600837ebb18a962409e05535ce54d38ad2a12eb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/dirent_openbsd.go @@ -0,0 +1,47 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package os + +import ( + "syscall" + "unsafe" +) + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Fileno), unsafe.Sizeof(syscall.Dirent{}.Fileno)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Reclen), unsafe.Sizeof(syscall.Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Namlen), unsafe.Sizeof(syscall.Dirent{}.Namlen)) +} + +func direntType(buf []byte) FileMode { + off := unsafe.Offsetof(syscall.Dirent{}.Type) + if off >= uintptr(len(buf)) { + return ^FileMode(0) // unknown + } + typ := buf[off] + switch typ { + case syscall.DT_BLK: + return ModeDevice + case syscall.DT_CHR: + return ModeDevice | ModeCharDevice + case syscall.DT_DIR: + return ModeDir + case syscall.DT_FIFO: + return ModeNamedPipe + case syscall.DT_LNK: + return ModeSymlink + case syscall.DT_REG: + return 0 + case syscall.DT_SOCK: + return ModeSocket + } + return ^FileMode(0) // unknown +} diff --git a/platform/dbops/binaries/go/go/src/os/dirent_solaris.go b/platform/dbops/binaries/go/go/src/os/dirent_solaris.go new file mode 100644 index 0000000000000000000000000000000000000000..5597b8af20e3d82b5894e61f91786179f58d5391 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/dirent_solaris.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "unsafe" +) + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Ino), unsafe.Sizeof(syscall.Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Reclen), unsafe.Sizeof(syscall.Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false + } + return reclen - uint64(unsafe.Offsetof(syscall.Dirent{}.Name)), true +} + +func direntType(buf []byte) FileMode { + return ^FileMode(0) // unknown +} diff --git a/platform/dbops/binaries/go/go/src/os/dirent_wasip1.go b/platform/dbops/binaries/go/go/src/os/dirent_wasip1.go new file mode 100644 index 0000000000000000000000000000000000000000..d3f10b2aeb9b756195009e9f2fbd2deb5dce78a8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/dirent_wasip1.go @@ -0,0 +1,52 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build wasip1 + +package os + +import ( + "syscall" + "unsafe" +) + +// https://github.com/WebAssembly/WASI/blob/main/legacy/preview1/docs.md#-dirent-record +const sizeOfDirent = 24 + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Ino), unsafe.Sizeof(syscall.Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + namelen, ok := direntNamlen(buf) + return sizeOfDirent + namelen, ok +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Namlen), unsafe.Sizeof(syscall.Dirent{}.Namlen)) +} + +func direntType(buf []byte) FileMode { + off := unsafe.Offsetof(syscall.Dirent{}.Type) + if off >= uintptr(len(buf)) { + return ^FileMode(0) // unknown + } + switch syscall.Filetype(buf[off]) { + case syscall.FILETYPE_BLOCK_DEVICE: + return ModeDevice + case syscall.FILETYPE_CHARACTER_DEVICE: + return ModeDevice | ModeCharDevice + case syscall.FILETYPE_DIRECTORY: + return ModeDir + case syscall.FILETYPE_REGULAR_FILE: + return 0 + case syscall.FILETYPE_SOCKET_DGRAM: + return ModeSocket + case syscall.FILETYPE_SOCKET_STREAM: + return ModeSocket + case syscall.FILETYPE_SYMBOLIC_LINK: + return ModeSymlink + } + return ^FileMode(0) // unknown +} diff --git a/platform/dbops/binaries/go/go/src/os/endian_big.go b/platform/dbops/binaries/go/go/src/os/endian_big.go new file mode 100644 index 0000000000000000000000000000000000000000..0375e533726ef4f642465e6d686c64543382980e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/endian_big.go @@ -0,0 +1,9 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +//go:build ppc64 || s390x || mips || mips64 + +package os + +const isBigEndian = true diff --git a/platform/dbops/binaries/go/go/src/os/endian_little.go b/platform/dbops/binaries/go/go/src/os/endian_little.go new file mode 100644 index 0000000000000000000000000000000000000000..a7cf1cdda8e47e57e3d0ca8f10837dbf7467d354 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/endian_little.go @@ -0,0 +1,9 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +//go:build 386 || amd64 || arm || arm64 || loong64 || ppc64le || mips64le || mipsle || riscv64 || wasm + +package os + +const isBigEndian = false diff --git a/platform/dbops/binaries/go/go/src/os/env.go b/platform/dbops/binaries/go/go/src/os/env.go new file mode 100644 index 0000000000000000000000000000000000000000..63ad5ab4bd5f7f8e44752dce31b28067877d1c74 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/env.go @@ -0,0 +1,141 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// General environment variables. + +package os + +import ( + "internal/testlog" + "syscall" +) + +// Expand replaces ${var} or $var in the string based on the mapping function. +// For example, os.ExpandEnv(s) is equivalent to os.Expand(s, os.Getenv). +func Expand(s string, mapping func(string) string) string { + var buf []byte + // ${} is all ASCII, so bytes are fine for this operation. + i := 0 + for j := 0; j < len(s); j++ { + if s[j] == '$' && j+1 < len(s) { + if buf == nil { + buf = make([]byte, 0, 2*len(s)) + } + buf = append(buf, s[i:j]...) 
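+ // Parse the variable reference that starts just past the '$'.
+ // getShellName reports both the name and the number of bytes the
+ // reference occupies, so the loop can skip past it below.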
+ name, w := getShellName(s[j+1:]) + if name == "" && w > 0 { + // Encountered invalid syntax; eat the + // characters. + } else if name == "" { + // Valid syntax, but $ was not followed by a + // name. Leave the dollar character untouched. + buf = append(buf, s[j]) + } else { + buf = append(buf, mapping(name)...) + } + j += w + i = j + 1 + } + } + if buf == nil { + return s + } + return string(buf) + s[i:] +} + +// ExpandEnv replaces ${var} or $var in the string according to the values +// of the current environment variables. References to undefined +// variables are replaced by the empty string. +func ExpandEnv(s string) string { + return Expand(s, Getenv) +} + +// isShellSpecialVar reports whether the character identifies a special +// shell variable such as $*. +func isShellSpecialVar(c uint8) bool { + switch c { + case '*', '#', '$', '@', '!', '?', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return true + } + return false +} + +// isAlphaNum reports whether the byte is an ASCII letter, number, or underscore. +func isAlphaNum(c uint8) bool { + return c == '_' || '0' <= c && c <= '9' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' +} + +// getShellName returns the name that begins the string and the number of bytes +// consumed to extract it. If the name is enclosed in {}, it's part of a ${} +// expansion and two more bytes are needed than the length of the name. +func getShellName(s string) (string, int) { + switch { + case s[0] == '{': + if len(s) > 2 && isShellSpecialVar(s[1]) && s[2] == '}' { + return s[1:2], 3 + } + // Scan to closing brace + for i := 1; i < len(s); i++ { + if s[i] == '}' { + if i == 1 { + return "", 2 // Bad syntax; eat "${}" + } + return s[1:i], i + 1 + } + } + return "", 1 // Bad syntax; eat "${" + case isShellSpecialVar(s[0]): + return s[0:1], 1 + } + // Scan alphanumerics. + var i int + for i = 0; i < len(s) && isAlphaNum(s[i]); i++ { + } + return s[:i], i +} + +// Getenv retrieves the value of the environment variable named by the key. +// It returns the value, which will be empty if the variable is not present. +// To distinguish between an empty value and an unset value, use LookupEnv. +func Getenv(key string) string { + testlog.Getenv(key) + v, _ := syscall.Getenv(key) + return v +} + +// LookupEnv retrieves the value of the environment variable named +// by the key. If the variable is present in the environment the +// value (which may be empty) is returned and the boolean is true. +// Otherwise the returned value will be empty and the boolean will +// be false. +func LookupEnv(key string) (string, bool) { + testlog.Getenv(key) + return syscall.Getenv(key) +} + +// Setenv sets the value of the environment variable named by the key. +// It returns an error, if any. +func Setenv(key, value string) error { + err := syscall.Setenv(key, value) + if err != nil { + return NewSyscallError("setenv", err) + } + return nil +} + +// Unsetenv unsets a single environment variable. +func Unsetenv(key string) error { + return syscall.Unsetenv(key) +} + +// Clearenv deletes all environment variables. +func Clearenv() { + syscall.Clearenv() +} + +// Environ returns a copy of strings representing the environment, +// in the form "key=value". 
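+//
+// Each entry can be split on its first '=' (illustrative sketch; strings.Cut
+// requires Go 1.18 or newer, and on Windows some keys may themselves begin
+// with '='):
+//
+//	for _, kv := range os.Environ() {
+//		k, v, _ := strings.Cut(kv, "=")
+//		fmt.Printf("%s => %s\n", k, v)
+//	}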
+func Environ() []string { + return syscall.Environ() +} diff --git a/platform/dbops/binaries/go/go/src/os/env_test.go b/platform/dbops/binaries/go/go/src/os/env_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5809f4b866b73a38bdcea2c43ee3183f6c2cf5b7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/env_test.go @@ -0,0 +1,206 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + . "os" + "reflect" + "strings" + "testing" +) + +// testGetenv gives us a controlled set of variables for testing Expand. +func testGetenv(s string) string { + switch s { + case "*": + return "all the args" + case "#": + return "NARGS" + case "$": + return "PID" + case "1": + return "ARGUMENT1" + case "HOME": + return "/usr/gopher" + case "H": + return "(Value of H)" + case "home_1": + return "/usr/foo" + case "_": + return "underscore" + } + return "" +} + +var expandTests = []struct { + in, out string +}{ + {"", ""}, + {"$*", "all the args"}, + {"$$", "PID"}, + {"${*}", "all the args"}, + {"$1", "ARGUMENT1"}, + {"${1}", "ARGUMENT1"}, + {"now is the time", "now is the time"}, + {"$HOME", "/usr/gopher"}, + {"$home_1", "/usr/foo"}, + {"${HOME}", "/usr/gopher"}, + {"${H}OME", "(Value of H)OME"}, + {"A$$$#$1$H$home_1*B", "APIDNARGSARGUMENT1(Value of H)/usr/foo*B"}, + {"start$+middle$^end$", "start$+middle$^end$"}, + {"mixed$|bag$$$", "mixed$|bagPID$"}, + {"$", "$"}, + {"$}", "$}"}, + {"${", ""}, // invalid syntax; eat up the characters + {"${}", ""}, // invalid syntax; eat up the characters +} + +func TestExpand(t *testing.T) { + for _, test := range expandTests { + result := Expand(test.in, testGetenv) + if result != test.out { + t.Errorf("Expand(%q)=%q; expected %q", test.in, result, test.out) + } + } +} + +var global any + +func BenchmarkExpand(b *testing.B) { + b.Run("noop", func(b *testing.B) { + var s string + b.ReportAllocs() + for i := 0; i < b.N; i++ { + s = Expand("tick tick tick tick", func(string) string { return "" }) + } + global = s + }) + b.Run("multiple", func(b *testing.B) { + var s string + b.ReportAllocs() + for i := 0; i < b.N; i++ { + s = Expand("$a $a $a $a", func(string) string { return "boom" }) + } + global = s + }) +} + +func TestConsistentEnviron(t *testing.T) { + e0 := Environ() + for i := 0; i < 10; i++ { + e1 := Environ() + if !reflect.DeepEqual(e0, e1) { + t.Fatalf("environment changed") + } + } +} + +func TestUnsetenv(t *testing.T) { + const testKey = "GO_TEST_UNSETENV" + set := func() bool { + prefix := testKey + "=" + for _, key := range Environ() { + if strings.HasPrefix(key, prefix) { + return true + } + } + return false + } + if err := Setenv(testKey, "1"); err != nil { + t.Fatalf("Setenv: %v", err) + } + if !set() { + t.Error("Setenv didn't set TestUnsetenv") + } + if err := Unsetenv(testKey); err != nil { + t.Fatalf("Unsetenv: %v", err) + } + if set() { + t.Fatal("Unsetenv didn't clear TestUnsetenv") + } +} + +func TestClearenv(t *testing.T) { + const testKey = "GO_TEST_CLEARENV" + const testValue = "1" + + // reset env + defer func(origEnv []string) { + for _, pair := range origEnv { + // Environment variables on Windows can begin with = + // https://devblogs.microsoft.com/oldnewthing/20100506-00/?p=14133 + i := strings.Index(pair[1:], "=") + 1 + if err := Setenv(pair[:i], pair[i+1:]); err != nil { + t.Errorf("Setenv(%q, %q) failed during reset: %v", pair[:i], pair[i+1:], err) + } + } + }(Environ()) + 
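+ // The deferred closure above snapshots the original environment, so the
+ // test can safely set a sentinel variable and wipe everything with
+ // Clearenv; the snapshot is replayed via Setenv once the test returns.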
+ if err := Setenv(testKey, testValue); err != nil { + t.Fatalf("Setenv(%q, %q) failed: %v", testKey, testValue, err) + } + if _, ok := LookupEnv(testKey); !ok { + t.Errorf("Setenv(%q, %q) didn't set $%s", testKey, testValue, testKey) + } + Clearenv() + if val, ok := LookupEnv(testKey); ok { + t.Errorf("Clearenv() didn't clear $%s, remained with value %q", testKey, val) + } +} + +func TestLookupEnv(t *testing.T) { + const smallpox = "SMALLPOX" // No one has smallpox. + value, ok := LookupEnv(smallpox) // Should not exist. + if ok || value != "" { + t.Fatalf("%s=%q", smallpox, value) + } + defer Unsetenv(smallpox) + err := Setenv(smallpox, "virus") + if err != nil { + t.Fatalf("failed to release smallpox virus") + } + _, ok = LookupEnv(smallpox) + if !ok { + t.Errorf("smallpox release failed; world remains safe but LookupEnv is broken") + } +} + +// On Windows, Environ was observed to report keys with a single leading "=". +// Check that they are properly reported by LookupEnv and can be set by SetEnv. +// See https://golang.org/issue/49886. +func TestEnvironConsistency(t *testing.T) { + t.Parallel() + + for _, kv := range Environ() { + i := strings.Index(kv, "=") + if i == 0 { + // We observe in practice keys with a single leading "=" on Windows. + // TODO(#49886): Should we consume only the first leading "=" as part + // of the key, or parse through arbitrarily many of them until a non-=, + // or try each possible key/value boundary until LookupEnv succeeds? + i = strings.Index(kv[1:], "=") + 1 + } + if i < 0 { + t.Errorf("Environ entry missing '=': %q", kv) + } + + k := kv[:i] + v := kv[i+1:] + v2, ok := LookupEnv(k) + if ok && v == v2 { + t.Logf("LookupEnv(%q) = %q, %t", k, v2, ok) + } else { + t.Errorf("Environ contains %q, but LookupEnv(%q) = %q, %t", kv, k, v2, ok) + } + + // Since k=v is already present in the environment, + // setting it should be a no-op. + if err := Setenv(k, v); err == nil { + t.Logf("Setenv(%q, %q)", k, v) + } else { + t.Errorf("Environ contains %q, but SetEnv(%q, %q) = %q", kv, k, v, err) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/os/env_unix_test.go b/platform/dbops/binaries/go/go/src/os/env_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4609fc3b94d404137c6c354d3da3a72a07c914d7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/env_unix_test.go @@ -0,0 +1,56 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package os_test + +import ( + "fmt" + . 
"os" + "testing" +) + +var setenvEinvalTests = []struct { + k, v string +}{ + {"", ""}, // empty key + {"k=v", ""}, // '=' in key + {"\x00", ""}, // '\x00' in key + {"k", "\x00"}, // '\x00' in value +} + +func TestSetenvUnixEinval(t *testing.T) { + for _, tt := range setenvEinvalTests { + err := Setenv(tt.k, tt.v) + if err == nil { + t.Errorf(`Setenv(%q, %q) == nil, want error`, tt.k, tt.v) + } + } +} + +var shellSpecialVarTests = []struct { + k, v string +}{ + {"*", "asterisk"}, + {"#", "pound"}, + {"$", "dollar"}, + {"@", "at"}, + {"!", "exclamation mark"}, + {"?", "question mark"}, + {"-", "dash"}, +} + +func TestExpandEnvShellSpecialVar(t *testing.T) { + for _, tt := range shellSpecialVarTests { + Setenv(tt.k, tt.v) + defer Unsetenv(tt.k) + + argRaw := fmt.Sprintf("$%s", tt.k) + argWithBrace := fmt.Sprintf("${%s}", tt.k) + if gotRaw, gotBrace := ExpandEnv(argRaw), ExpandEnv(argWithBrace); gotRaw != gotBrace { + t.Errorf("ExpandEnv(%q) = %q, ExpandEnv(%q) = %q; expect them to be equal", argRaw, gotRaw, argWithBrace, gotBrace) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/os/error.go b/platform/dbops/binaries/go/go/src/os/error.go new file mode 100644 index 0000000000000000000000000000000000000000..62ede9ded3bcec749b9ccd9978b86af0a1e69604 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/error.go @@ -0,0 +1,141 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "internal/poll" + "io/fs" +) + +// Portable analogs of some common system call errors. +// +// Errors returned from this package may be tested against these errors +// with errors.Is. +var ( + // ErrInvalid indicates an invalid argument. + // Methods on File will return this error when the receiver is nil. + ErrInvalid = fs.ErrInvalid // "invalid argument" + + ErrPermission = fs.ErrPermission // "permission denied" + ErrExist = fs.ErrExist // "file already exists" + ErrNotExist = fs.ErrNotExist // "file does not exist" + ErrClosed = fs.ErrClosed // "file already closed" + + ErrNoDeadline = errNoDeadline() // "file type does not support deadline" + ErrDeadlineExceeded = errDeadlineExceeded() // "i/o timeout" +) + +func errNoDeadline() error { return poll.ErrNoDeadline } + +// errDeadlineExceeded returns the value for os.ErrDeadlineExceeded. +// This error comes from the internal/poll package, which is also +// used by package net. Doing it this way ensures that the net +// package will return os.ErrDeadlineExceeded for an exceeded deadline, +// as documented by net.Conn.SetDeadline, without requiring any extra +// work in the net package and without requiring the internal/poll +// package to import os (which it can't, because that would be circular). +func errDeadlineExceeded() error { return poll.ErrDeadlineExceeded } + +type timeout interface { + Timeout() bool +} + +// PathError records an error and the operation and file path that caused it. +type PathError = fs.PathError + +// SyscallError records an error from a specific system call. +type SyscallError struct { + Syscall string + Err error +} + +func (e *SyscallError) Error() string { return e.Syscall + ": " + e.Err.Error() } + +func (e *SyscallError) Unwrap() error { return e.Err } + +// Timeout reports whether this error represents a timeout. 
+func (e *SyscallError) Timeout() bool { + t, ok := e.Err.(timeout) + return ok && t.Timeout() +} + +// NewSyscallError returns, as an error, a new SyscallError +// with the given system call name and error details. +// As a convenience, if err is nil, NewSyscallError returns nil. +func NewSyscallError(syscall string, err error) error { + if err == nil { + return nil + } + return &SyscallError{syscall, err} +} + +// IsExist returns a boolean indicating whether the error is known to report +// that a file or directory already exists. It is satisfied by ErrExist as +// well as some syscall errors. +// +// This function predates errors.Is. It only supports errors returned by +// the os package. New code should use errors.Is(err, fs.ErrExist). +func IsExist(err error) bool { + return underlyingErrorIs(err, ErrExist) +} + +// IsNotExist returns a boolean indicating whether the error is known to +// report that a file or directory does not exist. It is satisfied by +// ErrNotExist as well as some syscall errors. +// +// This function predates errors.Is. It only supports errors returned by +// the os package. New code should use errors.Is(err, fs.ErrNotExist). +func IsNotExist(err error) bool { + return underlyingErrorIs(err, ErrNotExist) +} + +// IsPermission returns a boolean indicating whether the error is known to +// report that permission is denied. It is satisfied by ErrPermission as well +// as some syscall errors. +// +// This function predates errors.Is. It only supports errors returned by +// the os package. New code should use errors.Is(err, fs.ErrPermission). +func IsPermission(err error) bool { + return underlyingErrorIs(err, ErrPermission) +} + +// IsTimeout returns a boolean indicating whether the error is known +// to report that a timeout occurred. +// +// This function predates errors.Is, and the notion of whether an +// error indicates a timeout can be ambiguous. For example, the Unix +// error EWOULDBLOCK sometimes indicates a timeout and sometimes does not. +// New code should use errors.Is with a value appropriate to the call +// returning the error, such as os.ErrDeadlineExceeded. +func IsTimeout(err error) bool { + terr, ok := underlyingError(err).(timeout) + return ok && terr.Timeout() +} + +func underlyingErrorIs(err, target error) bool { + // Note that this function is not errors.Is: + // underlyingError only unwraps the specific error-wrapping types + // that it historically did, not all errors implementing Unwrap(). + err = underlyingError(err) + if err == target { + return true + } + // To preserve prior behavior, only examine syscall errors. + e, ok := err.(syscallErrorType) + return ok && e.Is(target) +} + +// underlyingError returns the underlying error for known os error types. +func underlyingError(err error) error { + switch err := err.(type) { + case *PathError: + return err.Err + case *LinkError: + return err.Err + case *SyscallError: + return err.Err + } + return err +} diff --git a/platform/dbops/binaries/go/go/src/os/error_errno.go b/platform/dbops/binaries/go/go/src/os/error_errno.go new file mode 100644 index 0000000000000000000000000000000000000000..c8140461a4dd9fd2596c23a70a39456fe2ecb141 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/error_errno.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
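+//
+// The alias below lets underlyingErrorIs in error.go call the Is method of
+// the platform's raw syscall error uniformly. On errno-based platforms that
+// method already maps raw errnos onto the portable fs sentinels, e.g.
+// (sketch):
+//
+//	syscall.EEXIST.Is(fs.ErrExist) // true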
+ +//go:build !plan9 + +package os + +import "syscall" + +type syscallErrorType = syscall.Errno diff --git a/platform/dbops/binaries/go/go/src/os/error_plan9.go b/platform/dbops/binaries/go/go/src/os/error_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..af6065db568edc3c94cf882a6ac93db7e6b1fd90 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/error_plan9.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import "syscall" + +type syscallErrorType = syscall.ErrorString diff --git a/platform/dbops/binaries/go/go/src/os/error_posix.go b/platform/dbops/binaries/go/go/src/os/error_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..b159c036c110757170edd315bb3b3c0626c51f16 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/error_posix.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || (js && wasm) || wasip1 || windows + +package os + +import "syscall" + +// wrapSyscallError takes an error and a syscall name. If the error is +// a syscall.Errno, it wraps it in an os.SyscallError using the syscall name. +func wrapSyscallError(name string, err error) error { + if _, ok := err.(syscall.Errno); ok { + err = NewSyscallError(name, err) + } + return err +} diff --git a/platform/dbops/binaries/go/go/src/os/error_test.go b/platform/dbops/binaries/go/go/src/os/error_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8f82ae65d36b8b1e8d6b063147e91f1366ed0372 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/error_test.go @@ -0,0 +1,189 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package os_test + +import ( + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "testing" +) + +func TestErrIsExist(t *testing.T) { + t.Parallel() + + f, err := os.CreateTemp("", "_Go_ErrIsExist") + if err != nil { + t.Fatalf("open ErrIsExist tempfile: %s", err) + return + } + defer os.Remove(f.Name()) + defer f.Close() + f2, err := os.OpenFile(f.Name(), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err == nil { + f2.Close() + t.Fatal("Open should have failed") + } + if s := checkErrorPredicate("os.IsExist", os.IsExist, err, fs.ErrExist); s != "" { + t.Fatal(s) + } +} + +func testErrNotExist(t *testing.T, name string) string { + originalWD, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(name) + if err == nil { + f.Close() + return "Open should have failed" + } + if s := checkErrorPredicate("os.IsNotExist", os.IsNotExist, err, fs.ErrNotExist); s != "" { + return s + } + + err = os.Chdir(name) + if err == nil { + if err := os.Chdir(originalWD); err != nil { + t.Fatalf("Chdir should have failed, failed to restore original working directory: %v", err) + } + return "Chdir should have failed, restored original working directory" + } + if s := checkErrorPredicate("os.IsNotExist", os.IsNotExist, err, fs.ErrNotExist); s != "" { + return s + } + return "" +} + +func TestErrIsNotExist(t *testing.T) { + tmpDir := t.TempDir() + name := filepath.Join(tmpDir, "NotExists") + if s := testErrNotExist(t, name); s != "" { + t.Fatal(s) + } + + name = filepath.Join(name, "NotExists2") + if s := testErrNotExist(t, name); s != "" { + t.Fatal(s) + } +} + +func checkErrorPredicate(predName string, pred func(error) bool, err, target error) string { + if !pred(err) { + return fmt.Sprintf("%s does not work as expected for %#v", predName, err) + } + if !errors.Is(err, target) { + return fmt.Sprintf("errors.Is(%#v, %#v) = false, want true", err, target) + } + return "" +} + +type isExistTest struct { + err error + is bool + isnot bool +} + +var isExistTests = []isExistTest{ + {&fs.PathError{Err: fs.ErrInvalid}, false, false}, + {&fs.PathError{Err: fs.ErrPermission}, false, false}, + {&fs.PathError{Err: fs.ErrExist}, true, false}, + {&fs.PathError{Err: fs.ErrNotExist}, false, true}, + {&fs.PathError{Err: fs.ErrClosed}, false, false}, + {&os.LinkError{Err: fs.ErrInvalid}, false, false}, + {&os.LinkError{Err: fs.ErrPermission}, false, false}, + {&os.LinkError{Err: fs.ErrExist}, true, false}, + {&os.LinkError{Err: fs.ErrNotExist}, false, true}, + {&os.LinkError{Err: fs.ErrClosed}, false, false}, + {&os.SyscallError{Err: fs.ErrNotExist}, false, true}, + {&os.SyscallError{Err: fs.ErrExist}, true, false}, + {nil, false, false}, +} + +func TestIsExist(t *testing.T) { + for _, tt := range isExistTests { + if is := os.IsExist(tt.err); is != tt.is { + t.Errorf("os.IsExist(%T %v) = %v, want %v", tt.err, tt.err, is, tt.is) + } + if is := errors.Is(tt.err, fs.ErrExist); is != tt.is { + t.Errorf("errors.Is(%T %v, fs.ErrExist) = %v, want %v", tt.err, tt.err, is, tt.is) + } + if isnot := os.IsNotExist(tt.err); isnot != tt.isnot { + t.Errorf("os.IsNotExist(%T %v) = %v, want %v", tt.err, tt.err, isnot, tt.isnot) + } + if isnot := errors.Is(tt.err, fs.ErrNotExist); isnot != tt.isnot { + t.Errorf("errors.Is(%T %v, fs.ErrNotExist) = %v, want %v", tt.err, tt.err, isnot, tt.isnot) + } + } +} + +type isPermissionTest struct { + err error + want bool +} + +var isPermissionTests = []isPermissionTest{ + {nil, false}, + {&fs.PathError{Err: fs.ErrPermission}, true}, + {&os.SyscallError{Err: fs.ErrPermission}, 
true}, +} + +func TestIsPermission(t *testing.T) { + for _, tt := range isPermissionTests { + if got := os.IsPermission(tt.err); got != tt.want { + t.Errorf("os.IsPermission(%#v) = %v; want %v", tt.err, got, tt.want) + } + if got := errors.Is(tt.err, fs.ErrPermission); got != tt.want { + t.Errorf("errors.Is(%#v, fs.ErrPermission) = %v; want %v", tt.err, got, tt.want) + } + } +} + +func TestErrPathNUL(t *testing.T) { + t.Parallel() + + f, err := os.CreateTemp("", "_Go_ErrPathNUL\x00") + if err == nil { + f.Close() + t.Fatal("TempFile should have failed") + } + f, err = os.CreateTemp("", "_Go_ErrPathNUL") + if err != nil { + t.Fatalf("open ErrPathNUL tempfile: %s", err) + } + defer os.Remove(f.Name()) + defer f.Close() + f2, err := os.OpenFile(f.Name(), os.O_RDWR, 0600) + if err != nil { + t.Fatalf("open ErrPathNUL: %s", err) + } + f2.Close() + f2, err = os.OpenFile(f.Name()+"\x00", os.O_RDWR, 0600) + if err == nil { + f2.Close() + t.Fatal("Open should have failed") + } +} + +func TestPathErrorUnwrap(t *testing.T) { + pe := &fs.PathError{Err: fs.ErrInvalid} + if !errors.Is(pe, fs.ErrInvalid) { + t.Error("errors.Is failed, wanted success") + } +} + +type myErrorIs struct{ error } + +func (e myErrorIs) Is(target error) bool { return target == e.error } + +func TestErrorIsMethods(t *testing.T) { + if os.IsPermission(myErrorIs{fs.ErrPermission}) { + t.Error("os.IsPermission(err) = true when err.Is(fs.ErrPermission), wanted false") + } +} diff --git a/platform/dbops/binaries/go/go/src/os/error_unix_test.go b/platform/dbops/binaries/go/go/src/os/error_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..07a3286cd6f1743ba3dfedb042753c86cf1749d3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/error_unix_test.go @@ -0,0 +1,40 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build unix || (js && wasm) || wasip1 + +package os_test + +import ( + "io/fs" + "os" + "syscall" +) + +func init() { + isExistTests = append(isExistTests, + isExistTest{err: &fs.PathError{Err: syscall.EEXIST}, is: true, isnot: false}, + isExistTest{err: &fs.PathError{Err: syscall.ENOTEMPTY}, is: true, isnot: false}, + + isExistTest{err: &os.LinkError{Err: syscall.EEXIST}, is: true, isnot: false}, + isExistTest{err: &os.LinkError{Err: syscall.ENOTEMPTY}, is: true, isnot: false}, + + isExistTest{err: &os.SyscallError{Err: syscall.EEXIST}, is: true, isnot: false}, + isExistTest{err: &os.SyscallError{Err: syscall.ENOTEMPTY}, is: true, isnot: false}, + ) + isPermissionTests = append(isPermissionTests, + isPermissionTest{err: &fs.PathError{Err: syscall.EACCES}, want: true}, + isPermissionTest{err: &fs.PathError{Err: syscall.EPERM}, want: true}, + isPermissionTest{err: &fs.PathError{Err: syscall.EEXIST}, want: false}, + + isPermissionTest{err: &os.LinkError{Err: syscall.EACCES}, want: true}, + isPermissionTest{err: &os.LinkError{Err: syscall.EPERM}, want: true}, + isPermissionTest{err: &os.LinkError{Err: syscall.EEXIST}, want: false}, + + isPermissionTest{err: &os.SyscallError{Err: syscall.EACCES}, want: true}, + isPermissionTest{err: &os.SyscallError{Err: syscall.EPERM}, want: true}, + isPermissionTest{err: &os.SyscallError{Err: syscall.EEXIST}, want: false}, + ) + +} diff --git a/platform/dbops/binaries/go/go/src/os/error_windows_test.go b/platform/dbops/binaries/go/go/src/os/error_windows_test.go new file mode 100644 index 0000000000000000000000000000000000000000..86c8a985bb3359af155eb91604edb16af92cd11d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/error_windows_test.go @@ -0,0 +1,40 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build windows + +package os_test + +import ( + "io/fs" + "os" + "syscall" +) + +func init() { + const _ERROR_BAD_NETPATH = syscall.Errno(53) + + isExistTests = append(isExistTests, + isExistTest{err: &fs.PathError{Err: syscall.ERROR_FILE_NOT_FOUND}, is: false, isnot: true}, + isExistTest{err: &os.LinkError{Err: syscall.ERROR_FILE_NOT_FOUND}, is: false, isnot: true}, + isExistTest{err: &os.SyscallError{Err: syscall.ERROR_FILE_NOT_FOUND}, is: false, isnot: true}, + + isExistTest{err: &fs.PathError{Err: _ERROR_BAD_NETPATH}, is: false, isnot: true}, + isExistTest{err: &os.LinkError{Err: _ERROR_BAD_NETPATH}, is: false, isnot: true}, + isExistTest{err: &os.SyscallError{Err: _ERROR_BAD_NETPATH}, is: false, isnot: true}, + + isExistTest{err: &fs.PathError{Err: syscall.ERROR_PATH_NOT_FOUND}, is: false, isnot: true}, + isExistTest{err: &os.LinkError{Err: syscall.ERROR_PATH_NOT_FOUND}, is: false, isnot: true}, + isExistTest{err: &os.SyscallError{Err: syscall.ERROR_PATH_NOT_FOUND}, is: false, isnot: true}, + + isExistTest{err: &fs.PathError{Err: syscall.ERROR_DIR_NOT_EMPTY}, is: true, isnot: false}, + isExistTest{err: &os.LinkError{Err: syscall.ERROR_DIR_NOT_EMPTY}, is: true, isnot: false}, + isExistTest{err: &os.SyscallError{Err: syscall.ERROR_DIR_NOT_EMPTY}, is: true, isnot: false}, + ) + isPermissionTests = append(isPermissionTests, + isPermissionTest{err: &fs.PathError{Err: syscall.ERROR_ACCESS_DENIED}, want: true}, + isPermissionTest{err: &os.LinkError{Err: syscall.ERROR_ACCESS_DENIED}, want: true}, + isPermissionTest{err: &os.SyscallError{Err: syscall.ERROR_ACCESS_DENIED}, want: true}, + ) +} diff --git a/platform/dbops/binaries/go/go/src/os/example_test.go b/platform/dbops/binaries/go/go/src/os/example_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7437a74cd0c66d657dd6b52e7646abf92c18bf9a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/example_test.go @@ -0,0 +1,395 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "bytes" + "errors" + "fmt" + "io/fs" + "log" + "os" + "path/filepath" + "sync" + "time" +) + +func ExampleOpenFile() { + f, err := os.OpenFile("notes.txt", os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + log.Fatal(err) + } + if err := f.Close(); err != nil { + log.Fatal(err) + } +} + +func ExampleOpenFile_append() { + // If the file doesn't exist, create it, or append to the file + f, err := os.OpenFile("access.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + if _, err := f.Write([]byte("appended some data\n")); err != nil { + f.Close() // ignore error; Write error takes precedence + log.Fatal(err) + } + if err := f.Close(); err != nil { + log.Fatal(err) + } +} + +func ExampleChmod() { + if err := os.Chmod("some-filename", 0644); err != nil { + log.Fatal(err) + } +} + +func ExampleChtimes() { + mtime := time.Date(2006, time.February, 1, 3, 4, 5, 0, time.UTC) + atime := time.Date(2007, time.March, 2, 4, 5, 6, 0, time.UTC) + if err := os.Chtimes("some-filename", atime, mtime); err != nil { + log.Fatal(err) + } +} + +func ExampleFileMode() { + fi, err := os.Lstat("some-filename") + if err != nil { + log.Fatal(err) + } + + fmt.Printf("permissions: %#o\n", fi.Mode().Perm()) // 0400, 0777, etc. 
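+ // Classify the entry by its mode's type bits; for a typical filesystem
+ // object exactly one of the cases below matches.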
+ switch mode := fi.Mode(); { + case mode.IsRegular(): + fmt.Println("regular file") + case mode.IsDir(): + fmt.Println("directory") + case mode&fs.ModeSymlink != 0: + fmt.Println("symbolic link") + case mode&fs.ModeNamedPipe != 0: + fmt.Println("named pipe") + } +} + +func ExampleErrNotExist() { + filename := "a-nonexistent-file" + if _, err := os.Stat(filename); errors.Is(err, fs.ErrNotExist) { + fmt.Println("file does not exist") + } + // Output: + // file does not exist +} + +func ExampleExpand() { + mapper := func(placeholderName string) string { + switch placeholderName { + case "DAY_PART": + return "morning" + case "NAME": + return "Gopher" + } + + return "" + } + + fmt.Println(os.Expand("Good ${DAY_PART}, $NAME!", mapper)) + + // Output: + // Good morning, Gopher! +} + +func ExampleExpandEnv() { + os.Setenv("NAME", "gopher") + os.Setenv("BURROW", "/usr/gopher") + + fmt.Println(os.ExpandEnv("$NAME lives in ${BURROW}.")) + + // Output: + // gopher lives in /usr/gopher. +} + +func ExampleLookupEnv() { + show := func(key string) { + val, ok := os.LookupEnv(key) + if !ok { + fmt.Printf("%s not set\n", key) + } else { + fmt.Printf("%s=%s\n", key, val) + } + } + + os.Setenv("SOME_KEY", "value") + os.Setenv("EMPTY_KEY", "") + + show("SOME_KEY") + show("EMPTY_KEY") + show("MISSING_KEY") + + // Output: + // SOME_KEY=value + // EMPTY_KEY= + // MISSING_KEY not set +} + +func ExampleGetenv() { + os.Setenv("NAME", "gopher") + os.Setenv("BURROW", "/usr/gopher") + + fmt.Printf("%s lives in %s.\n", os.Getenv("NAME"), os.Getenv("BURROW")) + + // Output: + // gopher lives in /usr/gopher. +} + +func ExampleUnsetenv() { + os.Setenv("TMPDIR", "/my/tmp") + defer os.Unsetenv("TMPDIR") +} + +func ExampleReadDir() { + files, err := os.ReadDir(".") + if err != nil { + log.Fatal(err) + } + + for _, file := range files { + fmt.Println(file.Name()) + } +} + +func ExampleMkdirTemp() { + dir, err := os.MkdirTemp("", "example") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(dir) // clean up + + file := filepath.Join(dir, "tmpfile") + if err := os.WriteFile(file, []byte("content"), 0666); err != nil { + log.Fatal(err) + } +} + +func ExampleMkdirTemp_suffix() { + logsDir, err := os.MkdirTemp("", "*-logs") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(logsDir) // clean up + + // Logs can be cleaned out earlier if needed by searching + // for all directories whose suffix ends in *-logs. + globPattern := filepath.Join(os.TempDir(), "*-logs") + matches, err := filepath.Glob(globPattern) + if err != nil { + log.Fatalf("Failed to match %q: %v", globPattern, err) + } + + for _, match := range matches { + if err := os.RemoveAll(match); err != nil { + log.Printf("Failed to remove %q: %v", match, err) + } + } +} + +func ExampleCreateTemp() { + f, err := os.CreateTemp("", "example") + if err != nil { + log.Fatal(err) + } + defer os.Remove(f.Name()) // clean up + + if _, err := f.Write([]byte("content")); err != nil { + log.Fatal(err) + } + if err := f.Close(); err != nil { + log.Fatal(err) + } +} + +func ExampleCreateTemp_suffix() { + f, err := os.CreateTemp("", "example.*.txt") + if err != nil { + log.Fatal(err) + } + defer os.Remove(f.Name()) // clean up + + if _, err := f.Write([]byte("content")); err != nil { + f.Close() + log.Fatal(err) + } + if err := f.Close(); err != nil { + log.Fatal(err) + } +} + +func ExampleReadFile() { + data, err := os.ReadFile("testdata/hello") + if err != nil { + log.Fatal(err) + } + os.Stdout.Write(data) + + // Output: + // Hello, Gophers! 
+} + +func ExampleWriteFile() { + err := os.WriteFile("testdata/hello", []byte("Hello, Gophers!"), 0666) + if err != nil { + log.Fatal(err) + } +} + +func ExampleMkdir() { + err := os.Mkdir("testdir", 0750) + if err != nil && !os.IsExist(err) { + log.Fatal(err) + } + err = os.WriteFile("testdir/testfile.txt", []byte("Hello, Gophers!"), 0660) + if err != nil { + log.Fatal(err) + } +} + +func ExampleMkdirAll() { + err := os.MkdirAll("test/subdir", 0750) + if err != nil { + log.Fatal(err) + } + err = os.WriteFile("test/subdir/testfile.txt", []byte("Hello, Gophers!"), 0660) + if err != nil { + log.Fatal(err) + } +} + +func ExampleReadlink() { + // First, we create a relative symlink to a file. + d, err := os.MkdirTemp("", "") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(d) + targetPath := filepath.Join(d, "hello.txt") + if err := os.WriteFile(targetPath, []byte("Hello, Gophers!"), 0644); err != nil { + log.Fatal(err) + } + linkPath := filepath.Join(d, "hello.link") + if err := os.Symlink("hello.txt", filepath.Join(d, "hello.link")); err != nil { + if errors.Is(err, errors.ErrUnsupported) { + // Allow the example to run on platforms that do not support symbolic links. + fmt.Printf("%s links to %s\n", filepath.Base(linkPath), "hello.txt") + return + } + log.Fatal(err) + } + + // Readlink returns the relative path as passed to os.Symlink. + dst, err := os.Readlink(linkPath) + if err != nil { + log.Fatal(err) + } + fmt.Printf("%s links to %s\n", filepath.Base(linkPath), dst) + + var dstAbs string + if filepath.IsAbs(dst) { + dstAbs = dst + } else { + // Symlink targets are relative to the directory containing the link. + dstAbs = filepath.Join(filepath.Dir(linkPath), dst) + } + + // Check that the target is correct by comparing it with os.Stat + // on the original target path. + dstInfo, err := os.Stat(dstAbs) + if err != nil { + log.Fatal(err) + } + targetInfo, err := os.Stat(targetPath) + if err != nil { + log.Fatal(err) + } + if !os.SameFile(dstInfo, targetInfo) { + log.Fatalf("link destination (%s) is not the same file as %s", dstAbs, targetPath) + } + + // Output: + // hello.link links to hello.txt +} + +func ExampleUserCacheDir() { + dir, dirErr := os.UserCacheDir() + if dirErr == nil { + dir = filepath.Join(dir, "ExampleUserCacheDir") + } + + getCache := func(name string) ([]byte, error) { + if dirErr != nil { + return nil, &os.PathError{Op: "getCache", Path: name, Err: os.ErrNotExist} + } + return os.ReadFile(filepath.Join(dir, name)) + } + + var mkdirOnce sync.Once + putCache := func(name string, b []byte) error { + if dirErr != nil { + return &os.PathError{Op: "putCache", Path: name, Err: dirErr} + } + mkdirOnce.Do(func() { + if err := os.MkdirAll(dir, 0700); err != nil { + log.Printf("can't create user cache dir: %v", err) + } + }) + return os.WriteFile(filepath.Join(dir, name), b, 0600) + } + + // Read and store cached data. + // … + _ = getCache + _ = putCache + + // Output: +} + +func ExampleUserConfigDir() { + dir, dirErr := os.UserConfigDir() + + var ( + configPath string + origConfig []byte + ) + if dirErr == nil { + configPath = filepath.Join(dir, "ExampleUserConfigDir", "example.conf") + var err error + origConfig, err = os.ReadFile(configPath) + if err != nil && !os.IsNotExist(err) { + // The user has a config file but we couldn't read it. + // Report the error instead of ignoring their configuration. + log.Fatal(err) + } + } + + // Use and perhaps make changes to the config. + config := bytes.Clone(origConfig) + // … + + // Save changes. 
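+ // Persist only when the config actually changed, creating the user
+ // config directory lazily and logging, rather than aborting on, write
+ // failures.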
+ if !bytes.Equal(config, origConfig) { + if configPath == "" { + log.Printf("not saving config changes: %v", dirErr) + } else { + err := os.MkdirAll(filepath.Dir(configPath), 0700) + if err == nil { + err = os.WriteFile(configPath, config, 0600) + } + if err != nil { + log.Printf("error saving config changes: %v", err) + } + } + } + + // Output: +} diff --git a/platform/dbops/binaries/go/go/src/os/exec.go b/platform/dbops/binaries/go/go/src/os/exec.go new file mode 100644 index 0000000000000000000000000000000000000000..ed5a75c4d13f87405dde7c123896d5a4bf03784a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/exec.go @@ -0,0 +1,180 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "errors" + "internal/testlog" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" +) + +// ErrProcessDone indicates a Process has finished. +var ErrProcessDone = errors.New("os: process already finished") + +// Process stores the information about a process created by StartProcess. +type Process struct { + Pid int + handle uintptr // handle is accessed atomically on Windows + isdone atomic.Bool // process has been successfully waited on + sigMu sync.RWMutex // avoid race between wait and signal +} + +func newProcess(pid int, handle uintptr) *Process { + p := &Process{Pid: pid, handle: handle} + runtime.SetFinalizer(p, (*Process).Release) + return p +} + +func (p *Process) setDone() { + p.isdone.Store(true) +} + +func (p *Process) done() bool { + return p.isdone.Load() +} + +// ProcAttr holds the attributes that will be applied to a new process +// started by StartProcess. +type ProcAttr struct { + // If Dir is non-empty, the child changes into the directory before + // creating the process. + Dir string + // If Env is non-nil, it gives the environment variables for the + // new process in the form returned by Environ. + // If it is nil, the result of Environ will be used. + Env []string + // Files specifies the open files inherited by the new process. The + // first three entries correspond to standard input, standard output, and + // standard error. An implementation may support additional entries, + // depending on the underlying operating system. A nil entry corresponds + // to that file being closed when the process starts. + // On Unix systems, StartProcess will change these File values + // to blocking mode, which means that SetDeadline will stop working + // and calling Close will not interrupt a Read or Write. + Files []*File + + // Operating system-specific process creation attributes. + // Note that setting this field means that your program + // may not execute properly or even compile on some + // operating systems. + Sys *syscall.SysProcAttr +} + +// A Signal represents an operating system signal. +// The usual underlying implementation is operating system-dependent: +// on Unix it is syscall.Signal. +type Signal interface { + String() string + Signal() // to distinguish from other Stringers +} + +// Getpid returns the process id of the caller. +func Getpid() int { return syscall.Getpid() } + +// Getppid returns the process id of the caller's parent. +func Getppid() int { return syscall.Getppid() } + +// FindProcess looks for a running process by its pid. +// +// The Process it returns can be used to obtain information +// about the underlying operating system process. 
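+//
+// A minimal sketch of the liveness probe described below (Unix-oriented;
+// pid is a hypothetical process id):
+//
+//	p, err := os.FindProcess(pid)
+//	if err == nil {
+//		err = p.Signal(syscall.Signal(0)) // nil error => the process exists
+//	}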
+// +// On Unix systems, FindProcess always succeeds and returns a Process +// for the given pid, regardless of whether the process exists. To test whether +// the process actually exists, see whether p.Signal(syscall.Signal(0)) reports +// an error. +func FindProcess(pid int) (*Process, error) { + return findProcess(pid) +} + +// StartProcess starts a new process with the program, arguments and attributes +// specified by name, argv and attr. The argv slice will become os.Args in the +// new process, so it normally starts with the program name. +// +// If the calling goroutine has locked the operating system thread +// with runtime.LockOSThread and modified any inheritable OS-level +// thread state (for example, Linux or Plan 9 name spaces), the new +// process will inherit the caller's thread state. +// +// StartProcess is a low-level interface. The os/exec package provides +// higher-level interfaces. +// +// If there is an error, it will be of type *PathError. +func StartProcess(name string, argv []string, attr *ProcAttr) (*Process, error) { + testlog.Open(name) + return startProcess(name, argv, attr) +} + +// Release releases any resources associated with the Process p, +// rendering it unusable in the future. +// Release only needs to be called if Wait is not. +func (p *Process) Release() error { + return p.release() +} + +// Kill causes the Process to exit immediately. Kill does not wait until +// the Process has actually exited. This only kills the Process itself, +// not any other processes it may have started. +func (p *Process) Kill() error { + return p.kill() +} + +// Wait waits for the Process to exit, and then returns a +// ProcessState describing its status and an error, if any. +// Wait releases any resources associated with the Process. +// On most operating systems, the Process must be a child +// of the current process or an error will be returned. +func (p *Process) Wait() (*ProcessState, error) { + return p.wait() +} + +// Signal sends a signal to the Process. +// Sending Interrupt on Windows is not implemented. +func (p *Process) Signal(sig Signal) error { + return p.signal(sig) +} + +// UserTime returns the user CPU time of the exited process and its children. +func (p *ProcessState) UserTime() time.Duration { + return p.userTime() +} + +// SystemTime returns the system CPU time of the exited process and its children. +func (p *ProcessState) SystemTime() time.Duration { + return p.systemTime() +} + +// Exited reports whether the program has exited. +// On Unix systems this reports true if the program exited due to calling exit, +// but false if the program terminated due to a signal. +func (p *ProcessState) Exited() bool { + return p.exited() +} + +// Success reports whether the program exited successfully, +// such as with exit status 0 on Unix. +func (p *ProcessState) Success() bool { + return p.success() +} + +// Sys returns system-dependent exit information about +// the process. Convert it to the appropriate underlying +// type, such as syscall.WaitStatus on Unix, to access its contents. +func (p *ProcessState) Sys() any { + return p.sys() +} + +// SysUsage returns system-dependent resource usage information about +// the exited process. Convert it to the appropriate underlying +// type, such as *syscall.Rusage on Unix, to access its contents. +// (On Unix, *syscall.Rusage matches struct rusage as defined in the +// getrusage(2) manual page.) 
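+//
+// For example (an editor's sketch, not part of the original comment;
+// it assumes a Unix system, where the concrete type is *syscall.Rusage):
+//
+//	ps, err := p.Wait() // p is an *os.Process started earlier
+//	if err == nil {
+//		if ru, ok := ps.SysUsage().(*syscall.Rusage); ok {
+//			_ = ru.Maxrss // maximum resident set size
+//		}
+//	}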
+func (p *ProcessState) SysUsage() any { + return p.sysUsage() +} diff --git a/platform/dbops/binaries/go/go/src/os/exec_plan9.go b/platform/dbops/binaries/go/go/src/os/exec_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..69714ff79830d9b9e3bc702b4a8c1b73e9b7719d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/exec_plan9.go @@ -0,0 +1,149 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "internal/itoa" + "runtime" + "syscall" + "time" +) + +// The only signal values guaranteed to be present in the os package +// on all systems are Interrupt (send the process an interrupt) and +// Kill (force the process to exit). Interrupt is not implemented on +// Windows; using it with os.Process.Signal will return an error. +var ( + Interrupt Signal = syscall.Note("interrupt") + Kill Signal = syscall.Note("kill") +) + +func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err error) { + sysattr := &syscall.ProcAttr{ + Dir: attr.Dir, + Env: attr.Env, + Sys: attr.Sys, + } + + sysattr.Files = make([]uintptr, 0, len(attr.Files)) + for _, f := range attr.Files { + sysattr.Files = append(sysattr.Files, f.Fd()) + } + + pid, h, e := syscall.StartProcess(name, argv, sysattr) + if e != nil { + return nil, &PathError{Op: "fork/exec", Path: name, Err: e} + } + + return newProcess(pid, h), nil +} + +func (p *Process) writeProcFile(file string, data string) error { + f, e := OpenFile("/proc/"+itoa.Itoa(p.Pid)+"/"+file, O_WRONLY, 0) + if e != nil { + return e + } + defer f.Close() + _, e = f.Write([]byte(data)) + return e +} + +func (p *Process) signal(sig Signal) error { + if p.done() { + return ErrProcessDone + } + if e := p.writeProcFile("note", sig.String()); e != nil { + return NewSyscallError("signal", e) + } + return nil +} + +func (p *Process) kill() error { + return p.signal(Kill) +} + +func (p *Process) wait() (ps *ProcessState, err error) { + var waitmsg syscall.Waitmsg + + if p.Pid == -1 { + return nil, ErrInvalid + } + err = syscall.WaitProcess(p.Pid, &waitmsg) + if err != nil { + return nil, NewSyscallError("wait", err) + } + + p.setDone() + ps = &ProcessState{ + pid: waitmsg.Pid, + status: &waitmsg, + } + return ps, nil +} + +func (p *Process) release() error { + // NOOP for Plan 9. + p.Pid = -1 + // no need for a finalizer anymore + runtime.SetFinalizer(p, nil) + return nil +} + +func findProcess(pid int) (p *Process, err error) { + // NOOP for Plan 9. + return newProcess(pid, 0), nil +} + +// ProcessState stores information about a process, as reported by Wait. +type ProcessState struct { + pid int // The process's id. + status *syscall.Waitmsg // System-dependent status info. +} + +// Pid returns the process id of the exited process. 
+func (p *ProcessState) Pid() int { + return p.pid +} + +func (p *ProcessState) exited() bool { + return p.status.Exited() +} + +func (p *ProcessState) success() bool { + return p.status.ExitStatus() == 0 +} + +func (p *ProcessState) sys() any { + return p.status +} + +func (p *ProcessState) sysUsage() any { + return p.status +} + +func (p *ProcessState) userTime() time.Duration { + return time.Duration(p.status.Time[0]) * time.Millisecond +} + +func (p *ProcessState) systemTime() time.Duration { + return time.Duration(p.status.Time[1]) * time.Millisecond +} + +func (p *ProcessState) String() string { + if p == nil { + return "" + } + return "exit status: " + p.status.Msg +} + +// ExitCode returns the exit code of the exited process, or -1 +// if the process hasn't exited or was terminated by a signal. +func (p *ProcessState) ExitCode() int { + // return -1 if the process hasn't started. + if p == nil { + return -1 + } + return p.status.ExitStatus() +} diff --git a/platform/dbops/binaries/go/go/src/os/exec_posix.go b/platform/dbops/binaries/go/go/src/os/exec_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..4f9ea08cde51d374b57f7ed63f5f07182f2a6a2f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/exec_posix.go @@ -0,0 +1,136 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || (js && wasm) || wasip1 || windows + +package os + +import ( + "internal/itoa" + "internal/syscall/execenv" + "runtime" + "syscall" +) + +// The only signal values guaranteed to be present in the os package on all +// systems are os.Interrupt (send the process an interrupt) and os.Kill (force +// the process to exit). On Windows, sending os.Interrupt to a process with +// os.Process.Signal is not implemented; it will return an error instead of +// sending a signal. +var ( + Interrupt Signal = syscall.SIGINT + Kill Signal = syscall.SIGKILL +) + +func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err error) { + // If there is no SysProcAttr (ie. no Chroot or changed + // UID/GID), double-check existence of the directory we want + // to chdir into. We can make the error clearer this way. + if attr != nil && attr.Sys == nil && attr.Dir != "" { + if _, err := Stat(attr.Dir); err != nil { + pe := err.(*PathError) + pe.Op = "chdir" + return nil, pe + } + } + + sysattr := &syscall.ProcAttr{ + Dir: attr.Dir, + Env: attr.Env, + Sys: attr.Sys, + } + if sysattr.Env == nil { + sysattr.Env, err = execenv.Default(sysattr.Sys) + if err != nil { + return nil, err + } + } + sysattr.Files = make([]uintptr, 0, len(attr.Files)) + for _, f := range attr.Files { + sysattr.Files = append(sysattr.Files, f.Fd()) + } + + pid, h, e := syscall.StartProcess(name, argv, sysattr) + + // Make sure we don't run the finalizers of attr.Files. + runtime.KeepAlive(attr) + + if e != nil { + return nil, &PathError{Op: "fork/exec", Path: name, Err: e} + } + + return newProcess(pid, h), nil +} + +func (p *Process) kill() error { + return p.Signal(Kill) +} + +// ProcessState stores information about a process, as reported by Wait. +type ProcessState struct { + pid int // The process's id. + status syscall.WaitStatus // System-dependent status info. + rusage *syscall.Rusage +} + +// Pid returns the process id of the exited process. 
+func (p *ProcessState) Pid() int { + return p.pid +} + +func (p *ProcessState) exited() bool { + return p.status.Exited() +} + +func (p *ProcessState) success() bool { + return p.status.ExitStatus() == 0 +} + +func (p *ProcessState) sys() any { + return p.status +} + +func (p *ProcessState) sysUsage() any { + return p.rusage +} + +func (p *ProcessState) String() string { + if p == nil { + return "" + } + status := p.Sys().(syscall.WaitStatus) + res := "" + switch { + case status.Exited(): + code := status.ExitStatus() + if runtime.GOOS == "windows" && uint(code) >= 1<<16 { // windows uses large hex numbers + res = "exit status " + itoa.Uitox(uint(code)) + } else { // unix systems use small decimal integers + res = "exit status " + itoa.Itoa(code) // unix + } + case status.Signaled(): + res = "signal: " + status.Signal().String() + case status.Stopped(): + res = "stop signal: " + status.StopSignal().String() + if status.StopSignal() == syscall.SIGTRAP && status.TrapCause() != 0 { + res += " (trap " + itoa.Itoa(status.TrapCause()) + ")" + } + case status.Continued(): + res = "continued" + } + if status.CoreDump() { + res += " (core dumped)" + } + return res +} + +// ExitCode returns the exit code of the exited process, or -1 +// if the process hasn't exited or was terminated by a signal. +func (p *ProcessState) ExitCode() int { + // return -1 if the process hasn't started. + if p == nil { + return -1 + } + return p.status.ExitStatus() +} diff --git a/platform/dbops/binaries/go/go/src/os/exec_unix.go b/platform/dbops/binaries/go/go/src/os/exec_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..36b320df1894d6481ec5bfb0c6b984d61af7cf1e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/exec_unix.go @@ -0,0 +1,104 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || (js && wasm) || wasip1 + +package os + +import ( + "errors" + "runtime" + "syscall" + "time" +) + +func (p *Process) wait() (ps *ProcessState, err error) { + if p.Pid == -1 { + return nil, syscall.EINVAL + } + + // If we can block until Wait4 will succeed immediately, do so. + ready, err := p.blockUntilWaitable() + if err != nil { + return nil, err + } + if ready { + // Mark the process done now, before the call to Wait4, + // so that Process.signal will not send a signal. + p.setDone() + // Acquire a write lock on sigMu to wait for any + // active call to the signal method to complete. + p.sigMu.Lock() + p.sigMu.Unlock() + } + + var ( + status syscall.WaitStatus + rusage syscall.Rusage + pid1 int + e error + ) + for { + pid1, e = syscall.Wait4(p.Pid, &status, 0, &rusage) + if e != syscall.EINTR { + break + } + } + if e != nil { + return nil, NewSyscallError("wait", e) + } + p.setDone() + ps = &ProcessState{ + pid: pid1, + status: status, + rusage: &rusage, + } + return ps, nil +} + +func (p *Process) signal(sig Signal) error { + if p.Pid == -1 { + return errors.New("os: process already released") + } + if p.Pid == 0 { + return errors.New("os: process not initialized") + } + p.sigMu.RLock() + defer p.sigMu.RUnlock() + if p.done() { + return ErrProcessDone + } + s, ok := sig.(syscall.Signal) + if !ok { + return errors.New("os: unsupported signal type") + } + if e := syscall.Kill(p.Pid, s); e != nil { + if e == syscall.ESRCH { + return ErrProcessDone + } + return e + } + return nil +} + +func (p *Process) release() error { + // NOOP for unix. 
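+	// (Editor's note: Unix keeps no OS handle for the child, so there is
+	// nothing to close here; invalidating the Pid is all release does.)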
+ p.Pid = -1 + // no need for a finalizer anymore + runtime.SetFinalizer(p, nil) + return nil +} + +func findProcess(pid int) (p *Process, err error) { + // NOOP for unix. + return newProcess(pid, 0), nil +} + +func (p *ProcessState) userTime() time.Duration { + return time.Duration(p.rusage.Utime.Nano()) * time.Nanosecond +} + +func (p *ProcessState) systemTime() time.Duration { + return time.Duration(p.rusage.Stime.Nano()) * time.Nanosecond +} diff --git a/platform/dbops/binaries/go/go/src/os/exec_unix_test.go b/platform/dbops/binaries/go/go/src/os/exec_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..88e1b63a99e14362bb2320dacbc9826ed3d70a9f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/exec_unix_test.go @@ -0,0 +1,45 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package os_test + +import ( + "internal/testenv" + . "os" + "syscall" + "testing" +) + +func TestErrProcessDone(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + p, err := StartProcess(testenv.GoToolPath(t), []string{"go"}, &ProcAttr{}) + if err != nil { + t.Fatalf("starting test process: %v", err) + } + p.Wait() + if got := p.Signal(Kill); got != ErrProcessDone { + t.Errorf("got %v want %v", got, ErrProcessDone) + } +} + +func TestUNIXProcessAlive(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + p, err := StartProcess(testenv.GoToolPath(t), []string{"sleep", "1"}, &ProcAttr{}) + if err != nil { + t.Skipf("starting test process: %v", err) + } + defer p.Kill() + + proc, _ := FindProcess(p.Pid) + err = proc.Signal(syscall.Signal(0)) + if err != nil { + t.Errorf("OS reported error for running process: %v", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/os/exec_windows.go b/platform/dbops/binaries/go/go/src/os/exec_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..061a12b10f3d9f014bb0f05d0bba90a1ac27735d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/exec_windows.go @@ -0,0 +1,175 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package os + +import ( + "errors" + "internal/syscall/windows" + "runtime" + "sync/atomic" + "syscall" + "time" +) + +func (p *Process) wait() (ps *ProcessState, err error) { + handle := atomic.LoadUintptr(&p.handle) + s, e := syscall.WaitForSingleObject(syscall.Handle(handle), syscall.INFINITE) + switch s { + case syscall.WAIT_OBJECT_0: + break + case syscall.WAIT_FAILED: + return nil, NewSyscallError("WaitForSingleObject", e) + default: + return nil, errors.New("os: unexpected result from WaitForSingleObject") + } + var ec uint32 + e = syscall.GetExitCodeProcess(syscall.Handle(handle), &ec) + if e != nil { + return nil, NewSyscallError("GetExitCodeProcess", e) + } + var u syscall.Rusage + e = syscall.GetProcessTimes(syscall.Handle(handle), &u.CreationTime, &u.ExitTime, &u.KernelTime, &u.UserTime) + if e != nil { + return nil, NewSyscallError("GetProcessTimes", e) + } + p.setDone() + defer p.Release() + return &ProcessState{p.Pid, syscall.WaitStatus{ExitCode: ec}, &u}, nil +} + +func (p *Process) signal(sig Signal) error { + handle := atomic.LoadUintptr(&p.handle) + if handle == uintptr(syscall.InvalidHandle) { + return syscall.EINVAL + } + if p.done() { + return ErrProcessDone + } + if sig == Kill { + var terminationHandle syscall.Handle + e := syscall.DuplicateHandle(^syscall.Handle(0), syscall.Handle(handle), ^syscall.Handle(0), &terminationHandle, syscall.PROCESS_TERMINATE, false, 0) + if e != nil { + return NewSyscallError("DuplicateHandle", e) + } + runtime.KeepAlive(p) + defer syscall.CloseHandle(terminationHandle) + e = syscall.TerminateProcess(syscall.Handle(terminationHandle), 1) + return NewSyscallError("TerminateProcess", e) + } + // TODO(rsc): Handle Interrupt too? + return syscall.Errno(syscall.EWINDOWS) +} + +func (p *Process) release() error { + handle := atomic.SwapUintptr(&p.handle, uintptr(syscall.InvalidHandle)) + if handle == uintptr(syscall.InvalidHandle) { + return syscall.EINVAL + } + e := syscall.CloseHandle(syscall.Handle(handle)) + if e != nil { + return NewSyscallError("CloseHandle", e) + } + // no need for a finalizer anymore + runtime.SetFinalizer(p, nil) + return nil +} + +func findProcess(pid int) (p *Process, err error) { + const da = syscall.STANDARD_RIGHTS_READ | + syscall.PROCESS_QUERY_INFORMATION | syscall.SYNCHRONIZE + h, e := syscall.OpenProcess(da, false, uint32(pid)) + if e != nil { + return nil, NewSyscallError("OpenProcess", e) + } + return newProcess(pid, uintptr(h)), nil +} + +func init() { + cmd := windows.UTF16PtrToString(syscall.GetCommandLine()) + if len(cmd) == 0 { + arg0, _ := Executable() + Args = []string{arg0} + } else { + Args = commandLineToArgv(cmd) + } +} + +// appendBSBytes appends n '\\' bytes to b and returns the resulting slice. +func appendBSBytes(b []byte, n int) []byte { + for ; n > 0; n-- { + b = append(b, '\\') + } + return b +} + +// readNextArg splits command line string cmd into next +// argument and command line remainder. 
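+//
+// For example (editor's sketch of the quoting rules, not part of the
+// original comment):
+//
+//	arg, rest := readNextArg(`a "b c" d`) // arg == `a`, rest == `"b c" d`
+//	arg, rest = readNextArg(rest)         // arg == `b c`, rest == `d`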
+func readNextArg(cmd string) (arg []byte, rest string) { + var b []byte + var inquote bool + var nslash int + for ; len(cmd) > 0; cmd = cmd[1:] { + c := cmd[0] + switch c { + case ' ', '\t': + if !inquote { + return appendBSBytes(b, nslash), cmd[1:] + } + case '"': + b = appendBSBytes(b, nslash/2) + if nslash%2 == 0 { + // use "Prior to 2008" rule from + // http://daviddeley.com/autohotkey/parameters/parameters.htm + // section 5.2 to deal with double double quotes + if inquote && len(cmd) > 1 && cmd[1] == '"' { + b = append(b, c) + cmd = cmd[1:] + } + inquote = !inquote + } else { + b = append(b, c) + } + nslash = 0 + continue + case '\\': + nslash++ + continue + } + b = appendBSBytes(b, nslash) + nslash = 0 + b = append(b, c) + } + return appendBSBytes(b, nslash), "" +} + +// commandLineToArgv splits a command line into individual argument +// strings, following the Windows conventions documented +// at http://daviddeley.com/autohotkey/parameters/parameters.htm#WINARGV +func commandLineToArgv(cmd string) []string { + var args []string + for len(cmd) > 0 { + if cmd[0] == ' ' || cmd[0] == '\t' { + cmd = cmd[1:] + continue + } + var arg []byte + arg, cmd = readNextArg(cmd) + args = append(args, string(arg)) + } + return args +} + +func ftToDuration(ft *syscall.Filetime) time.Duration { + n := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) // in 100-nanosecond intervals + return time.Duration(n*100) * time.Nanosecond +} + +func (p *ProcessState) userTime() time.Duration { + return ftToDuration(&p.rusage.UserTime) +} + +func (p *ProcessState) systemTime() time.Duration { + return ftToDuration(&p.rusage.KernelTime) +} diff --git a/platform/dbops/binaries/go/go/src/os/exec_windows_test.go b/platform/dbops/binaries/go/go/src/os/exec_windows_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f8ed4cdf1c9266efbf6c4d18b4c13ca0e6803a9e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/exec_windows_test.go @@ -0,0 +1,83 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package os_test + +import ( + "internal/testenv" + "io" + . "os" + "path/filepath" + "sync" + "testing" +) + +func TestRemoveAllWithExecutedProcess(t *testing.T) { + // Regression test for golang.org/issue/25965. + if testing.Short() { + t.Skip("slow test; skipping") + } + testenv.MustHaveExec(t) + + name, err := Executable() + if err != nil { + t.Fatal(err) + } + r, err := Open(name) + if err != nil { + t.Fatal(err) + } + defer r.Close() + const n = 100 + var execs [n]string + // First create n executables. + for i := 0; i < n; i++ { + // Rewind r. + if _, err := r.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + name := filepath.Join(t.TempDir(), "test.exe") + execs[i] = name + w, err := Create(name) + if err != nil { + t.Fatal(err) + } + if _, err = io.Copy(w, r); err != nil { + w.Close() + t.Fatal(err) + } + if err := w.Sync(); err != nil { + w.Close() + t.Fatal(err) + } + if err = w.Close(); err != nil { + t.Fatal(err) + } + } + // Then run each executable and remove its directory. + // Run each executable in a separate goroutine to add some load + // and increase the chance of triggering the bug. + var wg sync.WaitGroup + wg.Add(n) + for i := 0; i < n; i++ { + go func(i int) { + defer wg.Done() + name := execs[i] + dir := filepath.Dir(name) + // Run test.exe without executing any test, just to make it do something. 
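+			// (Editor's note: "-test.run=^$" matches no test names, so the
+			// binary starts, runs zero tests, and exits immediately.)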
+			cmd := testenv.Command(t, name, "-test.run=^$")
+			if err := cmd.Run(); err != nil {
+				t.Errorf("exec failed: %v", err)
+			}
+			// Remove dir and check that it doesn't return `ERROR_ACCESS_DENIED`.
+			err = RemoveAll(dir)
+			if err != nil {
+				t.Errorf("RemoveAll failed: %v", err)
+			}
+		}(i)
+	}
+	wg.Wait()
+}
diff --git a/platform/dbops/binaries/go/go/src/os/executable.go b/platform/dbops/binaries/go/go/src/os/executable.go
new file mode 100644
index 0000000000000000000000000000000000000000..cc3134af1c1ebe9e0dd4ad1c367293b4666a4feb
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/os/executable.go
@@ -0,0 +1,20 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os
+
+// Executable returns the path name for the executable that started
+// the current process. There is no guarantee that the path is still
+// pointing to the correct executable. If a symlink was used to start
+// the process, depending on the operating system, the result might
+// be the symlink or the path it pointed to. If a stable result is
+// needed, path/filepath.EvalSymlinks might help.
+//
+// Executable returns an absolute path unless an error occurred.
+//
+// The main use case is finding resources located relative to an
+// executable.
+func Executable() (string, error) {
+	return executable()
+}
diff --git a/platform/dbops/binaries/go/go/src/os/executable_darwin.go b/platform/dbops/binaries/go/go/src/os/executable_darwin.go
new file mode 100644
index 0000000000000000000000000000000000000000..dae9f4ee18ecb96aa32712d1375a8dd2cc8fb6d0
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/os/executable_darwin.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os
+
+import "errors"
+
+var executablePath string // set by ../runtime/os_darwin.go
+
+var initCwd, initCwdErr = Getwd()
+
+func executable() (string, error) {
+	ep := executablePath
+	if len(ep) == 0 {
+		return ep, errors.New("cannot find executable path")
+	}
+	if ep[0] != '/' {
+		if initCwdErr != nil {
+			return ep, initCwdErr
+		}
+		if len(ep) > 2 && ep[0:2] == "./" {
+			// skip "./"
+			ep = ep[2:]
+		}
+		ep = initCwd + "/" + ep
+	}
+	return ep, nil
+}
diff --git a/platform/dbops/binaries/go/go/src/os/executable_dragonfly.go b/platform/dbops/binaries/go/go/src/os/executable_dragonfly.go
new file mode 100644
index 0000000000000000000000000000000000000000..19c2ae890f9a53cf1b5ece85aec677c05f1921eb
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/os/executable_dragonfly.go
@@ -0,0 +1,12 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os
+
+// From DragonFly's <sys/sysctl.h>
+const (
+	_CTL_KERN           = 1
+	_KERN_PROC          = 14
+	_KERN_PROC_PATHNAME = 9
+)
diff --git a/platform/dbops/binaries/go/go/src/os/executable_freebsd.go b/platform/dbops/binaries/go/go/src/os/executable_freebsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..95f1a93cb9512c738dc5f40bd5878d746a5546f6
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/os/executable_freebsd.go
@@ -0,0 +1,12 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os
+
+// From FreeBSD's <sys/sysctl.h>
+const (
+	_CTL_KERN           = 1
+	_KERN_PROC          = 14
+	_KERN_PROC_PATHNAME = 12
+)
diff --git a/platform/dbops/binaries/go/go/src/os/executable_path.go b/platform/dbops/binaries/go/go/src/os/executable_path.go
new file mode 100644
index 0000000000000000000000000000000000000000..d6161bcb08bbecae79f716de1b53acc29ebe3ff2
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/os/executable_path.go
@@ -0,0 +1,104 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || openbsd
+
+package os
+
+// We query the working directory at init, to use it later to search for the
+// executable file.
+// errWd will be checked later, if we need to use initWd.
+var initWd, errWd = Getwd()
+
+func executable() (string, error) {
+	var exePath string
+	if len(Args) == 0 || Args[0] == "" {
+		return "", ErrNotExist
+	}
+	if IsPathSeparator(Args[0][0]) {
+		// Args[0] is an absolute path, so it is the executable.
+		// Note that we only need to worry about Unix paths here.
+		exePath = Args[0]
+	} else {
+		for i := 1; i < len(Args[0]); i++ {
+			if IsPathSeparator(Args[0][i]) {
+				// Args[0] is a relative path: prepend the
+				// initial working directory.
+				if errWd != nil {
+					return "", errWd
+				}
+				exePath = initWd + string(PathSeparator) + Args[0]
+				break
+			}
+		}
+	}
+	if exePath != "" {
+		if err := isExecutable(exePath); err != nil {
+			return "", err
+		}
+		return exePath, nil
+	}
+	// Search for executable in $PATH.
+	for _, dir := range splitPathList(Getenv("PATH")) {
+		if len(dir) == 0 {
+			dir = "."
+		}
+		if !IsPathSeparator(dir[0]) {
+			if errWd != nil {
+				return "", errWd
+			}
+			dir = initWd + string(PathSeparator) + dir
+		}
+		exePath = dir + string(PathSeparator) + Args[0]
+		switch isExecutable(exePath) {
+		case nil:
+			return exePath, nil
+		case ErrPermission:
+			return "", ErrPermission
+		}
+	}
+	return "", ErrNotExist
+}
+
+// isExecutable returns an error if a given file is not an executable.
+func isExecutable(path string) error {
+	stat, err := Stat(path)
+	if err != nil {
+		return err
+	}
+	mode := stat.Mode()
+	if !mode.IsRegular() {
+		return ErrPermission
+	}
+	if (mode & 0111) == 0 {
+		return ErrPermission
+	}
+	return nil
+}
+
+// splitPathList splits a path list.
+// This is based on genSplit from strings/strings.go
+func splitPathList(pathList string) []string {
+	if pathList == "" {
+		return nil
+	}
+	n := 1
+	for i := 0; i < len(pathList); i++ {
+		if pathList[i] == PathListSeparator {
+			n++
+		}
+	}
+	start := 0
+	a := make([]string, n)
+	na := 0
+	for i := 0; i+1 <= len(pathList) && na+1 < n; i++ {
+		if pathList[i] == PathListSeparator {
+			a[na] = pathList[start:i]
+			na++
+			start = i + 1
+		}
+	}
+	a[na] = pathList[start:]
+	return a[:na+1]
+}
diff --git a/platform/dbops/binaries/go/go/src/os/executable_plan9.go b/platform/dbops/binaries/go/go/src/os/executable_plan9.go
new file mode 100644
index 0000000000000000000000000000000000000000..8d8c83260f59ca6601be1499cedd522fd76b8fea
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/os/executable_plan9.go
@@ -0,0 +1,22 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build plan9
+
+package os
+
+import (
+	"internal/itoa"
+	"syscall"
+)
+
+func executable() (string, error) {
+	fn := "/proc/" + itoa.Itoa(Getpid()) + "/text"
+	f, err := Open(fn)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	return syscall.Fd2path(int(f.Fd()))
+}
diff --git a/platform/dbops/binaries/go/go/src/os/executable_procfs.go b/platform/dbops/binaries/go/go/src/os/executable_procfs.go
new file mode 100644
index 0000000000000000000000000000000000000000..94e674e364867494b631a5f9de3e85680716e354
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/os/executable_procfs.go
@@ -0,0 +1,37 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux || netbsd
+
+package os
+
+import (
+	"errors"
+	"runtime"
+)
+
+func executable() (string, error) {
+	var procfn string
+	switch runtime.GOOS {
+	default:
+		return "", errors.New("Executable not implemented for " + runtime.GOOS)
+	case "linux", "android":
+		procfn = "/proc/self/exe"
+	case "netbsd":
+		procfn = "/proc/curproc/exe"
+	}
+	path, err := Readlink(procfn)
+
+	// When the executable has been deleted then Readlink returns a
+	// path appended with " (deleted)".
+	return stringsTrimSuffix(path, " (deleted)"), err
+}
+
+// stringsTrimSuffix is the same as strings.TrimSuffix.
+func stringsTrimSuffix(s, suffix string) string {
+	if len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix {
+		return s[:len(s)-len(suffix)]
+	}
+	return s
+}
diff --git a/platform/dbops/binaries/go/go/src/os/executable_solaris.go b/platform/dbops/binaries/go/go/src/os/executable_solaris.go
new file mode 100644
index 0000000000000000000000000000000000000000..b145980c5656b625cd47388eaa4ff0e32d5dedf0
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/os/executable_solaris.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os
+
+import "syscall"
+
+var executablePath string // set by sysauxv in ../runtime/os3_solaris.go
+
+var initCwd, initCwdErr = Getwd()
+
+func executable() (string, error) {
+	path := executablePath
+	if len(path) == 0 {
+		var err error
+		if path, err = syscall.Getexecname(); err != nil {
+			return path, err
+		}
+	}
+	if len(path) > 0 && path[0] != '/' {
+		if initCwdErr != nil {
+			return path, initCwdErr
+		}
+		if len(path) > 2 && path[0:2] == "./" {
+			// skip "./"
+			path = path[2:]
+		}
+		return initCwd + "/" + path, nil
+	}
+	return path, nil
+}
diff --git a/platform/dbops/binaries/go/go/src/os/executable_sysctl.go b/platform/dbops/binaries/go/go/src/os/executable_sysctl.go
new file mode 100644
index 0000000000000000000000000000000000000000..3c2aeacf7da53f82e43ed698596ac2c4018adf78
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/os/executable_sysctl.go
@@ -0,0 +1,35 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd || dragonfly
+
+package os
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func executable() (string, error) {
+	mib := [4]int32{_CTL_KERN, _KERN_PROC, _KERN_PROC_PATHNAME, -1}
+
+	n := uintptr(0)
+	// get length
+	_, _, err := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
+	if err != 0 {
+		return "", err
+	}
+	if n == 0 { // shouldn't happen
+		return "", nil
+	}
+	buf := make([]byte, n)
+	_, _, err = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
+	if err != 0 {
+		return "", err
+	}
+	if n == 0 { // shouldn't happen
+		return "", nil
+	}
+	return string(buf[:n-1]), nil
+}
diff --git a/platform/dbops/binaries/go/go/src/os/executable_test.go b/platform/dbops/binaries/go/go/src/os/executable_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..98b72d7d5e42d032b799618c797276aec53f1011
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/os/executable_test.go
@@ -0,0 +1,155 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os_test
+
+import (
+	"fmt"
+	"internal/testenv"
+	"os"
+	"path/filepath"
+	"runtime"
+	"testing"
+)
+
+const executable_EnvVar = "OSTEST_OUTPUT_EXECPATH"
+
+func TestExecutable(t *testing.T) {
+	testenv.MustHaveExec(t)
+	t.Parallel()
+
+	ep, err := os.Executable()
+	if err != nil {
+		t.Fatalf("Executable failed: %v", err)
+	}
+	// we want fn to be of the form "dir/prog"
+	dir := filepath.Dir(filepath.Dir(ep))
+	fn, err := filepath.Rel(dir, ep)
+	if err != nil {
+		t.Fatalf("filepath.Rel: %v", err)
+	}
+
+	cmd := testenv.Command(t, fn, "-test.run=^$")
+	// make child start with a relative program path
+	cmd.Dir = dir
+	cmd.Path = fn
+	if runtime.GOOS == "openbsd" || runtime.GOOS == "aix" {
+		// OpenBSD and AIX rely on argv[0]
+	} else {
+		// forge argv[0] for the child, so that we can verify that the real
+		// path of the executable is found without being influenced by argv[0].
+ cmd.Args[0] = "-" + } + cmd.Env = append(cmd.Environ(), fmt.Sprintf("%s=1", executable_EnvVar)) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("exec(self) failed: %v", err) + } + outs := string(out) + if !filepath.IsAbs(outs) { + t.Fatalf("Child returned %q, want an absolute path", out) + } + if !sameFile(outs, ep) { + t.Fatalf("Child returned %q, not the same file as %q", out, ep) + } +} + +func sameFile(fn1, fn2 string) bool { + fi1, err := os.Stat(fn1) + if err != nil { + return false + } + fi2, err := os.Stat(fn2) + if err != nil { + return false + } + return os.SameFile(fi1, fi2) +} + +func init() { + if e := os.Getenv(executable_EnvVar); e != "" { + // first chdir to another path + dir := "/" + if runtime.GOOS == "windows" { + cwd, err := os.Getwd() + if err != nil { + panic(err) + } + dir = filepath.VolumeName(cwd) + } + os.Chdir(dir) + if ep, err := os.Executable(); err != nil { + fmt.Fprint(os.Stderr, "ERROR: ", err) + } else { + fmt.Fprint(os.Stderr, ep) + } + os.Exit(0) + } +} + +func TestExecutableDeleted(t *testing.T) { + testenv.MustHaveGoBuild(t) + switch runtime.GOOS { + case "windows", "plan9": + t.Skipf("%v does not support deleting running binary", runtime.GOOS) + case "openbsd", "freebsd", "aix": + t.Skipf("%v does not support reading deleted binary name", runtime.GOOS) + } + t.Parallel() + + dir := t.TempDir() + + src := filepath.Join(dir, "testdel.go") + exe := filepath.Join(dir, "testdel.exe") + + err := os.WriteFile(src, []byte(testExecutableDeletion), 0666) + if err != nil { + t.Fatal(err) + } + + out, err := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", exe, src).CombinedOutput() + t.Logf("build output:\n%s", out) + if err != nil { + t.Fatal(err) + } + + out, err = testenv.Command(t, exe).CombinedOutput() + t.Logf("exec output:\n%s", out) + if err != nil { + t.Fatal(err) + } +} + +const testExecutableDeletion = `package main + +import ( + "fmt" + "os" +) + +func main() { + before, err := os.Executable() + if err != nil { + fmt.Fprintf(os.Stderr, "failed to read executable name before deletion: %v\n", err) + os.Exit(1) + } + + err = os.Remove(before) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to remove executable: %v\n", err) + os.Exit(1) + } + + after, err := os.Executable() + if err != nil { + fmt.Fprintf(os.Stderr, "failed to read executable name after deletion: %v\n", err) + os.Exit(1) + } + + if before != after { + fmt.Fprintf(os.Stderr, "before and after do not match: %v != %v\n", before, after) + os.Exit(1) + } +} +` diff --git a/platform/dbops/binaries/go/go/src/os/executable_wasm.go b/platform/dbops/binaries/go/go/src/os/executable_wasm.go new file mode 100644 index 0000000000000000000000000000000000000000..a88360c16fb9196e31b7a53bed48fbee80a1212b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/executable_wasm.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build wasm + +package os + +import ( + "errors" + "runtime" +) + +func executable() (string, error) { + return "", errors.New("Executable not implemented for " + runtime.GOOS) +} diff --git a/platform/dbops/binaries/go/go/src/os/executable_windows.go b/platform/dbops/binaries/go/go/src/os/executable_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..fc5cf8600533d125ca991198aeacbdb3cd3b2f37 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/executable_windows.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "internal/syscall/windows" + "syscall" +) + +func getModuleFileName(handle syscall.Handle) (string, error) { + n := uint32(1024) + var buf []uint16 + for { + buf = make([]uint16, n) + r, err := windows.GetModuleFileName(handle, &buf[0], n) + if err != nil { + return "", err + } + if r < n { + break + } + // r == n means n not big enough + n += 1024 + } + return syscall.UTF16ToString(buf), nil +} + +func executable() (string, error) { + return getModuleFileName(0) +} diff --git a/platform/dbops/binaries/go/go/src/os/export_linux_test.go b/platform/dbops/binaries/go/go/src/os/export_linux_test.go new file mode 100644 index 0000000000000000000000000000000000000000..942b48a17d802d371fc34e7e6dd0f97ce8988092 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/export_linux_test.go @@ -0,0 +1,12 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +var ( + PollCopyFileRangeP = &pollCopyFileRange + PollSpliceFile = &pollSplice + PollSendFile = &pollSendFile + GetPollFDAndNetwork = getPollFDAndNetwork +) diff --git a/platform/dbops/binaries/go/go/src/os/export_test.go b/platform/dbops/binaries/go/go/src/os/export_test.go new file mode 100644 index 0000000000000000000000000000000000000000..dc7caae267d7041134c6a700885eb117c567de7c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/export_test.go @@ -0,0 +1,17 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +// Export for testing. + +var Atime = atime +var LstatP = &lstat +var ErrWriteAtInAppendMode = errWriteAtInAppendMode +var TestingForceReadDirLstat = &testingForceReadDirLstat +var ErrPatternHasSeparator = errPatternHasSeparator + +func init() { + checkWrapErr = true +} diff --git a/platform/dbops/binaries/go/go/src/os/export_unix_test.go b/platform/dbops/binaries/go/go/src/os/export_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b8dcca0f8f862f8815221ef849062cd9ffdb72cf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/export_unix_test.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build unix || (js && wasm) || wasip1 + +package os + +var SplitPath = splitPath diff --git a/platform/dbops/binaries/go/go/src/os/export_windows_test.go b/platform/dbops/binaries/go/go/src/os/export_windows_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6e1188816bf1b274522dfb4504148c5c154fe274 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/export_windows_test.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +// Export for testing. + +var ( + FixLongPath = fixLongPath + CanUseLongPaths = canUseLongPaths + NewConsoleFile = newConsoleFile + CommandLineToArgv = commandLineToArgv + AllowReadDirFileID = &allowReadDirFileID +) diff --git a/platform/dbops/binaries/go/go/src/os/fifo_test.go b/platform/dbops/binaries/go/go/src/os/fifo_test.go new file mode 100644 index 0000000000000000000000000000000000000000..df4b2ee757c6a58ef6f146b0ae37ee9bacf944a1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/fifo_test.go @@ -0,0 +1,207 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || (linux && !android) || netbsd || openbsd + +package os_test + +import ( + "errors" + "internal/syscall/unix" + "internal/testenv" + "io/fs" + "os" + "path/filepath" + "strconv" + "sync" + "syscall" + "testing" +) + +func TestFifoEOF(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + fifoName := filepath.Join(dir, "fifo") + if err := syscall.Mkfifo(fifoName, 0600); err != nil { + t.Fatal(err) + } + + // Per https://pubs.opengroup.org/onlinepubs/9699919799/functions/open.html#tag_16_357_03: + // + // - “If O_NONBLOCK is clear, an open() for reading-only shall block the + // calling thread until a thread opens the file for writing. An open() for + // writing-only shall block the calling thread until a thread opens the file + // for reading.” + // + // In order to unblock both open calls, we open the two ends of the FIFO + // simultaneously in separate goroutines. + + rc := make(chan *os.File, 1) + go func() { + r, err := os.Open(fifoName) + if err != nil { + t.Error(err) + } + rc <- r + }() + + w, err := os.OpenFile(fifoName, os.O_WRONLY, 0) + if err != nil { + t.Error(err) + } + + r := <-rc + if t.Failed() { + if r != nil { + r.Close() + } + if w != nil { + w.Close() + } + return + } + + testPipeEOF(t, r, w) +} + +// Issue #59545. +func TestNonPollable(t *testing.T) { + if testing.Short() { + t.Skip("skipping test with tight loops in short mode") + } + + // We need to open a non-pollable file. + // This is almost certainly Linux-specific, + // but if other systems have non-pollable files, + // we can add them here. + const nonPollable = "/dev/net/tun" + + f, err := os.OpenFile(nonPollable, os.O_RDWR, 0) + if err != nil { + if errors.Is(err, fs.ErrNotExist) || errors.Is(err, fs.ErrPermission) || testenv.SyscallIsNotSupported(err) { + t.Skipf("can't open %q: %v", nonPollable, err) + } + t.Fatal(err) + } + f.Close() + + // On a Linux laptop, before the problem was fixed, + // this test failed about 50% of the time with this + // number of iterations. + // It takes about 1/2 second when it passes. 
+ const attempts = 20000 + + start := make(chan bool) + var wg sync.WaitGroup + wg.Add(1) + defer wg.Wait() + go func() { + defer wg.Done() + close(start) + for i := 0; i < attempts; i++ { + f, err := os.OpenFile(nonPollable, os.O_RDWR, 0) + if err != nil { + t.Error(err) + return + } + if err := f.Close(); err != nil { + t.Error(err) + return + } + } + }() + + dir := t.TempDir() + <-start + for i := 0; i < attempts; i++ { + name := filepath.Join(dir, strconv.Itoa(i)) + if err := syscall.Mkfifo(name, 0o600); err != nil { + t.Fatal(err) + } + // The problem only occurs if we use O_NONBLOCK here. + rd, err := os.OpenFile(name, os.O_RDONLY|syscall.O_NONBLOCK, 0o600) + if err != nil { + t.Fatal(err) + } + wr, err := os.OpenFile(name, os.O_WRONLY|syscall.O_NONBLOCK, 0o600) + if err != nil { + t.Fatal(err) + } + const msg = "message" + if _, err := wr.Write([]byte(msg)); err != nil { + if errors.Is(err, syscall.EAGAIN) || errors.Is(err, syscall.ENOBUFS) { + t.Logf("ignoring write error %v", err) + rd.Close() + wr.Close() + continue + } + t.Fatalf("write to fifo %d failed: %v", i, err) + } + if _, err := rd.Read(make([]byte, len(msg))); err != nil { + if errors.Is(err, syscall.EAGAIN) || errors.Is(err, syscall.ENOBUFS) { + t.Logf("ignoring read error %v", err) + rd.Close() + wr.Close() + continue + } + t.Fatalf("read from fifo %d failed; %v", i, err) + } + if err := rd.Close(); err != nil { + t.Fatal(err) + } + if err := wr.Close(); err != nil { + t.Fatal(err) + } + } +} + +// Issue 60211. +func TestOpenFileNonBlocking(t *testing.T) { + exe, err := os.Executable() + if err != nil { + t.Skipf("can't find executable: %v", err) + } + f, err := os.OpenFile(exe, os.O_RDONLY|syscall.O_NONBLOCK, 0666) + if err != nil { + t.Fatal(err) + } + defer f.Close() + nonblock, err := unix.IsNonblock(int(f.Fd())) + if err != nil { + t.Fatal(err) + } + if !nonblock { + t.Errorf("file opened with O_NONBLOCK but in blocking mode") + } +} + +func TestNewFileNonBlocking(t *testing.T) { + var p [2]int + if err := syscall.Pipe(p[:]); err != nil { + t.Fatal(err) + } + if err := syscall.SetNonblock(p[0], true); err != nil { + t.Fatal(err) + } + f := os.NewFile(uintptr(p[0]), "pipe") + nonblock, err := unix.IsNonblock(p[0]) + if err != nil { + t.Fatal(err) + } + if !nonblock { + t.Error("pipe blocking after NewFile") + } + fd := f.Fd() + if fd != uintptr(p[0]) { + t.Errorf("Fd returned %d, want %d", fd, p[0]) + } + nonblock, err = unix.IsNonblock(p[0]) + if err != nil { + t.Fatal(err) + } + if !nonblock { + t.Error("pipe blocking after Fd") + } +} diff --git a/platform/dbops/binaries/go/go/src/os/file.go b/platform/dbops/binaries/go/go/src/os/file.go new file mode 100644 index 0000000000000000000000000000000000000000..090ffba4dc738c2b482053befbf896485df0acc3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/file.go @@ -0,0 +1,824 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package os provides a platform-independent interface to operating system +// functionality. The design is Unix-like, although the error handling is +// Go-like; failing calls return values of type error rather than error numbers. +// Often, more information is available within the error. For example, +// if a call that takes a file name fails, such as Open or Stat, the error +// will include the failing file name when printed and will be of type +// *PathError, which may be unpacked for more information. 
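+//
+// For example (editor's sketch; "missing" is a placeholder name):
+//
+//	if _, err := os.Open("missing"); err != nil {
+//		var pe *os.PathError
+//		if errors.As(err, &pe) {
+//			fmt.Println(pe.Op, pe.Path, pe.Err)
+//		}
+//	}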
+// +// The os interface is intended to be uniform across all operating systems. +// Features not generally available appear in the system-specific package syscall. +// +// Here is a simple example, opening a file and reading some of it. +// +// file, err := os.Open("file.go") // For read access. +// if err != nil { +// log.Fatal(err) +// } +// +// If the open fails, the error string will be self-explanatory, like +// +// open file.go: no such file or directory +// +// The file's data can then be read into a slice of bytes. Read and +// Write take their byte counts from the length of the argument slice. +// +// data := make([]byte, 100) +// count, err := file.Read(data) +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("read %d bytes: %q\n", count, data[:count]) +// +// Note: The maximum number of concurrent operations on a File may be limited by +// the OS or the system. The number should be high, but exceeding it may degrade +// performance or cause other issues. +package os + +import ( + "errors" + "internal/poll" + "internal/safefilepath" + "internal/testlog" + "io" + "io/fs" + "runtime" + "syscall" + "time" + "unsafe" +) + +// Name returns the name of the file as presented to Open. +func (f *File) Name() string { return f.name } + +// Stdin, Stdout, and Stderr are open Files pointing to the standard input, +// standard output, and standard error file descriptors. +// +// Note that the Go runtime writes to standard error for panics and crashes; +// closing Stderr may cause those messages to go elsewhere, perhaps +// to a file opened later. +var ( + Stdin = NewFile(uintptr(syscall.Stdin), "/dev/stdin") + Stdout = NewFile(uintptr(syscall.Stdout), "/dev/stdout") + Stderr = NewFile(uintptr(syscall.Stderr), "/dev/stderr") +) + +// Flags to OpenFile wrapping those of the underlying system. Not all +// flags may be implemented on a given system. +const ( + // Exactly one of O_RDONLY, O_WRONLY, or O_RDWR must be specified. + O_RDONLY int = syscall.O_RDONLY // open the file read-only. + O_WRONLY int = syscall.O_WRONLY // open the file write-only. + O_RDWR int = syscall.O_RDWR // open the file read-write. + // The remaining values may be or'ed in to control behavior. + O_APPEND int = syscall.O_APPEND // append data to the file when writing. + O_CREATE int = syscall.O_CREAT // create a new file if none exists. + O_EXCL int = syscall.O_EXCL // used with O_CREATE, file must not exist. + O_SYNC int = syscall.O_SYNC // open for synchronous I/O. + O_TRUNC int = syscall.O_TRUNC // truncate regular writable file when opened. +) + +// Seek whence values. +// +// Deprecated: Use io.SeekStart, io.SeekCurrent, and io.SeekEnd. +const ( + SEEK_SET int = 0 // seek relative to the origin of the file + SEEK_CUR int = 1 // seek relative to the current offset + SEEK_END int = 2 // seek relative to the end +) + +// LinkError records an error during a link or symlink or rename +// system call and the paths that caused it. +type LinkError struct { + Op string + Old string + New string + Err error +} + +func (e *LinkError) Error() string { + return e.Op + " " + e.Old + " " + e.New + ": " + e.Err.Error() +} + +func (e *LinkError) Unwrap() error { + return e.Err +} + +// Read reads up to len(b) bytes from the File and stores them in b. +// It returns the number of bytes read and any error encountered. +// At end of file, Read returns 0, io.EOF. 
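+//
+// A typical read loop (editor's sketch; process is a placeholder):
+//
+//	buf := make([]byte, 4096)
+//	for {
+//		n, err := f.Read(buf)
+//		if n > 0 {
+//			process(buf[:n])
+//		}
+//		if err != nil {
+//			break // io.EOF means clean end of file
+//		}
+//	}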
+func (f *File) Read(b []byte) (n int, err error) { + if err := f.checkValid("read"); err != nil { + return 0, err + } + n, e := f.read(b) + return n, f.wrapErr("read", e) +} + +// ReadAt reads len(b) bytes from the File starting at byte offset off. +// It returns the number of bytes read and the error, if any. +// ReadAt always returns a non-nil error when n < len(b). +// At end of file, that error is io.EOF. +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + if err := f.checkValid("read"); err != nil { + return 0, err + } + + if off < 0 { + return 0, &PathError{Op: "readat", Path: f.name, Err: errors.New("negative offset")} + } + + for len(b) > 0 { + m, e := f.pread(b, off) + if e != nil { + err = f.wrapErr("read", e) + break + } + n += m + b = b[m:] + off += int64(m) + } + return +} + +// ReadFrom implements io.ReaderFrom. +func (f *File) ReadFrom(r io.Reader) (n int64, err error) { + if err := f.checkValid("write"); err != nil { + return 0, err + } + n, handled, e := f.readFrom(r) + if !handled { + return genericReadFrom(f, r) // without wrapping + } + return n, f.wrapErr("write", e) +} + +// noReadFrom can be embedded alongside another type to +// hide the ReadFrom method of that other type. +type noReadFrom struct{} + +// ReadFrom hides another ReadFrom method. +// It should never be called. +func (noReadFrom) ReadFrom(io.Reader) (int64, error) { + panic("can't happen") +} + +// fileWithoutReadFrom implements all the methods of *File other +// than ReadFrom. This is used to permit ReadFrom to call io.Copy +// without leading to a recursive call to ReadFrom. +type fileWithoutReadFrom struct { + noReadFrom + *File +} + +func genericReadFrom(f *File, r io.Reader) (int64, error) { + return io.Copy(fileWithoutReadFrom{File: f}, r) +} + +// Write writes len(b) bytes from b to the File. +// It returns the number of bytes written and an error, if any. +// Write returns a non-nil error when n != len(b). +func (f *File) Write(b []byte) (n int, err error) { + if err := f.checkValid("write"); err != nil { + return 0, err + } + n, e := f.write(b) + if n < 0 { + n = 0 + } + if n != len(b) { + err = io.ErrShortWrite + } + + epipecheck(f, e) + + if e != nil { + err = f.wrapErr("write", e) + } + + return n, err +} + +var errWriteAtInAppendMode = errors.New("os: invalid use of WriteAt on file opened with O_APPEND") + +// WriteAt writes len(b) bytes to the File starting at byte offset off. +// It returns the number of bytes written and an error, if any. +// WriteAt returns a non-nil error when n != len(b). +// +// If file was opened with the O_APPEND flag, WriteAt returns an error. +func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + if err := f.checkValid("write"); err != nil { + return 0, err + } + if f.appendMode { + return 0, errWriteAtInAppendMode + } + + if off < 0 { + return 0, &PathError{Op: "writeat", Path: f.name, Err: errors.New("negative offset")} + } + + for len(b) > 0 { + m, e := f.pwrite(b, off) + if e != nil { + err = f.wrapErr("write", e) + break + } + n += m + b = b[m:] + off += int64(m) + } + return +} + +// WriteTo implements io.WriterTo. +func (f *File) WriteTo(w io.Writer) (n int64, err error) { + if err := f.checkValid("read"); err != nil { + return 0, err + } + n, handled, e := f.writeTo(w) + if handled { + return n, f.wrapErr("read", e) + } + return genericWriteTo(f, w) // without wrapping +} + +// noWriteTo can be embedded alongside another type to +// hide the WriteTo method of that other type. 
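+// (Editor's note: the hiding works because fileWithoutWriteTo embeds
+// both noWriteTo and *File, so the two WriteTo methods sit at the same
+// depth; the ambiguous selector keeps WriteTo out of the wrapper's
+// method set, and io.Copy falls back to its generic path.)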
+type noWriteTo struct{} + +// WriteTo hides another WriteTo method. +// It should never be called. +func (noWriteTo) WriteTo(io.Writer) (int64, error) { + panic("can't happen") +} + +// fileWithoutWriteTo implements all the methods of *File other +// than WriteTo. This is used to permit WriteTo to call io.Copy +// without leading to a recursive call to WriteTo. +type fileWithoutWriteTo struct { + noWriteTo + *File +} + +func genericWriteTo(f *File, w io.Writer) (int64, error) { + return io.Copy(w, fileWithoutWriteTo{File: f}) +} + +// Seek sets the offset for the next Read or Write on file to offset, interpreted +// according to whence: 0 means relative to the origin of the file, 1 means +// relative to the current offset, and 2 means relative to the end. +// It returns the new offset and an error, if any. +// The behavior of Seek on a file opened with O_APPEND is not specified. +func (f *File) Seek(offset int64, whence int) (ret int64, err error) { + if err := f.checkValid("seek"); err != nil { + return 0, err + } + r, e := f.seek(offset, whence) + if e == nil && f.dirinfo != nil && r != 0 { + e = syscall.EISDIR + } + if e != nil { + return 0, f.wrapErr("seek", e) + } + return r, nil +} + +// WriteString is like Write, but writes the contents of string s rather than +// a slice of bytes. +func (f *File) WriteString(s string) (n int, err error) { + b := unsafe.Slice(unsafe.StringData(s), len(s)) + return f.Write(b) +} + +// Mkdir creates a new directory with the specified name and permission +// bits (before umask). +// If there is an error, it will be of type *PathError. +func Mkdir(name string, perm FileMode) error { + longName := fixLongPath(name) + e := ignoringEINTR(func() error { + return syscall.Mkdir(longName, syscallMode(perm)) + }) + + if e != nil { + return &PathError{Op: "mkdir", Path: name, Err: e} + } + + // mkdir(2) itself won't handle the sticky bit on *BSD and Solaris + if !supportsCreateWithStickyBit && perm&ModeSticky != 0 { + e = setStickyBit(name) + + if e != nil { + Remove(name) + return e + } + } + + return nil +} + +// setStickyBit adds ModeSticky to the permission bits of path, non atomic. +func setStickyBit(name string) error { + fi, err := Stat(name) + if err != nil { + return err + } + return Chmod(name, fi.Mode()|ModeSticky) +} + +// Chdir changes the current working directory to the named directory. +// If there is an error, it will be of type *PathError. +func Chdir(dir string) error { + if e := syscall.Chdir(dir); e != nil { + testlog.Open(dir) // observe likely non-existent directory + return &PathError{Op: "chdir", Path: dir, Err: e} + } + if log := testlog.Logger(); log != nil { + wd, err := Getwd() + if err == nil { + log.Chdir(wd) + } + } + return nil +} + +// Open opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func Open(name string) (*File, error) { + return OpenFile(name, O_RDONLY, 0) +} + +// Create creates or truncates the named file. If the file already exists, +// it is truncated. If the file does not exist, it is created with mode 0666 +// (before umask). If successful, methods on the returned File can +// be used for I/O; the associated file descriptor has mode O_RDWR. +// If there is an error, it will be of type *PathError. 
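+//
+// For example (editor's sketch; "out.txt" is a placeholder):
+//
+//	f, err := os.Create("out.txt")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer f.Close()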
+func Create(name string) (*File, error) { + return OpenFile(name, O_RDWR|O_CREATE|O_TRUNC, 0666) +} + +// OpenFile is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.). If the file does not exist, and the O_CREATE flag +// is passed, it is created with mode perm (before umask). If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFile(name string, flag int, perm FileMode) (*File, error) { + testlog.Open(name) + f, err := openFileNolog(name, flag, perm) + if err != nil { + return nil, err + } + f.appendMode = flag&O_APPEND != 0 + + return f, nil +} + +// lstat is overridden in tests. +var lstat = Lstat + +// Rename renames (moves) oldpath to newpath. +// If newpath already exists and is not a directory, Rename replaces it. +// OS-specific restrictions may apply when oldpath and newpath are in different directories. +// Even within the same directory, on non-Unix platforms Rename is not an atomic operation. +// If there is an error, it will be of type *LinkError. +func Rename(oldpath, newpath string) error { + return rename(oldpath, newpath) +} + +// Readlink returns the destination of the named symbolic link. +// If there is an error, it will be of type *PathError. +// +// If the link destination is relative, Readlink returns the relative path +// without resolving it to an absolute one. +func Readlink(name string) (string, error) { + return readlink(name) +} + +// Many functions in package syscall return a count of -1 instead of 0. +// Using fixCount(call()) instead of call() corrects the count. +func fixCount(n int, err error) (int, error) { + if n < 0 { + n = 0 + } + return n, err +} + +// checkWrapErr is the test hook to enable checking unexpected wrapped errors of poll.ErrFileClosing. +// It is set to true in the export_test.go for tests (including fuzz tests). +var checkWrapErr = false + +// wrapErr wraps an error that occurred during an operation on an open file. +// It passes io.EOF through unchanged, otherwise converts +// poll.ErrFileClosing to ErrClosed and wraps the error in a PathError. +func (f *File) wrapErr(op string, err error) error { + if err == nil || err == io.EOF { + return err + } + if err == poll.ErrFileClosing { + err = ErrClosed + } else if checkWrapErr && errors.Is(err, poll.ErrFileClosing) { + panic("unexpected error wrapping poll.ErrFileClosing: " + err.Error()) + } + return &PathError{Op: op, Path: f.name, Err: err} +} + +// TempDir returns the default directory to use for temporary files. +// +// On Unix systems, it returns $TMPDIR if non-empty, else /tmp. +// On Windows, it uses GetTempPath, returning the first non-empty +// value from %TMP%, %TEMP%, %USERPROFILE%, or the Windows directory. +// On Plan 9, it returns /tmp. +// +// The directory is neither guaranteed to exist nor have accessible +// permissions. +func TempDir() string { + return tempDir() +} + +// UserCacheDir returns the default root directory to use for user-specific +// cached data. Users should create their own application-specific subdirectory +// within this one and use that. +// +// On Unix systems, it returns $XDG_CACHE_HOME as specified by +// https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html if +// non-empty, else $HOME/.cache. +// On Darwin, it returns $HOME/Library/Caches. +// On Windows, it returns %LocalAppData%. +// On Plan 9, it returns $home/lib/cache. 
+// +// If the location cannot be determined (for example, $HOME is not defined), +// then it will return an error. +func UserCacheDir() (string, error) { + var dir string + + switch runtime.GOOS { + case "windows": + dir = Getenv("LocalAppData") + if dir == "" { + return "", errors.New("%LocalAppData% is not defined") + } + + case "darwin", "ios": + dir = Getenv("HOME") + if dir == "" { + return "", errors.New("$HOME is not defined") + } + dir += "/Library/Caches" + + case "plan9": + dir = Getenv("home") + if dir == "" { + return "", errors.New("$home is not defined") + } + dir += "/lib/cache" + + default: // Unix + dir = Getenv("XDG_CACHE_HOME") + if dir == "" { + dir = Getenv("HOME") + if dir == "" { + return "", errors.New("neither $XDG_CACHE_HOME nor $HOME are defined") + } + dir += "/.cache" + } + } + + return dir, nil +} + +// UserConfigDir returns the default root directory to use for user-specific +// configuration data. Users should create their own application-specific +// subdirectory within this one and use that. +// +// On Unix systems, it returns $XDG_CONFIG_HOME as specified by +// https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html if +// non-empty, else $HOME/.config. +// On Darwin, it returns $HOME/Library/Application Support. +// On Windows, it returns %AppData%. +// On Plan 9, it returns $home/lib. +// +// If the location cannot be determined (for example, $HOME is not defined), +// then it will return an error. +func UserConfigDir() (string, error) { + var dir string + + switch runtime.GOOS { + case "windows": + dir = Getenv("AppData") + if dir == "" { + return "", errors.New("%AppData% is not defined") + } + + case "darwin", "ios": + dir = Getenv("HOME") + if dir == "" { + return "", errors.New("$HOME is not defined") + } + dir += "/Library/Application Support" + + case "plan9": + dir = Getenv("home") + if dir == "" { + return "", errors.New("$home is not defined") + } + dir += "/lib" + + default: // Unix + dir = Getenv("XDG_CONFIG_HOME") + if dir == "" { + dir = Getenv("HOME") + if dir == "" { + return "", errors.New("neither $XDG_CONFIG_HOME nor $HOME are defined") + } + dir += "/.config" + } + } + + return dir, nil +} + +// UserHomeDir returns the current user's home directory. +// +// On Unix, including macOS, it returns the $HOME environment variable. +// On Windows, it returns %USERPROFILE%. +// On Plan 9, it returns the $home environment variable. +// +// If the expected variable is not set in the environment, UserHomeDir +// returns either a platform-specific default value or a non-nil error. +func UserHomeDir() (string, error) { + env, enverr := "HOME", "$HOME" + switch runtime.GOOS { + case "windows": + env, enverr = "USERPROFILE", "%userprofile%" + case "plan9": + env, enverr = "home", "$home" + } + if v := Getenv(env); v != "" { + return v, nil + } + // On some geese the home directory is not always defined. + switch runtime.GOOS { + case "android": + return "/sdcard", nil + case "ios": + return "/", nil + } + return "", errors.New(enverr + " is not defined") +} + +// Chmod changes the mode of the named file to mode. +// If the file is a symbolic link, it changes the mode of the link's target. +// If there is an error, it will be of type *PathError. +// +// A different subset of the mode bits are used, depending on the +// operating system. +// +// On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and +// ModeSticky are used. 
+// +// On Windows, only the 0200 bit (owner writable) of mode is used; it +// controls whether the file's read-only attribute is set or cleared. +// The other bits are currently unused. For compatibility with Go 1.12 +// and earlier, use a non-zero mode. Use mode 0400 for a read-only +// file and 0600 for a readable+writable file. +// +// On Plan 9, the mode's permission bits, ModeAppend, ModeExclusive, +// and ModeTemporary are used. +func Chmod(name string, mode FileMode) error { return chmod(name, mode) } + +// Chmod changes the mode of the file to mode. +// If there is an error, it will be of type *PathError. +func (f *File) Chmod(mode FileMode) error { return f.chmod(mode) } + +// SetDeadline sets the read and write deadlines for a File. +// It is equivalent to calling both SetReadDeadline and SetWriteDeadline. +// +// Only some kinds of files support setting a deadline. Calls to SetDeadline +// for files that do not support deadlines will return ErrNoDeadline. +// On most systems ordinary files do not support deadlines, but pipes do. +// +// A deadline is an absolute time after which I/O operations fail with an +// error instead of blocking. The deadline applies to all future and pending +// I/O, not just the immediately following call to Read or Write. +// After a deadline has been exceeded, the connection can be refreshed +// by setting a deadline in the future. +// +// If the deadline is exceeded a call to Read or Write or to other I/O +// methods will return an error that wraps ErrDeadlineExceeded. +// This can be tested using errors.Is(err, os.ErrDeadlineExceeded). +// That error implements the Timeout method, and calling the Timeout +// method will return true, but there are other possible errors for which +// the Timeout will return true even if the deadline has not been exceeded. +// +// An idle timeout can be implemented by repeatedly extending +// the deadline after successful Read or Write calls. +// +// A zero value for t means I/O operations will not time out. +func (f *File) SetDeadline(t time.Time) error { + return f.setDeadline(t) +} + +// SetReadDeadline sets the deadline for future Read calls and any +// currently-blocked Read call. +// A zero value for t means Read will not time out. +// Not all files support setting deadlines; see SetDeadline. +func (f *File) SetReadDeadline(t time.Time) error { + return f.setReadDeadline(t) +} + +// SetWriteDeadline sets the deadline for any future Write calls and any +// currently-blocked Write call. +// Even if Write times out, it may return n > 0, indicating that +// some of the data was successfully written. +// A zero value for t means Write will not time out. +// Not all files support setting deadlines; see SetDeadline. +func (f *File) SetWriteDeadline(t time.Time) error { + return f.setWriteDeadline(t) +} + +// SyscallConn returns a raw file. +// This implements the syscall.Conn interface. +func (f *File) SyscallConn() (syscall.RawConn, error) { + if err := f.checkValid("SyscallConn"); err != nil { + return nil, err + } + return newRawConn(f) +} + +// DirFS returns a file system (an fs.FS) for the tree of files rooted at the directory dir. +// +// Note that DirFS("/prefix") only guarantees that the Open calls it makes to the +// operating system will begin with "/prefix": DirFS("/prefix").Open("file") is the +// same as os.Open("/prefix/file"). So if /prefix/file is a symbolic link pointing outside +// the /prefix tree, then using DirFS does not stop the access any more than using +// os.Open does. 
+// Additionally, the root of the fs.FS returned for a relative path,
+// DirFS("prefix"), will be affected by later calls to Chdir. DirFS is therefore not
+// a general substitute for a chroot-style security mechanism when the directory tree
+// contains arbitrary content.
+//
+// The directory dir must not be "".
+//
+// The result implements [io/fs.StatFS], [io/fs.ReadFileFS] and
+// [io/fs.ReadDirFS].
+func DirFS(dir string) fs.FS {
+	return dirFS(dir)
+}
+
+type dirFS string
+
+func (dir dirFS) Open(name string) (fs.File, error) {
+	fullname, err := dir.join(name)
+	if err != nil {
+		return nil, &PathError{Op: "open", Path: name, Err: err}
+	}
+	f, err := Open(fullname)
+	if err != nil {
+		// DirFS takes a string appropriate for GOOS,
+		// while the name argument here is always slash separated.
+		// dir.join will have mixed the two; undo that for
+		// error reporting.
+		err.(*PathError).Path = name
+		return nil, err
+	}
+	return f, nil
+}
+
+// The ReadFile method calls the [ReadFile] function for the file
+// with the given name in the directory. The function provides
+// robust handling for small files and special file systems.
+// Through this method, dirFS implements [io/fs.ReadFileFS].
+func (dir dirFS) ReadFile(name string) ([]byte, error) {
+	fullname, err := dir.join(name)
+	if err != nil {
+		return nil, &PathError{Op: "readfile", Path: name, Err: err}
+	}
+	b, err := ReadFile(fullname)
+	if err != nil {
+		if e, ok := err.(*PathError); ok {
+			// See comment in dirFS.Open.
+			e.Path = name
+		}
+		return nil, err
+	}
+	return b, nil
+}
+
+// ReadDir reads the named directory, returning all its directory entries sorted
+// by filename. Through this method, dirFS implements [io/fs.ReadDirFS].
+func (dir dirFS) ReadDir(name string) ([]DirEntry, error) {
+	fullname, err := dir.join(name)
+	if err != nil {
+		return nil, &PathError{Op: "readdir", Path: name, Err: err}
+	}
+	entries, err := ReadDir(fullname)
+	if err != nil {
+		if e, ok := err.(*PathError); ok {
+			// See comment in dirFS.Open.
+			e.Path = name
+		}
+		return nil, err
+	}
+	return entries, nil
+}
+
+func (dir dirFS) Stat(name string) (fs.FileInfo, error) {
+	fullname, err := dir.join(name)
+	if err != nil {
+		return nil, &PathError{Op: "stat", Path: name, Err: err}
+	}
+	f, err := Stat(fullname)
+	if err != nil {
+		// See comment in dirFS.Open.
+		err.(*PathError).Path = name
+		return nil, err
+	}
+	return f, nil
+}
+
+// join returns the path for name in dir.
+func (dir dirFS) join(name string) (string, error) {
+	if dir == "" {
+		return "", errors.New("os: DirFS with empty root")
+	}
+	if !fs.ValidPath(name) {
+		return "", ErrInvalid
+	}
+	name, err := safefilepath.FromFS(name)
+	if err != nil {
+		return "", ErrInvalid
+	}
+	if IsPathSeparator(dir[len(dir)-1]) {
+		return string(dir) + name, nil
+	}
+	return string(dir) + string(PathSeparator) + name, nil
+}
+
+// ReadFile reads the named file and returns the contents.
+// A successful call returns err == nil, not err == EOF.
+// Because ReadFile reads the whole file, it does not treat an EOF from Read
+// as an error to be reported.
+func ReadFile(name string) ([]byte, error) {
+	f, err := Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	var size int
+	if info, err := f.Stat(); err == nil {
+		size64 := info.Size()
+		if int64(int(size64)) == size64 {
+			size = int(size64)
+		}
+	}
+	size++ // one byte for final read at EOF
+
+	// If a file claims a small size, read at least 512 bytes.
+	// In particular, files in Linux's /proc claim size 0 but
+	// then do not work right if read in small pieces,
+	// so an initial read of 1 byte would not work correctly.
+	if size < 512 {
+		size = 512
+	}
+
+	data := make([]byte, 0, size)
+	for {
+		n, err := f.Read(data[len(data):cap(data)])
+		data = data[:len(data)+n]
+		if err != nil {
+			if err == io.EOF {
+				err = nil
+			}
+			return data, err
+		}
+
+		if len(data) >= cap(data) {
+			d := append(data[:cap(data)], 0)
+			data = d[:len(data)]
+		}
+	}
+}
+
+// WriteFile writes data to the named file, creating it if necessary.
+// If the file does not exist, WriteFile creates it with permissions perm (before umask);
+// otherwise WriteFile truncates it before writing, without changing permissions.
+// Since WriteFile requires multiple system calls to complete, a failure mid-operation
+// can leave the file in a partially written state.
+func WriteFile(name string, data []byte, perm FileMode) error {
+	f, err := OpenFile(name, O_WRONLY|O_CREATE|O_TRUNC, perm)
+	if err != nil {
+		return err
+	}
+	_, err = f.Write(data)
+	if err1 := f.Close(); err1 != nil && err == nil {
+		err = err1
+	}
+	return err
+}
diff --git a/platform/dbops/binaries/go/go/src/os/file_mutex_plan9.go b/platform/dbops/binaries/go/go/src/os/file_mutex_plan9.go
new file mode 100644
index 0000000000000000000000000000000000000000..26bf5a7d1e5fcda2ba9384d3e453402a6edf0a41
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/os/file_mutex_plan9.go
@@ -0,0 +1,70 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os
+
+// File locking support for Plan 9. This uses fdMutex from the
+// internal/poll package.
+
+// incref adds a reference to the file. It returns an error if the file
+// is already closed. This method is on File so that we can incorporate
+// a nil test.
+func (f *File) incref(op string) (err error) {
+	if f == nil {
+		return ErrInvalid
+	}
+	if !f.fdmu.Incref() {
+		err = ErrClosed
+		if op != "" {
+			err = &PathError{Op: op, Path: f.name, Err: err}
+		}
+	}
+	return err
+}
+
+// decref removes a reference to the file. If this is the last
+// remaining reference, and the file has been marked to be closed,
+// then actually close it.
+func (file *file) decref() error {
+	if file.fdmu.Decref() {
+		return file.destroy()
+	}
+	return nil
+}
+
+// readLock adds a reference to the file and locks it for reading.
+// It returns an error if the file is already closed.
+func (file *file) readLock() error {
+	if !file.fdmu.ReadLock() {
+		return ErrClosed
+	}
+	return nil
+}
+
+// readUnlock removes a reference from the file and unlocks it for reading.
+// It also closes the file if it is marked as closed and there is no remaining
+// reference.
+func (file *file) readUnlock() {
+	if file.fdmu.ReadUnlock() {
+		file.destroy()
+	}
+}
+
+// writeLock adds a reference to the file and locks it for writing.
+// It returns an error if the file is already closed.
+func (file *file) writeLock() error {
+	if !file.fdmu.WriteLock() {
+		return ErrClosed
+	}
+	return nil
+}
+
+// writeUnlock removes a reference from the file and unlocks it for writing.
+// It also closes the file if it is marked as closed and there is no remaining
+// reference.
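+//
+// A sketch of the pairing convention these helpers assume, mirroring the
+// Plan 9 read and write paths later in this patch:
+//
+//	if err := f.writeLock(); err != nil {
+//		return 0, err
+//	}
+//	defer f.writeUnlock()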
+func (file *file) writeUnlock() { + if file.fdmu.WriteUnlock() { + file.destroy() + } +} diff --git a/platform/dbops/binaries/go/go/src/os/file_open_unix.go b/platform/dbops/binaries/go/go/src/os/file_open_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..a3336eac81a5d1871cfa8e1f1a8ce9c1357f4439 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/file_open_unix.go @@ -0,0 +1,17 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || (js && wasm) + +package os + +import ( + "internal/poll" + "syscall" +) + +func open(path string, flag int, perm uint32) (int, poll.SysFile, error) { + fd, err := syscall.Open(path, flag, perm) + return fd, poll.SysFile{}, err +} diff --git a/platform/dbops/binaries/go/go/src/os/file_open_wasip1.go b/platform/dbops/binaries/go/go/src/os/file_open_wasip1.go new file mode 100644 index 0000000000000000000000000000000000000000..f3ef165e6db9812d23a13dd55af50af8fe81a365 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/file_open_wasip1.go @@ -0,0 +1,31 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasip1 + +package os + +import ( + "internal/poll" + "syscall" +) + +func open(filePath string, flag int, perm uint32) (int, poll.SysFile, error) { + if filePath == "" { + return -1, poll.SysFile{}, syscall.EINVAL + } + absPath := filePath + // os.(*File).Chdir is emulated by setting the working directory to the + // absolute path that this file was opened at, which is why we have to + // resolve and capture it here. + if filePath[0] != '/' { + wd, err := syscall.Getwd() + if err != nil { + return -1, poll.SysFile{}, err + } + absPath = joinPath(wd, filePath) + } + fd, err := syscall.Open(absPath, flag, perm) + return fd, poll.SysFile{Path: absPath}, err +} diff --git a/platform/dbops/binaries/go/go/src/os/file_plan9.go b/platform/dbops/binaries/go/go/src/os/file_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..c0ee6b33f9f96ea8e0db810b2b4b3ffff76c7930 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/file_plan9.go @@ -0,0 +1,618 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "internal/bytealg" + "internal/poll" + "io" + "runtime" + "syscall" + "time" +) + +// fixLongPath is a noop on non-Windows platforms. +func fixLongPath(path string) string { + return path +} + +// file is the real representation of *File. +// The extra level of indirection ensures that no clients of os +// can overwrite this data, which could cause the finalizer +// to close the wrong file descriptor. +type file struct { + fdmu poll.FDMutex + fd int + name string + dirinfo *dirInfo // nil unless directory being read + appendMode bool // whether file is opened for appending +} + +// Fd returns the integer Plan 9 file descriptor referencing the open file. +// If f is closed, the file descriptor becomes invalid. +// If f is garbage collected, a finalizer may close the file descriptor, +// making it invalid; see runtime.SetFinalizer for more information on when +// a finalizer might be run. On Unix systems this will cause the SetDeadline +// methods to stop working. +// +// As an alternative, see the f.SyscallConn method. 
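+//
+// A hedged usage sketch: keep the File reachable while the raw descriptor
+// is in use, or a finalizer may close it out from under the system call:
+//
+//	fd := f.Fd()
+//	// ... pass fd to a raw system call ...
+//	runtime.KeepAlive(f)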
+func (f *File) Fd() uintptr { + if f == nil { + return ^(uintptr(0)) + } + return uintptr(f.fd) +} + +// NewFile returns a new File with the given file descriptor and +// name. The returned value will be nil if fd is not a valid file +// descriptor. +func NewFile(fd uintptr, name string) *File { + fdi := int(fd) + if fdi < 0 { + return nil + } + f := &File{&file{fd: fdi, name: name}} + runtime.SetFinalizer(f.file, (*file).close) + return f +} + +// Auxiliary information if the File describes a directory +type dirInfo struct { + buf [syscall.STATMAX]byte // buffer for directory I/O + nbuf int // length of buf; return value from Read + bufp int // location of next record in buf. +} + +func epipecheck(file *File, e error) { +} + +// DevNull is the name of the operating system's “null device.” +// On Unix-like systems, it is "/dev/null"; on Windows, "NUL". +const DevNull = "/dev/null" + +// syscallMode returns the syscall-specific mode bits from Go's portable mode bits. +func syscallMode(i FileMode) (o uint32) { + o |= uint32(i.Perm()) + if i&ModeAppend != 0 { + o |= syscall.DMAPPEND + } + if i&ModeExclusive != 0 { + o |= syscall.DMEXCL + } + if i&ModeTemporary != 0 { + o |= syscall.DMTMP + } + return +} + +// openFileNolog is the Plan 9 implementation of OpenFile. +func openFileNolog(name string, flag int, perm FileMode) (*File, error) { + var ( + fd int + e error + create bool + excl bool + trunc bool + append bool + ) + + if flag&O_CREATE == O_CREATE { + flag = flag & ^O_CREATE + create = true + } + if flag&O_EXCL == O_EXCL { + excl = true + } + if flag&O_TRUNC == O_TRUNC { + trunc = true + } + // O_APPEND is emulated on Plan 9 + if flag&O_APPEND == O_APPEND { + flag = flag &^ O_APPEND + append = true + } + + if (create && trunc) || excl { + fd, e = syscall.Create(name, flag, syscallMode(perm)) + } else { + fd, e = syscall.Open(name, flag) + if IsNotExist(e) && create { + fd, e = syscall.Create(name, flag, syscallMode(perm)) + if e != nil { + return nil, &PathError{Op: "create", Path: name, Err: e} + } + } + } + + if e != nil { + return nil, &PathError{Op: "open", Path: name, Err: e} + } + + if append { + if _, e = syscall.Seek(fd, 0, io.SeekEnd); e != nil { + return nil, &PathError{Op: "seek", Path: name, Err: e} + } + } + + return NewFile(uintptr(fd), name), nil +} + +// Close closes the File, rendering it unusable for I/O. +// On files that support SetDeadline, any pending I/O operations will +// be canceled and return immediately with an ErrClosed error. +// Close will return an error if it has already been called. +func (f *File) Close() error { + if f == nil { + return ErrInvalid + } + return f.file.close() +} + +func (file *file) close() error { + if !file.fdmu.IncrefAndClose() { + return &PathError{Op: "close", Path: file.name, Err: ErrClosed} + } + + // At this point we should cancel any pending I/O. + // How do we do that on Plan 9? + + err := file.decref() + + // no need for a finalizer anymore + runtime.SetFinalizer(file, nil) + return err +} + +// destroy actually closes the descriptor. This is called when +// there are no remaining references, by the decref, readUnlock, +// and writeUnlock methods. +func (file *file) destroy() error { + var err error + if e := syscall.Close(file.fd); e != nil { + err = &PathError{Op: "close", Path: file.name, Err: e} + } + return err +} + +// Stat returns the FileInfo structure describing file. +// If there is an error, it will be of type *PathError. 
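+//
+// Minimal caller-side usage sketch (fmt is used only for illustration):
+//
+//	fi, err := f.Stat()
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(fi.Name(), fi.Size(), fi.Mode())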
+func (f *File) Stat() (FileInfo, error) { + if f == nil { + return nil, ErrInvalid + } + d, err := dirstat(f) + if err != nil { + return nil, err + } + return fileInfoFromStat(d), nil +} + +// Truncate changes the size of the file. +// It does not change the I/O offset. +// If there is an error, it will be of type *PathError. +func (f *File) Truncate(size int64) error { + if f == nil { + return ErrInvalid + } + + var d syscall.Dir + d.Null() + d.Length = size + + var buf [syscall.STATFIXLEN]byte + n, err := d.Marshal(buf[:]) + if err != nil { + return &PathError{Op: "truncate", Path: f.name, Err: err} + } + + if err := f.incref("truncate"); err != nil { + return err + } + defer f.decref() + + if err = syscall.Fwstat(f.fd, buf[:n]); err != nil { + return &PathError{Op: "truncate", Path: f.name, Err: err} + } + return nil +} + +const chmodMask = uint32(syscall.DMAPPEND | syscall.DMEXCL | syscall.DMTMP | ModePerm) + +func (f *File) chmod(mode FileMode) error { + if f == nil { + return ErrInvalid + } + var d syscall.Dir + + odir, e := dirstat(f) + if e != nil { + return &PathError{Op: "chmod", Path: f.name, Err: e} + } + d.Null() + d.Mode = odir.Mode&^chmodMask | syscallMode(mode)&chmodMask + + var buf [syscall.STATFIXLEN]byte + n, err := d.Marshal(buf[:]) + if err != nil { + return &PathError{Op: "chmod", Path: f.name, Err: err} + } + + if err := f.incref("chmod"); err != nil { + return err + } + defer f.decref() + + if err = syscall.Fwstat(f.fd, buf[:n]); err != nil { + return &PathError{Op: "chmod", Path: f.name, Err: err} + } + return nil +} + +// Sync commits the current contents of the file to stable storage. +// Typically, this means flushing the file system's in-memory copy +// of recently written data to disk. +func (f *File) Sync() error { + if f == nil { + return ErrInvalid + } + var d syscall.Dir + d.Null() + + var buf [syscall.STATFIXLEN]byte + n, err := d.Marshal(buf[:]) + if err != nil { + return &PathError{Op: "sync", Path: f.name, Err: err} + } + + if err := f.incref("sync"); err != nil { + return err + } + defer f.decref() + + if err = syscall.Fwstat(f.fd, buf[:n]); err != nil { + return &PathError{Op: "sync", Path: f.name, Err: err} + } + return nil +} + +// read reads up to len(b) bytes from the File. +// It returns the number of bytes read and an error, if any. +func (f *File) read(b []byte) (n int, err error) { + if err := f.readLock(); err != nil { + return 0, err + } + defer f.readUnlock() + n, e := fixCount(syscall.Read(f.fd, b)) + if n == 0 && len(b) > 0 && e == nil { + return 0, io.EOF + } + return n, e +} + +// pread reads len(b) bytes from the File starting at byte offset off. +// It returns the number of bytes read and the error, if any. +// EOF is signaled by a zero count with err set to nil. +func (f *File) pread(b []byte, off int64) (n int, err error) { + if err := f.readLock(); err != nil { + return 0, err + } + defer f.readUnlock() + n, e := fixCount(syscall.Pread(f.fd, b, off)) + if n == 0 && len(b) > 0 && e == nil { + return 0, io.EOF + } + return n, e +} + +// write writes len(b) bytes to the File. +// It returns the number of bytes written and an error, if any. +// Since Plan 9 preserves message boundaries, never allow +// a zero-byte write. +func (f *File) write(b []byte) (n int, err error) { + if err := f.writeLock(); err != nil { + return 0, err + } + defer f.writeUnlock() + if len(b) == 0 { + return 0, nil + } + return fixCount(syscall.Write(f.fd, b)) +} + +// pwrite writes len(b) bytes to the File starting at byte offset off. 
+// It returns the number of bytes written and an error, if any. +// Since Plan 9 preserves message boundaries, never allow +// a zero-byte write. +func (f *File) pwrite(b []byte, off int64) (n int, err error) { + if err := f.writeLock(); err != nil { + return 0, err + } + defer f.writeUnlock() + if len(b) == 0 { + return 0, nil + } + return fixCount(syscall.Pwrite(f.fd, b, off)) +} + +// seek sets the offset for the next Read or Write on file to offset, interpreted +// according to whence: 0 means relative to the origin of the file, 1 means +// relative to the current offset, and 2 means relative to the end. +// It returns the new offset and an error, if any. +func (f *File) seek(offset int64, whence int) (ret int64, err error) { + if err := f.incref(""); err != nil { + return 0, err + } + defer f.decref() + if f.dirinfo != nil { + // Free cached dirinfo, so we allocate a new one if we + // access this file as a directory again. See #35767 and #37161. + f.dirinfo = nil + } + return syscall.Seek(f.fd, offset, whence) +} + +// Truncate changes the size of the named file. +// If the file is a symbolic link, it changes the size of the link's target. +// If there is an error, it will be of type *PathError. +func Truncate(name string, size int64) error { + var d syscall.Dir + + d.Null() + d.Length = size + + var buf [syscall.STATFIXLEN]byte + n, err := d.Marshal(buf[:]) + if err != nil { + return &PathError{Op: "truncate", Path: name, Err: err} + } + if err = syscall.Wstat(name, buf[:n]); err != nil { + return &PathError{Op: "truncate", Path: name, Err: err} + } + return nil +} + +// Remove removes the named file or directory. +// If there is an error, it will be of type *PathError. +func Remove(name string) error { + if e := syscall.Remove(name); e != nil { + return &PathError{Op: "remove", Path: name, Err: e} + } + return nil +} + +// hasPrefix from the strings package. +func hasPrefix(s, prefix string) bool { + return len(s) >= len(prefix) && s[0:len(prefix)] == prefix +} + +func rename(oldname, newname string) error { + dirname := oldname[:bytealg.LastIndexByteString(oldname, '/')+1] + if hasPrefix(newname, dirname) { + newname = newname[len(dirname):] + } else { + return &LinkError{"rename", oldname, newname, ErrInvalid} + } + + // If newname still contains slashes after removing the oldname + // prefix, the rename is cross-directory and must be rejected. + if bytealg.LastIndexByteString(newname, '/') >= 0 { + return &LinkError{"rename", oldname, newname, ErrInvalid} + } + + var d syscall.Dir + + d.Null() + d.Name = newname + + buf := make([]byte, syscall.STATFIXLEN+len(d.Name)) + n, err := d.Marshal(buf[:]) + if err != nil { + return &LinkError{"rename", oldname, newname, err} + } + + // If newname already exists and is not a directory, rename replaces it. + f, err := Stat(dirname + newname) + if err == nil && !f.IsDir() { + Remove(dirname + newname) + } + + if err = syscall.Wstat(oldname, buf[:n]); err != nil { + return &LinkError{"rename", oldname, newname, err} + } + return nil +} + +// See docs in file.go:Chmod. 
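+//
+// Caller-side usage sketch (the path and mode are hypothetical):
+//
+//	if err := os.Chmod("notes.txt", 0644); err != nil {
+//		log.Fatal(err)
+//	}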
+func chmod(name string, mode FileMode) error { + var d syscall.Dir + + odir, e := dirstat(name) + if e != nil { + return &PathError{Op: "chmod", Path: name, Err: e} + } + d.Null() + d.Mode = odir.Mode&^chmodMask | syscallMode(mode)&chmodMask + + var buf [syscall.STATFIXLEN]byte + n, err := d.Marshal(buf[:]) + if err != nil { + return &PathError{Op: "chmod", Path: name, Err: err} + } + if err = syscall.Wstat(name, buf[:n]); err != nil { + return &PathError{Op: "chmod", Path: name, Err: err} + } + return nil +} + +// Chtimes changes the access and modification times of the named +// file, similar to the Unix utime() or utimes() functions. +// A zero time.Time value will leave the corresponding file time unchanged. +// +// The underlying filesystem may truncate or round the values to a +// less precise time unit. +// If there is an error, it will be of type *PathError. +func Chtimes(name string, atime time.Time, mtime time.Time) error { + var d syscall.Dir + + d.Null() + d.Atime = uint32(atime.Unix()) + d.Mtime = uint32(mtime.Unix()) + if atime.IsZero() { + d.Atime = 0xFFFFFFFF + } + if mtime.IsZero() { + d.Mtime = 0xFFFFFFFF + } + + var buf [syscall.STATFIXLEN]byte + n, err := d.Marshal(buf[:]) + if err != nil { + return &PathError{Op: "chtimes", Path: name, Err: err} + } + if err = syscall.Wstat(name, buf[:n]); err != nil { + return &PathError{Op: "chtimes", Path: name, Err: err} + } + return nil +} + +// Pipe returns a connected pair of Files; reads from r return bytes +// written to w. It returns the files and an error, if any. +func Pipe() (r *File, w *File, err error) { + var p [2]int + + if e := syscall.Pipe(p[0:]); e != nil { + return nil, nil, NewSyscallError("pipe", e) + } + + return NewFile(uintptr(p[0]), "|0"), NewFile(uintptr(p[1]), "|1"), nil +} + +// not supported on Plan 9 + +// Link creates newname as a hard link to the oldname file. +// If there is an error, it will be of type *LinkError. +func Link(oldname, newname string) error { + return &LinkError{"link", oldname, newname, syscall.EPLAN9} +} + +// Symlink creates newname as a symbolic link to oldname. +// On Windows, a symlink to a non-existent oldname creates a file symlink; +// if oldname is later created as a directory the symlink will not work. +// If there is an error, it will be of type *LinkError. +func Symlink(oldname, newname string) error { + return &LinkError{"symlink", oldname, newname, syscall.EPLAN9} +} + +func readlink(name string) (string, error) { + return "", &PathError{Op: "readlink", Path: name, Err: syscall.EPLAN9} +} + +// Chown changes the numeric uid and gid of the named file. +// If the file is a symbolic link, it changes the uid and gid of the link's target. +// A uid or gid of -1 means to not change that value. +// If there is an error, it will be of type *PathError. +// +// On Windows or Plan 9, Chown always returns the syscall.EWINDOWS or +// EPLAN9 error, wrapped in *PathError. +func Chown(name string, uid, gid int) error { + return &PathError{Op: "chown", Path: name, Err: syscall.EPLAN9} +} + +// Lchown changes the numeric uid and gid of the named file. +// If the file is a symbolic link, it changes the uid and gid of the link itself. +// If there is an error, it will be of type *PathError. +func Lchown(name string, uid, gid int) error { + return &PathError{Op: "lchown", Path: name, Err: syscall.EPLAN9} +} + +// Chown changes the numeric uid and gid of the named file. +// If there is an error, it will be of type *PathError. 
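+//
+// On Plan 9 the call always fails; a hedged sketch of inspecting the
+// error (the uid/gid values are hypothetical):
+//
+//	err := f.Chown(1000, 1000)
+//	var pe *PathError
+//	_ = errors.As(err, &pe) // true; pe.Err is syscall.EPLAN9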
+func (f *File) Chown(uid, gid int) error { + if f == nil { + return ErrInvalid + } + return &PathError{Op: "chown", Path: f.name, Err: syscall.EPLAN9} +} + +func tempDir() string { + dir := Getenv("TMPDIR") + if dir == "" { + dir = "/tmp" + } + return dir +} + +// Chdir changes the current working directory to the file, +// which must be a directory. +// If there is an error, it will be of type *PathError. +func (f *File) Chdir() error { + if err := f.incref("chdir"); err != nil { + return err + } + defer f.decref() + if e := syscall.Fchdir(f.fd); e != nil { + return &PathError{Op: "chdir", Path: f.name, Err: e} + } + return nil +} + +// setDeadline sets the read and write deadline. +func (f *File) setDeadline(time.Time) error { + if err := f.checkValid("SetDeadline"); err != nil { + return err + } + return poll.ErrNoDeadline +} + +// setReadDeadline sets the read deadline. +func (f *File) setReadDeadline(time.Time) error { + if err := f.checkValid("SetReadDeadline"); err != nil { + return err + } + return poll.ErrNoDeadline +} + +// setWriteDeadline sets the write deadline. +func (f *File) setWriteDeadline(time.Time) error { + if err := f.checkValid("SetWriteDeadline"); err != nil { + return err + } + return poll.ErrNoDeadline +} + +// checkValid checks whether f is valid for use, but does not prepare +// to actually use it. If f is not ready checkValid returns an appropriate +// error, perhaps incorporating the operation name op. +func (f *File) checkValid(op string) error { + if f == nil { + return ErrInvalid + } + if err := f.incref(op); err != nil { + return err + } + return f.decref() +} + +type rawConn struct{} + +func (c *rawConn) Control(f func(uintptr)) error { + return syscall.EPLAN9 +} + +func (c *rawConn) Read(f func(uintptr) bool) error { + return syscall.EPLAN9 +} + +func (c *rawConn) Write(f func(uintptr) bool) error { + return syscall.EPLAN9 +} + +func newRawConn(file *File) (*rawConn, error) { + return nil, syscall.EPLAN9 +} + +func ignoringEINTR(fn func() error) error { + return fn() +} diff --git a/platform/dbops/binaries/go/go/src/os/file_posix.go b/platform/dbops/binaries/go/go/src/os/file_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..5692657753372f678363525b53df54bbb559d828 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/file_posix.go @@ -0,0 +1,256 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || (js && wasm) || wasip1 || windows + +package os + +import ( + "runtime" + "syscall" + "time" +) + +// Close closes the File, rendering it unusable for I/O. +// On files that support SetDeadline, any pending I/O operations will +// be canceled and return immediately with an ErrClosed error. +// Close will return an error if it has already been called. +func (f *File) Close() error { + if f == nil { + return ErrInvalid + } + return f.file.close() +} + +// read reads up to len(b) bytes from the File. +// It returns the number of bytes read and an error, if any. +func (f *File) read(b []byte) (n int, err error) { + n, err = f.pfd.Read(b) + runtime.KeepAlive(f) + return n, err +} + +// pread reads len(b) bytes from the File starting at byte offset off. +// It returns the number of bytes read and the error, if any. +// EOF is signaled by a zero count with err set to nil. 
+func (f *File) pread(b []byte, off int64) (n int, err error) { + n, err = f.pfd.Pread(b, off) + runtime.KeepAlive(f) + return n, err +} + +// write writes len(b) bytes to the File. +// It returns the number of bytes written and an error, if any. +func (f *File) write(b []byte) (n int, err error) { + n, err = f.pfd.Write(b) + runtime.KeepAlive(f) + return n, err +} + +// pwrite writes len(b) bytes to the File starting at byte offset off. +// It returns the number of bytes written and an error, if any. +func (f *File) pwrite(b []byte, off int64) (n int, err error) { + n, err = f.pfd.Pwrite(b, off) + runtime.KeepAlive(f) + return n, err +} + +// syscallMode returns the syscall-specific mode bits from Go's portable mode bits. +func syscallMode(i FileMode) (o uint32) { + o |= uint32(i.Perm()) + if i&ModeSetuid != 0 { + o |= syscall.S_ISUID + } + if i&ModeSetgid != 0 { + o |= syscall.S_ISGID + } + if i&ModeSticky != 0 { + o |= syscall.S_ISVTX + } + // No mapping for Go's ModeTemporary (plan9 only). + return +} + +// See docs in file.go:Chmod. +func chmod(name string, mode FileMode) error { + longName := fixLongPath(name) + e := ignoringEINTR(func() error { + return syscall.Chmod(longName, syscallMode(mode)) + }) + if e != nil { + return &PathError{Op: "chmod", Path: name, Err: e} + } + return nil +} + +// See docs in file.go:(*File).Chmod. +func (f *File) chmod(mode FileMode) error { + if err := f.checkValid("chmod"); err != nil { + return err + } + if e := f.pfd.Fchmod(syscallMode(mode)); e != nil { + return f.wrapErr("chmod", e) + } + return nil +} + +// Chown changes the numeric uid and gid of the named file. +// If the file is a symbolic link, it changes the uid and gid of the link's target. +// A uid or gid of -1 means to not change that value. +// If there is an error, it will be of type *PathError. +// +// On Windows or Plan 9, Chown always returns the syscall.EWINDOWS or +// EPLAN9 error, wrapped in *PathError. +func Chown(name string, uid, gid int) error { + e := ignoringEINTR(func() error { + return syscall.Chown(name, uid, gid) + }) + if e != nil { + return &PathError{Op: "chown", Path: name, Err: e} + } + return nil +} + +// Lchown changes the numeric uid and gid of the named file. +// If the file is a symbolic link, it changes the uid and gid of the link itself. +// If there is an error, it will be of type *PathError. +// +// On Windows, it always returns the syscall.EWINDOWS error, wrapped +// in *PathError. +func Lchown(name string, uid, gid int) error { + e := ignoringEINTR(func() error { + return syscall.Lchown(name, uid, gid) + }) + if e != nil { + return &PathError{Op: "lchown", Path: name, Err: e} + } + return nil +} + +// Chown changes the numeric uid and gid of the named file. +// If there is an error, it will be of type *PathError. +// +// On Windows, it always returns the syscall.EWINDOWS error, wrapped +// in *PathError. +func (f *File) Chown(uid, gid int) error { + if err := f.checkValid("chown"); err != nil { + return err + } + if e := f.pfd.Fchown(uid, gid); e != nil { + return f.wrapErr("chown", e) + } + return nil +} + +// Truncate changes the size of the file. +// It does not change the I/O offset. +// If there is an error, it will be of type *PathError. +func (f *File) Truncate(size int64) error { + if err := f.checkValid("truncate"); err != nil { + return err + } + if e := f.pfd.Ftruncate(size); e != nil { + return f.wrapErr("truncate", e) + } + return nil +} + +// Sync commits the current contents of the file to stable storage. 
+// Typically, this means flushing the file system's in-memory copy +// of recently written data to disk. +func (f *File) Sync() error { + if err := f.checkValid("sync"); err != nil { + return err + } + if e := f.pfd.Fsync(); e != nil { + return f.wrapErr("sync", e) + } + return nil +} + +// Chtimes changes the access and modification times of the named +// file, similar to the Unix utime() or utimes() functions. +// A zero time.Time value will leave the corresponding file time unchanged. +// +// The underlying filesystem may truncate or round the values to a +// less precise time unit. +// If there is an error, it will be of type *PathError. +func Chtimes(name string, atime time.Time, mtime time.Time) error { + var utimes [2]syscall.Timespec + set := func(i int, t time.Time) { + if t.IsZero() { + utimes[i] = syscall.Timespec{Sec: _UTIME_OMIT, Nsec: _UTIME_OMIT} + } else { + utimes[i] = syscall.NsecToTimespec(t.UnixNano()) + } + } + set(0, atime) + set(1, mtime) + if e := syscall.UtimesNano(fixLongPath(name), utimes[0:]); e != nil { + return &PathError{Op: "chtimes", Path: name, Err: e} + } + return nil +} + +// Chdir changes the current working directory to the file, +// which must be a directory. +// If there is an error, it will be of type *PathError. +func (f *File) Chdir() error { + if err := f.checkValid("chdir"); err != nil { + return err + } + if e := f.pfd.Fchdir(); e != nil { + return f.wrapErr("chdir", e) + } + return nil +} + +// setDeadline sets the read and write deadline. +func (f *File) setDeadline(t time.Time) error { + if err := f.checkValid("SetDeadline"); err != nil { + return err + } + return f.pfd.SetDeadline(t) +} + +// setReadDeadline sets the read deadline. +func (f *File) setReadDeadline(t time.Time) error { + if err := f.checkValid("SetReadDeadline"); err != nil { + return err + } + return f.pfd.SetReadDeadline(t) +} + +// setWriteDeadline sets the write deadline. +func (f *File) setWriteDeadline(t time.Time) error { + if err := f.checkValid("SetWriteDeadline"); err != nil { + return err + } + return f.pfd.SetWriteDeadline(t) +} + +// checkValid checks whether f is valid for use. +// If not, it returns an appropriate error, perhaps incorporating the operation name op. +func (f *File) checkValid(op string) error { + if f == nil { + return ErrInvalid + } + return nil +} + +// ignoringEINTR makes a function call and repeats it if it returns an +// EINTR error. This appears to be required even though we install all +// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846. +// Also #20400 and #36644 are issues in which a signal handler is +// installed without setting SA_RESTART. None of these are the common case, +// but there are enough of them that it seems that we can't avoid +// an EINTR loop. +func ignoringEINTR(fn func() error) error { + for { + err := fn() + if err != syscall.EINTR { + return err + } + } +} diff --git a/platform/dbops/binaries/go/go/src/os/file_unix.go b/platform/dbops/binaries/go/go/src/os/file_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..649ec3ebb591e75999a9f030f92af9acbf822774 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/file_unix.go @@ -0,0 +1,495 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build unix || (js && wasm) || wasip1 + +package os + +import ( + "internal/poll" + "internal/syscall/unix" + "io/fs" + "runtime" + "syscall" + _ "unsafe" // for go:linkname +) + +const _UTIME_OMIT = unix.UTIME_OMIT + +// fixLongPath is a noop on non-Windows platforms. +func fixLongPath(path string) string { + return path +} + +func rename(oldname, newname string) error { + fi, err := Lstat(newname) + if err == nil && fi.IsDir() { + // There are two independent errors this function can return: + // one for a bad oldname, and one for a bad newname. + // At this point we've determined the newname is bad. + // But just in case oldname is also bad, prioritize returning + // the oldname error because that's what we did historically. + // However, if the old name and new name are not the same, yet + // they refer to the same file, it implies a case-only + // rename on a case-insensitive filesystem, which is ok. + if ofi, err := Lstat(oldname); err != nil { + if pe, ok := err.(*PathError); ok { + err = pe.Err + } + return &LinkError{"rename", oldname, newname, err} + } else if newname == oldname || !SameFile(fi, ofi) { + return &LinkError{"rename", oldname, newname, syscall.EEXIST} + } + } + err = ignoringEINTR(func() error { + return syscall.Rename(oldname, newname) + }) + if err != nil { + return &LinkError{"rename", oldname, newname, err} + } + return nil +} + +// file is the real representation of *File. +// The extra level of indirection ensures that no clients of os +// can overwrite this data, which could cause the finalizer +// to close the wrong file descriptor. +type file struct { + pfd poll.FD + name string + dirinfo *dirInfo // nil unless directory being read + nonblock bool // whether we set nonblocking mode + stdoutOrErr bool // whether this is stdout or stderr + appendMode bool // whether file is opened for appending +} + +// Fd returns the integer Unix file descriptor referencing the open file. +// If f is closed, the file descriptor becomes invalid. +// If f is garbage collected, a finalizer may close the file descriptor, +// making it invalid; see runtime.SetFinalizer for more information on when +// a finalizer might be run. On Unix systems this will cause the SetDeadline +// methods to stop working. +// Because file descriptors can be reused, the returned file descriptor may +// only be closed through the Close method of f, or by its finalizer during +// garbage collection. Otherwise, during garbage collection the finalizer +// may close an unrelated file descriptor with the same (reused) number. +// +// As an alternative, see the f.SyscallConn method. +func (f *File) Fd() uintptr { + if f == nil { + return ^(uintptr(0)) + } + + // If we put the file descriptor into nonblocking mode, + // then set it to blocking mode before we return it, + // because historically we have always returned a descriptor + // opened in blocking mode. The File will continue to work, + // but any blocking operation will tie up a thread. + if f.nonblock { + f.pfd.SetBlocking() + } + + return uintptr(f.pfd.Sysfd) +} + +// NewFile returns a new File with the given file descriptor and +// name. The returned value will be nil if fd is not a valid file +// descriptor. On Unix systems, if the file descriptor is in +// non-blocking mode, NewFile will attempt to return a pollable File +// (one for which the SetDeadline methods work). +// +// After passing it to NewFile, fd may become invalid under the same +// conditions described in the comments of the Fd method, and the same +// constraints apply. 
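+//
+// A hedged usage sketch (descriptor 3 stands in for one inherited from a
+// parent process):
+//
+//	f := os.NewFile(uintptr(3), "inherited-pipe")
+//	if f == nil {
+//		// fd was not a valid descriptor
+//	}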
+func NewFile(fd uintptr, name string) *File { + fdi := int(fd) + if fdi < 0 { + return nil + } + + kind := kindNewFile + appendMode := false + if flags, err := unix.Fcntl(fdi, syscall.F_GETFL, 0); err == nil { + if unix.HasNonblockFlag(flags) { + kind = kindNonBlock + } + appendMode = flags&syscall.O_APPEND != 0 + } + f := newFile(fdi, name, kind) + f.appendMode = appendMode + return f +} + +// net_newUnixFile is a hidden entry point called by net.conn.File. +// This is used so that a nonblocking network connection will become +// blocking if code calls the Fd method. We don't want that for direct +// calls to NewFile: passing a nonblocking descriptor to NewFile should +// remain nonblocking if you get it back using Fd. But for net.conn.File +// the call to NewFile is hidden from the user. Historically in that case +// the Fd method has returned a blocking descriptor, and we want to +// retain that behavior because existing code expects it and depends on it. +// +//go:linkname net_newUnixFile net.newUnixFile +func net_newUnixFile(fd int, name string) *File { + if fd < 0 { + panic("invalid FD") + } + + f := newFile(fd, name, kindNonBlock) + f.nonblock = true // tell Fd to return blocking descriptor + return f +} + +// newFileKind describes the kind of file to newFile. +type newFileKind int + +const ( + // kindNewFile means that the descriptor was passed to us via NewFile. + kindNewFile newFileKind = iota + // kindOpenFile means that the descriptor was opened using + // Open, Create, or OpenFile (without O_NONBLOCK). + kindOpenFile + // kindPipe means that the descriptor was opened using Pipe. + kindPipe + // kindNonBlock means that the descriptor is already in + // non-blocking mode. + kindNonBlock + // kindNoPoll means that we should not put the descriptor into + // non-blocking mode, because we know it is not a pipe or FIFO. + // Used by openDirAt for directories. + kindNoPoll +) + +// newFile is like NewFile, but if called from OpenFile or Pipe +// (as passed in the kind parameter) it tries to add the file to +// the runtime poller. +func newFile(fd int, name string, kind newFileKind) *File { + f := &File{&file{ + pfd: poll.FD{ + Sysfd: fd, + IsStream: true, + ZeroReadIsEOF: true, + }, + name: name, + stdoutOrErr: fd == 1 || fd == 2, + }} + + pollable := kind == kindOpenFile || kind == kindPipe || kind == kindNonBlock + + // If the caller passed a non-blocking filedes (kindNonBlock), + // we assume they know what they are doing so we allow it to be + // used with kqueue. + if kind == kindOpenFile { + switch runtime.GOOS { + case "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd": + var st syscall.Stat_t + err := ignoringEINTR(func() error { + return syscall.Fstat(fd, &st) + }) + typ := st.Mode & syscall.S_IFMT + // Don't try to use kqueue with regular files on *BSDs. + // On FreeBSD a regular file is always + // reported as ready for writing. + // On Dragonfly, NetBSD and OpenBSD the fd is signaled + // only once as ready (both read and write). + // Issue 19093. + // Also don't add directories to the netpoller. + if err == nil && (typ == syscall.S_IFREG || typ == syscall.S_IFDIR) { + pollable = false + } + + // In addition to the behavior described above for regular files, + // on Darwin, kqueue does not work properly with fifos: + // closing the last writer does not cause a kqueue event + // for any readers. See issue #24164. 
+ if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && typ == syscall.S_IFIFO { + pollable = false + } + } + } + + clearNonBlock := false + if pollable { + if kind == kindNonBlock { + // The descriptor is already in non-blocking mode. + // We only set f.nonblock if we put the file into + // non-blocking mode. + } else if err := syscall.SetNonblock(fd, true); err == nil { + f.nonblock = true + clearNonBlock = true + } else { + pollable = false + } + } + + // An error here indicates a failure to register + // with the netpoll system. That can happen for + // a file descriptor that is not supported by + // epoll/kqueue; for example, disk files on + // Linux systems. We assume that any real error + // will show up in later I/O. + // We do restore the blocking behavior if it was set by us. + if pollErr := f.pfd.Init("file", pollable); pollErr != nil && clearNonBlock { + if err := syscall.SetNonblock(fd, false); err == nil { + f.nonblock = false + } + } + + runtime.SetFinalizer(f.file, (*file).close) + return f +} + +func sigpipe() // implemented in package runtime + +// epipecheck raises SIGPIPE if we get an EPIPE error on standard +// output or standard error. See the SIGPIPE docs in os/signal, and +// issue 11845. +func epipecheck(file *File, e error) { + if e == syscall.EPIPE && file.stdoutOrErr { + sigpipe() + } +} + +// DevNull is the name of the operating system's “null device.” +// On Unix-like systems, it is "/dev/null"; on Windows, "NUL". +const DevNull = "/dev/null" + +// openFileNolog is the Unix implementation of OpenFile. +// Changes here should be reflected in openDirAt, if relevant. +func openFileNolog(name string, flag int, perm FileMode) (*File, error) { + setSticky := false + if !supportsCreateWithStickyBit && flag&O_CREATE != 0 && perm&ModeSticky != 0 { + if _, err := Stat(name); IsNotExist(err) { + setSticky = true + } + } + + var r int + var s poll.SysFile + for { + var e error + r, s, e = open(name, flag|syscall.O_CLOEXEC, syscallMode(perm)) + if e == nil { + break + } + + // We have to check EINTR here, per issues 11180 and 39237. + if e == syscall.EINTR { + continue + } + + return nil, &PathError{Op: "open", Path: name, Err: e} + } + + // open(2) itself won't handle the sticky bit on *BSD and Solaris + if setSticky { + setStickyBit(name) + } + + // There's a race here with fork/exec, which we are + // content to live with. See ../syscall/exec_unix.go. + if !supportsCloseOnExec { + syscall.CloseOnExec(r) + } + + kind := kindOpenFile + if unix.HasNonblockFlag(flag) { + kind = kindNonBlock + } + + f := newFile(r, name, kind) + f.pfd.SysFile = s + return f, nil +} + +func (file *file) close() error { + if file == nil { + return syscall.EINVAL + } + if file.dirinfo != nil { + file.dirinfo.close() + file.dirinfo = nil + } + var err error + if e := file.pfd.Close(); e != nil { + if e == poll.ErrFileClosing { + e = ErrClosed + } + err = &PathError{Op: "close", Path: file.name, Err: e} + } + + // no need for a finalizer anymore + runtime.SetFinalizer(file, nil) + return err +} + +// seek sets the offset for the next Read or Write on file to offset, interpreted +// according to whence: 0 means relative to the origin of the file, 1 means +// relative to the current offset, and 2 means relative to the end. +// It returns the new offset and an error, if any. +func (f *File) seek(offset int64, whence int) (ret int64, err error) { + if f.dirinfo != nil { + // Free cached dirinfo, so we allocate a new one if we + // access this file as a directory again. See #35767 and #37161. 
+ f.dirinfo.close() + f.dirinfo = nil + } + ret, err = f.pfd.Seek(offset, whence) + runtime.KeepAlive(f) + return ret, err +} + +// Truncate changes the size of the named file. +// If the file is a symbolic link, it changes the size of the link's target. +// If there is an error, it will be of type *PathError. +func Truncate(name string, size int64) error { + e := ignoringEINTR(func() error { + return syscall.Truncate(name, size) + }) + if e != nil { + return &PathError{Op: "truncate", Path: name, Err: e} + } + return nil +} + +// Remove removes the named file or (empty) directory. +// If there is an error, it will be of type *PathError. +func Remove(name string) error { + // System call interface forces us to know + // whether name is a file or directory. + // Try both: it is cheaper on average than + // doing a Stat plus the right one. + e := ignoringEINTR(func() error { + return syscall.Unlink(name) + }) + if e == nil { + return nil + } + e1 := ignoringEINTR(func() error { + return syscall.Rmdir(name) + }) + if e1 == nil { + return nil + } + + // Both failed: figure out which error to return. + // OS X and Linux differ on whether unlink(dir) + // returns EISDIR, so can't use that. However, + // both agree that rmdir(file) returns ENOTDIR, + // so we can use that to decide which error is real. + // Rmdir might also return ENOTDIR if given a bad + // file path, like /etc/passwd/foo, but in that case, + // both errors will be ENOTDIR, so it's okay to + // use the error from unlink. + if e1 != syscall.ENOTDIR { + e = e1 + } + return &PathError{Op: "remove", Path: name, Err: e} +} + +func tempDir() string { + dir := Getenv("TMPDIR") + if dir == "" { + if runtime.GOOS == "android" { + dir = "/data/local/tmp" + } else { + dir = "/tmp" + } + } + return dir +} + +// Link creates newname as a hard link to the oldname file. +// If there is an error, it will be of type *LinkError. +func Link(oldname, newname string) error { + e := ignoringEINTR(func() error { + return syscall.Link(oldname, newname) + }) + if e != nil { + return &LinkError{"link", oldname, newname, e} + } + return nil +} + +// Symlink creates newname as a symbolic link to oldname. +// On Windows, a symlink to a non-existent oldname creates a file symlink; +// if oldname is later created as a directory the symlink will not work. +// If there is an error, it will be of type *LinkError. 
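+//
+// Caller-side usage sketch (the paths are hypothetical):
+//
+//	if err := os.Symlink("data/v2", "data/current"); err != nil {
+//		log.Fatal(err)
+//	}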
+func Symlink(oldname, newname string) error { + e := ignoringEINTR(func() error { + return syscall.Symlink(oldname, newname) + }) + if e != nil { + return &LinkError{"symlink", oldname, newname, e} + } + return nil +} + +func readlink(name string) (string, error) { + for len := 128; ; len *= 2 { + b := make([]byte, len) + var ( + n int + e error + ) + for { + n, e = fixCount(syscall.Readlink(name, b)) + if e != syscall.EINTR { + break + } + } + // buffer too small + if (runtime.GOOS == "aix" || runtime.GOOS == "wasip1") && e == syscall.ERANGE { + continue + } + if e != nil { + return "", &PathError{Op: "readlink", Path: name, Err: e} + } + if n < len { + return string(b[0:n]), nil + } + } +} + +type unixDirent struct { + parent string + name string + typ FileMode + info FileInfo +} + +func (d *unixDirent) Name() string { return d.name } +func (d *unixDirent) IsDir() bool { return d.typ.IsDir() } +func (d *unixDirent) Type() FileMode { return d.typ } + +func (d *unixDirent) Info() (FileInfo, error) { + if d.info != nil { + return d.info, nil + } + return lstat(d.parent + "/" + d.name) +} + +func (d *unixDirent) String() string { + return fs.FormatDirEntry(d) +} + +func newUnixDirent(parent, name string, typ FileMode) (DirEntry, error) { + ude := &unixDirent{ + parent: parent, + name: name, + typ: typ, + } + if typ != ^FileMode(0) && !testingForceReadDirLstat { + return ude, nil + } + + info, err := lstat(parent + "/" + name) + if err != nil { + return nil, err + } + + ude.typ = info.Mode().Type() + ude.info = info + return ude, nil +} diff --git a/platform/dbops/binaries/go/go/src/os/file_wasip1.go b/platform/dbops/binaries/go/go/src/os/file_wasip1.go new file mode 100644 index 0000000000000000000000000000000000000000..c9b05b3aedc286a868f148baaa16fdfff727f8fe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/file_wasip1.go @@ -0,0 +1,22 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasip1 + +package os + +import "internal/poll" + +// PollFD returns the poll.FD of the file. +// +// Other packages in std that also import internal/poll (such as net) +// can use a type assertion to access this extension method so that +// they can pass the *poll.FD to functions like poll.Splice. +// +// There is an equivalent function in net.rawConn. +// +// PollFD is not intended for use outside the standard library. +func (f *file) PollFD() *poll.FD { + return &f.pfd +} diff --git a/platform/dbops/binaries/go/go/src/os/file_windows.go b/platform/dbops/binaries/go/go/src/os/file_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..8b04ed6e47e39a6e69299b0217c3474c100e756c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/file_windows.go @@ -0,0 +1,447 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "errors" + "internal/poll" + "internal/syscall/windows" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// This matches the value in syscall/syscall_windows.go. +const _UTIME_OMIT = -1 + +// file is the real representation of *File. +// The extra level of indirection ensures that no clients of os +// can overwrite this data, which could cause the finalizer +// to close the wrong file descriptor. 
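+// +// For instance, a client assignment such as (hypothetical misuse): +// +//	*f1 = *f2 +// +// copies only the shared *file pointer; each finalizer stays attached to +// its own inner *file and so never closes a descriptor out from under a +// File that still refers to it.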
+type file struct { + pfd poll.FD + name string + dirinfo *dirInfo // nil unless directory being read + appendMode bool // whether file is opened for appending +} + +// Fd returns the Windows handle referencing the open file. +// If f is closed, the file descriptor becomes invalid. +// If f is garbage collected, a finalizer may close the file descriptor, +// making it invalid; see runtime.SetFinalizer for more information on when +// a finalizer might be run. On Unix systems this will cause the SetDeadline +// methods to stop working. +func (file *File) Fd() uintptr { + if file == nil { + return uintptr(syscall.InvalidHandle) + } + return uintptr(file.pfd.Sysfd) +} + +// newFile returns a new File with the given file handle and name. +// Unlike NewFile, it does not check that h is syscall.InvalidHandle. +func newFile(h syscall.Handle, name string, kind string) *File { + if kind == "file" { + var m uint32 + if syscall.GetConsoleMode(h, &m) == nil { + kind = "console" + } + if t, err := syscall.GetFileType(h); err == nil && t == syscall.FILE_TYPE_PIPE { + kind = "pipe" + } + } + + f := &File{&file{ + pfd: poll.FD{ + Sysfd: h, + IsStream: true, + ZeroReadIsEOF: true, + }, + name: name, + }} + runtime.SetFinalizer(f.file, (*file).close) + + // Ignore initialization errors. + // Assume any problems will show up in later I/O. + f.pfd.Init(kind, false) + + return f +} + +// newConsoleFile creates new File that will be used as console. +func newConsoleFile(h syscall.Handle, name string) *File { + return newFile(h, name, "console") +} + +// NewFile returns a new File with the given file descriptor and +// name. The returned value will be nil if fd is not a valid file +// descriptor. +func NewFile(fd uintptr, name string) *File { + h := syscall.Handle(fd) + if h == syscall.InvalidHandle { + return nil + } + return newFile(h, name, "file") +} + +func epipecheck(file *File, e error) { +} + +// DevNull is the name of the operating system's “null device.” +// On Unix-like systems, it is "/dev/null"; on Windows, "NUL". +const DevNull = "NUL" + +// openFileNolog is the Windows implementation of OpenFile. +func openFileNolog(name string, flag int, perm FileMode) (*File, error) { + if name == "" { + return nil, &PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + path := fixLongPath(name) + r, e := syscall.Open(path, flag|syscall.O_CLOEXEC, syscallMode(perm)) + if e != nil { + // We should return EISDIR when we are trying to open a directory with write access. 
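+ // Windows reports ERROR_ACCESS_DENIED in that case; converting it lets + // portable callers keep testing for the POSIX-style error, e.g. + // (illustrative; dirPath is hypothetical): + // + //	_, err := os.OpenFile(dirPath, os.O_WRONLY, 0) + //	errors.Is(err, syscall.EISDIR) // true after the conversion below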
+ if e == syscall.ERROR_ACCESS_DENIED && (flag&O_WRONLY != 0 || flag&O_RDWR != 0) { + pathp, e1 := syscall.UTF16PtrFromString(path) + if e1 == nil { + var fa syscall.Win32FileAttributeData + e1 = syscall.GetFileAttributesEx(pathp, syscall.GetFileExInfoStandard, (*byte)(unsafe.Pointer(&fa))) + if e1 == nil && fa.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + e = syscall.EISDIR + } + } + } + return nil, &PathError{Op: "open", Path: name, Err: e} + } + f, e := newFile(r, name, "file"), nil + if e != nil { + return nil, &PathError{Op: "open", Path: name, Err: e} + } + return f, nil +} + +func (file *file) close() error { + if file == nil { + return syscall.EINVAL + } + if file.dirinfo != nil { + file.dirinfo.close() + file.dirinfo = nil + } + var err error + if e := file.pfd.Close(); e != nil { + if e == poll.ErrFileClosing { + e = ErrClosed + } + err = &PathError{Op: "close", Path: file.name, Err: e} + } + + // no need for a finalizer anymore + runtime.SetFinalizer(file, nil) + return err +} + +// seek sets the offset for the next Read or Write on file to offset, interpreted +// according to whence: 0 means relative to the origin of the file, 1 means +// relative to the current offset, and 2 means relative to the end. +// It returns the new offset and an error, if any. +func (f *File) seek(offset int64, whence int) (ret int64, err error) { + if f.dirinfo != nil { + // Free cached dirinfo, so we allocate a new one if we + // access this file as a directory again. See #35767 and #37161. + f.dirinfo.close() + f.dirinfo = nil + } + ret, err = f.pfd.Seek(offset, whence) + runtime.KeepAlive(f) + return ret, err +} + +// Truncate changes the size of the named file. +// If the file is a symbolic link, it changes the size of the link's target. +func Truncate(name string, size int64) error { + f, e := OpenFile(name, O_WRONLY, 0666) + if e != nil { + return e + } + defer f.Close() + e1 := f.Truncate(size) + if e1 != nil { + return e1 + } + return nil +} + +// Remove removes the named file or directory. +// If there is an error, it will be of type *PathError. +func Remove(name string) error { + p, e := syscall.UTF16PtrFromString(fixLongPath(name)) + if e != nil { + return &PathError{Op: "remove", Path: name, Err: e} + } + + // Go file interface forces us to know whether + // name is a file or directory. Try both. + e = syscall.DeleteFile(p) + if e == nil { + return nil + } + e1 := syscall.RemoveDirectory(p) + if e1 == nil { + return nil + } + + // Both failed: figure out which error to return. + if e1 != e { + a, e2 := syscall.GetFileAttributes(p) + if e2 != nil { + e = e2 + } else { + if a&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + e = e1 + } else if a&syscall.FILE_ATTRIBUTE_READONLY != 0 { + if e1 = syscall.SetFileAttributes(p, a&^syscall.FILE_ATTRIBUTE_READONLY); e1 == nil { + if e = syscall.DeleteFile(p); e == nil { + return nil + } + } + } + } + } + return &PathError{Op: "remove", Path: name, Err: e} +} + +func rename(oldname, newname string) error { + e := windows.Rename(fixLongPath(oldname), fixLongPath(newname)) + if e != nil { + return &LinkError{"rename", oldname, newname, e} + } + return nil +} + +// Pipe returns a connected pair of Files; reads from r return bytes written to w. +// It returns the files and an error, if any. The Windows handles underlying +// the returned files are marked as inheritable by child processes. 
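+// +// Typical use (an illustrative sketch): +// +//	r, w, err := os.Pipe() +//	if err != nil { +//		log.Fatal(err) +//	} +//	go func() { +//		w.Write([]byte("hello")) +//		w.Close() +//	}() +//	data, _ := io.ReadAll(r) // data == []byte("hello") +//	r.Close()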
+func Pipe() (r *File, w *File, err error) { + var p [2]syscall.Handle + e := syscall.Pipe(p[:]) + if e != nil { + return nil, nil, NewSyscallError("pipe", e) + } + return newFile(p[0], "|0", "pipe"), newFile(p[1], "|1", "pipe"), nil +} + +var ( + useGetTempPath2Once sync.Once + useGetTempPath2 bool +) + +func tempDir() string { + useGetTempPath2Once.Do(func() { + useGetTempPath2 = (windows.ErrorLoadingGetTempPath2() == nil) + }) + getTempPath := syscall.GetTempPath + if useGetTempPath2 { + getTempPath = windows.GetTempPath2 + } + n := uint32(syscall.MAX_PATH) + for { + b := make([]uint16, n) + n, _ = getTempPath(uint32(len(b)), &b[0]) + if n > uint32(len(b)) { + continue + } + if n == 3 && b[1] == ':' && b[2] == '\\' { + // Do nothing for path, like C:\. + } else if n > 0 && b[n-1] == '\\' { + // Otherwise remove terminating \. + n-- + } + return syscall.UTF16ToString(b[:n]) + } +} + +// Link creates newname as a hard link to the oldname file. +// If there is an error, it will be of type *LinkError. +func Link(oldname, newname string) error { + n, err := syscall.UTF16PtrFromString(fixLongPath(newname)) + if err != nil { + return &LinkError{"link", oldname, newname, err} + } + o, err := syscall.UTF16PtrFromString(fixLongPath(oldname)) + if err != nil { + return &LinkError{"link", oldname, newname, err} + } + err = syscall.CreateHardLink(n, o, 0) + if err != nil { + return &LinkError{"link", oldname, newname, err} + } + return nil +} + +// Symlink creates newname as a symbolic link to oldname. +// On Windows, a symlink to a non-existent oldname creates a file symlink; +// if oldname is later created as a directory the symlink will not work. +// If there is an error, it will be of type *LinkError. +func Symlink(oldname, newname string) error { + // '/' does not work in link's content + oldname = fromSlash(oldname) + + // need the exact location of the oldname when it's relative to determine if it's a directory + destpath := oldname + if v := volumeName(oldname); v == "" { + if len(oldname) > 0 && IsPathSeparator(oldname[0]) { + // oldname is relative to the volume containing newname. + if v = volumeName(newname); v != "" { + // Prepend the volume explicitly, because it may be different from the + // volume of the current working directory. + destpath = v + oldname + } + } else { + // oldname is relative to newname. + destpath = dirname(newname) + `\` + oldname + } + } + + fi, err := Stat(destpath) + isdir := err == nil && fi.IsDir() + + n, err := syscall.UTF16PtrFromString(fixLongPath(newname)) + if err != nil { + return &LinkError{"symlink", oldname, newname, err} + } + o, err := syscall.UTF16PtrFromString(fixLongPath(oldname)) + if err != nil { + return &LinkError{"symlink", oldname, newname, err} + } + + var flags uint32 = windows.SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE + if isdir { + flags |= syscall.SYMBOLIC_LINK_FLAG_DIRECTORY + } + err = syscall.CreateSymbolicLink(n, o, flags) + if err != nil { + // the unprivileged create flag is unsupported + // below Windows 10 (1703, v10.0.14972). retry without it. + flags &^= windows.SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE + err = syscall.CreateSymbolicLink(n, o, flags) + if err != nil { + return &LinkError{"symlink", oldname, newname, err} + } + } + return nil +} + +// openSymlink calls CreateFile Windows API with FILE_FLAG_OPEN_REPARSE_POINT +// parameter, so that Windows does not follow symlink, if path is a symlink. +// openSymlink returns opened file handle. 
+func openSymlink(path string) (syscall.Handle, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + attrs := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS) + // Use FILE_FLAG_OPEN_REPARSE_POINT, otherwise CreateFile will follow symlink. + // See https://docs.microsoft.com/en-us/windows/desktop/FileIO/symbolic-link-effects-on-file-systems-functions#createfile-and-createfiletransacted + attrs |= syscall.FILE_FLAG_OPEN_REPARSE_POINT + h, err := syscall.CreateFile(p, 0, 0, nil, syscall.OPEN_EXISTING, attrs, 0) + if err != nil { + return 0, err + } + return h, nil +} + +// normaliseLinkPath converts absolute paths returned by +// DeviceIoControl(h, FSCTL_GET_REPARSE_POINT, ...) +// into paths acceptable by all Windows APIs. +// For example, it converts +// +// \??\C:\foo\bar into C:\foo\bar +// \??\UNC\foo\bar into \\foo\bar +// \??\Volume{abc}\ into C:\ +func normaliseLinkPath(path string) (string, error) { + if len(path) < 4 || path[:4] != `\??\` { + // unexpected path, return it as is + return path, nil + } + // we have path that start with \??\ + s := path[4:] + switch { + case len(s) >= 2 && s[1] == ':': // \??\C:\foo\bar + return s, nil + case len(s) >= 4 && s[:4] == `UNC\`: // \??\UNC\foo\bar + return `\\` + s[4:], nil + } + + // handle paths, like \??\Volume{abc}\... + + h, err := openSymlink(path) + if err != nil { + return "", err + } + defer syscall.CloseHandle(h) + + buf := make([]uint16, 100) + for { + n, err := windows.GetFinalPathNameByHandle(h, &buf[0], uint32(len(buf)), windows.VOLUME_NAME_DOS) + if err != nil { + return "", err + } + if n < uint32(len(buf)) { + break + } + buf = make([]uint16, n) + } + s = syscall.UTF16ToString(buf) + if len(s) > 4 && s[:4] == `\\?\` { + s = s[4:] + if len(s) > 3 && s[:3] == `UNC` { + // return path like \\server\share\... + return `\` + s[3:], nil + } + return s, nil + } + return "", errors.New("GetFinalPathNameByHandle returned unexpected path: " + s) +} + +func readReparseLink(path string) (string, error) { + h, err := openSymlink(path) + if err != nil { + return "", err + } + defer syscall.CloseHandle(h) + + rdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE) + var bytesReturned uint32 + err = syscall.DeviceIoControl(h, syscall.FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) + if err != nil { + return "", err + } + + rdb := (*windows.REPARSE_DATA_BUFFER)(unsafe.Pointer(&rdbbuf[0])) + switch rdb.ReparseTag { + case syscall.IO_REPARSE_TAG_SYMLINK: + rb := (*windows.SymbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.DUMMYUNIONNAME)) + s := rb.Path() + if rb.Flags&windows.SYMLINK_FLAG_RELATIVE != 0 { + return s, nil + } + return normaliseLinkPath(s) + case windows.IO_REPARSE_TAG_MOUNT_POINT: + return normaliseLinkPath((*windows.MountPointReparseBuffer)(unsafe.Pointer(&rdb.DUMMYUNIONNAME)).Path()) + default: + // the path is not a symlink or junction but another type of reparse + // point + return "", syscall.ENOENT + } +} + +func readlink(name string) (string, error) { + s, err := readReparseLink(fixLongPath(name)) + if err != nil { + return "", &PathError{Op: "readlink", Path: name, Err: err} + } + return s, nil +} diff --git a/platform/dbops/binaries/go/go/src/os/getwd.go b/platform/dbops/binaries/go/go/src/os/getwd.go new file mode 100644 index 0000000000000000000000000000000000000000..90604cf2f4b18e57a04cdcf5f559b61a0ef0be3e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/getwd.go @@ -0,0 +1,126 @@ +// Copyright 2009 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "runtime" + "sync" + "syscall" +) + +var getwdCache struct { + sync.Mutex + dir string +} + +// Getwd returns a rooted path name corresponding to the +// current directory. If the current directory can be +// reached via multiple paths (due to symbolic links), +// Getwd may return any one of them. +func Getwd() (dir string, err error) { + if runtime.GOOS == "windows" || runtime.GOOS == "plan9" { + return syscall.Getwd() + } + + // Clumsy but widespread kludge: + // if $PWD is set and matches ".", use it. + dot, err := statNolog(".") + if err != nil { + return "", err + } + dir = Getenv("PWD") + if len(dir) > 0 && dir[0] == '/' { + d, err := statNolog(dir) + if err == nil && SameFile(dot, d) { + return dir, nil + } + } + + // If the operating system provides a Getwd call, use it. + // Otherwise, we're trying to find our way back to ".". + if syscall.ImplementsGetwd { + var ( + s string + e error + ) + for { + s, e = syscall.Getwd() + if e != syscall.EINTR { + break + } + } + return s, NewSyscallError("getwd", e) + } + + // Apply same kludge but to cached dir instead of $PWD. + getwdCache.Lock() + dir = getwdCache.dir + getwdCache.Unlock() + if len(dir) > 0 { + d, err := statNolog(dir) + if err == nil && SameFile(dot, d) { + return dir, nil + } + } + + // Root is a special case because it has no parent + // and ends in a slash. + root, err := statNolog("/") + if err != nil { + // Can't stat root - no hope. + return "", err + } + if SameFile(root, dot) { + return "/", nil + } + + // General algorithm: find name in parent + // and then find name of parent. Each iteration + // adds /name to the beginning of dir. + dir = "" + for parent := ".."; ; parent = "../" + parent { + if len(parent) >= 1024 { // Sanity check + return "", syscall.ENAMETOOLONG + } + fd, err := openFileNolog(parent, O_RDONLY, 0) + if err != nil { + return "", err + } + + for { + names, err := fd.Readdirnames(100) + if err != nil { + fd.Close() + return "", err + } + for _, name := range names { + d, _ := lstatNolog(parent + "/" + name) + if SameFile(d, dot) { + dir = "/" + name + dir + goto Found + } + } + } + + Found: + pd, err := fd.Stat() + fd.Close() + if err != nil { + return "", err + } + if SameFile(pd, root) { + break + } + // Set up for next round. + dot = pd + } + + // Save answer as hint to avoid the expensive path next time. + getwdCache.Lock() + getwdCache.dir = dir + getwdCache.Unlock() + + return dir, nil +} diff --git a/platform/dbops/binaries/go/go/src/os/os_test.go b/platform/dbops/binaries/go/go/src/os/os_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6adc3b547924be385752a6cf0a93dd7a07a43784 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/os_test.go @@ -0,0 +1,3360 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "errors" + "flag" + "fmt" + "internal/testenv" + "io" + "io/fs" + "log" + . 
"os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "runtime/debug" + "sort" + "strings" + "sync" + "syscall" + "testing" + "testing/fstest" + "time" +) + +func TestMain(m *testing.M) { + if Getenv("GO_OS_TEST_DRAIN_STDIN") == "1" { + Stdout.Close() + io.Copy(io.Discard, Stdin) + Exit(0) + } + + log.SetFlags(log.LstdFlags | log.Lshortfile) + + Exit(m.Run()) +} + +var dot = []string{ + "dir_unix.go", + "env.go", + "error.go", + "file.go", + "os_test.go", + "types.go", + "stat_darwin.go", + "stat_linux.go", +} + +type sysDir struct { + name string + files []string +} + +var sysdir = func() *sysDir { + switch runtime.GOOS { + case "android": + return &sysDir{ + "/system/lib", + []string{ + "libmedia.so", + "libpowermanager.so", + }, + } + case "ios": + wd, err := syscall.Getwd() + if err != nil { + wd = err.Error() + } + sd := &sysDir{ + filepath.Join(wd, "..", ".."), + []string{ + "ResourceRules.plist", + "Info.plist", + }, + } + found := true + for _, f := range sd.files { + path := filepath.Join(sd.name, f) + if _, err := Stat(path); err != nil { + found = false + break + } + } + if found { + return sd + } + // In a self-hosted iOS build the above files might + // not exist. Look for system files instead below. + case "windows": + return &sysDir{ + Getenv("SystemRoot") + "\\system32\\drivers\\etc", + []string{ + "networks", + "protocol", + "services", + }, + } + case "plan9": + return &sysDir{ + "/lib/ndb", + []string{ + "common", + "local", + }, + } + case "wasip1": + // wasmtime has issues resolving symbolic links that are often present + // in directories like /etc/group below (e.g. private/etc/group on OSX). + // For this reason we use files in the Go source tree instead. + return &sysDir{ + runtime.GOROOT(), + []string{ + "go.env", + "LICENSE", + "CONTRIBUTING.md", + }, + } + } + return &sysDir{ + "/etc", + []string{ + "group", + "hosts", + "passwd", + }, + } +}() + +func size(name string, t *testing.T) int64 { + file, err := Open(name) + if err != nil { + t.Fatal("open failed:", err) + } + defer func() { + if err := file.Close(); err != nil { + t.Error(err) + } + }() + n, err := io.Copy(io.Discard, file) + if err != nil { + t.Fatal(err) + } + return n +} + +func equal(name1, name2 string) (r bool) { + switch runtime.GOOS { + case "windows": + r = strings.EqualFold(name1, name2) + default: + r = name1 == name2 + } + return +} + +// localTmp returns a local temporary directory not on NFS. 
+func localTmp() string { + switch runtime.GOOS { + case "android", "ios", "windows": + return TempDir() + } + return "/tmp" +} + +func newFile(testName string, t *testing.T) (f *File) { + f, err := CreateTemp(localTmp(), "_Go_"+testName) + if err != nil { + t.Fatalf("TempFile %s: %s", testName, err) + } + return +} + +func newDir(testName string, t *testing.T) (name string) { + name, err := MkdirTemp(localTmp(), "_Go_"+testName) + if err != nil { + t.Fatalf("TempDir %s: %s", testName, err) + } + return +} + +var sfdir = sysdir.name +var sfname = sysdir.files[0] + +func TestStat(t *testing.T) { + t.Parallel() + + path := sfdir + "/" + sfname + dir, err := Stat(path) + if err != nil { + t.Fatal("stat failed:", err) + } + if !equal(sfname, dir.Name()) { + t.Error("name should be ", sfname, "; is", dir.Name()) + } + filesize := size(path, t) + if dir.Size() != filesize { + t.Error("size should be", filesize, "; is", dir.Size()) + } +} + +func TestStatError(t *testing.T) { + defer chtmpdir(t)() + + path := "no-such-file" + + fi, err := Stat(path) + if err == nil { + t.Fatal("got nil, want error") + } + if fi != nil { + t.Errorf("got %v, want nil", fi) + } + if perr, ok := err.(*PathError); !ok { + t.Errorf("got %T, want %T", err, perr) + } + + testenv.MustHaveSymlink(t) + + link := "symlink" + err = Symlink(path, link) + if err != nil { + t.Fatal(err) + } + + fi, err = Stat(link) + if err == nil { + t.Fatal("got nil, want error") + } + if fi != nil { + t.Errorf("got %v, want nil", fi) + } + if perr, ok := err.(*PathError); !ok { + t.Errorf("got %T, want %T", err, perr) + } +} + +func TestStatSymlinkLoop(t *testing.T) { + testenv.MustHaveSymlink(t) + + defer chtmpdir(t)() + + err := Symlink("x", "y") + if err != nil { + t.Fatal(err) + } + defer Remove("y") + + err = Symlink("y", "x") + if err != nil { + t.Fatal(err) + } + defer Remove("x") + + _, err = Stat("x") + if _, ok := err.(*fs.PathError); !ok { + t.Errorf("expected *PathError, got %T: %v\n", err, err) + } +} + +func TestFstat(t *testing.T) { + t.Parallel() + + path := sfdir + "/" + sfname + file, err1 := Open(path) + if err1 != nil { + t.Fatal("open failed:", err1) + } + defer file.Close() + dir, err2 := file.Stat() + if err2 != nil { + t.Fatal("fstat failed:", err2) + } + if !equal(sfname, dir.Name()) { + t.Error("name should be ", sfname, "; is", dir.Name()) + } + filesize := size(path, t) + if dir.Size() != filesize { + t.Error("size should be", filesize, "; is", dir.Size()) + } +} + +func TestLstat(t *testing.T) { + t.Parallel() + + path := sfdir + "/" + sfname + dir, err := Lstat(path) + if err != nil { + t.Fatal("lstat failed:", err) + } + if !equal(sfname, dir.Name()) { + t.Error("name should be ", sfname, "; is", dir.Name()) + } + if dir.Mode()&ModeSymlink == 0 { + filesize := size(path, t) + if dir.Size() != filesize { + t.Error("size should be", filesize, "; is", dir.Size()) + } + } +} + +// Read with length 0 should not return EOF. 
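+// That is, the contract under test is (sketch): +// +//	n, err := f.Read(make([]byte, 0)) +//	// expect n == 0 and err == nil, not io.EOF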
+func TestRead0(t *testing.T) { + t.Parallel() + + path := sfdir + "/" + sfname + f, err := Open(path) + if err != nil { + t.Fatal("open failed:", err) + } + defer f.Close() + + b := make([]byte, 0) + n, err := f.Read(b) + if n != 0 || err != nil { + t.Errorf("Read(0) = %d, %v, want 0, nil", n, err) + } + b = make([]byte, 100) + n, err = f.Read(b) + if n <= 0 || err != nil { + t.Errorf("Read(100) = %d, %v, want >0, nil", n, err) + } +} + +// Reading a closed file should return ErrClosed error +func TestReadClosed(t *testing.T) { + t.Parallel() + + path := sfdir + "/" + sfname + file, err := Open(path) + if err != nil { + t.Fatal("open failed:", err) + } + file.Close() // close immediately + + b := make([]byte, 100) + _, err = file.Read(b) + + e, ok := err.(*PathError) + if !ok || e.Err != ErrClosed { + t.Fatalf("Read: got %T(%v), want %T(%v)", err, err, e, ErrClosed) + } +} + +func testReaddirnames(dir string, contents []string) func(*testing.T) { + return func(t *testing.T) { + t.Parallel() + + file, err := Open(dir) + if err != nil { + t.Fatalf("open %q failed: %v", dir, err) + } + defer file.Close() + s, err2 := file.Readdirnames(-1) + if err2 != nil { + t.Fatalf("Readdirnames %q failed: %v", dir, err2) + } + for _, m := range contents { + found := false + for _, n := range s { + if n == "." || n == ".." { + t.Errorf("got %q in directory", n) + } + if !equal(m, n) { + continue + } + if found { + t.Error("present twice:", m) + } + found = true + } + if !found { + t.Error("could not find", m) + } + } + if s == nil { + t.Error("Readdirnames returned nil instead of empty slice") + } + } +} + +func testReaddir(dir string, contents []string) func(*testing.T) { + return func(t *testing.T) { + t.Parallel() + + file, err := Open(dir) + if err != nil { + t.Fatalf("open %q failed: %v", dir, err) + } + defer file.Close() + s, err2 := file.Readdir(-1) + if err2 != nil { + t.Fatalf("Readdir %q failed: %v", dir, err2) + } + for _, m := range contents { + found := false + for _, n := range s { + if n.Name() == "." || n.Name() == ".." { + t.Errorf("got %q in directory", n.Name()) + } + if !equal(m, n.Name()) { + continue + } + if found { + t.Error("present twice:", m) + } + found = true + } + if !found { + t.Error("could not find", m) + } + } + if s == nil { + t.Error("Readdir returned nil instead of empty slice") + } + } +} + +func testReadDir(dir string, contents []string) func(*testing.T) { + return func(t *testing.T) { + t.Parallel() + + file, err := Open(dir) + if err != nil { + t.Fatalf("open %q failed: %v", dir, err) + } + defer file.Close() + s, err2 := file.ReadDir(-1) + if err2 != nil { + t.Fatalf("ReadDir %q failed: %v", dir, err2) + } + for _, m := range contents { + found := false + for _, n := range s { + if n.Name() == "." || n.Name() == ".." 
{ + t.Errorf("got %q in directory", n) + } + if !equal(m, n.Name()) { + continue + } + if found { + t.Error("present twice:", m) + } + found = true + lstat, err := Lstat(dir + "/" + m) + if err != nil { + t.Fatal(err) + } + if n.IsDir() != lstat.IsDir() { + t.Errorf("%s: IsDir=%v, want %v", m, n.IsDir(), lstat.IsDir()) + } + if n.Type() != lstat.Mode().Type() { + t.Errorf("%s: IsDir=%v, want %v", m, n.Type(), lstat.Mode().Type()) + } + info, err := n.Info() + if err != nil { + t.Errorf("%s: Info: %v", m, err) + continue + } + if !SameFile(info, lstat) { + t.Errorf("%s: Info: SameFile(info, lstat) = false", m) + } + } + if !found { + t.Error("could not find", m) + } + } + if s == nil { + t.Error("ReadDir returned nil instead of empty slice") + } + } +} + +func TestFileReaddirnames(t *testing.T) { + t.Parallel() + + t.Run(".", testReaddirnames(".", dot)) + t.Run("sysdir", testReaddirnames(sysdir.name, sysdir.files)) + t.Run("TempDir", testReaddirnames(t.TempDir(), nil)) +} + +func TestFileReaddir(t *testing.T) { + t.Parallel() + + t.Run(".", testReaddir(".", dot)) + t.Run("sysdir", testReaddir(sysdir.name, sysdir.files)) + t.Run("TempDir", testReaddir(t.TempDir(), nil)) +} + +func TestFileReadDir(t *testing.T) { + t.Parallel() + + t.Run(".", testReadDir(".", dot)) + t.Run("sysdir", testReadDir(sysdir.name, sysdir.files)) + t.Run("TempDir", testReadDir(t.TempDir(), nil)) +} + +func benchmarkReaddirname(path string, b *testing.B) { + var nentries int + for i := 0; i < b.N; i++ { + f, err := Open(path) + if err != nil { + b.Fatalf("open %q failed: %v", path, err) + } + ns, err := f.Readdirnames(-1) + f.Close() + if err != nil { + b.Fatalf("readdirnames %q failed: %v", path, err) + } + nentries = len(ns) + } + b.Logf("benchmarkReaddirname %q: %d entries", path, nentries) +} + +func benchmarkReaddir(path string, b *testing.B) { + var nentries int + for i := 0; i < b.N; i++ { + f, err := Open(path) + if err != nil { + b.Fatalf("open %q failed: %v", path, err) + } + fs, err := f.Readdir(-1) + f.Close() + if err != nil { + b.Fatalf("readdir %q failed: %v", path, err) + } + nentries = len(fs) + } + b.Logf("benchmarkReaddir %q: %d entries", path, nentries) +} + +func benchmarkReadDir(path string, b *testing.B) { + var nentries int + for i := 0; i < b.N; i++ { + f, err := Open(path) + if err != nil { + b.Fatalf("open %q failed: %v", path, err) + } + fs, err := f.ReadDir(-1) + f.Close() + if err != nil { + b.Fatalf("readdir %q failed: %v", path, err) + } + nentries = len(fs) + } + b.Logf("benchmarkReadDir %q: %d entries", path, nentries) +} + +func BenchmarkReaddirname(b *testing.B) { + benchmarkReaddirname(".", b) +} + +func BenchmarkReaddir(b *testing.B) { + benchmarkReaddir(".", b) +} + +func BenchmarkReadDir(b *testing.B) { + benchmarkReadDir(".", b) +} + +func benchmarkStat(b *testing.B, path string) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := Stat(path) + if err != nil { + b.Fatalf("Stat(%q) failed: %v", path, err) + } + } +} + +func benchmarkLstat(b *testing.B, path string) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := Lstat(path) + if err != nil { + b.Fatalf("Lstat(%q) failed: %v", path, err) + } + } +} + +func BenchmarkStatDot(b *testing.B) { + benchmarkStat(b, ".") +} + +func BenchmarkStatFile(b *testing.B) { + benchmarkStat(b, filepath.Join(runtime.GOROOT(), "src/os/os_test.go")) +} + +func BenchmarkStatDir(b *testing.B) { + benchmarkStat(b, filepath.Join(runtime.GOROOT(), "src/os")) +} + +func BenchmarkLstatDot(b *testing.B) { + benchmarkLstat(b, ".") +} + +func 
BenchmarkLstatFile(b *testing.B) { + benchmarkLstat(b, filepath.Join(runtime.GOROOT(), "src/os/os_test.go")) +} + +func BenchmarkLstatDir(b *testing.B) { + benchmarkLstat(b, filepath.Join(runtime.GOROOT(), "src/os")) +} + +// Read the directory one entry at a time. +func smallReaddirnames(file *File, length int, t *testing.T) []string { + names := make([]string, length) + count := 0 + for { + d, err := file.Readdirnames(1) + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("readdirnames %q failed: %v", file.Name(), err) + } + if len(d) == 0 { + t.Fatalf("readdirnames %q returned empty slice and no error", file.Name()) + } + names[count] = d[0] + count++ + } + return names[0:count] +} + +// Check that reading a directory one entry at a time gives the same result +// as reading it all at once. +func TestReaddirnamesOneAtATime(t *testing.T) { + t.Parallel() + + // big directory that doesn't change often. + dir := "/usr/bin" + switch runtime.GOOS { + case "android": + dir = "/system/bin" + case "ios", "wasip1": + wd, err := Getwd() + if err != nil { + t.Fatal(err) + } + dir = wd + case "plan9": + dir = "/bin" + case "windows": + dir = Getenv("SystemRoot") + "\\system32" + } + file, err := Open(dir) + if err != nil { + t.Fatalf("open %q failed: %v", dir, err) + } + defer file.Close() + all, err1 := file.Readdirnames(-1) + if err1 != nil { + t.Fatalf("readdirnames %q failed: %v", dir, err1) + } + file1, err2 := Open(dir) + if err2 != nil { + t.Fatalf("open %q failed: %v", dir, err2) + } + defer file1.Close() + small := smallReaddirnames(file1, len(all)+100, t) // +100 in case we screw up + if len(small) < len(all) { + t.Fatalf("len(small) is %d, less than %d", len(small), len(all)) + } + for i, n := range all { + if small[i] != n { + t.Errorf("small read %q mismatch: %v", small[i], n) + } + } +} + +func TestReaddirNValues(t *testing.T) { + if testing.Short() { + t.Skip("test.short; skipping") + } + t.Parallel() + + dir := t.TempDir() + for i := 1; i <= 105; i++ { + f, err := Create(filepath.Join(dir, fmt.Sprintf("%d", i))) + if err != nil { + t.Fatalf("Create: %v", err) + } + f.Write([]byte(strings.Repeat("X", i))) + f.Close() + } + + var d *File + openDir := func() { + var err error + d, err = Open(dir) + if err != nil { + t.Fatalf("Open directory: %v", err) + } + } + + readdirExpect := func(n, want int, wantErr error) { + t.Helper() + fi, err := d.Readdir(n) + if err != wantErr { + t.Fatalf("Readdir of %d got error %v, want %v", n, err, wantErr) + } + if g, e := len(fi), want; g != e { + t.Errorf("Readdir of %d got %d files, want %d", n, g, e) + } + } + + readDirExpect := func(n, want int, wantErr error) { + t.Helper() + de, err := d.ReadDir(n) + if err != wantErr { + t.Fatalf("ReadDir of %d got error %v, want %v", n, err, wantErr) + } + if g, e := len(de), want; g != e { + t.Errorf("ReadDir of %d got %d files, want %d", n, g, e) + } + } + + readdirnamesExpect := func(n, want int, wantErr error) { + t.Helper() + fi, err := d.Readdirnames(n) + if err != wantErr { + t.Fatalf("Readdirnames of %d got error %v, want %v", n, err, wantErr) + } + if g, e := len(fi), want; g != e { + t.Errorf("Readdirnames of %d got %d files, want %d", n, g, e) + } + } + + for _, fn := range []func(int, int, error){readdirExpect, readdirnamesExpect, readDirExpect} { + // Test the slurp case + openDir() + fn(0, 105, nil) + fn(0, 0, nil) + d.Close() + + // Slurp with -1 instead + openDir() + fn(-1, 105, nil) + fn(-2, 0, nil) + fn(0, 0, nil) + d.Close() + + // Test the bounded case + openDir() + fn(1, 1, nil) + 
fn(2, 2, nil) + fn(105, 102, nil) // and tests buffer >100 case + fn(3, 0, io.EOF) + d.Close() + } +} + +func touch(t *testing.T, name string) { + f, err := Create(name) + if err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } +} + +func TestReaddirStatFailures(t *testing.T) { + switch runtime.GOOS { + case "windows", "plan9": + // Windows and Plan 9 already do this correctly, + // but are structured with different syscalls such + // that they don't use Lstat, so the hook below for + // testing it wouldn't work. + t.Skipf("skipping test on %v", runtime.GOOS) + } + + var xerr error // error to return for x + *LstatP = func(path string) (FileInfo, error) { + if xerr != nil && strings.HasSuffix(path, "x") { + return nil, xerr + } + return Lstat(path) + } + defer func() { *LstatP = Lstat }() + + dir := t.TempDir() + touch(t, filepath.Join(dir, "good1")) + touch(t, filepath.Join(dir, "x")) // will disappear or have an error + touch(t, filepath.Join(dir, "good2")) + readDir := func() ([]FileInfo, error) { + d, err := Open(dir) + if err != nil { + t.Fatal(err) + } + defer d.Close() + return d.Readdir(-1) + } + mustReadDir := func(testName string) []FileInfo { + fis, err := readDir() + if err != nil { + t.Fatalf("%s: Readdir: %v", testName, err) + } + return fis + } + names := func(fis []FileInfo) []string { + s := make([]string, len(fis)) + for i, fi := range fis { + s[i] = fi.Name() + } + sort.Strings(s) + return s + } + + if got, want := names(mustReadDir("initial readdir")), + []string{"good1", "good2", "x"}; !reflect.DeepEqual(got, want) { + t.Errorf("initial readdir got %q; want %q", got, want) + } + + xerr = ErrNotExist + if got, want := names(mustReadDir("with x disappearing")), + []string{"good1", "good2"}; !reflect.DeepEqual(got, want) { + t.Errorf("with x disappearing, got %q; want %q", got, want) + } + + xerr = errors.New("some real error") + if _, err := readDir(); err != xerr { + t.Errorf("with a non-ErrNotExist error, got error %v; want %v", err, xerr) + } +} + +// Readdir on a regular file should fail. +func TestReaddirOfFile(t *testing.T) { + t.Parallel() + + f, err := CreateTemp(t.TempDir(), "_Go_ReaddirOfFile") + if err != nil { + t.Fatal(err) + } + f.Write([]byte("foo")) + f.Close() + reg, err := Open(f.Name()) + if err != nil { + t.Fatal(err) + } + defer reg.Close() + + names, err := reg.Readdirnames(-1) + if err == nil { + t.Error("Readdirnames succeeded; want non-nil error") + } + var pe *PathError + if !errors.As(err, &pe) || pe.Path != f.Name() { + t.Errorf("Readdirnames returned %q; want a PathError with path %q", err, f.Name()) + } + if len(names) > 0 { + t.Errorf("unexpected dir names in regular file: %q", names) + } +} + +func TestHardLink(t *testing.T) { + testenv.MustHaveLink(t) + + defer chtmpdir(t)() + from, to := "hardlinktestfrom", "hardlinktestto" + file, err := Create(to) + if err != nil { + t.Fatalf("open %q failed: %v", to, err) + } + if err = file.Close(); err != nil { + t.Errorf("close %q failed: %v", to, err) + } + err = Link(to, from) + if err != nil { + t.Fatalf("link %q, %q failed: %v", to, from, err) + } + + none := "hardlinktestnone" + err = Link(none, none) + // Check the returned error is well-formed. 
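+ // A well-formed failure is a *LinkError naming the op and both paths, + // rendering as, e.g. (illustrative): + // + //	link hardlinktestnone hardlinktestnone: no such file or directory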
+ if lerr, ok := err.(*LinkError); !ok || lerr.Error() == "" { + t.Errorf("link %q, %q failed to return a valid error", none, none) + } + + tostat, err := Stat(to) + if err != nil { + t.Fatalf("stat %q failed: %v", to, err) + } + fromstat, err := Stat(from) + if err != nil { + t.Fatalf("stat %q failed: %v", from, err) + } + if !SameFile(tostat, fromstat) { + t.Errorf("link %q, %q did not create hard link", to, from) + } + // We should not be able to perform the same Link() a second time + err = Link(to, from) + switch err := err.(type) { + case *LinkError: + if err.Op != "link" { + t.Errorf("Link(%q, %q) err.Op = %q; want %q", to, from, err.Op, "link") + } + if err.Old != to { + t.Errorf("Link(%q, %q) err.Old = %q; want %q", to, from, err.Old, to) + } + if err.New != from { + t.Errorf("Link(%q, %q) err.New = %q; want %q", to, from, err.New, from) + } + if !IsExist(err.Err) { + t.Errorf("Link(%q, %q) err.Err = %q; want %q", to, from, err.Err, "file exists error") + } + case nil: + t.Errorf("link %q, %q: expected error, got nil", from, to) + default: + t.Errorf("link %q, %q: expected %T, got %T %v", from, to, new(LinkError), err, err) + } +} + +// chtmpdir changes the working directory to a new temporary directory and +// provides a cleanup function. +func chtmpdir(t *testing.T) func() { + oldwd, err := Getwd() + if err != nil { + t.Fatalf("chtmpdir: %v", err) + } + d, err := MkdirTemp("", "test") + if err != nil { + t.Fatalf("chtmpdir: %v", err) + } + if err := Chdir(d); err != nil { + t.Fatalf("chtmpdir: %v", err) + } + return func() { + if err := Chdir(oldwd); err != nil { + t.Fatalf("chtmpdir: %v", err) + } + RemoveAll(d) + } +} + +func TestSymlink(t *testing.T) { + testenv.MustHaveSymlink(t) + + defer chtmpdir(t)() + from, to := "symlinktestfrom", "symlinktestto" + file, err := Create(to) + if err != nil { + t.Fatalf("Create(%q) failed: %v", to, err) + } + if err = file.Close(); err != nil { + t.Errorf("Close(%q) failed: %v", to, err) + } + err = Symlink(to, from) + if err != nil { + t.Fatalf("Symlink(%q, %q) failed: %v", to, from, err) + } + tostat, err := Lstat(to) + if err != nil { + t.Fatalf("Lstat(%q) failed: %v", to, err) + } + if tostat.Mode()&ModeSymlink != 0 { + t.Fatalf("Lstat(%q).Mode()&ModeSymlink = %v, want 0", to, tostat.Mode()&ModeSymlink) + } + fromstat, err := Stat(from) + if err != nil { + t.Fatalf("Stat(%q) failed: %v", from, err) + } + if !SameFile(tostat, fromstat) { + t.Errorf("Symlink(%q, %q) did not create symlink", to, from) + } + fromstat, err = Lstat(from) + if err != nil { + t.Fatalf("Lstat(%q) failed: %v", from, err) + } + if fromstat.Mode()&ModeSymlink == 0 { + t.Fatalf("Lstat(%q).Mode()&ModeSymlink = 0, want %v", from, ModeSymlink) + } + fromstat, err = Stat(from) + if err != nil { + t.Fatalf("Stat(%q) failed: %v", from, err) + } + if fromstat.Name() != from { + t.Errorf("Stat(%q).Name() = %q, want %q", from, fromstat.Name(), from) + } + if fromstat.Mode()&ModeSymlink != 0 { + t.Fatalf("Stat(%q).Mode()&ModeSymlink = %v, want 0", from, fromstat.Mode()&ModeSymlink) + } + s, err := Readlink(from) + if err != nil { + t.Fatalf("Readlink(%q) failed: %v", from, err) + } + if s != to { + t.Fatalf("Readlink(%q) = %q, want %q", from, s, to) + } + file, err = Open(from) + if err != nil { + t.Fatalf("Open(%q) failed: %v", from, err) + } + file.Close() +} + +func TestLongSymlink(t *testing.T) { + testenv.MustHaveSymlink(t) + + defer chtmpdir(t)() + s := "0123456789abcdef" + // Long, but not too long: a common limit is 255. 
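+ // (15 copies of the 16-byte seed: 15 * 16 = 240 bytes, under 255.)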
+ s = s + s + s + s + s + s + s + s + s + s + s + s + s + s + s + from := "longsymlinktestfrom" + err := Symlink(s, from) + if err != nil { + t.Fatalf("symlink %q, %q failed: %v", s, from, err) + } + r, err := Readlink(from) + if err != nil { + t.Fatalf("readlink %q failed: %v", from, err) + } + if r != s { + t.Fatalf("after symlink %q != %q", r, s) + } +} + +func TestRename(t *testing.T) { + defer chtmpdir(t)() + from, to := "renamefrom", "renameto" + + file, err := Create(from) + if err != nil { + t.Fatalf("open %q failed: %v", from, err) + } + if err = file.Close(); err != nil { + t.Errorf("close %q failed: %v", from, err) + } + err = Rename(from, to) + if err != nil { + t.Fatalf("rename %q, %q failed: %v", to, from, err) + } + _, err = Stat(to) + if err != nil { + t.Errorf("stat %q failed: %v", to, err) + } +} + +func TestRenameOverwriteDest(t *testing.T) { + defer chtmpdir(t)() + from, to := "renamefrom", "renameto" + + toData := []byte("to") + fromData := []byte("from") + + err := WriteFile(to, toData, 0777) + if err != nil { + t.Fatalf("write file %q failed: %v", to, err) + } + + err = WriteFile(from, fromData, 0777) + if err != nil { + t.Fatalf("write file %q failed: %v", from, err) + } + err = Rename(from, to) + if err != nil { + t.Fatalf("rename %q, %q failed: %v", to, from, err) + } + + _, err = Stat(from) + if err == nil { + t.Errorf("from file %q still exists", from) + } + if err != nil && !IsNotExist(err) { + t.Fatalf("stat from: %v", err) + } + toFi, err := Stat(to) + if err != nil { + t.Fatalf("stat %q failed: %v", to, err) + } + if toFi.Size() != int64(len(fromData)) { + t.Errorf(`"to" size = %d; want %d (old "from" size)`, toFi.Size(), len(fromData)) + } +} + +func TestRenameFailed(t *testing.T) { + defer chtmpdir(t)() + from, to := "renamefrom", "renameto" + + err := Rename(from, to) + switch err := err.(type) { + case *LinkError: + if err.Op != "rename" { + t.Errorf("rename %q, %q: err.Op: want %q, got %q", from, to, "rename", err.Op) + } + if err.Old != from { + t.Errorf("rename %q, %q: err.Old: want %q, got %q", from, to, from, err.Old) + } + if err.New != to { + t.Errorf("rename %q, %q: err.New: want %q, got %q", from, to, to, err.New) + } + case nil: + t.Errorf("rename %q, %q: expected error, got nil", from, to) + default: + t.Errorf("rename %q, %q: expected %T, got %T %v", from, to, new(LinkError), err, err) + } +} + +func TestRenameNotExisting(t *testing.T) { + defer chtmpdir(t)() + from, to := "doesnt-exist", "dest" + + Mkdir(to, 0777) + + if err := Rename(from, to); !IsNotExist(err) { + t.Errorf("Rename(%q, %q) = %v; want an IsNotExist error", from, to, err) + } +} + +func TestRenameToDirFailed(t *testing.T) { + defer chtmpdir(t)() + from, to := "renamefrom", "renameto" + + Mkdir(from, 0777) + Mkdir(to, 0777) + + err := Rename(from, to) + switch err := err.(type) { + case *LinkError: + if err.Op != "rename" { + t.Errorf("rename %q, %q: err.Op: want %q, got %q", from, to, "rename", err.Op) + } + if err.Old != from { + t.Errorf("rename %q, %q: err.Old: want %q, got %q", from, to, from, err.Old) + } + if err.New != to { + t.Errorf("rename %q, %q: err.New: want %q, got %q", from, to, to, err.New) + } + case nil: + t.Errorf("rename %q, %q: expected error, got nil", from, to) + default: + t.Errorf("rename %q, %q: expected %T, got %T %v", from, to, new(LinkError), err, err) + } +} + +func TestRenameCaseDifference(pt *testing.T) { + from, to := "renameFROM", "RENAMEfrom" + tests := []struct { + name string + create func() error + }{ + {"dir", func() error { + return 
Mkdir(from, 0777) + }}, + {"file", func() error { + fd, err := Create(from) + if err != nil { + return err + } + return fd.Close() + }}, + } + + for _, test := range tests { + pt.Run(test.name, func(t *testing.T) { + defer chtmpdir(t)() + + if err := test.create(); err != nil { + t.Fatalf("failed to create test file: %s", err) + } + + if _, err := Stat(to); err != nil { + // Sanity check that the underlying filesystem is not case sensitive. + if IsNotExist(err) { + t.Skipf("case sensitive filesystem") + } + t.Fatalf("stat %q, got: %q", to, err) + } + + if err := Rename(from, to); err != nil { + t.Fatalf("unexpected error when renaming from %q to %q: %s", from, to, err) + } + + fd, err := Open(".") + if err != nil { + t.Fatalf("Open .: %s", err) + } + + // Stat does not return the real case of the file (it returns what the caller asked for), + // so we have to use readdir to get the real name of the file. + dirNames, err := fd.Readdirnames(-1) + fd.Close() + if err != nil { + t.Fatalf("readdirnames: %s", err) + } + + if dirNamesLen := len(dirNames); dirNamesLen != 1 { + t.Fatalf("unexpected dirNames len, got %d, want %d", dirNamesLen, 1) + } + + if dirNames[0] != to { + t.Errorf("unexpected name, got %q, want %q", dirNames[0], to) + } + }) + } +} + +func testStartProcess(dir, cmd string, args []string, expect string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + r, w, err := Pipe() + if err != nil { + t.Fatalf("Pipe: %v", err) + } + defer r.Close() + attr := &ProcAttr{Dir: dir, Files: []*File{nil, w, Stderr}} + p, err := StartProcess(cmd, args, attr) + if err != nil { + t.Fatalf("StartProcess: %v", err) + } + w.Close() + + var b strings.Builder + io.Copy(&b, r) + output := b.String() + + fi1, _ := Stat(strings.TrimSpace(output)) + fi2, _ := Stat(expect) + if !SameFile(fi1, fi2) { + t.Errorf("exec %q returned %q wanted %q", + strings.Join(append([]string{cmd}, args...), " "), output, expect) + } + p.Wait() + } +} + +func TestStartProcess(t *testing.T) { + testenv.MustHaveExec(t) + t.Parallel() + + var dir, cmd string + var args []string + switch runtime.GOOS { + case "android": + t.Skip("android doesn't have /bin/pwd") + case "windows": + cmd = Getenv("COMSPEC") + dir = Getenv("SystemRoot") + args = []string{"/c", "cd"} + default: + var err error + cmd, err = exec.LookPath("pwd") + if err != nil { + t.Fatalf("Can't find pwd: %v", err) + } + dir = "/" + args = []string{} + t.Logf("Testing with %v", cmd) + } + cmddir, cmdbase := filepath.Split(cmd) + args = append([]string{cmdbase}, args...) + t.Run("absolute", testStartProcess(dir, cmd, args, dir)) + t.Run("relative", testStartProcess(cmddir, cmdbase, args, cmddir)) +} + +func checkMode(t *testing.T, path string, mode FileMode) { + dir, err := Stat(path) + if err != nil { + t.Fatalf("Stat %q (looking for mode %#o): %s", path, mode, err) + } + if dir.Mode()&ModePerm != mode { + t.Errorf("Stat %q: mode %#o want %#o", path, dir.Mode(), mode) + } +} + +func TestChmod(t *testing.T) { + // Chmod is not supported on wasip1. 
+ if runtime.GOOS == "wasip1" { + t.Skip("Chmod is not supported on " + runtime.GOOS) + } + t.Parallel() + + f := newFile("TestChmod", t) + defer Remove(f.Name()) + defer f.Close() + // Creation mode is read write + + fm := FileMode(0456) + if runtime.GOOS == "windows" { + fm = FileMode(0444) // read-only file + } + if err := Chmod(f.Name(), fm); err != nil { + t.Fatalf("chmod %s %#o: %s", f.Name(), fm, err) + } + checkMode(t, f.Name(), fm) + + fm = FileMode(0123) + if runtime.GOOS == "windows" { + fm = FileMode(0666) // read-write file + } + if err := f.Chmod(fm); err != nil { + t.Fatalf("chmod %s %#o: %s", f.Name(), fm, err) + } + checkMode(t, f.Name(), fm) +} + +func checkSize(t *testing.T, f *File, size int64) { + t.Helper() + dir, err := f.Stat() + if err != nil { + t.Fatalf("Stat %q (looking for size %d): %s", f.Name(), size, err) + } + if dir.Size() != size { + t.Errorf("Stat %q: size %d want %d", f.Name(), dir.Size(), size) + } +} + +func TestFTruncate(t *testing.T) { + t.Parallel() + + f := newFile("TestFTruncate", t) + defer Remove(f.Name()) + defer f.Close() + + checkSize(t, f, 0) + f.Write([]byte("hello, world\n")) + checkSize(t, f, 13) + f.Truncate(10) + checkSize(t, f, 10) + f.Truncate(1024) + checkSize(t, f, 1024) + f.Truncate(0) + checkSize(t, f, 0) + _, err := f.Write([]byte("surprise!")) + if err == nil { + checkSize(t, f, 13+9) // wrote at offset past where hello, world was. + } +} + +func TestTruncate(t *testing.T) { + t.Parallel() + + f := newFile("TestTruncate", t) + defer Remove(f.Name()) + defer f.Close() + + checkSize(t, f, 0) + f.Write([]byte("hello, world\n")) + checkSize(t, f, 13) + Truncate(f.Name(), 10) + checkSize(t, f, 10) + Truncate(f.Name(), 1024) + checkSize(t, f, 1024) + Truncate(f.Name(), 0) + checkSize(t, f, 0) + _, err := f.Write([]byte("surprise!")) + if err == nil { + checkSize(t, f, 13+9) // wrote at offset past where hello, world was. + } +} + +func TestTruncateNonexistentFile(t *testing.T) { + t.Parallel() + + assertPathError := func(t testing.TB, path string, err error) { + t.Helper() + if pe, ok := err.(*PathError); !ok || !IsNotExist(err) || pe.Path != path { + t.Errorf("got error: %v\nwant an ErrNotExist PathError with path %q", err, path) + } + } + + path := filepath.Join(t.TempDir(), "nonexistent") + + err := Truncate(path, 1) + assertPathError(t, path, err) + + // Truncate shouldn't create any new file. + _, err = Stat(path) + assertPathError(t, path, err) +} + +// Use TempDir (via newFile) to make sure we're on a local file system, +// so that timings are not distorted by latency and caching. +// On NFS, timings can be off due to caching of meta-data on +// NFS servers (Issue 848). 
+func TestChtimes(t *testing.T) { + t.Parallel() + + f := newFile("TestChtimes", t) + defer Remove(f.Name()) + + f.Write([]byte("hello, world\n")) + f.Close() + + testChtimes(t, f.Name()) +} + +func TestChtimesWithZeroTimes(t *testing.T) { + file := newFile("chtimes-with-zero", t) + _, err := file.Write([]byte("hello, world\n")) + if err != nil { + t.Fatalf("Write: %s", err) + } + fName := file.Name() + defer Remove(file.Name()) + err = file.Close() + if err != nil { + t.Errorf("%v", err) + } + fs, err := Stat(fName) + if err != nil { + t.Fatal(err) + } + startAtime := Atime(fs) + startMtime := fs.ModTime() + switch runtime.GOOS { + case "js": + startAtime = startAtime.Truncate(time.Second) + startMtime = startMtime.Truncate(time.Second) + } + at0 := startAtime + mt0 := startMtime + t0 := startMtime.Truncate(time.Second).Add(1 * time.Hour) + + tests := []struct { + aTime time.Time + mTime time.Time + wantATime time.Time + wantMTime time.Time + }{ + { + aTime: time.Time{}, + mTime: time.Time{}, + wantATime: startAtime, + wantMTime: startMtime, + }, + { + aTime: t0.Add(200 * time.Second), + mTime: time.Time{}, + wantATime: t0.Add(200 * time.Second), + wantMTime: startMtime, + }, + { + aTime: time.Time{}, + mTime: t0.Add(100 * time.Second), + wantATime: t0.Add(200 * time.Second), + wantMTime: t0.Add(100 * time.Second), + }, + { + aTime: t0.Add(300 * time.Second), + mTime: t0.Add(100 * time.Second), + wantATime: t0.Add(300 * time.Second), + wantMTime: t0.Add(100 * time.Second), + }, + } + + for _, tt := range tests { + // Now change the times accordingly. + if err := Chtimes(fName, tt.aTime, tt.mTime); err != nil { + t.Error(err) + } + + // Finally verify the expectations. + fs, err = Stat(fName) + if err != nil { + t.Error(err) + } + at0 = Atime(fs) + mt0 = fs.ModTime() + + if got, want := at0, tt.wantATime; !got.Equal(want) { + errormsg := fmt.Sprintf("AccessTime mismatch with values ATime:%q-MTime:%q\ngot: %q\nwant: %q", tt.aTime, tt.mTime, got, want) + switch runtime.GOOS { + case "plan9": + // Mtime is the time of the last change of + // content. Similarly, atime is set whenever + // the contents are accessed; also, it is set + // whenever mtime is set. + case "windows": + t.Error(errormsg) + default: // unix's + if got, want := at0, tt.wantATime; !got.Equal(want) { + mounts, err := ReadFile("/bin/mounts") + if err != nil { + mounts, err = ReadFile("/etc/mtab") + } + if strings.Contains(string(mounts), "noatime") { + t.Log(errormsg) + t.Log("A filesystem is mounted with noatime; ignoring.") + } else { + switch runtime.GOOS { + case "netbsd", "dragonfly": + // On a 64-bit implementation, birth time is generally supported and cannot be changed. + // When supported, atime update is restricted and depends on the file system and on the + // OS configuration. + if strings.Contains(runtime.GOARCH, "64") { + t.Log(errormsg) + t.Log("Filesystem might not support atime changes; ignoring.") + } + default: + t.Error(errormsg) + } + } + } + } + } + if got, want := mt0, tt.wantMTime; !got.Equal(want) { + errormsg := fmt.Sprintf("ModTime mismatch with values ATime:%q-MTime:%q\ngot: %q\nwant: %q", tt.aTime, tt.mTime, got, want) + switch runtime.GOOS { + case "dragonfly": + t.Log(errormsg) + t.Log("Mtime is always updated; ignoring.") + default: + t.Error(errormsg) + } + } + } +} + +// Use TempDir (via newDir) to make sure we're on a local file system, +// so that timings are not distorted by latency and caching. +// On NFS, timings can be off due to caching of meta-data on +// NFS servers (Issue 848). 
+func TestChtimesDir(t *testing.T) { + t.Parallel() + + name := newDir("TestChtimes", t) + defer RemoveAll(name) + + testChtimes(t, name) +} + +func testChtimes(t *testing.T, name string) { + st, err := Stat(name) + if err != nil { + t.Fatalf("Stat %s: %s", name, err) + } + preStat := st + + // Move access and modification time back a second + at := Atime(preStat) + mt := preStat.ModTime() + err = Chtimes(name, at.Add(-time.Second), mt.Add(-time.Second)) + if err != nil { + t.Fatalf("Chtimes %s: %s", name, err) + } + + st, err = Stat(name) + if err != nil { + t.Fatalf("second Stat %s: %s", name, err) + } + postStat := st + + pat := Atime(postStat) + pmt := postStat.ModTime() + if !pat.Before(at) { + switch runtime.GOOS { + case "plan9": + // Mtime is the time of the last change of + // content. Similarly, atime is set whenever + // the contents are accessed; also, it is set + // whenever mtime is set. + case "netbsd": + mounts, _ := ReadFile("/proc/mounts") + if strings.Contains(string(mounts), "noatime") { + t.Logf("AccessTime didn't go backwards, but see a filesystem mounted noatime; ignoring. Issue 19293.") + } else { + t.Logf("AccessTime didn't go backwards; was=%v, after=%v (Ignoring on NetBSD, assuming noatime, Issue 19293)", at, pat) + } + default: + t.Errorf("AccessTime didn't go backwards; was=%v, after=%v", at, pat) + } + } + + if !pmt.Before(mt) { + t.Errorf("ModTime didn't go backwards; was=%v, after=%v", mt, pmt) + } +} + +func TestChtimesToUnixZero(t *testing.T) { + file := newFile("chtimes-to-unix-zero", t) + fn := file.Name() + defer Remove(fn) + if _, err := file.Write([]byte("hi")); err != nil { + t.Fatal(err) + } + if err := file.Close(); err != nil { + t.Fatal(err) + } + + unixZero := time.Unix(0, 0) + if err := Chtimes(fn, unixZero, unixZero); err != nil { + t.Fatalf("Chtimes failed: %v", err) + } + + st, err := Stat(fn) + if err != nil { + t.Fatal(err) + } + + if mt := st.ModTime(); mt != unixZero { + t.Errorf("mtime is %v, want %v", mt, unixZero) + } +} + +func TestFileChdir(t *testing.T) { + wd, err := Getwd() + if err != nil { + t.Fatalf("Getwd: %s", err) + } + defer Chdir(wd) + + fd, err := Open(".") + if err != nil { + t.Fatalf("Open .: %s", err) + } + defer fd.Close() + + if err := Chdir("/"); err != nil { + t.Fatalf("Chdir /: %s", err) + } + + if err := fd.Chdir(); err != nil { + t.Fatalf("fd.Chdir: %s", err) + } + + wdNew, err := Getwd() + if err != nil { + t.Fatalf("Getwd: %s", err) + } + + wdInfo, err := fd.Stat() + if err != nil { + t.Fatal(err) + } + newInfo, err := Stat(wdNew) + if err != nil { + t.Fatal(err) + } + if !SameFile(wdInfo, newInfo) { + t.Fatalf("fd.Chdir failed: got %s, want %s", wdNew, wd) + } +} + +func TestChdirAndGetwd(t *testing.T) { + fd, err := Open(".") + if err != nil { + t.Fatalf("Open .: %s", err) + } + // These are chosen carefully not to be symlinks on a Mac + // (unlike, say, /var, /etc), except /tmp, which we handle below. + dirs := []string{"/", "/usr/bin", "/tmp"} + // /usr/bin does not usually exist on Plan 9 or Android. + switch runtime.GOOS { + case "android": + dirs = []string{"/system/bin"} + case "plan9": + dirs = []string{"/", "/usr"} + case "ios", "windows", "wasip1": + dirs = nil + for _, dir := range []string{t.TempDir(), t.TempDir()} { + // Expand symlinks so path equality tests work. 
+ dir, err = filepath.EvalSymlinks(dir) + if err != nil { + t.Fatalf("EvalSymlinks: %v", err) + } + dirs = append(dirs, dir) + } + } + oldwd := Getenv("PWD") + for mode := 0; mode < 2; mode++ { + for _, d := range dirs { + if mode == 0 { + err = Chdir(d) + } else { + fd1, err1 := Open(d) + if err1 != nil { + t.Errorf("Open %s: %s", d, err1) + continue + } + err = fd1.Chdir() + fd1.Close() + } + if d == "/tmp" { + Setenv("PWD", "/tmp") + } + pwd, err1 := Getwd() + Setenv("PWD", oldwd) + err2 := fd.Chdir() + if err2 != nil { + // We changed the current directory and cannot go back. + // Don't let the tests continue; they'll scribble + // all over some other directory. + fmt.Fprintf(Stderr, "fchdir back to dot failed: %s\n", err2) + Exit(1) + } + if err != nil { + fd.Close() + t.Fatalf("Chdir %s: %s", d, err) + } + if err1 != nil { + fd.Close() + t.Fatalf("Getwd in %s: %s", d, err1) + } + if !equal(pwd, d) { + fd.Close() + t.Fatalf("Getwd returned %q want %q", pwd, d) + } + } + } + fd.Close() +} + +// Test that Chdir+Getwd is program-wide. +func TestProgWideChdir(t *testing.T) { + const N = 10 + var wg sync.WaitGroup + hold := make(chan struct{}) + done := make(chan struct{}) + + d := t.TempDir() + oldwd, err := Getwd() + if err != nil { + t.Fatalf("Getwd: %v", err) + } + defer func() { + if err := Chdir(oldwd); err != nil { + // It's not safe to continue with tests if we can't get back to + // the original working directory. + panic(err) + } + }() + + // Note the deferred Wait must be called after the deferred close(done), + // to ensure the N goroutines have been released even if the main goroutine + // calls Fatalf. It must be called before the Chdir back to the original + // directory, and before the deferred deletion implied by TempDir, + // so as not to interfere while the N goroutines are still running. + defer wg.Wait() + defer close(done) + + for i := 0; i < N; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + // Lock half the goroutines in their own operating system + // thread to exercise more scheduler possibilities. + if i%2 == 1 { + // On Plan 9, after calling LockOSThread, the goroutines + // run on different processes which don't share the working + // directory. This used to be an issue because Go expects + // the working directory to be program-wide. + // See issue 9428. + runtime.LockOSThread() + } + select { + case <-done: + return + case <-hold: + } + // Getwd might be wrong + f0, err := Stat(".") + if err != nil { + t.Error(err) + return + } + pwd, err := Getwd() + if err != nil { + t.Errorf("Getwd: %v", err) + return + } + if pwd != d { + t.Errorf("Getwd() = %q, want %q", pwd, d) + return + } + f1, err := Stat(pwd) + if err != nil { + t.Error(err) + return + } + if !SameFile(f0, f1) { + t.Errorf(`Samefile(Stat("."), Getwd()) reports false (%s != %s)`, f0.Name(), f1.Name()) + return + } + }(i) + } + if err = Chdir(d); err != nil { + t.Fatalf("Chdir: %v", err) + } + // OS X sets TMPDIR to a symbolic link. + // So we resolve our working directory again before the test. 
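The point TestProgWideChdir defends is that the working directory is process-wide state shared by every goroutine, not goroutine-local. A tiny sketch of that observation (directory name illustrative; as the test notes, macOS temp dirs can be symlinks, so printed paths may differ lexically while naming the same directory):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"sync"
)

func main() {
	dir, err := os.MkdirTemp("", "progwide-*")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	if err := os.Chdir(dir); err != nil {
		log.Fatal(err)
	}
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		wd, err := os.Getwd()
		if err != nil {
			log.Fatal(err)
		}
		// The Chdir above is visible here: the cwd is per-process,
		// not per-goroutine.
		fmt.Println("goroutine sees:", wd)
	}()
	wg.Wait()
}
```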
+ d, err = Getwd() + if err != nil { + t.Fatalf("Getwd: %v", err) + } + close(hold) + wg.Wait() +} + +func TestSeek(t *testing.T) { + t.Parallel() + + f := newFile("TestSeek", t) + defer Remove(f.Name()) + defer f.Close() + + const data = "hello, world\n" + io.WriteString(f, data) + + type test struct { + in int64 + whence int + out int64 + } + var tests = []test{ + {0, io.SeekCurrent, int64(len(data))}, + {0, io.SeekStart, 0}, + {5, io.SeekStart, 5}, + {0, io.SeekEnd, int64(len(data))}, + {0, io.SeekStart, 0}, + {-1, io.SeekEnd, int64(len(data)) - 1}, + {1 << 33, io.SeekStart, 1 << 33}, + {1 << 33, io.SeekEnd, 1<<33 + int64(len(data))}, + + // Issue 21681, Windows 4G-1, etc: + {1<<32 - 1, io.SeekStart, 1<<32 - 1}, + {0, io.SeekCurrent, 1<<32 - 1}, + {2<<32 - 1, io.SeekStart, 2<<32 - 1}, + {0, io.SeekCurrent, 2<<32 - 1}, + } + for i, tt := range tests { + off, err := f.Seek(tt.in, tt.whence) + if off != tt.out || err != nil { + if e, ok := err.(*PathError); ok && e.Err == syscall.EINVAL && tt.out > 1<<32 && runtime.GOOS == "linux" { + mounts, _ := ReadFile("/proc/mounts") + if strings.Contains(string(mounts), "reiserfs") { + // Reiserfs rejects the big seeks. + t.Skipf("skipping test known to fail on reiserfs; https://golang.org/issue/91") + } + } + t.Errorf("#%d: Seek(%v, %v) = %v, %v want %v, nil", i, tt.in, tt.whence, off, err, tt.out) + } + } +} + +func TestSeekError(t *testing.T) { + switch runtime.GOOS { + case "js", "plan9", "wasip1": + t.Skipf("skipping test on %v", runtime.GOOS) + } + t.Parallel() + + r, w, err := Pipe() + if err != nil { + t.Fatal(err) + } + _, err = r.Seek(0, 0) + if err == nil { + t.Fatal("Seek on pipe should fail") + } + if perr, ok := err.(*PathError); !ok || perr.Err != syscall.ESPIPE { + t.Errorf("Seek returned error %v, want &PathError{Err: syscall.ESPIPE}", err) + } + _, err = w.Seek(0, 0) + if err == nil { + t.Fatal("Seek on pipe should fail") + } + if perr, ok := err.(*PathError); !ok || perr.Err != syscall.ESPIPE { + t.Errorf("Seek returned error %v, want &PathError{Err: syscall.ESPIPE}", err) + } +} + +type openErrorTest struct { + path string + mode int + error error +} + +var openErrorTests = []openErrorTest{ + { + sfdir + "/no-such-file", + O_RDONLY, + syscall.ENOENT, + }, + { + sfdir, + O_WRONLY, + syscall.EISDIR, + }, + { + sfdir + "/" + sfname + "/no-such-file", + O_WRONLY, + syscall.ENOTDIR, + }, +} + +func TestOpenError(t *testing.T) { + t.Parallel() + + for _, tt := range openErrorTests { + f, err := OpenFile(tt.path, tt.mode, 0) + if err == nil { + t.Errorf("Open(%q, %d) succeeded", tt.path, tt.mode) + f.Close() + continue + } + perr, ok := err.(*PathError) + if !ok { + t.Errorf("Open(%q, %d) returns error of %T type; want *PathError", tt.path, tt.mode, err) + } + if perr.Err != tt.error { + if runtime.GOOS == "plan9" { + syscallErrStr := perr.Err.Error() + expectedErrStr := strings.Replace(tt.error.Error(), "file ", "", 1) + if !strings.HasSuffix(syscallErrStr, expectedErrStr) { + // Some Plan 9 file servers incorrectly return + // EACCES rather than EISDIR when a directory is + // opened for write. + if tt.error == syscall.EISDIR && strings.HasSuffix(syscallErrStr, syscall.EACCES.Error()) { + continue + } + t.Errorf("Open(%q, %d) = _, %q; want suffix %q", tt.path, tt.mode, syscallErrStr, expectedErrStr) + } + continue + } + if runtime.GOOS == "dragonfly" { + // DragonFly incorrectly returns EACCES rather + // EISDIR when a directory is opened for write. 
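TestSeek exercises all three whence modes through the io.Seek* constants. For readers less familiar with them, a compact sketch (the temp file is illustrative):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "seek-demo-*")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	io.WriteString(f, "hello, world\n")

	size, _ := f.Seek(0, io.SeekEnd)     // from the end: also a cheap size query
	last, _ := f.Seek(-1, io.SeekEnd)    // one byte before EOF
	here, _ := f.Seek(0, io.SeekCurrent) // current position, without moving
	fmt.Println(size, last, here)        // 13 12 12
}
```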
+ if tt.error == syscall.EISDIR && perr.Err == syscall.EACCES { + continue + } + } + t.Errorf("Open(%q, %d) = _, %q; want %q", tt.path, tt.mode, perr.Err.Error(), tt.error.Error()) + } + } +} + +func TestOpenNoName(t *testing.T) { + f, err := Open("") + if err == nil { + f.Close() + t.Fatal(`Open("") succeeded`) + } +} + +func runBinHostname(t *testing.T) string { + // Run /bin/hostname and collect output. + r, w, err := Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + + path, err := exec.LookPath("hostname") + if err != nil { + if errors.Is(err, exec.ErrNotFound) { + t.Skip("skipping test; test requires hostname but it does not exist") + } + t.Fatal(err) + } + + argv := []string{"hostname"} + if runtime.GOOS == "aix" { + argv = []string{"hostname", "-s"} + } + p, err := StartProcess(path, argv, &ProcAttr{Files: []*File{nil, w, Stderr}}) + if err != nil { + t.Fatal(err) + } + w.Close() + + var b strings.Builder + io.Copy(&b, r) + _, err = p.Wait() + if err != nil { + t.Fatalf("run hostname Wait: %v", err) + } + err = p.Kill() + if err == nil { + t.Errorf("expected an error from Kill running 'hostname'") + } + output := b.String() + if n := len(output); n > 0 && output[n-1] == '\n' { + output = output[0 : n-1] + } + if output == "" { + t.Fatalf("/bin/hostname produced no output") + } + + return output +} + +func testWindowsHostname(t *testing.T, hostname string) { + cmd := testenv.Command(t, "hostname") + out, err := cmd.Output() + if err != nil { + t.Fatalf("Failed to execute hostname command: %v %s", err, out) + } + want := strings.Trim(string(out), "\r\n") + if hostname != want { + t.Fatalf("Hostname() = %q != system hostname of %q", hostname, want) + } +} + +func TestHostname(t *testing.T) { + t.Parallel() + + hostname, err := Hostname() + if err != nil { + t.Fatal(err) + } + if hostname == "" { + t.Fatal("Hostname returned empty string and no error") + } + if strings.Contains(hostname, "\x00") { + t.Fatalf("unexpected zero byte in hostname: %q", hostname) + } + + // There is no other way to fetch hostname on windows, but via winapi. + // On Plan 9 it can be taken from #c/sysname as Hostname() does. + switch runtime.GOOS { + case "android", "plan9": + // No /bin/hostname to verify against. + return + case "windows": + testWindowsHostname(t, hostname) + return + } + + testenv.MustHaveExec(t) + + // Check internal Hostname() against the output of /bin/hostname. + // Allow that the internal Hostname returns a Fully Qualified Domain Name + // and the /bin/hostname only returns the first component + want := runBinHostname(t) + if hostname != want { + host, _, ok := strings.Cut(hostname, ".") + if !ok || host != want { + t.Errorf("Hostname() = %q, want %q", hostname, want) + } + } +} + +func TestReadAt(t *testing.T) { + t.Parallel() + + f := newFile("TestReadAt", t) + defer Remove(f.Name()) + defer f.Close() + + const data = "hello, world\n" + io.WriteString(f, data) + + b := make([]byte, 5) + n, err := f.ReadAt(b, 7) + if err != nil || n != len(b) { + t.Fatalf("ReadAt 7: %d, %v", n, err) + } + if string(b) != "world" { + t.Fatalf("ReadAt 7: have %q want %q", string(b), "world") + } +} + +// Verify that ReadAt doesn't affect seek offset. +// In the Plan 9 kernel, there used to be a bug in the implementation of +// the pread syscall, where the channel offset was erroneously updated after +// calling pread on a file. 
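TestOpenError compares perr.Err against raw errnos because it is testing the os package itself; application code would normally match errors portably with errors.As and errors.Is, since os.PathError is an alias for fs.PathError and the errnos unwrap to fs.ErrNotExist and friends. A sketch of that idiom (the path is illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("no-such-file") // illustrative path
	var pe *fs.PathError              // os.PathError = fs.PathError
	if errors.As(err, &pe) {
		fmt.Println("op:", pe.Op, "path:", pe.Path, "cause:", pe.Err)
	}
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
}
```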
+func TestReadAtOffset(t *testing.T) { + t.Parallel() + + f := newFile("TestReadAtOffset", t) + defer Remove(f.Name()) + defer f.Close() + + const data = "hello, world\n" + io.WriteString(f, data) + + f.Seek(0, 0) + b := make([]byte, 5) + + n, err := f.ReadAt(b, 7) + if err != nil || n != len(b) { + t.Fatalf("ReadAt 7: %d, %v", n, err) + } + if string(b) != "world" { + t.Fatalf("ReadAt 7: have %q want %q", string(b), "world") + } + + n, err = f.Read(b) + if err != nil || n != len(b) { + t.Fatalf("Read: %d, %v", n, err) + } + if string(b) != "hello" { + t.Fatalf("Read: have %q want %q", string(b), "hello") + } +} + +// Verify that ReadAt doesn't allow negative offset. +func TestReadAtNegativeOffset(t *testing.T) { + t.Parallel() + + f := newFile("TestReadAtNegativeOffset", t) + defer Remove(f.Name()) + defer f.Close() + + const data = "hello, world\n" + io.WriteString(f, data) + + f.Seek(0, 0) + b := make([]byte, 5) + + n, err := f.ReadAt(b, -10) + + const wantsub = "negative offset" + if !strings.Contains(fmt.Sprint(err), wantsub) || n != 0 { + t.Errorf("ReadAt(-10) = %v, %v; want 0, ...%q...", n, err, wantsub) + } +} + +func TestWriteAt(t *testing.T) { + t.Parallel() + + f := newFile("TestWriteAt", t) + defer Remove(f.Name()) + defer f.Close() + + const data = "hello, world\n" + io.WriteString(f, data) + + n, err := f.WriteAt([]byte("WORLD"), 7) + if err != nil || n != 5 { + t.Fatalf("WriteAt 7: %d, %v", n, err) + } + + b, err := ReadFile(f.Name()) + if err != nil { + t.Fatalf("ReadFile %s: %v", f.Name(), err) + } + if string(b) != "hello, WORLD\n" { + t.Fatalf("after write: have %q want %q", string(b), "hello, WORLD\n") + } +} + +// Verify that WriteAt doesn't allow negative offset. +func TestWriteAtNegativeOffset(t *testing.T) { + t.Parallel() + + f := newFile("TestWriteAtNegativeOffset", t) + defer Remove(f.Name()) + defer f.Close() + + n, err := f.WriteAt([]byte("WORLD"), -10) + + const wantsub = "negative offset" + if !strings.Contains(fmt.Sprint(err), wantsub) || n != 0 { + t.Errorf("WriteAt(-10) = %v, %v; want 0, ...%q...", n, err, wantsub) + } +} + +// Verify that WriteAt doesn't work in append mode. 
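Together these tests pin down the ReadAt/WriteAt contract: positional I/O that neither consults nor moves the file offset, and that rejects negative offsets outright. A self-contained sketch:

```go
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "readat-demo-*")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	f.WriteString("hello, world\n")

	b := make([]byte, 5)
	if _, err := f.ReadAt(b, 7); err != nil { // positional: leaves the offset alone
		log.Fatal(err)
	}
	fmt.Printf("%s\n", b) // world

	if _, err := f.WriteAt([]byte("WORLD"), 7); err != nil {
		log.Fatal(err)
	}
	_, err = f.ReadAt(b, -1) // negative offsets are rejected
	fmt.Println(err)         // ... negative offset
}
```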
+func TestWriteAtInAppendMode(t *testing.T) { + defer chtmpdir(t)() + f, err := OpenFile("write_at_in_append_mode.txt", O_APPEND|O_CREATE, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + _, err = f.WriteAt([]byte(""), 1) + if err != ErrWriteAtInAppendMode { + t.Fatalf("f.WriteAt returned %v, expected %v", err, ErrWriteAtInAppendMode) + } +} + +func writeFile(t *testing.T, fname string, flag int, text string) string { + f, err := OpenFile(fname, flag, 0666) + if err != nil { + t.Fatalf("Open: %v", err) + } + n, err := io.WriteString(f, text) + if err != nil { + t.Fatalf("WriteString: %d, %v", n, err) + } + f.Close() + data, err := ReadFile(fname) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + return string(data) +} + +func TestAppend(t *testing.T) { + defer chtmpdir(t)() + const f = "append.txt" + s := writeFile(t, f, O_CREATE|O_TRUNC|O_RDWR, "new") + if s != "new" { + t.Fatalf("writeFile: have %q want %q", s, "new") + } + s = writeFile(t, f, O_APPEND|O_RDWR, "|append") + if s != "new|append" { + t.Fatalf("writeFile: have %q want %q", s, "new|append") + } + s = writeFile(t, f, O_CREATE|O_APPEND|O_RDWR, "|append") + if s != "new|append|append" { + t.Fatalf("writeFile: have %q want %q", s, "new|append|append") + } + err := Remove(f) + if err != nil { + t.Fatalf("Remove: %v", err) + } + s = writeFile(t, f, O_CREATE|O_APPEND|O_RDWR, "new&append") + if s != "new&append" { + t.Fatalf("writeFile: after append have %q want %q", s, "new&append") + } + s = writeFile(t, f, O_CREATE|O_RDWR, "old") + if s != "old&append" { + t.Fatalf("writeFile: after create have %q want %q", s, "old&append") + } + s = writeFile(t, f, O_CREATE|O_TRUNC|O_RDWR, "new") + if s != "new" { + t.Fatalf("writeFile: after truncate have %q want %q", s, "new") + } +} + +func TestStatDirWithTrailingSlash(t *testing.T) { + t.Parallel() + + // Create new temporary directory and arrange to clean it up. + path := t.TempDir() + + // Stat of path should succeed. + if _, err := Stat(path); err != nil { + t.Fatalf("stat %s failed: %s", path, err) + } + + // Stat of path+"/" should succeed too. 
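TestAppend is effectively a truth table for OpenFile flag combinations: O_TRUNC discards old contents, O_APPEND directs writes to EOF, and O_CREATE alone neither truncates nor appends, so a plain write clobbers bytes in place from offset 0. A condensed sketch of those three behaviors (file name illustrative):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func write(name string, flag int, s string) {
	f, err := os.OpenFile(name, flag, 0666)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := f.WriteString(s); err != nil {
		log.Fatal(err)
	}
	f.Close()
}

func main() {
	name := filepath.Join(os.TempDir(), "append-demo.txt")
	defer os.Remove(name)

	write(name, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, "new") // start fresh
	write(name, os.O_APPEND|os.O_WRONLY, "|append")        // writes land at EOF
	write(name, os.O_CREATE|os.O_WRONLY, "old")            // no truncate: overwrites from offset 0

	data, err := os.ReadFile(name)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", data) // old|append
}
```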
+ path += "/" + if _, err := Stat(path); err != nil { + t.Fatalf("stat %s failed: %s", path, err) + } +} + +func TestNilProcessStateString(t *testing.T) { + var ps *ProcessState + s := ps.String() + if s != "" { + t.Errorf("(*ProcessState)(nil).String() = %q, want %q", s, "") + } +} + +func TestSameFile(t *testing.T) { + defer chtmpdir(t)() + fa, err := Create("a") + if err != nil { + t.Fatalf("Create(a): %v", err) + } + fa.Close() + fb, err := Create("b") + if err != nil { + t.Fatalf("Create(b): %v", err) + } + fb.Close() + + ia1, err := Stat("a") + if err != nil { + t.Fatalf("Stat(a): %v", err) + } + ia2, err := Stat("a") + if err != nil { + t.Fatalf("Stat(a): %v", err) + } + if !SameFile(ia1, ia2) { + t.Errorf("files should be same") + } + + ib, err := Stat("b") + if err != nil { + t.Fatalf("Stat(b): %v", err) + } + if SameFile(ia1, ib) { + t.Errorf("files should be different") + } +} + +func testDevNullFileInfo(t *testing.T, statname, devNullName string, fi FileInfo) { + pre := fmt.Sprintf("%s(%q): ", statname, devNullName) + if fi.Size() != 0 { + t.Errorf(pre+"wrong file size have %d want 0", fi.Size()) + } + if fi.Mode()&ModeDevice == 0 { + t.Errorf(pre+"wrong file mode %q: ModeDevice is not set", fi.Mode()) + } + if fi.Mode()&ModeCharDevice == 0 { + t.Errorf(pre+"wrong file mode %q: ModeCharDevice is not set", fi.Mode()) + } + if fi.Mode().IsRegular() { + t.Errorf(pre+"wrong file mode %q: IsRegular returns true", fi.Mode()) + } +} + +func testDevNullFile(t *testing.T, devNullName string) { + f, err := Open(devNullName) + if err != nil { + t.Fatalf("Open(%s): %v", devNullName, err) + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + t.Fatalf("Stat(%s): %v", devNullName, err) + } + testDevNullFileInfo(t, "f.Stat", devNullName, fi) + + fi, err = Stat(devNullName) + if err != nil { + t.Fatalf("Stat(%s): %v", devNullName, err) + } + testDevNullFileInfo(t, "Stat", devNullName, fi) +} + +func TestDevNullFile(t *testing.T) { + t.Parallel() + + testDevNullFile(t, DevNull) + if runtime.GOOS == "windows" { + testDevNullFile(t, "./nul") + testDevNullFile(t, "//./nul") + } +} + +var testLargeWrite = flag.Bool("large_write", false, "run TestLargeWriteToConsole test that floods console with output") + +func TestLargeWriteToConsole(t *testing.T) { + if !*testLargeWrite { + t.Skip("skipping console-flooding test; enable with -large_write") + } + b := make([]byte, 32000) + for i := range b { + b[i] = '.' 
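testDevNullFileInfo spells out what a Stat of the null device should look like: zero size, ModeDevice and ModeCharDevice set, not a regular file. The same checks in miniature, using the portable os.DevNull name ("/dev/null" on Unix, "NUL" on Windows):

```go
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	fi, err := os.Stat(os.DevNull)
	if err != nil {
		log.Fatal(err)
	}
	m := fi.Mode()
	fmt.Println("size:", fi.Size())                // 0
	fmt.Println("device:", m&os.ModeDevice != 0)   // true
	fmt.Println("char:", m&os.ModeCharDevice != 0) // true
	fmt.Println("regular:", m.IsRegular())         // false
}
```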
+ } + b[len(b)-1] = '\n' + n, err := Stdout.Write(b) + if err != nil { + t.Fatalf("Write to os.Stdout failed: %v", err) + } + if n != len(b) { + t.Errorf("Write to os.Stdout should return %d; got %d", len(b), n) + } + n, err = Stderr.Write(b) + if err != nil { + t.Fatalf("Write to os.Stderr failed: %v", err) + } + if n != len(b) { + t.Errorf("Write to os.Stderr should return %d; got %d", len(b), n) + } +} + +func TestStatDirModeExec(t *testing.T) { + if runtime.GOOS == "wasip1" { + t.Skip("Chmod is not supported on " + runtime.GOOS) + } + t.Parallel() + + const mode = 0111 + + path := t.TempDir() + if err := Chmod(path, 0777); err != nil { + t.Fatalf("Chmod %q 0777: %v", path, err) + } + + dir, err := Stat(path) + if err != nil { + t.Fatalf("Stat %q (looking for mode %#o): %s", path, mode, err) + } + if dir.Mode()&mode != mode { + t.Errorf("Stat %q: mode %#o want %#o", path, dir.Mode()&mode, mode) + } +} + +func TestStatStdin(t *testing.T) { + switch runtime.GOOS { + case "android", "plan9": + t.Skipf("%s doesn't have /bin/sh", runtime.GOOS) + } + + if Getenv("GO_WANT_HELPER_PROCESS") == "1" { + st, err := Stdin.Stat() + if err != nil { + t.Fatalf("Stat failed: %v", err) + } + fmt.Println(st.Mode() & ModeNamedPipe) + Exit(0) + } + + exe, err := Executable() + if err != nil { + t.Skipf("can't find executable: %v", err) + } + + testenv.MustHaveExec(t) + t.Parallel() + + fi, err := Stdin.Stat() + if err != nil { + t.Fatal(err) + } + switch mode := fi.Mode(); { + case mode&ModeCharDevice != 0 && mode&ModeDevice != 0: + case mode&ModeNamedPipe != 0: + default: + t.Fatalf("unexpected Stdin mode (%v), want ModeCharDevice or ModeNamedPipe", mode) + } + + cmd := testenv.Command(t, exe, "-test.run=^TestStatStdin$") + cmd = testenv.CleanCmdEnv(cmd) + cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1") + // This will make standard input a pipe. 
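TestStatStdin uses the standard re-exec trick: the test binary re-runs itself with GO_WANT_HELPER_PROCESS=1 and a -test.run filter, so the child executes only the helper branch. A stripped-down sketch of the pattern outside this file (the package, test name, and marker output are hypothetical):

```go
package osdemo_test // hypothetical test package

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
	"testing"
)

func TestHelperPattern(t *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
		// Child branch: do the work under test, then exit before the
		// parent branch below can run.
		fmt.Print("hello from the child")
		os.Exit(0)
	}
	exe, err := os.Executable()
	if err != nil {
		t.Skipf("can't find executable: %v", err)
	}
	cmd := exec.Command(exe, "-test.run=^TestHelperPattern$")
	cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("child failed: %v %q", err, out)
	}
	if !strings.Contains(string(out), "hello from the child") {
		t.Fatalf("unexpected child output: %q", out)
	}
}
```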
+ cmd.Stdin = strings.NewReader("output") + + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Failed to spawn child process: %v %q", err, string(output)) + } + + // result will be like "prw-rw-rw" + if len(output) < 1 || output[0] != 'p' { + t.Fatalf("Child process reports stdin is not pipe '%v'", string(output)) + } +} + +func TestStatRelativeSymlink(t *testing.T) { + testenv.MustHaveSymlink(t) + t.Parallel() + + tmpdir := t.TempDir() + target := filepath.Join(tmpdir, "target") + f, err := Create(target) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + st, err := f.Stat() + if err != nil { + t.Fatal(err) + } + + link := filepath.Join(tmpdir, "link") + err = Symlink(filepath.Base(target), link) + if err != nil { + t.Fatal(err) + } + + st1, err := Stat(link) + if err != nil { + t.Fatal(err) + } + + if !SameFile(st, st1) { + t.Error("Stat doesn't follow relative symlink") + } + + if runtime.GOOS == "windows" { + Remove(link) + err = Symlink(target[len(filepath.VolumeName(target)):], link) + if err != nil { + t.Fatal(err) + } + + st1, err := Stat(link) + if err != nil { + t.Fatal(err) + } + + if !SameFile(st, st1) { + t.Error("Stat doesn't follow relative symlink") + } + } +} + +func TestReadAtEOF(t *testing.T) { + t.Parallel() + + f := newFile("TestReadAtEOF", t) + defer Remove(f.Name()) + defer f.Close() + + _, err := f.ReadAt(make([]byte, 10), 0) + switch err { + case io.EOF: + // all good + case nil: + t.Fatalf("ReadAt succeeded") + default: + t.Fatalf("ReadAt failed: %s", err) + } +} + +func TestLongPath(t *testing.T) { + t.Parallel() + + tmpdir := newDir("TestLongPath", t) + defer func(d string) { + if err := RemoveAll(d); err != nil { + t.Fatalf("RemoveAll failed: %v", err) + } + }(tmpdir) + + // Test the boundary of 247 and fewer bytes (normal) and 248 and more bytes (adjusted). + sizes := []int{247, 248, 249, 400} + for len(tmpdir) < 400 { + tmpdir += "/dir3456789" + } + for _, sz := range sizes { + t.Run(fmt.Sprintf("length=%d", sz), func(t *testing.T) { + sizedTempDir := tmpdir[:sz-1] + "x" // Ensure it does not end with a slash. + + // The various sized runs are for this call to trigger the boundary + // condition. 
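TestStatRelativeSymlink leans on the Stat/Lstat split: Stat follows the link, Lstat describes the link itself, and SameFile confirms the two stats landed on the same file. A sketch, assuming a platform where creating symlinks is permitted:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "symlink-demo-*")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	target := filepath.Join(dir, "target")
	if err := os.WriteFile(target, []byte("x"), 0644); err != nil {
		log.Fatal(err)
	}
	link := filepath.Join(dir, "link")
	if err := os.Symlink("target", link); err != nil { // relative target, like the test
		log.Fatal(err) // fails where symlinks need privileges (e.g. some Windows setups)
	}

	li, _ := os.Lstat(link)      // the link itself
	followed, _ := os.Stat(link) // what the link points at
	ti, _ := os.Stat(target)
	fmt.Println("is symlink:", li.Mode()&os.ModeSymlink != 0) // true
	fmt.Println("same file:", os.SameFile(ti, followed))      // true
}
```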
+ if err := MkdirAll(sizedTempDir, 0755); err != nil { + t.Fatalf("MkdirAll failed: %v", err) + } + data := []byte("hello world\n") + if err := WriteFile(sizedTempDir+"/foo.txt", data, 0644); err != nil { + t.Fatalf("os.WriteFile() failed: %v", err) + } + if err := Rename(sizedTempDir+"/foo.txt", sizedTempDir+"/bar.txt"); err != nil { + t.Fatalf("Rename failed: %v", err) + } + mtime := time.Now().Truncate(time.Minute) + if err := Chtimes(sizedTempDir+"/bar.txt", mtime, mtime); err != nil { + t.Fatalf("Chtimes failed: %v", err) + } + names := []string{"bar.txt"} + if testenv.HasSymlink() { + if err := Symlink(sizedTempDir+"/bar.txt", sizedTempDir+"/symlink.txt"); err != nil { + t.Fatalf("Symlink failed: %v", err) + } + names = append(names, "symlink.txt") + } + if testenv.HasLink() { + if err := Link(sizedTempDir+"/bar.txt", sizedTempDir+"/link.txt"); err != nil { + t.Fatalf("Link failed: %v", err) + } + names = append(names, "link.txt") + } + for _, wantSize := range []int64{int64(len(data)), 0} { + for _, name := range names { + path := sizedTempDir + "/" + name + dir, err := Stat(path) + if err != nil { + t.Fatalf("Stat(%q) failed: %v", path, err) + } + filesize := size(path, t) + if dir.Size() != filesize || filesize != wantSize { + t.Errorf("Size(%q) is %d, len(ReadFile()) is %d, want %d", path, dir.Size(), filesize, wantSize) + } + if runtime.GOOS != "wasip1" { // Chmod is not supported on wasip1 + err = Chmod(path, dir.Mode()) + if err != nil { + t.Fatalf("Chmod(%q) failed: %v", path, err) + } + } + } + if err := Truncate(sizedTempDir+"/bar.txt", 0); err != nil { + t.Fatalf("Truncate failed: %v", err) + } + } + }) + } +} + +func testKillProcess(t *testing.T, processKiller func(p *Process)) { + testenv.MustHaveExec(t) + t.Parallel() + + // Re-exec the test binary to start a process that hangs until stdin is closed. + cmd := testenv.Command(t, Args[0]) + cmd.Env = append(cmd.Environ(), "GO_OS_TEST_DRAIN_STDIN=1") + stdout, err := cmd.StdoutPipe() + if err != nil { + t.Fatal(err) + } + stdin, err := cmd.StdinPipe() + if err != nil { + t.Fatal(err) + } + err = cmd.Start() + if err != nil { + t.Fatalf("Failed to start test process: %v", err) + } + + defer func() { + if err := cmd.Wait(); err == nil { + t.Errorf("Test process succeeded, but expected to fail") + } + stdin.Close() // Keep stdin alive until the process has finished dying. + }() + + // Wait for the process to be started. + // (It will close its stdout when it reaches TestMain.) 
+ io.Copy(io.Discard, stdout) + + processKiller(cmd.Process) +} + +func TestKillStartProcess(t *testing.T) { + testKillProcess(t, func(p *Process) { + err := p.Kill() + if err != nil { + t.Fatalf("Failed to kill test process: %v", err) + } + }) +} + +func TestGetppid(t *testing.T) { + if runtime.GOOS == "plan9" { + // TODO: golang.org/issue/8206 + t.Skipf("skipping test on plan9; see issue 8206") + } + + if Getenv("GO_WANT_HELPER_PROCESS") == "1" { + fmt.Print(Getppid()) + Exit(0) + } + + testenv.MustHaveExec(t) + t.Parallel() + + cmd := testenv.Command(t, Args[0], "-test.run=^TestGetppid$") + cmd.Env = append(Environ(), "GO_WANT_HELPER_PROCESS=1") + + // verify that Getppid() from the forked process reports our process id + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Failed to spawn child process: %v %q", err, string(output)) + } + + childPpid := string(output) + ourPid := fmt.Sprintf("%d", Getpid()) + if childPpid != ourPid { + t.Fatalf("Child process reports parent process id '%v', expected '%v'", childPpid, ourPid) + } +} + +func TestKillFindProcess(t *testing.T) { + testKillProcess(t, func(p *Process) { + p2, err := FindProcess(p.Pid) + if err != nil { + t.Fatalf("Failed to find test process: %v", err) + } + err = p2.Kill() + if err != nil { + t.Fatalf("Failed to kill test process: %v", err) + } + }) +} + +var nilFileMethodTests = []struct { + name string + f func(*File) error +}{ + {"Chdir", func(f *File) error { return f.Chdir() }}, + {"Close", func(f *File) error { return f.Close() }}, + {"Chmod", func(f *File) error { return f.Chmod(0) }}, + {"Chown", func(f *File) error { return f.Chown(0, 0) }}, + {"Read", func(f *File) error { _, err := f.Read(make([]byte, 0)); return err }}, + {"ReadAt", func(f *File) error { _, err := f.ReadAt(make([]byte, 0), 0); return err }}, + {"Readdir", func(f *File) error { _, err := f.Readdir(1); return err }}, + {"Readdirnames", func(f *File) error { _, err := f.Readdirnames(1); return err }}, + {"Seek", func(f *File) error { _, err := f.Seek(0, io.SeekStart); return err }}, + {"Stat", func(f *File) error { _, err := f.Stat(); return err }}, + {"Sync", func(f *File) error { return f.Sync() }}, + {"Truncate", func(f *File) error { return f.Truncate(0) }}, + {"Write", func(f *File) error { _, err := f.Write(make([]byte, 0)); return err }}, + {"WriteAt", func(f *File) error { _, err := f.WriteAt(make([]byte, 0), 0); return err }}, + {"WriteString", func(f *File) error { _, err := f.WriteString(""); return err }}, +} + +// Test that all File methods give ErrInvalid if the receiver is nil. +func TestNilFileMethods(t *testing.T) { + t.Parallel() + + for _, tt := range nilFileMethodTests { + var file *File + got := tt.f(file) + if got != ErrInvalid { + t.Errorf("%v should fail when f is nil; got %v", tt.name, got) + } + } +} + +func mkdirTree(t *testing.T, root string, level, max int) { + if level >= max { + return + } + level++ + for i := 'a'; i < 'c'; i++ { + dir := filepath.Join(root, string(i)) + if err := Mkdir(dir, 0700); err != nil { + t.Fatal(err) + } + mkdirTree(t, dir, level, max) + } +} + +// Test that simultaneous RemoveAll do not report an error. +// As long as it gets removed, we should be happy. +func TestRemoveAllRace(t *testing.T) { + if runtime.GOOS == "windows" { + // Windows has very strict rules about things like + // removing directories while someone else has + // them open. The racing doesn't work out nicely + // like it does on Unix. 
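nilFileMethodTests enumerates the *File methods and demands ErrInvalid on a nil receiver, so callers get a stable sentinel rather than a nil-pointer panic. In miniature:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	var f *os.File // nil receiver, e.g. after a forgotten error check on Open
	_, err := f.Read(make([]byte, 1))
	fmt.Println(errors.Is(err, os.ErrInvalid)) // true
	fmt.Println(f.Close())                     // invalid argument
}
```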
+ t.Skip("skipping on windows") + } + if runtime.GOOS == "dragonfly" { + testenv.SkipFlaky(t, 52301) + } + + n := runtime.GOMAXPROCS(16) + defer runtime.GOMAXPROCS(n) + root, err := MkdirTemp("", "issue") + if err != nil { + t.Fatal(err) + } + mkdirTree(t, root, 1, 6) + hold := make(chan struct{}) + var wg sync.WaitGroup + for i := 0; i < 4; i++ { + wg.Add(1) + go func() { + defer wg.Done() + <-hold + err := RemoveAll(root) + if err != nil { + t.Errorf("unexpected error: %T, %q", err, err) + } + }() + } + close(hold) // let workers race to remove root + wg.Wait() +} + +// Test that reading from a pipe doesn't use up a thread. +func TestPipeThreads(t *testing.T) { + switch runtime.GOOS { + case "illumos", "solaris": + t.Skip("skipping on Solaris and illumos; issue 19111") + case "windows": + t.Skip("skipping on Windows; issue 19098") + case "plan9": + t.Skip("skipping on Plan 9; does not support runtime poller") + case "js": + t.Skip("skipping on js; no support for os.Pipe") + case "wasip1": + t.Skip("skipping on wasip1; no support for os.Pipe") + } + + threads := 100 + + // OpenBSD has a low default for max number of files. + if runtime.GOOS == "openbsd" { + threads = 50 + } + + r := make([]*File, threads) + w := make([]*File, threads) + for i := 0; i < threads; i++ { + rp, wp, err := Pipe() + if err != nil { + for j := 0; j < i; j++ { + r[j].Close() + w[j].Close() + } + t.Fatal(err) + } + r[i] = rp + w[i] = wp + } + + defer debug.SetMaxThreads(debug.SetMaxThreads(threads / 2)) + + creading := make(chan bool, threads) + cdone := make(chan bool, threads) + for i := 0; i < threads; i++ { + go func(i int) { + var b [1]byte + creading <- true + if _, err := r[i].Read(b[:]); err != nil { + t.Error(err) + } + if err := r[i].Close(); err != nil { + t.Error(err) + } + cdone <- true + }(i) + } + + for i := 0; i < threads; i++ { + <-creading + } + + // If we are still alive, it means that the 100 goroutines did + // not require 100 threads. 
+ + for i := 0; i < threads; i++ { + if _, err := w[i].Write([]byte{0}); err != nil { + t.Error(err) + } + if err := w[i].Close(); err != nil { + t.Error(err) + } + <-cdone + } +} + +func testDoubleCloseError(path string) func(*testing.T) { + return func(t *testing.T) { + t.Parallel() + + file, err := Open(path) + if err != nil { + t.Fatal(err) + } + if err := file.Close(); err != nil { + t.Fatalf("unexpected error from Close: %v", err) + } + if err := file.Close(); err == nil { + t.Error("second Close did not fail") + } else if pe, ok := err.(*PathError); !ok { + t.Errorf("second Close: got %T, want %T", err, pe) + } else if pe.Err != ErrClosed { + t.Errorf("second Close: got %q, want %q", pe.Err, ErrClosed) + } else { + t.Logf("second close returned expected error %q", err) + } + } +} + +func TestDoubleCloseError(t *testing.T) { + t.Parallel() + t.Run("file", testDoubleCloseError(filepath.Join(sfdir, sfname))) + t.Run("dir", testDoubleCloseError(sfdir)) +} + +func TestUserCacheDir(t *testing.T) { + t.Parallel() + + dir, err := UserCacheDir() + if err != nil { + t.Skipf("skipping: %v", err) + } + if dir == "" { + t.Fatalf("UserCacheDir returned %q; want non-empty path or error", dir) + } + + fi, err := Stat(dir) + if err != nil { + if IsNotExist(err) { + t.Log(err) + return + } + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("dir %s is not directory; type = %v", dir, fi.Mode()) + } +} + +func TestUserConfigDir(t *testing.T) { + t.Parallel() + + dir, err := UserConfigDir() + if err != nil { + t.Skipf("skipping: %v", err) + } + if dir == "" { + t.Fatalf("UserConfigDir returned %q; want non-empty path or error", dir) + } + + fi, err := Stat(dir) + if err != nil { + if IsNotExist(err) { + t.Log(err) + return + } + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("dir %s is not directory; type = %v", dir, fi.Mode()) + } +} + +func TestUserHomeDir(t *testing.T) { + t.Parallel() + + dir, err := UserHomeDir() + if dir == "" && err == nil { + t.Fatal("UserHomeDir returned an empty string but no error") + } + if err != nil { + // UserHomeDir may return a non-nil error if the environment variable + // for the home directory is empty or unset in the environment. + t.Skipf("skipping: %v", err) + } + + fi, err := Stat(dir) + if err != nil { + if IsNotExist(err) { + // The user's home directory has a well-defined location, but does not + // exist. (Maybe nothing has written to it yet? That could happen, for + // example, on minimal VM images used for CI testing.) + t.Log(err) + return + } + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("dir %s is not directory; type = %v", dir, fi.Mode()) + } +} + +func TestDirSeek(t *testing.T) { + t.Parallel() + + wd, err := Getwd() + if err != nil { + t.Fatal(err) + } + f, err := Open(wd) + if err != nil { + t.Fatal(err) + } + dirnames1, err := f.Readdirnames(0) + if err != nil { + t.Fatal(err) + } + + ret, err := f.Seek(0, 0) + if err != nil { + t.Fatal(err) + } + if ret != 0 { + t.Fatalf("seek result not zero: %d", ret) + } + + dirnames2, err := f.Readdirnames(0) + if err != nil { + t.Fatal(err) + } + + if len(dirnames1) != len(dirnames2) { + t.Fatalf("listings have different lengths: %d and %d\n", len(dirnames1), len(dirnames2)) + } + for i, n1 := range dirnames1 { + n2 := dirnames2[i] + if n1 != n2 { + t.Fatalf("different name i=%d n1=%s n2=%s\n", i, n1, n2) + } + } +} + +func TestReaddirSmallSeek(t *testing.T) { + // See issue 37161. Read only one entry from a directory, + // seek to the beginning, and read again. We should not see + // duplicate entries. 
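testDoubleCloseError fixes the shape of the second-Close error: a *PathError whose Err is ErrClosed, which also makes the errors.Is idiom work. A sketch (temp-file name illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"log"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "close-demo-*")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())

	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
	err = f.Close() // second Close: *PathError wrapping ErrClosed
	var pe *os.PathError
	fmt.Println(errors.As(err, &pe))          // true
	fmt.Println(errors.Is(err, os.ErrClosed)) // true
}
```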
+ t.Parallel() + + wd, err := Getwd() + if err != nil { + t.Fatal(err) + } + df, err := Open(filepath.Join(wd, "testdata", "issue37161")) + if err != nil { + t.Fatal(err) + } + names1, err := df.Readdirnames(1) + if err != nil { + t.Fatal(err) + } + if _, err = df.Seek(0, 0); err != nil { + t.Fatal(err) + } + names2, err := df.Readdirnames(0) + if err != nil { + t.Fatal(err) + } + if len(names2) != 3 { + t.Fatalf("first names: %v, second names: %v", names1, names2) + } +} + +// isDeadlineExceeded reports whether err is or wraps ErrDeadlineExceeded. +// We also check that the error has a Timeout method that returns true. +func isDeadlineExceeded(err error) bool { + if !IsTimeout(err) { + return false + } + if !errors.Is(err, ErrDeadlineExceeded) { + return false + } + return true +} + +// Test that opening a file does not change its permissions. Issue 38225. +func TestOpenFileKeepsPermissions(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + name := filepath.Join(dir, "x") + f, err := Create(name) + if err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Error(err) + } + f, err = OpenFile(name, O_WRONLY|O_CREATE|O_TRUNC, 0) + if err != nil { + t.Fatal(err) + } + if fi, err := f.Stat(); err != nil { + t.Error(err) + } else if fi.Mode()&0222 == 0 { + t.Errorf("f.Stat.Mode after OpenFile is %v, should be writable", fi.Mode()) + } + if err := f.Close(); err != nil { + t.Error(err) + } + if fi, err := Stat(name); err != nil { + t.Error(err) + } else if fi.Mode()&0222 == 0 { + t.Errorf("Stat after OpenFile is %v, should be writable", fi.Mode()) + } +} + +func TestDirFS(t *testing.T) { + t.Parallel() + + // On Windows, we force the MFT to update by reading the actual metadata from GetFileInformationByHandle and then + // explicitly setting that. Otherwise it might get out of sync with FindFirstFile. See golang.org/issues/42637. + if runtime.GOOS == "windows" { + if err := filepath.WalkDir("./testdata/dirfs", func(path string, d fs.DirEntry, err error) error { + if err != nil { + t.Fatal(err) + } + info, err := d.Info() + if err != nil { + t.Fatal(err) + } + stat, err := Stat(path) // This uses GetFileInformationByHandle internally. + if err != nil { + t.Fatal(err) + } + if stat.ModTime() == info.ModTime() { + return nil + } + if err := Chtimes(path, stat.ModTime(), stat.ModTime()); err != nil { + t.Log(err) // We only log, not die, in case the test directory is not writable. + } + return nil + }); err != nil { + t.Fatal(err) + } + } + fsys := DirFS("./testdata/dirfs") + if err := fstest.TestFS(fsys, "a", "b", "dir/x"); err != nil { + t.Fatal(err) + } + + rdfs, ok := fsys.(fs.ReadDirFS) + if !ok { + t.Error("expected DirFS result to implement fs.ReadDirFS") + } + if _, err := rdfs.ReadDir("nonexistent"); err == nil { + t.Error("fs.ReadDir of nonexistent directory succeeded") + } + + // Test that the error message does not contain a backslash, + // and does not contain the DirFS argument. + const nonesuch = "dir/nonesuch" + _, err := fsys.Open(nonesuch) + if err == nil { + t.Error("fs.Open of nonexistent file succeeded") + } else { + if !strings.Contains(err.Error(), nonesuch) { + t.Errorf("error %q does not contain %q", err, nonesuch) + } + if strings.Contains(err.(*PathError).Path, "testdata") { + t.Errorf("error %q contains %q", err, "testdata") + } + } + + // Test that Open does not accept backslash as separator. 
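TestDirFS checks DirFS both through fstest.TestFS and by hand; the essentials for users are that the returned fs.FS accepts only slash-separated, relative names that satisfy fs.ValidPath. A sketch (directory and file names illustrative):

```go
package main

import (
	"fmt"
	"io/fs"
	"log"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "dirfs-demo-*")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)
	if err := os.WriteFile(filepath.Join(dir, "a.txt"), []byte("hello"), 0644); err != nil {
		log.Fatal(err)
	}

	fsys := os.DirFS(dir) // names below are slash-separated, relative to dir
	data, err := fs.ReadFile(fsys, "a.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", data) // hello

	// Rooted (and backslashed) names fail fs.ValidPath and are rejected.
	if _, err := fsys.Open("/etc/passwd"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```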
+ d := DirFS(".") + _, err = d.Open(`testdata\dirfs`) + if err == nil { + t.Fatalf(`Open testdata\dirfs succeeded`) + } + + // Test that Open does not open Windows device files. + _, err = d.Open(`NUL`) + if err == nil { + t.Errorf(`Open NUL succeeded`) + } +} + +func TestDirFSRootDir(t *testing.T) { + t.Parallel() + + cwd, err := Getwd() + if err != nil { + t.Fatal(err) + } + cwd = cwd[len(filepath.VolumeName(cwd)):] // trim volume prefix (C:) on Windows + cwd = filepath.ToSlash(cwd) // convert \ to / + cwd = strings.TrimPrefix(cwd, "/") // trim leading / + + // Test that Open can open a path starting at /. + d := DirFS("/") + f, err := d.Open(cwd + "/testdata/dirfs/a") + if err != nil { + t.Fatal(err) + } + f.Close() +} + +func TestDirFSEmptyDir(t *testing.T) { + t.Parallel() + + d := DirFS("") + cwd, _ := Getwd() + for _, path := range []string{ + "testdata/dirfs/a", // not DirFS(".") + filepath.ToSlash(cwd) + "/testdata/dirfs/a", // not DirFS("/") + } { + _, err := d.Open(path) + if err == nil { + t.Fatalf(`DirFS("").Open(%q) succeeded`, path) + } + } +} + +func TestDirFSPathsValid(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skipf("skipping on Windows") + } + t.Parallel() + + d := t.TempDir() + if err := WriteFile(filepath.Join(d, "control.txt"), []byte(string("Hello, world!")), 0644); err != nil { + t.Fatal(err) + } + if err := WriteFile(filepath.Join(d, `e:xperi\ment.txt`), []byte(string("Hello, colon and backslash!")), 0644); err != nil { + t.Fatal(err) + } + + fsys := DirFS(d) + err := fs.WalkDir(fsys, ".", func(path string, e fs.DirEntry, err error) error { + if fs.ValidPath(e.Name()) { + t.Logf("%q ok", e.Name()) + } else { + t.Errorf("%q INVALID", e.Name()) + } + return nil + }) + if err != nil { + t.Fatal(err) + } +} + +func TestReadFileProc(t *testing.T) { + t.Parallel() + + // Linux files in /proc report 0 size, + // but then if ReadFile reads just a single byte at offset 0, + // the read at offset 1 returns EOF instead of more data. + // ReadFile has a minimum read size of 512 to work around this, + // but test explicitly that it's working. + name := "/proc/sys/fs/pipe-max-size" + if _, err := Stat(name); err != nil { + t.Skip(err) + } + data, err := ReadFile(name) + if err != nil { + t.Fatal(err) + } + if len(data) == 0 || data[len(data)-1] != '\n' { + t.Fatalf("read %s: not newline-terminated: %q", name, data) + } +} + +func TestDirFSReadFileProc(t *testing.T) { + t.Parallel() + + fsys := DirFS("/") + name := "proc/sys/fs/pipe-max-size" + if _, err := fs.Stat(fsys, name); err != nil { + t.Skip() + } + data, err := fs.ReadFile(fsys, name) + if err != nil { + t.Fatal(err) + } + if len(data) == 0 || data[len(data)-1] != '\n' { + t.Fatalf("read %s: not newline-terminated: %q", name, data) + } +} + +func TestWriteStringAlloc(t *testing.T) { + if runtime.GOOS == "js" { + t.Skip("js allocates a lot during File.WriteString") + } + d := t.TempDir() + f, err := Create(filepath.Join(d, "whiteboard.txt")) + if err != nil { + t.Fatal(err) + } + defer f.Close() + allocs := testing.AllocsPerRun(100, func() { + f.WriteString("I will not allocate when passed a string longer than 32 bytes.\n") + }) + if allocs != 0 { + t.Errorf("expected 0 allocs for File.WriteString, got %v", allocs) + } +} + +// Test that it's OK to have parallel I/O and Close on a pipe. +func TestPipeIOCloseRace(t *testing.T) { + // Skip on wasm, which doesn't have pipes. 
+ if runtime.GOOS == "js" || runtime.GOOS == "wasip1" { + t.Skipf("skipping on %s: no pipes", runtime.GOOS) + } + t.Parallel() + + r, w, err := Pipe() + if err != nil { + t.Fatal(err) + } + + var wg sync.WaitGroup + wg.Add(3) + + go func() { + defer wg.Done() + for { + n, err := w.Write([]byte("hi")) + if err != nil { + // We look at error strings as the + // expected errors are OS-specific. + switch { + case errors.Is(err, ErrClosed), + strings.Contains(err.Error(), "broken pipe"), + strings.Contains(err.Error(), "pipe is being closed"), + strings.Contains(err.Error(), "hungup channel"): + // Ignore an expected error. + default: + // Unexpected error. + t.Error(err) + } + return + } + if n != 2 { + t.Errorf("wrote %d bytes, expected 2", n) + return + } + } + }() + + go func() { + defer wg.Done() + for { + var buf [2]byte + n, err := r.Read(buf[:]) + if err != nil { + if err != io.EOF && !errors.Is(err, ErrClosed) { + t.Error(err) + } + return + } + if n != 2 { + t.Errorf("read %d bytes, want 2", n) + } + } + }() + + go func() { + defer wg.Done() + + // Let the other goroutines start. This is just to get + // a better test, the test will still pass if they + // don't start. + time.Sleep(time.Millisecond) + + if err := r.Close(); err != nil { + t.Error(err) + } + if err := w.Close(); err != nil { + t.Error(err) + } + }() + + wg.Wait() +} + +// Test that it's OK to call Close concurrently on a pipe. +func TestPipeCloseRace(t *testing.T) { + // Skip on wasm, which doesn't have pipes. + if runtime.GOOS == "js" || runtime.GOOS == "wasip1" { + t.Skipf("skipping on %s: no pipes", runtime.GOOS) + } + t.Parallel() + + r, w, err := Pipe() + if err != nil { + t.Fatal(err) + } + var wg sync.WaitGroup + c := make(chan error, 4) + f := func() { + defer wg.Done() + c <- r.Close() + c <- w.Close() + } + wg.Add(2) + go f() + go f() + nils, errs := 0, 0 + for i := 0; i < 4; i++ { + err := <-c + if err == nil { + nils++ + } else { + errs++ + } + } + if nils != 2 || errs != 2 { + t.Errorf("got nils %d errs %d, want 2 2", nils, errs) + } +} + +func TestRandomLen(t *testing.T) { + for range 5 { + dir, err := MkdirTemp(t.TempDir(), "*") + if err != nil { + t.Fatal(err) + } + base := filepath.Base(dir) + if len(base) > 10 { + t.Errorf("MkdirTemp returned len %d: %s", len(base), base) + } + } + for range 5 { + f, err := CreateTemp(t.TempDir(), "*") + if err != nil { + t.Fatal(err) + } + base := filepath.Base(f.Name()) + f.Close() + if len(base) > 10 { + t.Errorf("CreateTemp returned len %d: %s", len(base), base) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/os/os_unix_test.go b/platform/dbops/binaries/go/go/src/os/os_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..98e436fae6640a027d72eafd9c54823d1785afd8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/os_unix_test.go @@ -0,0 +1,444 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || (js && wasm) || wasip1 + +package os_test + +import ( + "internal/testenv" + "io" + . "os" + "path/filepath" + "runtime" + "strings" + "syscall" + "testing" + "time" +) + +func init() { + isReadonlyError = func(err error) bool { return err == syscall.EROFS } +} + +// For TestRawConnReadWrite. 
+type syscallDescriptor = int + +func checkUidGid(t *testing.T, path string, uid, gid int) { + dir, err := Lstat(path) + if err != nil { + t.Fatalf("Lstat %q (looking for uid/gid %d/%d): %s", path, uid, gid, err) + } + sys := dir.Sys().(*syscall.Stat_t) + if int(sys.Uid) != uid { + t.Errorf("Lstat %q: uid %d want %d", path, sys.Uid, uid) + } + if int(sys.Gid) != gid { + t.Errorf("Lstat %q: gid %d want %d", path, sys.Gid, gid) + } +} + +func TestChown(t *testing.T) { + if runtime.GOOS == "wasip1" { + t.Skip("file ownership not supported on " + runtime.GOOS) + } + t.Parallel() + + // Use TempDir() to make sure we're on a local file system, + // so that the group ids returned by Getgroups will be allowed + // on the file. On NFS, the Getgroups groups are + // basically useless. + f := newFile("TestChown", t) + defer Remove(f.Name()) + defer f.Close() + dir, err := f.Stat() + if err != nil { + t.Fatalf("stat %s: %s", f.Name(), err) + } + + // Can't change uid unless root, but can try + // changing the group id. First try our current group. + gid := Getgid() + t.Log("gid:", gid) + if err = Chown(f.Name(), -1, gid); err != nil { + t.Fatalf("chown %s -1 %d: %s", f.Name(), gid, err) + } + sys := dir.Sys().(*syscall.Stat_t) + checkUidGid(t, f.Name(), int(sys.Uid), gid) + + // Then try all the auxiliary groups. + groups, err := Getgroups() + if err != nil { + t.Fatalf("getgroups: %s", err) + } + t.Log("groups: ", groups) + for _, g := range groups { + if err = Chown(f.Name(), -1, g); err != nil { + if testenv.SyscallIsNotSupported(err) { + t.Logf("chown %s -1 %d: %s (error ignored)", f.Name(), g, err) + // Since the Chown call failed, the file should be unmodified. + checkUidGid(t, f.Name(), int(sys.Uid), gid) + continue + } + t.Fatalf("chown %s -1 %d: %s", f.Name(), g, err) + } + checkUidGid(t, f.Name(), int(sys.Uid), g) + + // change back to gid to test fd.Chown + if err = f.Chown(-1, gid); err != nil { + t.Fatalf("fchown %s -1 %d: %s", f.Name(), gid, err) + } + checkUidGid(t, f.Name(), int(sys.Uid), gid) + } +} + +func TestFileChown(t *testing.T) { + if runtime.GOOS == "wasip1" { + t.Skip("file ownership not supported on " + runtime.GOOS) + } + t.Parallel() + + // Use TempDir() to make sure we're on a local file system, + // so that the group ids returned by Getgroups will be allowed + // on the file. On NFS, the Getgroups groups are + // basically useless. + f := newFile("TestFileChown", t) + defer Remove(f.Name()) + defer f.Close() + dir, err := f.Stat() + if err != nil { + t.Fatalf("stat %s: %s", f.Name(), err) + } + + // Can't change uid unless root, but can try + // changing the group id. First try our current group. + gid := Getgid() + t.Log("gid:", gid) + if err = f.Chown(-1, gid); err != nil { + t.Fatalf("fchown %s -1 %d: %s", f.Name(), gid, err) + } + sys := dir.Sys().(*syscall.Stat_t) + checkUidGid(t, f.Name(), int(sys.Uid), gid) + + // Then try all the auxiliary groups. + groups, err := Getgroups() + if err != nil { + t.Fatalf("getgroups: %s", err) + } + t.Log("groups: ", groups) + for _, g := range groups { + if err = f.Chown(-1, g); err != nil { + if testenv.SyscallIsNotSupported(err) { + t.Logf("chown %s -1 %d: %s (error ignored)", f.Name(), g, err) + // Since the Chown call failed, the file should be unmodified. 
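checkUidGid and TestChown rely on the POSIX convention that an owner id of -1 means "leave this id alone", and on the fact that an unprivileged process may still switch a file to one of its own groups. A Unix-only sketch of that convention:

```go
//go:build unix

package main

import (
	"fmt"
	"log"
	"os"
	"syscall"
)

func main() {
	f, err := os.CreateTemp("", "chown-demo-*")
	if err != nil {
		log.Fatal(err)
	}
	name := f.Name()
	f.Close()
	defer os.Remove(name)

	// uid -1 leaves the owner unchanged; our own primary gid is always an
	// allowed target, even without root.
	if err := os.Chown(name, -1, os.Getgid()); err != nil {
		log.Fatal(err)
	}
	fi, err := os.Lstat(name)
	if err != nil {
		log.Fatal(err)
	}
	st := fi.Sys().(*syscall.Stat_t)
	fmt.Println("uid:", st.Uid, "gid:", st.Gid)
}
```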
+ checkUidGid(t, f.Name(), int(sys.Uid), gid) + continue + } + t.Fatalf("fchown %s -1 %d: %s", f.Name(), g, err) + } + checkUidGid(t, f.Name(), int(sys.Uid), g) + + // change back to gid to test fd.Chown + if err = f.Chown(-1, gid); err != nil { + t.Fatalf("fchown %s -1 %d: %s", f.Name(), gid, err) + } + checkUidGid(t, f.Name(), int(sys.Uid), gid) + } +} + +func TestLchown(t *testing.T) { + testenv.MustHaveSymlink(t) + t.Parallel() + + // Use TempDir() to make sure we're on a local file system, + // so that the group ids returned by Getgroups will be allowed + // on the file. On NFS, the Getgroups groups are + // basically useless. + f := newFile("TestLchown", t) + defer Remove(f.Name()) + defer f.Close() + dir, err := f.Stat() + if err != nil { + t.Fatalf("stat %s: %s", f.Name(), err) + } + + linkname := f.Name() + "2" + if err := Symlink(f.Name(), linkname); err != nil { + if runtime.GOOS == "android" && IsPermission(err) { + t.Skip("skipping test on Android; permission error creating symlink") + } + t.Fatalf("link %s -> %s: %v", f.Name(), linkname, err) + } + defer Remove(linkname) + + // Can't change uid unless root, but can try + // changing the group id. First try our current group. + gid := Getgid() + t.Log("gid:", gid) + if err = Lchown(linkname, -1, gid); err != nil { + if err, ok := err.(*PathError); ok && err.Err == syscall.ENOSYS { + t.Skip("lchown is unavailable") + } + t.Fatalf("lchown %s -1 %d: %s", linkname, gid, err) + } + sys := dir.Sys().(*syscall.Stat_t) + checkUidGid(t, linkname, int(sys.Uid), gid) + + // Then try all the auxiliary groups. + groups, err := Getgroups() + if err != nil { + t.Fatalf("getgroups: %s", err) + } + t.Log("groups: ", groups) + for _, g := range groups { + if err = Lchown(linkname, -1, g); err != nil { + if testenv.SyscallIsNotSupported(err) { + t.Logf("lchown %s -1 %d: %s (error ignored)", f.Name(), g, err) + // Since the Lchown call failed, the file should be unmodified. + checkUidGid(t, f.Name(), int(sys.Uid), gid) + continue + } + t.Fatalf("lchown %s -1 %d: %s", linkname, g, err) + } + checkUidGid(t, linkname, int(sys.Uid), g) + + // Check that link target's gid is unchanged. + checkUidGid(t, f.Name(), int(sys.Uid), int(sys.Gid)) + + if err = Lchown(linkname, -1, gid); err != nil { + t.Fatalf("lchown %s -1 %d: %s", f.Name(), gid, err) + } + } +} + +// Issue 16919: Readdir must return a non-empty slice or an error. +func TestReaddirRemoveRace(t *testing.T) { + oldStat := *LstatP + defer func() { *LstatP = oldStat }() + *LstatP = func(name string) (FileInfo, error) { + if strings.HasSuffix(name, "some-file") { + // Act like it's been deleted. 
+ return nil, ErrNotExist + } + return oldStat(name) + } + dir := newDir("TestReaddirRemoveRace", t) + defer RemoveAll(dir) + if err := WriteFile(filepath.Join(dir, "some-file"), []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + d, err := Open(dir) + if err != nil { + t.Fatal(err) + } + defer d.Close() + fis, err := d.Readdir(2) // notably, greater than zero + if len(fis) == 0 && err == nil { + // This is what used to happen (Issue 16919) + t.Fatal("Readdir = empty slice & err == nil") + } + if len(fis) != 0 || err != io.EOF { + t.Errorf("Readdir = %d entries: %v; want 0, io.EOF", len(fis), err) + for i, fi := range fis { + t.Errorf(" entry[%d]: %q, %v", i, fi.Name(), fi.Mode()) + } + t.FailNow() + } +} + +// Issue 23120: respect umask when doing Mkdir with the sticky bit +func TestMkdirStickyUmask(t *testing.T) { + if runtime.GOOS == "wasip1" { + t.Skip("file permissions not supported on " + runtime.GOOS) + } + t.Parallel() + + const umask = 0077 + dir := newDir("TestMkdirStickyUmask", t) + defer RemoveAll(dir) + + oldUmask := syscall.Umask(umask) + defer syscall.Umask(oldUmask) + + // We have set a umask, but if the parent directory happens to have a default + // ACL, the umask may be ignored. To prevent spurious failures from an ACL, + // we create a non-sticky directory as a “control case” to compare against our + // sticky-bit “experiment”. + control := filepath.Join(dir, "control") + if err := Mkdir(control, 0755); err != nil { + t.Fatal(err) + } + cfi, err := Stat(control) + if err != nil { + t.Fatal(err) + } + + p := filepath.Join(dir, "dir1") + if err := Mkdir(p, ModeSticky|0755); err != nil { + t.Fatal(err) + } + fi, err := Stat(p) + if err != nil { + t.Fatal(err) + } + + got := fi.Mode() + want := cfi.Mode() | ModeSticky + if got != want { + t.Errorf("Mkdir(_, ModeSticky|0755) created dir with mode %v; want %v", got, want) + } +} + +// See also issues: 22939, 24331 +func newFileTest(t *testing.T, blocking bool) { + if runtime.GOOS == "js" || runtime.GOOS == "wasip1" { + t.Skipf("syscall.Pipe is not available on %s.", runtime.GOOS) + } + + p := make([]int, 2) + if err := syscall.Pipe(p); err != nil { + t.Fatalf("pipe: %v", err) + } + defer syscall.Close(p[1]) + + // Set the read-side to non-blocking. + if !blocking { + if err := syscall.SetNonblock(p[0], true); err != nil { + syscall.Close(p[0]) + t.Fatalf("SetNonblock: %v", err) + } + } + // Convert it to a file. + file := NewFile(uintptr(p[0]), "notapipe") + if file == nil { + syscall.Close(p[0]) + t.Fatalf("failed to convert fd to file!") + } + defer file.Close() + + timeToWrite := 100 * time.Millisecond + timeToDeadline := 1 * time.Millisecond + if !blocking { + // Use a longer time to avoid flakes. + // We won't be waiting this long anyhow. + timeToWrite = 1 * time.Second + } + + // Try to read with deadline (but don't block forever). + b := make([]byte, 1) + timer := time.AfterFunc(timeToWrite, func() { syscall.Write(p[1], []byte("a")) }) + defer timer.Stop() + file.SetReadDeadline(time.Now().Add(timeToDeadline)) + _, err := file.Read(b) + if !blocking { + // We want it to fail with a timeout. 
+ if !isDeadlineExceeded(err) { + t.Fatalf("No timeout reading from file: %v", err) + } + } else { + // We want it to succeed after 100ms + if err != nil { + t.Fatalf("Error reading from file: %v", err) + } + } +} + +func TestNewFileBlock(t *testing.T) { + t.Parallel() + newFileTest(t, true) +} + +func TestNewFileNonBlock(t *testing.T) { + t.Parallel() + newFileTest(t, false) +} + +func TestNewFileInvalid(t *testing.T) { + t.Parallel() + const negOne = ^uintptr(0) + if f := NewFile(negOne, "invalid"); f != nil { + t.Errorf("NewFile(-1) got %v want nil", f) + } +} + +func TestSplitPath(t *testing.T) { + t.Parallel() + for _, tt := range []struct{ path, wantDir, wantBase string }{ + {"a", ".", "a"}, + {"a/", ".", "a"}, + {"a//", ".", "a"}, + {"a/b", "a", "b"}, + {"a/b/", "a", "b"}, + {"a/b/c", "a/b", "c"}, + {"/a", "/", "a"}, + {"/a/", "/", "a"}, + {"/a/b", "/a", "b"}, + {"/a/b/", "/a", "b"}, + {"/a/b/c", "/a/b", "c"}, + {"//a", "/", "a"}, + {"//a/", "/", "a"}, + {"///a", "/", "a"}, + {"///a/", "/", "a"}, + } { + if dir, base := SplitPath(tt.path); dir != tt.wantDir || base != tt.wantBase { + t.Errorf("splitPath(%q) = %q, %q, want %q, %q", tt.path, dir, base, tt.wantDir, tt.wantBase) + } + } +} + +// Test that copying to files opened with O_APPEND works and +// the copy_file_range syscall isn't used on Linux. +// +// Regression test for go.dev/issue/60181 +func TestIssue60181(t *testing.T) { + defer chtmpdir(t)() + + want := "hello gopher" + + a, err := CreateTemp("", "a") + if err != nil { + t.Fatal(err) + } + a.WriteString(want[:5]) + a.Close() + + b, err := CreateTemp("", "b") + if err != nil { + t.Fatal(err) + } + b.WriteString(want[5:]) + b.Close() + + afd, err := syscall.Open(a.Name(), syscall.O_RDWR|syscall.O_APPEND, 0) + if err != nil { + t.Fatal(err) + } + + bfd, err := syscall.Open(b.Name(), syscall.O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + + aa := NewFile(uintptr(afd), a.Name()) + defer aa.Close() + bb := NewFile(uintptr(bfd), b.Name()) + defer bb.Close() + + // This would fail on Linux in case the copy_file_range syscall was used because it doesn't + // support destination files opened with O_APPEND, see + // https://man7.org/linux/man-pages/man2/copy_file_range.2.html#ERRORS + _, err = io.Copy(aa, bb) + if err != nil { + t.Fatal(err) + } + + buf, err := ReadFile(aa.Name()) + if err != nil { + t.Fatal(err) + } + + if got := string(buf); got != want { + t.Errorf("files not concatenated: got %q, want %q", got, want) + } +} diff --git a/platform/dbops/binaries/go/go/src/os/os_windows_test.go b/platform/dbops/binaries/go/go/src/os/os_windows_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7436b9a969a9bc5431aab66b73d317f9506ec432 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/os_windows_test.go @@ -0,0 +1,1596 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "errors" + "fmt" + "internal/poll" + "internal/syscall/windows" + "internal/syscall/windows/registry" + "internal/testenv" + "io" + "io/fs" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "slices" + "sort" + "strings" + "syscall" + "testing" + "unicode/utf16" + "unsafe" +) + +// For TestRawConnReadWrite. +type syscallDescriptor = syscall.Handle + +// chdir changes the current working directory to the named directory, +// and then restore the original working directory at the end of the test. 
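newFileTest above is the canonical recipe for handing a raw descriptor to the runtime poller: mark it non-blocking with SetNonblock before calling NewFile, and read deadlines then take effect. Condensed (Unix-only; the deadline value is illustrative):

```go
//go:build unix

package main

import (
	"fmt"
	"log"
	"os"
	"syscall"
	"time"
)

func main() {
	p := make([]int, 2)
	if err := syscall.Pipe(p); err != nil {
		log.Fatal(err)
	}
	defer syscall.Close(p[1])

	// Non-blocking before NewFile: the fd is then registered with the
	// runtime poller, so read deadlines become effective.
	if err := syscall.SetNonblock(p[0], true); err != nil {
		log.Fatal(err)
	}
	r := os.NewFile(uintptr(p[0]), "pipe-read")
	defer r.Close()

	r.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
	_, err := r.Read(make([]byte, 1))
	fmt.Println(os.IsTimeout(err)) // true: nothing was written in time
}
```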
+func chdir(t *testing.T, dir string) { + olddir, err := os.Getwd() + if err != nil { + t.Fatalf("chdir: %v", err) + } + if err := os.Chdir(dir); err != nil { + t.Fatalf("chdir %s: %v", dir, err) + } + + t.Cleanup(func() { + if err := os.Chdir(olddir); err != nil { + t.Errorf("chdir to original working directory %s: %v", olddir, err) + os.Exit(1) + } + }) +} + +func TestSameWindowsFile(t *testing.T) { + temp := t.TempDir() + chdir(t, temp) + + f, err := os.Create("a") + if err != nil { + t.Fatal(err) + } + f.Close() + + ia1, err := os.Stat("a") + if err != nil { + t.Fatal(err) + } + + path, err := filepath.Abs("a") + if err != nil { + t.Fatal(err) + } + ia2, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + if !os.SameFile(ia1, ia2) { + t.Errorf("files should be same") + } + + p := filepath.VolumeName(path) + filepath.Base(path) + if err != nil { + t.Fatal(err) + } + ia3, err := os.Stat(p) + if err != nil { + t.Fatal(err) + } + if !os.SameFile(ia1, ia3) { + t.Errorf("files should be same") + } +} + +type dirLinkTest struct { + name string + mklink func(link, target string) error + issueNo int // correspondent issue number (for broken tests) +} + +func testDirLinks(t *testing.T, tests []dirLinkTest) { + tmpdir := t.TempDir() + chdir(t, tmpdir) + + dir := filepath.Join(tmpdir, "dir") + err := os.Mkdir(dir, 0777) + if err != nil { + t.Fatal(err) + } + fi, err := os.Stat(dir) + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(filepath.Join(dir, "abc"), []byte("abc"), 0644) + if err != nil { + t.Fatal(err) + } + for _, test := range tests { + link := filepath.Join(tmpdir, test.name+"_link") + err := test.mklink(link, dir) + if err != nil { + t.Errorf("creating link for %q test failed: %v", test.name, err) + continue + } + + data, err := os.ReadFile(filepath.Join(link, "abc")) + if err != nil { + t.Errorf("failed to read abc file: %v", err) + continue + } + if string(data) != "abc" { + t.Errorf(`abc file is expected to have "abc" in it, but has %v`, data) + continue + } + + if test.issueNo > 0 { + t.Logf("skipping broken %q test: see issue %d", test.name, test.issueNo) + continue + } + + fi1, err := os.Stat(link) + if err != nil { + t.Errorf("failed to stat link %v: %v", link, err) + continue + } + if !fi1.IsDir() { + t.Errorf("%q should be a directory", link) + continue + } + if fi1.Name() != filepath.Base(link) { + t.Errorf("Stat(%q).Name() = %q, want %q", link, fi1.Name(), filepath.Base(link)) + continue + } + if !os.SameFile(fi, fi1) { + t.Errorf("%q should point to %q", link, dir) + continue + } + + fi2, err := os.Lstat(link) + if err != nil { + t.Errorf("failed to lstat link %v: %v", link, err) + continue + } + if m := fi2.Mode(); m&fs.ModeSymlink == 0 { + t.Errorf("%q should be a link, but is not (mode=0x%x)", link, uint32(m)) + continue + } + if m := fi2.Mode(); m&fs.ModeDir != 0 { + t.Errorf("%q should be a link, not a directory (mode=0x%x)", link, uint32(m)) + continue + } + } +} + +// reparseData is used to build reparse buffer data required for tests. +type reparseData struct { + substituteName namePosition + printName namePosition + pathBuf []uint16 +} + +type namePosition struct { + offset uint16 + length uint16 +} + +func (rd *reparseData) addUTF16s(s []uint16) (offset uint16) { + off := len(rd.pathBuf) * 2 + rd.pathBuf = append(rd.pathBuf, s...) 
+ return uint16(off) +} + +func (rd *reparseData) addString(s string) (offset, length uint16) { + p := syscall.StringToUTF16(s) + return rd.addUTF16s(p), uint16(len(p)-1) * 2 // do not include terminating NUL in the length (as per PrintNameLength and SubstituteNameLength documentation) +} + +func (rd *reparseData) addSubstituteName(name string) { + rd.substituteName.offset, rd.substituteName.length = rd.addString(name) +} + +func (rd *reparseData) addPrintName(name string) { + rd.printName.offset, rd.printName.length = rd.addString(name) +} + +func (rd *reparseData) addStringNoNUL(s string) (offset, length uint16) { + p := syscall.StringToUTF16(s) + p = p[:len(p)-1] + return rd.addUTF16s(p), uint16(len(p)) * 2 +} + +func (rd *reparseData) addSubstituteNameNoNUL(name string) { + rd.substituteName.offset, rd.substituteName.length = rd.addStringNoNUL(name) +} + +func (rd *reparseData) addPrintNameNoNUL(name string) { + rd.printName.offset, rd.printName.length = rd.addStringNoNUL(name) +} + +// pathBuffeLen returns length of rd pathBuf in bytes. +func (rd *reparseData) pathBuffeLen() uint16 { + return uint16(len(rd.pathBuf)) * 2 +} + +// Windows REPARSE_DATA_BUFFER contains union member, and cannot be +// translated into Go directly. _REPARSE_DATA_BUFFER type is to help +// construct alternative versions of Windows REPARSE_DATA_BUFFER with +// union part of SymbolicLinkReparseBuffer or MountPointReparseBuffer type. +type _REPARSE_DATA_BUFFER struct { + header windows.REPARSE_DATA_BUFFER_HEADER + detail [syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE]byte +} + +func createDirLink(link string, rdb *_REPARSE_DATA_BUFFER) error { + err := os.Mkdir(link, 0777) + if err != nil { + return err + } + + linkp := syscall.StringToUTF16(link) + fd, err := syscall.CreateFile(&linkp[0], syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return err + } + defer syscall.CloseHandle(fd) + + buflen := uint32(rdb.header.ReparseDataLength) + uint32(unsafe.Sizeof(rdb.header)) + var bytesReturned uint32 + return syscall.DeviceIoControl(fd, windows.FSCTL_SET_REPARSE_POINT, + (*byte)(unsafe.Pointer(&rdb.header)), buflen, nil, 0, &bytesReturned, nil) +} + +func createMountPoint(link string, target *reparseData) error { + var buf *windows.MountPointReparseBuffer + buflen := uint16(unsafe.Offsetof(buf.PathBuffer)) + target.pathBuffeLen() // see ReparseDataLength documentation + byteblob := make([]byte, buflen) + buf = (*windows.MountPointReparseBuffer)(unsafe.Pointer(&byteblob[0])) + buf.SubstituteNameOffset = target.substituteName.offset + buf.SubstituteNameLength = target.substituteName.length + buf.PrintNameOffset = target.printName.offset + buf.PrintNameLength = target.printName.length + pbuflen := len(target.pathBuf) + copy((*[2048]uint16)(unsafe.Pointer(&buf.PathBuffer[0]))[:pbuflen:pbuflen], target.pathBuf) + + var rdb _REPARSE_DATA_BUFFER + rdb.header.ReparseTag = windows.IO_REPARSE_TAG_MOUNT_POINT + rdb.header.ReparseDataLength = buflen + copy(rdb.detail[:], byteblob) + + return createDirLink(link, &rdb) +} + +func TestDirectoryJunction(t *testing.T) { + var tests = []dirLinkTest{ + { + // Create link similar to what mklink does, by inserting \??\ at the front of absolute target. 
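+ // For a hypothetical target C:\tmp\dir this produces the substitute
+ // name \??\C:\tmp\dir, resolved in the NT object namespace, while the
+ // print name stays the user-visible C:\tmp\dir.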
+ name: "standard", + mklink: func(link, target string) error { + var t reparseData + t.addSubstituteName(`\??\` + target) + t.addPrintName(target) + return createMountPoint(link, &t) + }, + }, + { + // Do as junction utility https://learn.microsoft.com/en-us/sysinternals/downloads/junction does - set PrintNameLength to 0. + name: "have_blank_print_name", + mklink: func(link, target string) error { + var t reparseData + t.addSubstituteName(`\??\` + target) + t.addPrintName("") + return createMountPoint(link, &t) + }, + }, + } + output, _ := testenv.Command(t, "cmd", "/c", "mklink", "/?").Output() + mklinkSupportsJunctionLinks := strings.Contains(string(output), " /J ") + if mklinkSupportsJunctionLinks { + tests = append(tests, + dirLinkTest{ + name: "use_mklink_cmd", + mklink: func(link, target string) error { + output, err := testenv.Command(t, "cmd", "/c", "mklink", "/J", link, target).CombinedOutput() + if err != nil { + t.Errorf("failed to run mklink %v %v: %v %q", link, target, err, output) + } + return nil + }, + }, + ) + } else { + t.Log(`skipping "use_mklink_cmd" test, mklink does not supports directory junctions`) + } + testDirLinks(t, tests) +} + +func enableCurrentThreadPrivilege(privilegeName string) error { + ct, err := windows.GetCurrentThread() + if err != nil { + return err + } + var t syscall.Token + err = windows.OpenThreadToken(ct, syscall.TOKEN_QUERY|windows.TOKEN_ADJUST_PRIVILEGES, false, &t) + if err != nil { + return err + } + defer syscall.CloseHandle(syscall.Handle(t)) + + var tp windows.TOKEN_PRIVILEGES + + privStr, err := syscall.UTF16PtrFromString(privilegeName) + if err != nil { + return err + } + err = windows.LookupPrivilegeValue(nil, privStr, &tp.Privileges[0].Luid) + if err != nil { + return err + } + tp.PrivilegeCount = 1 + tp.Privileges[0].Attributes = windows.SE_PRIVILEGE_ENABLED + return windows.AdjustTokenPrivileges(t, false, &tp, 0, nil, nil) +} + +func createSymbolicLink(link string, target *reparseData, isrelative bool) error { + var buf *windows.SymbolicLinkReparseBuffer + buflen := uint16(unsafe.Offsetof(buf.PathBuffer)) + target.pathBuffeLen() // see ReparseDataLength documentation + byteblob := make([]byte, buflen) + buf = (*windows.SymbolicLinkReparseBuffer)(unsafe.Pointer(&byteblob[0])) + buf.SubstituteNameOffset = target.substituteName.offset + buf.SubstituteNameLength = target.substituteName.length + buf.PrintNameOffset = target.printName.offset + buf.PrintNameLength = target.printName.length + if isrelative { + buf.Flags = windows.SYMLINK_FLAG_RELATIVE + } + pbuflen := len(target.pathBuf) + copy((*[2048]uint16)(unsafe.Pointer(&buf.PathBuffer[0]))[:pbuflen:pbuflen], target.pathBuf) + + var rdb _REPARSE_DATA_BUFFER + rdb.header.ReparseTag = syscall.IO_REPARSE_TAG_SYMLINK + rdb.header.ReparseDataLength = buflen + copy(rdb.detail[:], byteblob) + + return createDirLink(link, &rdb) +} + +func TestDirectorySymbolicLink(t *testing.T) { + var tests []dirLinkTest + output, _ := testenv.Command(t, "cmd", "/c", "mklink", "/?").Output() + mklinkSupportsDirectorySymbolicLinks := strings.Contains(string(output), " /D ") + if mklinkSupportsDirectorySymbolicLinks { + tests = append(tests, + dirLinkTest{ + name: "use_mklink_cmd", + mklink: func(link, target string) error { + output, err := testenv.Command(t, "cmd", "/c", "mklink", "/D", link, target).CombinedOutput() + if err != nil { + t.Errorf("failed to run mklink %v %v: %v %q", link, target, err, output) + } + return nil + }, + }, + ) + } else { + t.Log(`skipping "use_mklink_cmd" test, mklink does not 
supports directory symbolic links`) + } + + // The rest of these test requires SeCreateSymbolicLinkPrivilege to be held. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + err := windows.ImpersonateSelf(windows.SecurityImpersonation) + if err != nil { + t.Fatal(err) + } + defer windows.RevertToSelf() + + err = enableCurrentThreadPrivilege("SeCreateSymbolicLinkPrivilege") + if err != nil { + t.Skipf(`skipping some tests, could not enable "SeCreateSymbolicLinkPrivilege": %v`, err) + } + tests = append(tests, + dirLinkTest{ + name: "use_os_pkg", + mklink: func(link, target string) error { + return os.Symlink(target, link) + }, + }, + dirLinkTest{ + // Create link similar to what mklink does, by inserting \??\ at the front of absolute target. + name: "standard", + mklink: func(link, target string) error { + var t reparseData + t.addPrintName(target) + t.addSubstituteName(`\??\` + target) + return createSymbolicLink(link, &t, false) + }, + }, + dirLinkTest{ + name: "relative", + mklink: func(link, target string) error { + var t reparseData + t.addSubstituteNameNoNUL(filepath.Base(target)) + t.addPrintNameNoNUL(filepath.Base(target)) + return createSymbolicLink(link, &t, true) + }, + }, + ) + testDirLinks(t, tests) +} + +func mustHaveWorkstation(t *testing.T) { + mar, err := windows.OpenSCManager(nil, nil, windows.SERVICE_QUERY_STATUS) + if err != nil { + return + } + defer syscall.CloseHandle(mar) + //LanmanWorkstation is the service name, and Workstation is the display name. + srv, err := windows.OpenService(mar, syscall.StringToUTF16Ptr("LanmanWorkstation"), windows.SERVICE_QUERY_STATUS) + if err != nil { + return + } + defer syscall.CloseHandle(srv) + var state windows.SERVICE_STATUS + err = windows.QueryServiceStatus(srv, &state) + if err != nil { + return + } + if state.CurrentState != windows.SERVICE_RUNNING { + t.Skip("Requires the Windows service Workstation, but it is detected that it is not enabled.") + } +} + +func TestNetworkSymbolicLink(t *testing.T) { + testenv.MustHaveSymlink(t) + + const _NERR_ServerNotStarted = syscall.Errno(2114) + + dir := t.TempDir() + chdir(t, dir) + + pid := os.Getpid() + shareName := fmt.Sprintf("GoSymbolicLinkTestShare%d", pid) + sharePath := filepath.Join(dir, shareName) + testDir := "TestDir" + + err := os.MkdirAll(filepath.Join(sharePath, testDir), 0777) + if err != nil { + t.Fatal(err) + } + + wShareName, err := syscall.UTF16PtrFromString(shareName) + if err != nil { + t.Fatal(err) + } + wSharePath, err := syscall.UTF16PtrFromString(sharePath) + if err != nil { + t.Fatal(err) + } + + // Per https://learn.microsoft.com/en-us/windows/win32/api/lmshare/ns-lmshare-share_info_2: + // + // “[The shi2_permissions field] indicates the shared resource's permissions + // for servers running with share-level security. A server running user-level + // security ignores this member. + // … + // Note that Windows does not support share-level security.” + // + // So it shouldn't matter what permissions we set here. 
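+ // The 2 passed to NetShareAdd below is the information level selecting
+ // SHARE_INFO_2; the struct is handed over as a raw *byte, so its layout
+ // must match the native SHARE_INFO_2 field for field.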
+ const permissions = 0 + + p := windows.SHARE_INFO_2{ + Netname: wShareName, + Type: windows.STYPE_DISKTREE | windows.STYPE_TEMPORARY, + Remark: nil, + Permissions: permissions, + MaxUses: 1, + CurrentUses: 0, + Path: wSharePath, + Passwd: nil, + } + + err = windows.NetShareAdd(nil, 2, (*byte)(unsafe.Pointer(&p)), nil) + if err != nil { + if err == syscall.ERROR_ACCESS_DENIED || err == _NERR_ServerNotStarted { + t.Skipf("skipping: NetShareAdd: %v", err) + } + t.Fatal(err) + } + defer func() { + err := windows.NetShareDel(nil, wShareName, 0) + if err != nil { + t.Fatal(err) + } + }() + + UNCPath := `\\localhost\` + shareName + `\` + + fi1, err := os.Stat(sharePath) + if err != nil { + t.Fatal(err) + } + fi2, err := os.Stat(UNCPath) + if err != nil { + mustHaveWorkstation(t) + t.Fatal(err) + } + if !os.SameFile(fi1, fi2) { + t.Fatalf("%q and %q should be the same directory, but not", sharePath, UNCPath) + } + + target := filepath.Join(UNCPath, testDir) + link := "link" + + err = os.Symlink(target, link) + if err != nil { + t.Fatal(err) + } + defer os.Remove(link) + + got, err := os.Readlink(link) + if err != nil { + t.Fatal(err) + } + if got != target { + t.Errorf(`os.Readlink(%#q): got %v, want %v`, link, got, target) + } + + got, err = filepath.EvalSymlinks(link) + if err != nil { + t.Fatal(err) + } + if got != target { + t.Errorf(`filepath.EvalSymlinks(%#q): got %v, want %v`, link, got, target) + } +} + +func TestStatLxSymLink(t *testing.T) { + if _, err := exec.LookPath("wsl"); err != nil { + t.Skip("skipping: WSL not detected") + } + + temp := t.TempDir() + chdir(t, temp) + + const target = "target" + const link = "link" + + _, err := testenv.Command(t, "wsl", "/bin/mkdir", target).Output() + if err != nil { + // This normally happens when WSL still doesn't have a distro installed to run on. + t.Skipf("skipping: WSL is not correctly installed: %v", err) + } + + _, err = testenv.Command(t, "wsl", "/bin/ln", "-s", target, link).Output() + if err != nil { + t.Fatal(err) + } + + fi, err := os.Lstat(link) + if err != nil { + t.Fatal(err) + } + if m := fi.Mode(); m&fs.ModeSymlink != 0 { + // This can happen depending on newer WSL versions when running as admin or in developer mode. + t.Skip("skipping: WSL created reparse tag IO_REPARSE_TAG_SYMLINK instead of an IO_REPARSE_TAG_LX_SYMLINK") + } + // Stat'ing a IO_REPARSE_TAG_LX_SYMLINK from outside WSL always return ERROR_CANT_ACCESS_FILE. + // We check this condition to validate that os.Stat has tried to follow the link. 
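+ // errors.Is unwraps the *fs.PathError from os.Stat down to the underlying
+ // syscall.Errno, so matching a raw Windows error number works. The general
+ // pattern, sketched with a hypothetical path:
+ //
+ //	if _, err := os.Stat(`C:\some\lx\link`); errors.Is(err, syscall.Errno(1920)) {
+ //		// the OS refused to follow the reparse point
+ //	}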
+ _, err = os.Stat(link) + const ERROR_CANT_ACCESS_FILE = syscall.Errno(1920) + if err == nil || !errors.Is(err, ERROR_CANT_ACCESS_FILE) { + t.Fatalf("os.Stat(%q): got %v, want ERROR_CANT_ACCESS_FILE", link, err) + } +} + +func TestStartProcessAttr(t *testing.T) { + t.Parallel() + + p, err := os.StartProcess(os.Getenv("COMSPEC"), []string{"/c", "cd"}, new(os.ProcAttr)) + if err != nil { + return + } + defer p.Wait() + t.Fatalf("StartProcess expected to fail, but succeeded.") +} + +func TestShareNotExistError(t *testing.T) { + if testing.Short() { + t.Skip("slow test that uses network; skipping") + } + t.Parallel() + + _, err := os.Stat(`\\no_such_server\no_such_share\no_such_file`) + if err == nil { + t.Fatal("stat succeeded, but expected to fail") + } + if !os.IsNotExist(err) { + t.Fatalf("os.Stat failed with %q, but os.IsNotExist(err) is false", err) + } +} + +func TestBadNetPathError(t *testing.T) { + const ERROR_BAD_NETPATH = syscall.Errno(53) + if !os.IsNotExist(ERROR_BAD_NETPATH) { + t.Fatal("os.IsNotExist(syscall.Errno(53)) is false, but want true") + } +} + +func TestStatDir(t *testing.T) { + defer chtmpdir(t)() + + f, err := os.Open(".") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + t.Fatal(err) + } + + err = os.Chdir("..") + if err != nil { + t.Fatal(err) + } + + fi2, err := f.Stat() + if err != nil { + t.Fatal(err) + } + + if !os.SameFile(fi, fi2) { + t.Fatal("race condition occurred") + } +} + +func TestOpenVolumeName(t *testing.T) { + tmpdir := t.TempDir() + chdir(t, tmpdir) + + want := []string{"file1", "file2", "file3", "gopher.txt"} + sort.Strings(want) + for _, name := range want { + err := os.WriteFile(filepath.Join(tmpdir, name), nil, 0777) + if err != nil { + t.Fatal(err) + } + } + + f, err := os.Open(filepath.VolumeName(tmpdir)) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + have, err := f.Readdirnames(-1) + if err != nil { + t.Fatal(err) + } + sort.Strings(have) + + if strings.Join(want, "/") != strings.Join(have, "/") { + t.Fatalf("unexpected file list %q, want %q", have, want) + } +} + +func TestDeleteReadOnly(t *testing.T) { + t.Parallel() + + tmpdir := t.TempDir() + p := filepath.Join(tmpdir, "a") + // This sets FILE_ATTRIBUTE_READONLY. 
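+ // On Windows the os package maps a permission mode with no 0200 (owner
+ // write) bit to FILE_ATTRIBUTE_READONLY, so the 0400 below creates a
+ // read-only file; a Chmod(p, 0600) would clear the attribute again
+ // (illustration only; the test keeps the file read-only for os.Remove).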
+ f, err := os.OpenFile(p, os.O_CREATE, 0400) + if err != nil { + t.Fatal(err) + } + f.Close() + + if err = os.Chmod(p, 0400); err != nil { + t.Fatal(err) + } + if err = os.Remove(p); err != nil { + t.Fatal(err) + } +} + +func TestReadStdin(t *testing.T) { + old := poll.ReadConsole + defer func() { + poll.ReadConsole = old + }() + + p, err := syscall.GetCurrentProcess() + if err != nil { + t.Fatalf("Unable to get handle to current process: %v", err) + } + var stdinDuplicate syscall.Handle + err = syscall.DuplicateHandle(p, syscall.Handle(syscall.Stdin), p, &stdinDuplicate, 0, false, syscall.DUPLICATE_SAME_ACCESS) + if err != nil { + t.Fatalf("Unable to duplicate stdin: %v", err) + } + testConsole := os.NewConsoleFile(stdinDuplicate, "test") + + var tests = []string{ + "abc", + "äöü", + "\u3042", + "“hi”™", + "hello\x1aworld", + "\U0001F648\U0001F649\U0001F64A", + } + + for _, consoleSize := range []int{1, 2, 3, 10, 16, 100, 1000} { + for _, readSize := range []int{1, 2, 3, 4, 5, 8, 10, 16, 20, 50, 100} { + for _, s := range tests { + t.Run(fmt.Sprintf("c%d/r%d/%s", consoleSize, readSize, s), func(t *testing.T) { + s16 := utf16.Encode([]rune(s)) + poll.ReadConsole = func(h syscall.Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) error { + if inputControl != nil { + t.Fatalf("inputControl not nil") + } + n := int(toread) + if n > consoleSize { + n = consoleSize + } + n = copy((*[10000]uint16)(unsafe.Pointer(buf))[:n:n], s16) + s16 = s16[n:] + *read = uint32(n) + t.Logf("read %d -> %d", toread, *read) + return nil + } + + var all []string + var buf []byte + chunk := make([]byte, readSize) + for { + n, err := testConsole.Read(chunk) + buf = append(buf, chunk[:n]...) + if err == io.EOF { + all = append(all, string(buf)) + if len(all) >= 5 { + break + } + buf = buf[:0] + } else if err != nil { + t.Fatalf("reading %q: error: %v", s, err) + } + if len(buf) >= 2000 { + t.Fatalf("reading %q: stuck in loop: %q", s, buf) + } + } + + want := strings.Split(s, "\x1a") + for len(want) < 5 { + want = append(want, "") + } + if !reflect.DeepEqual(all, want) { + t.Errorf("reading %q:\nhave %x\nwant %x", s, all, want) + } + }) + } + } + } +} + +func TestStatPagefile(t *testing.T) { + t.Parallel() + + const path = `c:\pagefile.sys` + fi, err := os.Stat(path) + if err == nil { + if fi.Name() == "" { + t.Fatalf("Stat(%q).Name() is empty", path) + } + t.Logf("Stat(%q).Size() = %v", path, fi.Size()) + return + } + if os.IsNotExist(err) { + t.Skip(`skipping because c:\pagefile.sys is not found`) + } + t.Fatal(err) +} + +// syscallCommandLineToArgv calls syscall.CommandLineToArgv +// and converts returned result into []string. +func syscallCommandLineToArgv(cmd string) ([]string, error) { + var argc int32 + argv, err := syscall.CommandLineToArgv(&syscall.StringToUTF16(cmd)[0], &argc) + if err != nil { + return nil, err + } + defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv)))) + + var args []string + for _, v := range (*argv)[:argc] { + args = append(args, syscall.UTF16ToString((*v)[:])) + } + return args, nil +} + +// compareCommandLineToArgvWithSyscall ensures that +// os.CommandLineToArgv(cmd) and syscall.CommandLineToArgv(cmd) +// return the same result. 
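+// For instance, one of the Microsoft examples exercised below, `a\\\"b c d`,
+// must split into the three arguments a\"b, c and d: an odd-length run of
+// backslashes before a quote becomes half as many backslashes (rounded down)
+// plus a literal quote.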
+func compareCommandLineToArgvWithSyscall(t *testing.T, cmd string) { + syscallArgs, err := syscallCommandLineToArgv(cmd) + if err != nil { + t.Fatal(err) + } + args := os.CommandLineToArgv(cmd) + if want, have := fmt.Sprintf("%q", syscallArgs), fmt.Sprintf("%q", args); want != have { + t.Errorf("testing os.commandLineToArgv(%q) failed: have %q want %q", cmd, args, syscallArgs) + return + } +} + +func TestCmdArgs(t *testing.T) { + if testing.Short() { + t.Skipf("in short mode; skipping test that builds a binary") + } + t.Parallel() + + tmpdir := t.TempDir() + + const prog = ` +package main + +import ( + "fmt" + "os" +) + +func main() { + fmt.Printf("%q", os.Args) +} +` + src := filepath.Join(tmpdir, "main.go") + if err := os.WriteFile(src, []byte(prog), 0666); err != nil { + t.Fatal(err) + } + + exe := filepath.Join(tmpdir, "main.exe") + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", exe, src) + cmd.Dir = tmpdir + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("building main.exe failed: %v\n%s", err, out) + } + + var cmds = []string{ + ``, + ` a b c`, + ` "`, + ` ""`, + ` """`, + ` "" a`, + ` "123"`, + ` \"123\"`, + ` \"123 456\"`, + ` \\"`, + ` \\\"`, + ` \\\\\"`, + ` \\\"x`, + ` """"\""\\\"`, + ` abc`, + ` \\\\\""x"""y z`, + "\tb\t\"x\ty\"", + ` "Брад" d e`, + // examples from https://learn.microsoft.com/en-us/cpp/cpp/main-function-command-line-args + ` "abc" d e`, + ` a\\b d"e f"g h`, + ` a\\\"b c d`, + ` a\\\\"b c" d e`, + // http://daviddeley.com/autohotkey/parameters/parameters.htm#WINARGV + // from 5.4 Examples + ` CallMeIshmael`, + ` "Call Me Ishmael"`, + ` Cal"l Me I"shmael`, + ` CallMe\"Ishmael`, + ` "CallMe\"Ishmael"`, + ` "Call Me Ishmael\\"`, + ` "CallMe\\\"Ishmael"`, + ` a\\\b`, + ` "a\\\b"`, + // from 5.5 Some Common Tasks + ` "\"Call Me Ishmael\""`, + ` "C:\TEST A\\"`, + ` "\"C:\TEST A\\\""`, + // from 5.6 The Microsoft Examples Explained + ` "a b c" d e`, + ` "ab\"c" "\\" d`, + ` a\\\b d"e f"g h`, + ` a\\\"b c d`, + ` a\\\\"b c" d e`, + // from 5.7 Double Double Quote Examples (pre 2008) + ` "a b c""`, + ` """CallMeIshmael""" b c`, + ` """Call Me Ishmael"""`, + ` """"Call Me Ishmael"" b c`, + } + for _, cmd := range cmds { + compareCommandLineToArgvWithSyscall(t, "test"+cmd) + compareCommandLineToArgvWithSyscall(t, `"cmd line"`+cmd) + compareCommandLineToArgvWithSyscall(t, exe+cmd) + + // test both syscall.EscapeArg and os.commandLineToArgv + args := os.CommandLineToArgv(exe + cmd) + out, err := testenv.Command(t, args[0], args[1:]...).CombinedOutput() + if err != nil { + t.Fatalf("running %q failed: %v\n%v", args, err, string(out)) + } + if want, have := fmt.Sprintf("%q", args), string(out); want != have { + t.Errorf("wrong output of executing %q: have %q want %q", args, have, want) + continue + } + } +} + +func findOneDriveDir() (string, error) { + // as per https://stackoverflow.com/questions/42519624/how-to-determine-location-of-onedrive-on-windows-7-and-8-in-c + const onedrivekey = `SOFTWARE\Microsoft\OneDrive` + k, err := registry.OpenKey(registry.CURRENT_USER, onedrivekey, registry.READ) + if err != nil { + return "", fmt.Errorf("OpenKey(%q) failed: %v", onedrivekey, err) + } + defer k.Close() + + path, valtype, err := k.GetStringValue("UserFolder") + if err != nil { + return "", fmt.Errorf("reading UserFolder failed: %v", err) + } + + if valtype == registry.EXPAND_SZ { + expanded, err := registry.ExpandString(path) + if err != nil { + return "", fmt.Errorf("expanding UserFolder failed: %v", err) + } + path = expanded + } + + return 
path, nil +} + +// TestOneDrive verifies that OneDrive folder is a directory and not a symlink. +func TestOneDrive(t *testing.T) { + t.Parallel() + + dir, err := findOneDriveDir() + if err != nil { + t.Skipf("Skipping, because we did not find OneDrive directory: %v", err) + } + testDirStats(t, dir) +} + +func TestWindowsDevNullFile(t *testing.T) { + t.Parallel() + + f1, err := os.Open("NUL") + if err != nil { + t.Fatal(err) + } + defer f1.Close() + + fi1, err := f1.Stat() + if err != nil { + t.Fatal(err) + } + + f2, err := os.Open("nul") + if err != nil { + t.Fatal(err) + } + defer f2.Close() + + fi2, err := f2.Stat() + if err != nil { + t.Fatal(err) + } + + if !os.SameFile(fi1, fi2) { + t.Errorf(`"NUL" and "nul" are not the same file`) + } +} + +func TestFileStatNUL(t *testing.T) { + t.Parallel() + + f, err := os.Open("NUL") + if err != nil { + t.Fatal(err) + } + fi, err := f.Stat() + if err != nil { + t.Fatal(err) + } + if got, want := fi.Mode(), os.ModeDevice|os.ModeCharDevice|0666; got != want { + t.Errorf("Open(%q).Stat().Mode() = %v, want %v", "NUL", got, want) + } +} + +func TestStatNUL(t *testing.T) { + t.Parallel() + + fi, err := os.Stat("NUL") + if err != nil { + t.Fatal(err) + } + if got, want := fi.Mode(), os.ModeDevice|os.ModeCharDevice|0666; got != want { + t.Errorf("Stat(%q).Mode() = %v, want %v", "NUL", got, want) + } +} + +// TestSymlinkCreation verifies that creating a symbolic link +// works on Windows when developer mode is active. +// This is supported starting Windows 10 (1703, v10.0.14972). +func TestSymlinkCreation(t *testing.T) { + if !testenv.HasSymlink() && !isWindowsDeveloperModeActive() { + t.Skip("Windows developer mode is not active") + } + t.Parallel() + + temp := t.TempDir() + dummyFile := filepath.Join(temp, "file") + if err := os.WriteFile(dummyFile, []byte(""), 0644); err != nil { + t.Fatal(err) + } + + linkFile := filepath.Join(temp, "link") + if err := os.Symlink(dummyFile, linkFile); err != nil { + t.Fatal(err) + } +} + +// isWindowsDeveloperModeActive checks whether or not the developer mode is active on Windows 10. +// Returns false for prior Windows versions. +// see https://docs.microsoft.com/en-us/windows/uwp/get-started/enable-your-device-for-development +func isWindowsDeveloperModeActive() bool { + key, err := registry.OpenKey(registry.LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\AppModelUnlock", registry.READ) + if err != nil { + return false + } + + val, _, err := key.GetIntegerValue("AllowDevelopmentWithoutDevLicense") + if err != nil { + return false + } + + return val != 0 +} + +// TestRootRelativeDirSymlink verifies that symlinks to paths relative to the +// drive root (beginning with "\" but no volume name) are created with the +// correct symlink type. +// (See https://golang.org/issue/39183#issuecomment-632175728.) 
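+// For example (hypothetical path), a target of \Users\gopher\dir names "dir"
+// on whatever drive is current, so os.Symlink must still detect that the
+// target is a directory and create a directory-type symlink for it.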
+func TestRootRelativeDirSymlink(t *testing.T) { + testenv.MustHaveSymlink(t) + t.Parallel() + + temp := t.TempDir() + dir := filepath.Join(temp, "dir") + if err := os.Mkdir(dir, 0755); err != nil { + t.Fatal(err) + } + + volumeRelDir := strings.TrimPrefix(dir, filepath.VolumeName(dir)) // leaves leading backslash + + link := filepath.Join(temp, "link") + err := os.Symlink(volumeRelDir, link) + if err != nil { + t.Fatal(err) + } + t.Logf("Symlink(%#q, %#q)", volumeRelDir, link) + + f, err := os.Open(link) + if err != nil { + t.Fatal(err) + } + defer f.Close() + if fi, err := f.Stat(); err != nil { + t.Fatal(err) + } else if !fi.IsDir() { + t.Errorf("Open(%#q).Stat().IsDir() = false; want true", f.Name()) + } +} + +// TestWorkingDirectoryRelativeSymlink verifies that symlinks to paths relative +// to the current working directory for the drive, such as "C:File.txt", are +// correctly converted to absolute links of the correct symlink type (per +// https://docs.microsoft.com/en-us/windows/win32/fileio/creating-symbolic-links). +func TestWorkingDirectoryRelativeSymlink(t *testing.T) { + testenv.MustHaveSymlink(t) + + // Construct a directory to be symlinked. + temp := t.TempDir() + if v := filepath.VolumeName(temp); len(v) < 2 || v[1] != ':' { + t.Skipf("Can't test relative symlinks: t.TempDir() (%#q) does not begin with a drive letter.", temp) + } + + absDir := filepath.Join(temp, `dir\sub`) + if err := os.MkdirAll(absDir, 0755); err != nil { + t.Fatal(err) + } + + // Change to the temporary directory and construct a + // working-directory-relative symlink. + oldwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + defer func() { + if err := os.Chdir(oldwd); err != nil { + t.Fatal(err) + } + }() + if err := os.Chdir(temp); err != nil { + t.Fatal(err) + } + t.Logf("Chdir(%#q)", temp) + + wdRelDir := filepath.VolumeName(temp) + `dir\sub` // no backslash after volume. + absLink := filepath.Join(temp, "link") + err = os.Symlink(wdRelDir, absLink) + if err != nil { + t.Fatal(err) + } + t.Logf("Symlink(%#q, %#q)", wdRelDir, absLink) + + // Now change back to the original working directory and verify that the + // symlink still refers to its original path and is correctly marked as a + // directory. + if err := os.Chdir(oldwd); err != nil { + t.Fatal(err) + } + t.Logf("Chdir(%#q)", oldwd) + + resolved, err := os.Readlink(absLink) + if err != nil { + t.Errorf("Readlink(%#q): %v", absLink, err) + } else if resolved != absDir { + t.Errorf("Readlink(%#q) = %#q; want %#q", absLink, resolved, absDir) + } + + linkFile, err := os.Open(absLink) + if err != nil { + t.Fatal(err) + } + defer linkFile.Close() + + linkInfo, err := linkFile.Stat() + if err != nil { + t.Fatal(err) + } + if !linkInfo.IsDir() { + t.Errorf("Open(%#q).Stat().IsDir() = false; want true", absLink) + } + + absInfo, err := os.Stat(absDir) + if err != nil { + t.Fatal(err) + } + + if !os.SameFile(absInfo, linkInfo) { + t.Errorf("SameFile(Stat(%#q), Open(%#q).Stat()) = false; want true", absDir, absLink) + } +} + +// TestStatOfInvalidName is regression test for issue #24999. +func TestStatOfInvalidName(t *testing.T) { + t.Parallel() + + _, err := os.Stat("*.go") + if err == nil { + t.Fatal(`os.Stat("*.go") unexpectedly succeeded`) + } +} + +// findUnusedDriveLetter searches mounted drive list on the system +// (starting from Z: and ending at D:) for unused drive letter. +// It returns path to the found drive root directory (like Z:\) or error. 
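+// Illustrative outcome (a sketch): with Z: and Y: mounted, the scan returns
+// `X:\`; if every letter down to D: is in use it returns an error, and
+// callers such as TestRootDirAsTemp below skip rather than fail.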
+func findUnusedDriveLetter() (string, error) { + // Do not use A: and B:, because they are reserved for floppy drive. + // Do not use C:, because it is normally used for main drive. + for l := 'Z'; l >= 'D'; l-- { + p := string(l) + `:\` + _, err := os.Stat(p) + if os.IsNotExist(err) { + return p, nil + } + } + return "", errors.New("Could not find unused drive letter.") +} + +func TestRootDirAsTemp(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { + fmt.Print(os.TempDir()) + os.Exit(0) + } + + testenv.MustHaveExec(t) + t.Parallel() + + exe, err := os.Executable() + if err != nil { + t.Fatal(err) + } + + newtmp, err := findUnusedDriveLetter() + if err != nil { + t.Skip(err) + } + + cmd := testenv.Command(t, exe, "-test.run=^TestRootDirAsTemp$") + cmd.Env = cmd.Environ() + cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1") + cmd.Env = append(cmd.Env, "TMP="+newtmp) + cmd.Env = append(cmd.Env, "TEMP="+newtmp) + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Failed to spawn child process: %v %q", err, string(output)) + } + if want, have := newtmp, string(output); have != want { + t.Fatalf("unexpected child process output %q, want %q", have, want) + } +} + +func testReadlink(t *testing.T, path, want string) { + got, err := os.Readlink(path) + if err != nil { + t.Error(err) + return + } + if got != want { + t.Errorf(`Readlink(%q): got %q, want %q`, path, got, want) + } +} + +func mklink(t *testing.T, link, target string) { + output, err := testenv.Command(t, "cmd", "/c", "mklink", link, target).CombinedOutput() + if err != nil { + t.Fatalf("failed to run mklink %v %v: %v %q", link, target, err, output) + } +} + +func mklinkj(t *testing.T, link, target string) { + output, err := testenv.Command(t, "cmd", "/c", "mklink", "/J", link, target).CombinedOutput() + if err != nil { + t.Fatalf("failed to run mklink %v %v: %v %q", link, target, err, output) + } +} + +func mklinkd(t *testing.T, link, target string) { + output, err := testenv.Command(t, "cmd", "/c", "mklink", "/D", link, target).CombinedOutput() + if err != nil { + t.Fatalf("failed to run mklink %v %v: %v %q", link, target, err, output) + } +} + +func TestWindowsReadlink(t *testing.T) { + tmpdir, err := os.MkdirTemp("", "TestWindowsReadlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // Make sure tmpdir is not a symlink, otherwise tests will fail. 
+ tmpdir, err = filepath.EvalSymlinks(tmpdir) + if err != nil { + t.Fatal(err) + } + chdir(t, tmpdir) + + vol := filepath.VolumeName(tmpdir) + output, err := testenv.Command(t, "cmd", "/c", "mountvol", vol, "/L").CombinedOutput() + if err != nil { + t.Fatalf("failed to run mountvol %v /L: %v %q", vol, err, output) + } + ntvol := strings.Trim(string(output), " \n\r") + + dir := filepath.Join(tmpdir, "dir") + err = os.MkdirAll(dir, 0777) + if err != nil { + t.Fatal(err) + } + + absdirjlink := filepath.Join(tmpdir, "absdirjlink") + mklinkj(t, absdirjlink, dir) + testReadlink(t, absdirjlink, dir) + + ntdirjlink := filepath.Join(tmpdir, "ntdirjlink") + mklinkj(t, ntdirjlink, ntvol+absdirjlink[len(filepath.VolumeName(absdirjlink)):]) + testReadlink(t, ntdirjlink, absdirjlink) + + ntdirjlinktolink := filepath.Join(tmpdir, "ntdirjlinktolink") + mklinkj(t, ntdirjlinktolink, ntvol+absdirjlink[len(filepath.VolumeName(absdirjlink)):]) + testReadlink(t, ntdirjlinktolink, absdirjlink) + + mklinkj(t, "reldirjlink", "dir") + testReadlink(t, "reldirjlink", dir) // relative directory junction resolves to absolute path + + // Make sure we have sufficient privilege to run mklink command. + testenv.MustHaveSymlink(t) + + absdirlink := filepath.Join(tmpdir, "absdirlink") + mklinkd(t, absdirlink, dir) + testReadlink(t, absdirlink, dir) + + ntdirlink := filepath.Join(tmpdir, "ntdirlink") + mklinkd(t, ntdirlink, ntvol+absdirlink[len(filepath.VolumeName(absdirlink)):]) + testReadlink(t, ntdirlink, absdirlink) + + mklinkd(t, "reldirlink", "dir") + testReadlink(t, "reldirlink", "dir") + + file := filepath.Join(tmpdir, "file") + err = os.WriteFile(file, []byte(""), 0666) + if err != nil { + t.Fatal(err) + } + + filelink := filepath.Join(tmpdir, "filelink") + mklink(t, filelink, file) + testReadlink(t, filelink, file) + + linktofilelink := filepath.Join(tmpdir, "linktofilelink") + mklink(t, linktofilelink, ntvol+filelink[len(filepath.VolumeName(filelink)):]) + testReadlink(t, linktofilelink, filelink) + + mklink(t, "relfilelink", "file") + testReadlink(t, "relfilelink", "file") +} + +func TestOpenDirTOCTOU(t *testing.T) { + t.Parallel() + + // Check opened directories can't be renamed until the handle is closed. + // See issue 52747. + tmpdir := t.TempDir() + dir := filepath.Join(tmpdir, "dir") + if err := os.Mkdir(dir, 0777); err != nil { + t.Fatal(err) + } + f, err := os.Open(dir) + if err != nil { + t.Fatal(err) + } + newpath := filepath.Join(tmpdir, "dir1") + err = os.Rename(dir, newpath) + if err == nil || !errors.Is(err, windows.ERROR_SHARING_VIOLATION) { + f.Close() + t.Fatalf("Rename(%q, %q) = %v; want windows.ERROR_SHARING_VIOLATION", dir, newpath, err) + } + f.Close() + err = os.Rename(dir, newpath) + if err != nil { + t.Error(err) + } +} + +func TestAppExecLinkStat(t *testing.T) { + // We expect executables installed to %LOCALAPPDATA%\Microsoft\WindowsApps to + // be reparse points with tag IO_REPARSE_TAG_APPEXECLINK. Here we check that + // such reparse points are treated as irregular (but executable) files, not + // broken symlinks. 
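+ // In fs.FileMode terms the expectation is: ModeIrregular set, ModeSymlink
+ // and ModeDir clear, for both Lstat and Stat. Sketch of the check pattern
+ // used below:
+ //
+ //	if m := fi.Mode(); m&fs.ModeIrregular == 0 {
+ //		// not classified as an irregular (reparse-point) file
+ //	}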
+ appdata := os.Getenv("LOCALAPPDATA") + if appdata == "" { + t.Skipf("skipping: LOCALAPPDATA not set") + } + + pythonExeName := "python3.exe" + pythonPath := filepath.Join(appdata, `Microsoft\WindowsApps`, pythonExeName) + + lfi, err := os.Lstat(pythonPath) + if err != nil { + t.Skip("skipping test, because Python 3 is not installed via the Windows App Store on this system; see https://golang.org/issue/42919") + } + + // An APPEXECLINK reparse point is not a symlink, so os.Readlink should return + // a non-nil error for it, and Stat should return results identical to Lstat. + linkName, err := os.Readlink(pythonPath) + if err == nil { + t.Errorf("os.Readlink(%q) = %q, but expected an error\n(should be an APPEXECLINK reparse point, not a symlink)", pythonPath, linkName) + } + + sfi, err := os.Stat(pythonPath) + if err != nil { + t.Fatalf("Stat %s: %v", pythonPath, err) + } + + if lfi.Name() != sfi.Name() { + t.Logf("os.Lstat(%q) = %+v", pythonPath, lfi) + t.Logf("os.Stat(%q) = %+v", pythonPath, sfi) + t.Errorf("files should be same") + } + + if lfi.Name() != pythonExeName { + t.Errorf("Stat %s: got %q, but wanted %q", pythonPath, lfi.Name(), pythonExeName) + } + if m := lfi.Mode(); m&fs.ModeSymlink != 0 { + t.Errorf("%q should be a file, not a link (mode=0x%x)", pythonPath, uint32(m)) + } + if m := lfi.Mode(); m&fs.ModeDir != 0 { + t.Errorf("%q should be a file, not a directory (mode=0x%x)", pythonPath, uint32(m)) + } + if m := lfi.Mode(); m&fs.ModeIrregular == 0 { + // A reparse point is not a regular file, but we don't have a more appropriate + // ModeType bit for it, so it should be marked as irregular. + t.Errorf("%q should not be a regular file (mode=0x%x)", pythonPath, uint32(m)) + } + + if sfi.Name() != pythonExeName { + t.Errorf("Stat %s: got %q, but wanted %q", pythonPath, sfi.Name(), pythonExeName) + } + if m := sfi.Mode(); m&fs.ModeSymlink != 0 { + t.Errorf("%q should be a file, not a link (mode=0x%x)", pythonPath, uint32(m)) + } + if m := sfi.Mode(); m&fs.ModeDir != 0 { + t.Errorf("%q should be a file, not a directory (mode=0x%x)", pythonPath, uint32(m)) + } + if m := sfi.Mode(); m&fs.ModeIrregular == 0 { + // A reparse point is not a regular file, but we don't have a more appropriate + // ModeType bit for it, so it should be marked as irregular. + t.Errorf("%q should not be a regular file (mode=0x%x)", pythonPath, uint32(m)) + } + + p, err := exec.LookPath(pythonPath) + if err != nil { + t.Errorf("exec.LookPath(%q): %v", pythonPath, err) + } + if p != pythonPath { + t.Errorf("exec.LookPath(%q) = %q; want %q", pythonPath, p, pythonPath) + } +} + +func TestIllformedUTF16FileName(t *testing.T) { + dir := t.TempDir() + const sep = string(os.PathSeparator) + if !strings.HasSuffix(dir, sep) { + dir += sep + } + + // This UTF-16 file name is ill-formed as it contains low surrogates that are not preceded by high surrogates ([1:5]). + namew := []uint16{0x2e, 0xdc6d, 0xdc73, 0xdc79, 0xdc73, 0x30, 0x30, 0x30, 0x31, 0} + + // Create a file whose name contains unpaired surrogates. + // Use syscall.CreateFile instead of os.Create to simulate a file that is created by + // a non-Go program so the file name hasn't gone through syscall.UTF16FromString. + dirw := utf16.Encode([]rune(dir)) + pathw := append(dirw, namew...) 
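+ // pathw ends in the NUL carried by namew, and CreateFile takes a *uint16
+ // to a NUL-terminated UTF-16 string, so &pathw[0] hands the path to the
+ // OS directly; an os.Create call would round-trip through UTF-8 and
+ // replace the unpaired surrogates with U+FFFD.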
+ fd, err := syscall.CreateFile(&pathw[0], syscall.GENERIC_ALL, 0, nil, syscall.CREATE_NEW, 0, 0) + if err != nil { + t.Fatal(err) + } + syscall.CloseHandle(fd) + + name := syscall.UTF16ToString(namew) + path := filepath.Join(dir, name) + // Verify that os.Lstat can query the file. + fi, err := os.Lstat(path) + if err != nil { + t.Fatal(err) + } + if got := fi.Name(); got != name { + t.Errorf("got %q, want %q", got, name) + } + // Verify that File.Readdirnames lists the file. + f, err := os.Open(dir) + if err != nil { + t.Fatal(err) + } + files, err := f.Readdirnames(0) + f.Close() + if err != nil { + t.Fatal(err) + } + if !slices.Contains(files, name) { + t.Error("file not listed") + } + // Verify that os.RemoveAll can remove the directory + // and that it doesn't hang. + err = os.RemoveAll(dir) + if err != nil { + t.Error(err) + } +} + +func TestUTF16Alloc(t *testing.T) { + allowsPerRun := func(want int, f func()) { + t.Helper() + got := int(testing.AllocsPerRun(5, f)) + if got != want { + t.Errorf("got %d allocs, want %d", got, want) + } + } + allowsPerRun(1, func() { + syscall.UTF16ToString([]uint16{'a', 'b', 'c'}) + }) + allowsPerRun(1, func() { + syscall.UTF16FromString("abc") + }) +} + +func TestNewFileInvalid(t *testing.T) { + t.Parallel() + if f := os.NewFile(uintptr(syscall.InvalidHandle), "invalid"); f != nil { + t.Errorf("NewFile(InvalidHandle) got %v want nil", f) + } +} + +func TestReadDirPipe(t *testing.T) { + dir := `\\.\pipe\` + fi, err := os.Stat(dir) + if err != nil || !fi.IsDir() { + t.Skipf("%s is not a directory", dir) + } + _, err = os.ReadDir(dir) + if err != nil { + t.Errorf("ReadDir(%q) = %v", dir, err) + } +} + +func TestReadDirNoFileID(t *testing.T) { + *os.AllowReadDirFileID = false + defer func() { *os.AllowReadDirFileID = true }() + + dir := t.TempDir() + pathA := filepath.Join(dir, "a") + pathB := filepath.Join(dir, "b") + if err := os.WriteFile(pathA, nil, 0666); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(pathB, nil, 0666); err != nil { + t.Fatal(err) + } + + files, err := os.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + if len(files) != 2 { + t.Fatalf("ReadDir(%q) = %v; want 2 files", dir, files) + } + + // Check that os.SameFile works with files returned by os.ReadDir. + f1, err := files[0].Info() + if err != nil { + t.Fatal(err) + } + f2, err := files[1].Info() + if err != nil { + t.Fatal(err) + } + if !os.SameFile(f1, f1) { + t.Errorf("SameFile(%v, %v) = false; want true", f1, f1) + } + if !os.SameFile(f2, f2) { + t.Errorf("SameFile(%v, %v) = false; want true", f2, f2) + } + if os.SameFile(f1, f2) { + t.Errorf("SameFile(%v, %v) = true; want false", f1, f2) + } + + // Check that os.SameFile works with a mix of os.ReadDir and os.Stat files. + f1s, err := os.Stat(pathA) + if err != nil { + t.Fatal(err) + } + f2s, err := os.Stat(pathB) + if err != nil { + t.Fatal(err) + } + if !os.SameFile(f1, f1s) { + t.Errorf("SameFile(%v, %v) = false; want true", f1, f1s) + } + if !os.SameFile(f2, f2s) { + t.Errorf("SameFile(%v, %v) = false; want true", f2, f2s) + } +} diff --git a/platform/dbops/binaries/go/go/src/os/path.go b/platform/dbops/binaries/go/go/src/os/path.go new file mode 100644 index 0000000000000000000000000000000000000000..6ac4cbe20f78d29eb1ce4d98e669303160921205 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/path.go @@ -0,0 +1,85 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package os + +import ( + "syscall" +) + +// MkdirAll creates a directory named path, +// along with any necessary parents, and returns nil, +// or else returns an error. +// The permission bits perm (before umask) are used for all +// directories that MkdirAll creates. +// If path is already a directory, MkdirAll does nothing +// and returns nil. +func MkdirAll(path string, perm FileMode) error { + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + + // Slow path: make sure parent exists and then call Mkdir for path. + + // Extract the parent folder from path by first removing any trailing + // path separator and then scanning backward until finding a path + // separator or reaching the beginning of the string. + i := len(path) - 1 + for i >= 0 && IsPathSeparator(path[i]) { + i-- + } + for i >= 0 && !IsPathSeparator(path[i]) { + i-- + } + if i < 0 { + i = 0 + } + + // If there is a parent directory, and it is not the volume name, + // recurse to ensure parent directory exists. + if parent := path[:i]; len(parent) > len(volumeName(path)) { + err = MkdirAll(parent, perm) + if err != nil { + return err + } + } + + // Parent now exists; invoke Mkdir and use its result. + err = Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// RemoveAll removes path and any children it contains. +// It removes everything it can but returns the first error +// it encounters. If the path does not exist, RemoveAll +// returns nil (no error). +// If there is an error, it will be of type *PathError. +func RemoveAll(path string) error { + return removeAll(path) +} + +// endsWithDot reports whether the final component of path is ".". +func endsWithDot(path string) bool { + if path == "." { + return true + } + if len(path) >= 2 && path[len(path)-1] == '.' && IsPathSeparator(path[len(path)-2]) { + return true + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/os/path_plan9.go b/platform/dbops/binaries/go/go/src/os/path_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..f1c9dbc048c1f5ff4f64f708f2473b71452fc807 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/path_plan9.go @@ -0,0 +1,19 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +const ( + PathSeparator = '/' // OS-specific path separator + PathListSeparator = '\000' // OS-specific path list separator +) + +// IsPathSeparator reports whether c is a directory separator character. +func IsPathSeparator(c uint8) bool { + return PathSeparator == c +} + +func volumeName(p string) string { + return "" +} diff --git a/platform/dbops/binaries/go/go/src/os/path_test.go b/platform/dbops/binaries/go/go/src/os/path_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2a4e9565dc26db8bef4368ed77142637db968cc9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/path_test.go @@ -0,0 +1,121 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package os_test + +import ( + "internal/testenv" + . "os" + "path/filepath" + "runtime" + "syscall" + "testing" +) + +var isReadonlyError = func(error) bool { return false } + +func TestMkdirAll(t *testing.T) { + t.Parallel() + + tmpDir := TempDir() + path := tmpDir + "/_TestMkdirAll_/dir/./dir2" + err := MkdirAll(path, 0777) + if err != nil { + t.Fatalf("MkdirAll %q: %s", path, err) + } + defer RemoveAll(tmpDir + "/_TestMkdirAll_") + + // Already exists, should succeed. + err = MkdirAll(path, 0777) + if err != nil { + t.Fatalf("MkdirAll %q (second time): %s", path, err) + } + + // Make file. + fpath := path + "/file" + f, err := Create(fpath) + if err != nil { + t.Fatalf("create %q: %s", fpath, err) + } + defer f.Close() + + // Can't make directory named after file. + err = MkdirAll(fpath, 0777) + if err == nil { + t.Fatalf("MkdirAll %q: no error", fpath) + } + perr, ok := err.(*PathError) + if !ok { + t.Fatalf("MkdirAll %q returned %T, not *PathError", fpath, err) + } + if filepath.Clean(perr.Path) != filepath.Clean(fpath) { + t.Fatalf("MkdirAll %q returned wrong error path: %q not %q", fpath, filepath.Clean(perr.Path), filepath.Clean(fpath)) + } + + // Can't make subdirectory of file. + ffpath := fpath + "/subdir" + err = MkdirAll(ffpath, 0777) + if err == nil { + t.Fatalf("MkdirAll %q: no error", ffpath) + } + perr, ok = err.(*PathError) + if !ok { + t.Fatalf("MkdirAll %q returned %T, not *PathError", ffpath, err) + } + if filepath.Clean(perr.Path) != filepath.Clean(fpath) { + t.Fatalf("MkdirAll %q returned wrong error path: %q not %q", ffpath, filepath.Clean(perr.Path), filepath.Clean(fpath)) + } + + if runtime.GOOS == "windows" { + path := tmpDir + `\_TestMkdirAll_\dir\.\dir2\` + err := MkdirAll(path, 0777) + if err != nil { + t.Fatalf("MkdirAll %q: %s", path, err) + } + } +} + +func TestMkdirAllWithSymlink(t *testing.T) { + testenv.MustHaveSymlink(t) + t.Parallel() + + tmpDir := t.TempDir() + dir := tmpDir + "/dir" + if err := Mkdir(dir, 0755); err != nil { + t.Fatalf("Mkdir %s: %s", dir, err) + } + + link := tmpDir + "/link" + if err := Symlink("dir", link); err != nil { + t.Fatalf("Symlink %s: %s", link, err) + } + + path := link + "/foo" + if err := MkdirAll(path, 0755); err != nil { + t.Errorf("MkdirAll %q: %s", path, err) + } +} + +func TestMkdirAllAtSlash(t *testing.T) { + switch runtime.GOOS { + case "android", "ios", "plan9", "windows": + t.Skipf("skipping on %s", runtime.GOOS) + } + if testenv.Builder() == "" { + t.Skipf("skipping non-hermetic test outside of Go builders") + } + + RemoveAll("/_go_os_test") + const dir = "/_go_os_test/dir" + err := MkdirAll(dir, 0777) + if err != nil { + pathErr, ok := err.(*PathError) + // common for users not to be able to write to / + if ok && (pathErr.Err == syscall.EACCES || isReadonlyError(pathErr.Err)) { + t.Skipf("could not create %v: %v", dir, err) + } + t.Fatalf(`MkdirAll "/_go_os_test/dir": %v, %s`, err, pathErr.Err) + } + RemoveAll("/_go_os_test") +} diff --git a/platform/dbops/binaries/go/go/src/os/path_unix.go b/platform/dbops/binaries/go/go/src/os/path_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..1c80fa91f8be4c973f797806308b29fb3cb157f9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/path_unix.go @@ -0,0 +1,75 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build unix || (js && wasm) || wasip1 + +package os + +const ( + PathSeparator = '/' // OS-specific path separator + PathListSeparator = ':' // OS-specific path list separator +) + +// IsPathSeparator reports whether c is a directory separator character. +func IsPathSeparator(c uint8) bool { + return PathSeparator == c +} + +// basename removes trailing slashes and the leading directory name from path name. +func basename(name string) string { + i := len(name) - 1 + // Remove trailing slashes + for ; i > 0 && name[i] == '/'; i-- { + name = name[:i] + } + // Remove leading directory name + for i--; i >= 0; i-- { + if name[i] == '/' { + name = name[i+1:] + break + } + } + + return name +} + +// splitPath returns the base name and parent directory. +func splitPath(path string) (string, string) { + // if no better parent is found, the path is relative from "here" + dirname := "." + + // Remove all but one leading slash. + for len(path) > 1 && path[0] == '/' && path[1] == '/' { + path = path[1:] + } + + i := len(path) - 1 + + // Remove trailing slashes. + for ; i > 0 && path[i] == '/'; i-- { + path = path[:i] + } + + // if no slashes in path, base is path + basename := path + + // Remove leading directory path + for i--; i >= 0; i-- { + if path[i] == '/' { + if i == 0 { + dirname = path[:1] + } else { + dirname = path[:i] + } + basename = path[i+1:] + break + } + } + + return dirname, basename +} + +func volumeName(p string) string { + return "" +} diff --git a/platform/dbops/binaries/go/go/src/os/path_windows.go b/platform/dbops/binaries/go/go/src/os/path_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..052202514825ec315be0f5e41b7ef5f92244b534 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/path_windows.go @@ -0,0 +1,216 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +const ( + PathSeparator = '\\' // OS-specific path separator + PathListSeparator = ';' // OS-specific path list separator +) + +// IsPathSeparator reports whether c is a directory separator character. +func IsPathSeparator(c uint8) bool { + // NOTE: Windows accepts / as path separator. + return c == '\\' || c == '/' +} + +// basename removes trailing slashes and the leading +// directory name and drive letter from path name. +func basename(name string) string { + // Remove drive letter + if len(name) == 2 && name[1] == ':' { + name = "." + } else if len(name) > 2 && name[1] == ':' { + name = name[2:] + } + i := len(name) - 1 + // Remove trailing slashes + for ; i > 0 && (name[i] == '/' || name[i] == '\\'); i-- { + name = name[:i] + } + // Remove leading directory name + for i--; i >= 0; i-- { + if name[i] == '/' || name[i] == '\\' { + name = name[i+1:] + break + } + } + return name +} + +func isAbs(path string) (b bool) { + v := volumeName(path) + if v == "" { + return false + } + path = path[len(v):] + if path == "" { + return false + } + return IsPathSeparator(path[0]) +} + +func volumeName(path string) (v string) { + if len(path) < 2 { + return "" + } + // with drive letter + c := path[0] + if path[1] == ':' && + ('0' <= c && c <= '9' || 'a' <= c && c <= 'z' || + 'A' <= c && c <= 'Z') { + return path[:2] + } + // is it UNC + if l := len(path); l >= 5 && IsPathSeparator(path[0]) && IsPathSeparator(path[1]) && + !IsPathSeparator(path[2]) && path[2] != '.' { + // first, leading `\\` and next shouldn't be `\`. its server name. 
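+ // Worked example (hypothetical path): for `\\srv\share\file.txt` the scan
+ // below returns `\\srv\share`; a path such as `\\srv\` with no share name
+ // falls through and yields "".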
+ for n := 3; n < l-1; n++ { + // second, next '\' shouldn't be repeated. + if IsPathSeparator(path[n]) { + n++ + // third, following something characters. its share name. + if !IsPathSeparator(path[n]) { + if path[n] == '.' { + break + } + for ; n < l; n++ { + if IsPathSeparator(path[n]) { + break + } + } + return path[:n] + } + break + } + } + } + return "" +} + +func fromSlash(path string) string { + // Replace each '/' with '\\' if present + var pathbuf []byte + var lastSlash int + for i, b := range path { + if b == '/' { + if pathbuf == nil { + pathbuf = make([]byte, len(path)) + } + copy(pathbuf[lastSlash:], path[lastSlash:i]) + pathbuf[i] = '\\' + lastSlash = i + 1 + } + } + if pathbuf == nil { + return path + } + + copy(pathbuf[lastSlash:], path[lastSlash:]) + return string(pathbuf) +} + +func dirname(path string) string { + vol := volumeName(path) + i := len(path) - 1 + for i >= len(vol) && !IsPathSeparator(path[i]) { + i-- + } + dir := path[len(vol) : i+1] + last := len(dir) - 1 + if last > 0 && IsPathSeparator(dir[last]) { + dir = dir[:last] + } + if dir == "" { + dir = "." + } + return vol + dir +} + +// This is set via go:linkname on runtime.canUseLongPaths, and is true when the OS +// supports opting into proper long path handling without the need for fixups. +var canUseLongPaths bool + +// fixLongPath returns the extended-length (\\?\-prefixed) form of +// path when needed, in order to avoid the default 260 character file +// path limit imposed by Windows. If path is not easily converted to +// the extended-length form (for example, if path is a relative path +// or contains .. elements), or is short enough, fixLongPath returns +// path unmodified. +// +// See https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation +func fixLongPath(path string) string { + if canUseLongPaths { + return path + } + // Do nothing (and don't allocate) if the path is "short". + // Empirically (at least on the Windows Server 2013 builder), + // the kernel is arbitrarily okay with < 248 bytes. That + // matches what the docs above say: + // "When using an API to create a directory, the specified + // path cannot be so long that you cannot append an 8.3 file + // name (that is, the directory name cannot exceed MAX_PATH + // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248. + // + // The MSDN docs appear to say that a normal path that is 248 bytes long + // will work; empirically the path must be less then 248 bytes long. + if len(path) < 248 { + // Don't fix. (This is how Go 1.7 and earlier worked, + // not automatically generating the \\?\ form) + return path + } + + // The extended form begins with \\?\, as in + // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt. + // The extended form disables evaluation of . and .. path + // elements and disables the interpretation of / as equivalent + // to \. The conversion here rewrites / to \ and elides + // . elements as well as trailing or duplicate separators. For + // simplicity it avoids the conversion entirely for relative + // paths or paths containing .. elements. For now, + // \\server\share paths are not converted to + // \\?\UNC\server\share paths because the rules for doing so + // are less well-specified. + if len(path) >= 2 && path[:2] == `\\` { + // Don't canonicalize UNC paths. 
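+ // (A hypothetical \\server\share\...long... path would need the
+ // \\?\UNC\server\share spelling, and that rewrite is deliberately
+ // not attempted here.)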
+ return path + } + if !isAbs(path) { + // Relative path + return path + } + + const prefix = `\\?` + + pathbuf := make([]byte, len(prefix)+len(path)+len(`\`)) + copy(pathbuf, prefix) + n := len(path) + r, w := 0, len(prefix) + for r < n { + switch { + case IsPathSeparator(path[r]): + // empty block + r++ + case path[r] == '.' && (r+1 == n || IsPathSeparator(path[r+1])): + // /./ + r++ + case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || IsPathSeparator(path[r+2])): + // /../ is currently unhandled + return path + default: + pathbuf[w] = '\\' + w++ + for ; r < n && !IsPathSeparator(path[r]); r++ { + pathbuf[w] = path[r] + w++ + } + } + } + // A drive's root directory needs a trailing \ + if w == len(`\\?\c:`) { + pathbuf[w] = '\\' + w++ + } + return string(pathbuf[:w]) +} diff --git a/platform/dbops/binaries/go/go/src/os/path_windows_test.go b/platform/dbops/binaries/go/go/src/os/path_windows_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4e5e501d1f124cfc06bd3bc39c546576e3a68ec6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/path_windows_test.go @@ -0,0 +1,157 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "fmt" + "internal/syscall/windows" + "internal/testenv" + "os" + "path/filepath" + "strings" + "syscall" + "testing" +) + +func TestFixLongPath(t *testing.T) { + if os.CanUseLongPaths { + return + } + t.Parallel() + + // 248 is long enough to trigger the longer-than-248 checks in + // fixLongPath, but short enough not to make a path component + // longer than 255, which is illegal on Windows. (which + // doesn't really matter anyway, since this is purely a string + // function we're testing, and it's not actually being used to + // do a system call) + veryLong := "l" + strings.Repeat("o", 248) + "ng" + for _, test := range []struct{ in, want string }{ + // Short; unchanged: + {`C:\short.txt`, `C:\short.txt`}, + {`C:\`, `C:\`}, + {`C:`, `C:`}, + // The "long" substring is replaced by a looooooong + // string which triggers the rewriting. Except in the + // cases below where it doesn't. 
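+ // After the substitution each "long" row tests a ~250-byte path, e.g.
+ // {`C:\long\foo.txt`, `\\?\C:\long\foo.txt`} becomes an input past the
+ // 248-byte threshold, while the short rows above pass through unchanged.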
+ {`C:\long\foo.txt`, `\\?\C:\long\foo.txt`}, + {`C:/long/foo.txt`, `\\?\C:\long\foo.txt`}, + {`C:\long\foo\\bar\.\baz\\`, `\\?\C:\long\foo\bar\baz`}, + {`\\unc\path`, `\\unc\path`}, + {`long.txt`, `long.txt`}, + {`C:long.txt`, `C:long.txt`}, + {`c:\long\..\bar\baz`, `c:\long\..\bar\baz`}, + {`\\?\c:\long\foo.txt`, `\\?\c:\long\foo.txt`}, + {`\\?\c:\long/foo.txt`, `\\?\c:\long/foo.txt`}, + } { + in := strings.ReplaceAll(test.in, "long", veryLong) + want := strings.ReplaceAll(test.want, "long", veryLong) + if got := os.FixLongPath(in); got != want { + got = strings.ReplaceAll(got, veryLong, "long") + t.Errorf("fixLongPath(%q) = %q; want %q", test.in, got, test.want) + } + } +} + +func TestMkdirAllLongPath(t *testing.T) { + t.Parallel() + + tmpDir := t.TempDir() + path := tmpDir + for i := 0; i < 100; i++ { + path += `\another-path-component` + } + if err := os.MkdirAll(path, 0777); err != nil { + t.Fatalf("MkdirAll(%q) failed; %v", path, err) + } + if err := os.RemoveAll(tmpDir); err != nil { + t.Fatalf("RemoveAll(%q) failed; %v", tmpDir, err) + } +} + +func TestMkdirAllExtendedLength(t *testing.T) { + t.Parallel() + tmpDir := t.TempDir() + + const prefix = `\\?\` + if len(tmpDir) < 4 || tmpDir[:4] != prefix { + fullPath, err := syscall.FullPath(tmpDir) + if err != nil { + t.Fatalf("FullPath(%q) fails: %v", tmpDir, err) + } + tmpDir = prefix + fullPath + } + path := tmpDir + `\dir\` + if err := os.MkdirAll(path, 0777); err != nil { + t.Fatalf("MkdirAll(%q) failed: %v", path, err) + } + + path = path + `.\dir2` + if err := os.MkdirAll(path, 0777); err == nil { + t.Fatalf("MkdirAll(%q) should have failed, but did not", path) + } +} + +func TestOpenRootSlash(t *testing.T) { + t.Parallel() + + tests := []string{ + `/`, + `\`, + } + + for _, test := range tests { + dir, err := os.Open(test) + if err != nil { + t.Fatalf("Open(%q) failed: %v", test, err) + } + dir.Close() + } +} + +func testMkdirAllAtRoot(t *testing.T, root string) { + // Create a unique-enough directory name in root. + base := fmt.Sprintf("%s-%d", t.Name(), os.Getpid()) + path := filepath.Join(root, base) + if err := os.MkdirAll(path, 0777); err != nil { + t.Fatalf("MkdirAll(%q) failed: %v", path, err) + } + // Clean up + if err := os.RemoveAll(path); err != nil { + t.Fatal(err) + } +} + +func TestMkdirAllExtendedLengthAtRoot(t *testing.T) { + if testenv.Builder() == "" { + t.Skipf("skipping non-hermetic test outside of Go builders") + } + + const prefix = `\\?\` + vol := filepath.VolumeName(t.TempDir()) + `\` + if len(vol) < 4 || vol[:4] != prefix { + vol = prefix + vol + } + testMkdirAllAtRoot(t, vol) +} + +func TestMkdirAllVolumeNameAtRoot(t *testing.T) { + if testenv.Builder() == "" { + t.Skipf("skipping non-hermetic test outside of Go builders") + } + + vol, err := syscall.UTF16PtrFromString(filepath.VolumeName(t.TempDir()) + `\`) + if err != nil { + t.Fatal(err) + } + const maxVolNameLen = 50 + var buf [maxVolNameLen]uint16 + err = windows.GetVolumeNameForVolumeMountPoint(vol, &buf[0], maxVolNameLen) + if err != nil { + t.Fatal(err) + } + volName := syscall.UTF16ToString(buf[:]) + testMkdirAllAtRoot(t, volName) +} diff --git a/platform/dbops/binaries/go/go/src/os/pipe2_unix.go b/platform/dbops/binaries/go/go/src/os/pipe2_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..2d293fdb4d9665fca9034e989e9bf127bc6f6d0a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/pipe2_unix.go @@ -0,0 +1,22 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris + +package os + +import "syscall" + +// Pipe returns a connected pair of Files; reads from r return bytes written to w. +// It returns the files and an error, if any. +func Pipe() (r *File, w *File, err error) { + var p [2]int + + e := syscall.Pipe2(p[0:], syscall.O_CLOEXEC) + if e != nil { + return nil, nil, NewSyscallError("pipe2", e) + } + + return newFile(p[0], "|0", kindPipe), newFile(p[1], "|1", kindPipe), nil +} diff --git a/platform/dbops/binaries/go/go/src/os/pipe_test.go b/platform/dbops/binaries/go/go/src/os/pipe_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a9e0c8bc8a9d2c71693681d1858f5facb56e5d6b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/pipe_test.go @@ -0,0 +1,478 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test broken pipes on Unix systems. +// +//go:build !plan9 && !js && !wasip1 + +package os_test + +import ( + "bufio" + "bytes" + "fmt" + "internal/testenv" + "io" + "io/fs" + "os" + "os/exec" + "os/signal" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "testing" + "time" +) + +func TestEPIPE(t *testing.T) { + // This test cannot be run in parallel because of a race similar + // to the one reported in https://go.dev/issue/22315. + // + // Even though the pipe is opened with O_CLOEXEC, if another test forks in + // between the call to os.Pipe and the call to r.Close, that child process can + // retain an open copy of r's file descriptor until it execs. If one of our + // Write calls occurs during that interval it can spuriously succeed, + // buffering the write to the child's copy of the pipe (even though the child + // will not actually read the buffered bytes). + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + if err := r.Close(); err != nil { + t.Fatal(err) + } + + expect := syscall.EPIPE + if runtime.GOOS == "windows" { + // 232 is Windows error code ERROR_NO_DATA, "The pipe is being closed". + expect = syscall.Errno(232) + } + // Every time we write to the pipe we should get an EPIPE. + for i := 0; i < 20; i++ { + _, err = w.Write([]byte("hi")) + if err == nil { + t.Fatal("unexpected success of Write to broken pipe") + } + if pe, ok := err.(*fs.PathError); ok { + err = pe.Err + } + if se, ok := err.(*os.SyscallError); ok { + err = se.Err + } + if err != expect { + t.Errorf("iteration %d: got %v, expected %v", i, err, expect) + } + } +} + +func TestStdPipe(t *testing.T) { + switch runtime.GOOS { + case "windows": + t.Skip("Windows doesn't support SIGPIPE") + } + + if os.Getenv("GO_TEST_STD_PIPE_HELPER") != "" { + if os.Getenv("GO_TEST_STD_PIPE_HELPER_SIGNAL") != "" { + signal.Notify(make(chan os.Signal, 1), syscall.SIGPIPE) + } + switch os.Getenv("GO_TEST_STD_PIPE_HELPER") { + case "1": + os.Stdout.Write([]byte("stdout")) + case "2": + os.Stderr.Write([]byte("stderr")) + case "3": + if _, err := os.NewFile(3, "3").Write([]byte("3")); err == nil { + os.Exit(3) + } + default: + panic("unrecognized value for GO_TEST_STD_PIPE_HELPER") + } + // For stdout/stderr, we should have crashed with a broken pipe error. + // The caller will be looking for that exit status, + // so just exit normally here to cause a failure in the caller. + // For descriptor 3, a normal exit is expected. 
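// (The helper-process pattern used by this test: the parent re-runs its own
// test binary with GO_TEST_STD_PIPE_HELPER set, and the child takes the
// branch above instead of running the test suite. A minimal sketch of the
// technique, with illustrative names:
//
//	if os.Getenv("BE_HELPER") == "1" {
//		helperMain() // hypothetical child-side work
//		os.Exit(0)
//	}
//	cmd := exec.Command(os.Args[0], "-test.run=^TestFoo$")
//	cmd.Env = append(os.Environ(), "BE_HELPER=1")
//	err := cmd.Run() // the parent then asserts on the child's exit status
//
// Keeping the child in a separate process is essential here, since SIGPIPE
// kills the process that writes to the closed descriptor.)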
+ os.Exit(0) + } + + testenv.MustHaveExec(t) + // This test cannot be run in parallel due to the same race as for TestEPIPE. + // (We expect a write to a closed pipe can fail, but a concurrent fork of a + // child process can cause the pipe to unexpectedly remain open.) + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + if err := r.Close(); err != nil { + t.Fatal(err) + } + // Invoke the test program to run the test and write to a closed pipe. + // If sig is false: + // writing to stdout or stderr should cause an immediate SIGPIPE; + // writing to descriptor 3 should fail with EPIPE and then exit 0. + // If sig is true: + // all writes should fail with EPIPE and then exit 0. + for _, sig := range []bool{false, true} { + for dest := 1; dest < 4; dest++ { + cmd := testenv.Command(t, os.Args[0], "-test.run", "TestStdPipe") + cmd.Stdout = w + cmd.Stderr = w + cmd.ExtraFiles = []*os.File{w} + cmd.Env = append(os.Environ(), fmt.Sprintf("GO_TEST_STD_PIPE_HELPER=%d", dest)) + if sig { + cmd.Env = append(cmd.Env, "GO_TEST_STD_PIPE_HELPER_SIGNAL=1") + } + if err := cmd.Run(); err == nil { + if !sig && dest < 3 { + t.Errorf("unexpected success of write to closed pipe %d sig %t in child", dest, sig) + } + } else if ee, ok := err.(*exec.ExitError); !ok { + t.Errorf("unexpected exec error type %T: %v", err, err) + } else if ws, ok := ee.Sys().(syscall.WaitStatus); !ok { + t.Errorf("unexpected wait status type %T: %v", ee.Sys(), ee.Sys()) + } else if ws.Signaled() && ws.Signal() == syscall.SIGPIPE { + if sig || dest > 2 { + t.Errorf("unexpected SIGPIPE signal for descriptor %d sig %t", dest, sig) + } + } else { + t.Errorf("unexpected exit status %v for descriptor %d sig %t", err, dest, sig) + } + } + } + + // Test redirecting stdout but not stderr. Issue 40076. + cmd := testenv.Command(t, os.Args[0], "-test.run", "TestStdPipe") + cmd.Stdout = w + var stderr bytes.Buffer + cmd.Stderr = &stderr + cmd.Env = append(cmd.Environ(), "GO_TEST_STD_PIPE_HELPER=1") + if err := cmd.Run(); err == nil { + t.Errorf("unexpected success of write to closed stdout") + } else if ee, ok := err.(*exec.ExitError); !ok { + t.Errorf("unexpected exec error type %T: %v", err, err) + } else if ws, ok := ee.Sys().(syscall.WaitStatus); !ok { + t.Errorf("unexpected wait status type %T: %v", ee.Sys(), ee.Sys()) + } else if !ws.Signaled() || ws.Signal() != syscall.SIGPIPE { + t.Errorf("unexpected exit status %v for write to closed stdout", err) + } + if output := stderr.Bytes(); len(output) > 0 { + t.Errorf("unexpected output on stderr: %s", output) + } +} + +func testClosedPipeRace(t *testing.T, read bool) { + // This test cannot be run in parallel due to the same race as for TestEPIPE. + // (We expect a write to a closed pipe can fail, but a concurrent fork of a + // child process can cause the pipe to unexpectedly remain open.) + + limit := 1 + if !read { + // Get the amount we have to write to overload a pipe + // with no reader. + limit = 131073 + if b, err := os.ReadFile("/proc/sys/fs/pipe-max-size"); err == nil { + if i, err := strconv.Atoi(strings.TrimSpace(string(b))); err == nil { + limit = i + 1 + } + } + t.Logf("using pipe write limit of %d", limit) + } + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + defer w.Close() + + // Close the read end of the pipe in a goroutine while we are + // writing to the write end, or vice-versa. + go func() { + // Give the main goroutine a chance to enter the Read or + // Write call. 
This is sloppy but the test will pass even + // if we close before the read/write. + time.Sleep(20 * time.Millisecond) + + var err error + if read { + err = r.Close() + } else { + err = w.Close() + } + if err != nil { + t.Error(err) + } + }() + + b := make([]byte, limit) + if read { + _, err = r.Read(b[:]) + } else { + _, err = w.Write(b[:]) + } + if err == nil { + t.Error("I/O on closed pipe unexpectedly succeeded") + } else if pe, ok := err.(*fs.PathError); !ok { + t.Errorf("I/O on closed pipe returned unexpected error type %T; expected fs.PathError", pe) + } else if pe.Err != fs.ErrClosed { + t.Errorf("got error %q but expected %q", pe.Err, fs.ErrClosed) + } else { + t.Logf("I/O returned expected error %q", err) + } +} + +func TestClosedPipeRaceRead(t *testing.T) { + testClosedPipeRace(t, true) +} + +func TestClosedPipeRaceWrite(t *testing.T) { + testClosedPipeRace(t, false) +} + +// Issue 20915: Reading on nonblocking fd should not return "waiting +// for unsupported file type." Currently it returns EAGAIN; it is +// possible that in the future it will simply wait for data. +func TestReadNonblockingFd(t *testing.T) { + switch runtime.GOOS { + case "windows": + t.Skip("Windows doesn't support SetNonblock") + } + if os.Getenv("GO_WANT_READ_NONBLOCKING_FD") == "1" { + fd := syscallDescriptor(os.Stdin.Fd()) + syscall.SetNonblock(fd, true) + defer syscall.SetNonblock(fd, false) + _, err := os.Stdin.Read(make([]byte, 1)) + if err != nil { + if perr, ok := err.(*fs.PathError); !ok || perr.Err != syscall.EAGAIN { + t.Fatalf("read on nonblocking stdin got %q, should have gotten EAGAIN", err) + } + } + os.Exit(0) + } + + testenv.MustHaveExec(t) + t.Parallel() + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + defer w.Close() + cmd := testenv.Command(t, os.Args[0], "-test.run=^"+t.Name()+"$") + cmd.Env = append(cmd.Environ(), "GO_WANT_READ_NONBLOCKING_FD=1") + cmd.Stdin = r + output, err := cmd.CombinedOutput() + t.Logf("%s", output) + if err != nil { + t.Errorf("child process failed: %v", err) + } +} + +func TestCloseWithBlockingReadByNewFile(t *testing.T) { + t.Parallel() + + var p [2]syscallDescriptor + err := syscall.Pipe(p[:]) + if err != nil { + t.Fatal(err) + } + // os.NewFile returns a blocking mode file. + testCloseWithBlockingRead(t, os.NewFile(uintptr(p[0]), "reader"), os.NewFile(uintptr(p[1]), "writer")) +} + +func TestCloseWithBlockingReadByFd(t *testing.T) { + t.Parallel() + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + // Calling Fd will put the file into blocking mode. + _ = r.Fd() + testCloseWithBlockingRead(t, r, w) +} + +// Test that we don't let a blocking read prevent a close. +func testCloseWithBlockingRead(t *testing.T, r, w *os.File) { + var ( + enteringRead = make(chan struct{}) + done = make(chan struct{}) + ) + go func() { + var b [1]byte + close(enteringRead) + _, err := r.Read(b[:]) + if err == nil { + t.Error("I/O on closed pipe unexpectedly succeeded") + } + + if pe, ok := err.(*fs.PathError); ok { + err = pe.Err + } + if err != io.EOF && err != fs.ErrClosed { + t.Errorf("got %v, expected EOF or closed", err) + } + close(done) + }() + + // Give the goroutine a chance to enter the Read + // or Write call. This is sloppy but the test will + // pass even if we close before the read/write. + <-enteringRead + time.Sleep(20 * time.Millisecond) + + if err := r.Close(); err != nil { + t.Error(err) + } + // r.Close has completed, but since we assume r is in blocking mode that + // probably didn't unblock the call to r.Read. 
Close w to unblock it.
+	w.Close()
+	<-done
+}
+
+func TestPipeEOF(t *testing.T) {
+	t.Parallel()
+
+	r, w, err := os.Pipe()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	testPipeEOF(t, r, w)
+}
+
+// testPipeEOF tests that when the write side of a pipe or FIFO is closed,
+// a blocked Read call on the reader side returns io.EOF.
+//
+// This scenario previously failed to unblock the Read call on darwin.
+// (See https://go.dev/issue/24164.)
+func testPipeEOF(t *testing.T, r io.ReadCloser, w io.WriteCloser) {
+	// parkDelay is an arbitrary delay we wait for a pipe-reader goroutine to park
+	// before issuing the corresponding write. The test should pass no matter what
+	// delay we use, but with a longer delay it has a higher chance of detecting
+	// poller bugs.
+	parkDelay := 10 * time.Millisecond
+	if testing.Short() {
+		parkDelay = 100 * time.Microsecond
+	}
+	writerDone := make(chan struct{})
+	defer func() {
+		if err := r.Close(); err != nil {
+			t.Errorf("error closing reader: %v", err)
+		}
+		<-writerDone
+	}()
+
+	write := make(chan int, 1)
+	go func() {
+		defer close(writerDone)
+
+		for i := range write {
+			time.Sleep(parkDelay)
+			_, err := fmt.Fprintf(w, "line %d\n", i)
+			if err != nil {
+				t.Errorf("error writing to fifo: %v", err)
+				return
+			}
+		}
+
+		time.Sleep(parkDelay)
+		if err := w.Close(); err != nil {
+			t.Errorf("error closing writer: %v", err)
+		}
+	}()
+
+	rbuf := bufio.NewReader(r)
+	for i := 0; i < 3; i++ {
+		write <- i
+		b, err := rbuf.ReadBytes('\n')
+		if err != nil {
+			t.Fatal(err)
+		}
+		t.Logf("%s\n", bytes.TrimSpace(b))
+	}
+
+	close(write)
+	b, err := rbuf.ReadBytes('\n')
+	if err != io.EOF || len(b) != 0 {
+		t.Errorf(`ReadBytes: %q, %v; want "", io.EOF`, b, err)
+	}
+}
+
+// Issue 24481.
+func TestFdRace(t *testing.T) {
+	// This test starts 100 simultaneous goroutines, which could bury a more
+	// interesting stack if this or some other test happens to panic. It is also
+	// nearly instantaneous, so any latency benefit from running it in parallel
+	// would be minimal.
+
+	r, w, err := os.Pipe()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer r.Close()
+	defer w.Close()
+
+	var wg sync.WaitGroup
+	call := func() {
+		defer wg.Done()
+		w.Fd()
+	}
+
+	const tries = 100
+	for i := 0; i < tries; i++ {
+		wg.Add(1)
+		go call()
+	}
+	wg.Wait()
+}
+
+func TestFdReadRace(t *testing.T) {
+	t.Parallel()
+
+	r, w, err := os.Pipe()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer r.Close()
+	defer w.Close()
+
+	const count = 10
+
+	c := make(chan bool, 1)
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		var buf [count]byte
+		r.SetReadDeadline(time.Now().Add(time.Minute))
+		c <- true
+		if _, err := r.Read(buf[:]); os.IsTimeout(err) {
+			t.Error("read timed out")
+		}
+	}()
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		<-c
+		// Give the other goroutine a chance to enter the Read.
+		// It doesn't matter if this occasionally fails, the test
+		// will still pass, it just won't test anything.
+		time.Sleep(10 * time.Millisecond)
+		r.Fd()
+
+		// The bug was that Fd would hang until Read timed out.
+		// If the bug is fixed, then writing to w and closing r here
+		// will cause the Read to exit before the timeout expires.
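// (Restating the pipe EOF contract that testPipeEOF above relies on, as a
// minimal sketch with error handling elided:
//
//	r, w, _ := os.Pipe()
//	w.Write([]byte("data")) // small write; fits in the pipe buffer
//	w.Close()               // close the last write side
//	buf := make([]byte, 16)
//	r.Read(buf)             // drains "data" first,
//	_, err := r.Read(buf)   // then err == io.EOF
//
// A Read blocked on a pipe whose write side is still open simply waits;
// only closing the last write-side descriptor produces EOF.)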
+ w.Write(make([]byte, count)) + r.Close() + }() + + wg.Wait() +} diff --git a/platform/dbops/binaries/go/go/src/os/pipe_unix.go b/platform/dbops/binaries/go/go/src/os/pipe_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..2eb11a04cb2d929ac9f11769f0a18a59ab8b1cba --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/pipe_unix.go @@ -0,0 +1,28 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || darwin + +package os + +import "syscall" + +// Pipe returns a connected pair of Files; reads from r return bytes written to w. +// It returns the files and an error, if any. +func Pipe() (r *File, w *File, err error) { + var p [2]int + + // See ../syscall/exec.go for description of lock. + syscall.ForkLock.RLock() + e := syscall.Pipe(p[0:]) + if e != nil { + syscall.ForkLock.RUnlock() + return nil, nil, NewSyscallError("pipe", e) + } + syscall.CloseOnExec(p[0]) + syscall.CloseOnExec(p[1]) + syscall.ForkLock.RUnlock() + + return newFile(p[0], "|0", kindPipe), newFile(p[1], "|1", kindPipe), nil +} diff --git a/platform/dbops/binaries/go/go/src/os/pipe_wasm.go b/platform/dbops/binaries/go/go/src/os/pipe_wasm.go new file mode 100644 index 0000000000000000000000000000000000000000..87a29b1f71d97244958f2b9bff9ef00735faecab --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/pipe_wasm.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasm + +package os + +import "syscall" + +// Pipe returns a connected pair of Files; reads from r return bytes written to w. +// It returns the files and an error, if any. +func Pipe() (r *File, w *File, err error) { + // Neither GOOS=js nor GOOS=wasip1 have pipes. + return nil, nil, NewSyscallError("pipe", syscall.ENOSYS) +} diff --git a/platform/dbops/binaries/go/go/src/os/proc.go b/platform/dbops/binaries/go/go/src/os/proc.go new file mode 100644 index 0000000000000000000000000000000000000000..3aae5680eea58b22d0e6b49bd9afb3416b1e9052 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/proc.go @@ -0,0 +1,80 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Process etc. + +package os + +import ( + "internal/testlog" + "runtime" + "syscall" +) + +// Args hold the command-line arguments, starting with the program name. +var Args []string + +func init() { + if runtime.GOOS == "windows" { + // Initialized in exec_windows.go. + return + } + Args = runtime_args() +} + +func runtime_args() []string // in package runtime + +// Getuid returns the numeric user id of the caller. +// +// On Windows, it returns -1. +func Getuid() int { return syscall.Getuid() } + +// Geteuid returns the numeric effective user id of the caller. +// +// On Windows, it returns -1. +func Geteuid() int { return syscall.Geteuid() } + +// Getgid returns the numeric group id of the caller. +// +// On Windows, it returns -1. +func Getgid() int { return syscall.Getgid() } + +// Getegid returns the numeric effective group id of the caller. +// +// On Windows, it returns -1. +func Getegid() int { return syscall.Getegid() } + +// Getgroups returns a list of the numeric ids of groups that the caller belongs to. +// +// On Windows, it returns syscall.EWINDOWS. 
See the os/user package +// for a possible alternative. +func Getgroups() ([]int, error) { + gids, e := syscall.Getgroups() + return gids, NewSyscallError("getgroups", e) +} + +// Exit causes the current program to exit with the given status code. +// Conventionally, code zero indicates success, non-zero an error. +// The program terminates immediately; deferred functions are not run. +// +// For portability, the status code should be in the range [0, 125]. +func Exit(code int) { + if code == 0 && testlog.PanicOnExit0() { + // We were told to panic on calls to os.Exit(0). + // This is used to fail tests that make an early + // unexpected call to os.Exit(0). + panic("unexpected call to os.Exit(0) during test") + } + + // Inform the runtime that os.Exit is being called. If -race is + // enabled, this will give race detector a chance to fail the + // program (racy programs do not have the right to finish + // successfully). If coverage is enabled, then this call will + // enable us to write out a coverage data file. + runtime_beforeExit(code) + + syscall.Exit(code) +} + +func runtime_beforeExit(exitCode int) // implemented in runtime diff --git a/platform/dbops/binaries/go/go/src/os/rawconn.go b/platform/dbops/binaries/go/go/src/os/rawconn.go new file mode 100644 index 0000000000000000000000000000000000000000..14a495d9c0a59d66e2eff0f00633d0f007423404 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/rawconn.go @@ -0,0 +1,47 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !plan9 + +package os + +import ( + "runtime" +) + +// rawConn implements syscall.RawConn. +type rawConn struct { + file *File +} + +func (c *rawConn) Control(f func(uintptr)) error { + if err := c.file.checkValid("SyscallConn.Control"); err != nil { + return err + } + err := c.file.pfd.RawControl(f) + runtime.KeepAlive(c.file) + return err +} + +func (c *rawConn) Read(f func(uintptr) bool) error { + if err := c.file.checkValid("SyscallConn.Read"); err != nil { + return err + } + err := c.file.pfd.RawRead(f) + runtime.KeepAlive(c.file) + return err +} + +func (c *rawConn) Write(f func(uintptr) bool) error { + if err := c.file.checkValid("SyscallConn.Write"); err != nil { + return err + } + err := c.file.pfd.RawWrite(f) + runtime.KeepAlive(c.file) + return err +} + +func newRawConn(file *File) (*rawConn, error) { + return &rawConn{file: file}, nil +} diff --git a/platform/dbops/binaries/go/go/src/os/rawconn_test.go b/platform/dbops/binaries/go/go/src/os/rawconn_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8aae7ae68422017cfc6da6826e1555820b124a7e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/rawconn_test.go @@ -0,0 +1,66 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test use of raw connections. 
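// (The syscall.RawConn contract these tests exercise: Read and Write invoke
// their callback with the raw descriptor and, if the callback returns false,
// invoke it again once the descriptor is ready; Control runs its callback
// exactly once. A typical callback therefore loops on EAGAIN, as a sketch:
//
//	wconn.Write(func(fd uintptr) bool {
//		_, err := syscall.Write(int(fd), data) // data: a []byte in scope
//		return err != syscall.EAGAIN           // false => poller waits, retries
//	})
//
// The int(fd) conversion matches Unix; the tests abstract this behind the
// platform-specific syscallDescriptor type.)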
+//
+//go:build !plan9 && !js && !wasip1
+
+package os_test
+
+import (
+	"os"
+	"syscall"
+	"testing"
+)
+
+func TestRawConnReadWrite(t *testing.T) {
+	t.Parallel()
+
+	r, w, err := os.Pipe()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer r.Close()
+	defer w.Close()
+
+	rconn, err := r.SyscallConn()
+	if err != nil {
+		t.Fatal(err)
+	}
+	wconn, err := w.SyscallConn()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var operr error
+	err = wconn.Write(func(s uintptr) bool {
+		_, operr = syscall.Write(syscallDescriptor(s), []byte{'b'})
+		return operr != syscall.EAGAIN
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if operr != nil {
+		t.Fatal(operr)
+	}
+
+	var n int
+	buf := make([]byte, 1)
+	err = rconn.Read(func(s uintptr) bool {
+		n, operr = syscall.Read(syscallDescriptor(s), buf)
+		return operr != syscall.EAGAIN
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if operr != nil {
+		t.Fatal(operr)
+	}
+	if n != 1 {
+		t.Errorf("read %d bytes, expected 1", n)
+	}
+	if buf[0] != 'b' {
+		t.Errorf("read %q, expected %q", buf, "b")
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/os/read_test.go b/platform/dbops/binaries/go/go/src/os/read_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..18f7d54734825dcf91c57d910c0ce8281f062811
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/os/read_test.go
@@ -0,0 +1,138 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os_test
+
+import (
+	"bytes"
+	. "os"
+	"path/filepath"
+	"runtime"
+	"testing"
+)
+
+func checkNamedSize(t *testing.T, path string, size int64) {
+	dir, err := Stat(path)
+	if err != nil {
+		t.Fatalf("Stat %q (looking for size %d): %s", path, size, err)
+	}
+	if dir.Size() != size {
+		t.Errorf("Stat %q: size %d want %d", path, dir.Size(), size)
+	}
+}
+
+func TestReadFile(t *testing.T) {
+	t.Parallel()
+
+	filename := "rumpelstilzchen"
+	contents, err := ReadFile(filename)
+	if err == nil {
+		t.Fatalf("ReadFile %s: error expected, none found", filename)
+	}
+
+	filename = "read_test.go"
+	contents, err = ReadFile(filename)
+	if err != nil {
+		t.Fatalf("ReadFile %s: %v", filename, err)
+	}
+
+	checkNamedSize(t, filename, int64(len(contents)))
+}
+
+func TestWriteFile(t *testing.T) {
+	t.Parallel()
+
+	f, err := CreateTemp("", "ioutil-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer f.Close()
+	defer Remove(f.Name())
+
+	msg := "Programming today is a race between software engineers striving to " +
+		"build bigger and better idiot-proof programs, and the Universe trying " +
+		"to produce bigger and better idiots. So far, the Universe is winning."
+
+	if err := WriteFile(f.Name(), []byte(msg), 0644); err != nil {
+		t.Fatalf("WriteFile %s: %v", f.Name(), err)
+	}
+
+	data, err := ReadFile(f.Name())
+	if err != nil {
+		t.Fatalf("ReadFile %s: %v", f.Name(), err)
+	}
+
+	if string(data) != msg {
+		t.Fatalf("ReadFile: wrong data:\nhave %q\nwant %q", string(data), msg)
+	}
+}
+
+func TestReadOnlyWriteFile(t *testing.T) {
+	if Getuid() == 0 {
+		t.Skipf("Root can write to read-only files anyway, so skip the read-only test.")
+	}
+	if runtime.GOOS == "wasip1" {
+		t.Skip("no support for file permissions on " + runtime.GOOS)
+	}
+	t.Parallel()
+
+	// We don't want to use CreateTemp directly, since that opens a file for us as 0600.
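// (Permission-bit refresher for what follows, assuming a non-root user —
// root bypasses these checks, which is why the test skips for uid 0:
// 0600 is owner read/write; 0444 is read-only for everyone, so a second
// write to the same file must fail. Illustrative calls:
//
//	WriteFile(name, data, 0444) // creates the file read-only
//	WriteFile(name, more, 0444) // fails: open for writing => permission denied
//
// The mode argument only applies when the file is created.)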
+ tempDir, err := MkdirTemp("", t.Name()) + if err != nil { + t.Fatal(err) + } + defer RemoveAll(tempDir) + filename := filepath.Join(tempDir, "blurp.txt") + + shmorp := []byte("shmorp") + florp := []byte("florp") + err = WriteFile(filename, shmorp, 0444) + if err != nil { + t.Fatalf("WriteFile %s: %v", filename, err) + } + err = WriteFile(filename, florp, 0444) + if err == nil { + t.Fatalf("Expected an error when writing to read-only file %s", filename) + } + got, err := ReadFile(filename) + if err != nil { + t.Fatalf("ReadFile %s: %v", filename, err) + } + if !bytes.Equal(got, shmorp) { + t.Fatalf("want %s, got %s", shmorp, got) + } +} + +func TestReadDir(t *testing.T) { + t.Parallel() + + dirname := "rumpelstilzchen" + _, err := ReadDir(dirname) + if err == nil { + t.Fatalf("ReadDir %s: error expected, none found", dirname) + } + + dirname = "." + list, err := ReadDir(dirname) + if err != nil { + t.Fatalf("ReadDir %s: %v", dirname, err) + } + + foundFile := false + foundSubDir := false + for _, dir := range list { + switch { + case !dir.IsDir() && dir.Name() == "read_test.go": + foundFile = true + case dir.IsDir() && dir.Name() == "exec": + foundSubDir = true + } + } + if !foundFile { + t.Fatalf("ReadDir %s: read_test.go file not found", dirname) + } + if !foundSubDir { + t.Fatalf("ReadDir %s: exec directory not found", dirname) + } +} diff --git a/platform/dbops/binaries/go/go/src/os/readfrom_linux_test.go b/platform/dbops/binaries/go/go/src/os/readfrom_linux_test.go new file mode 100644 index 0000000000000000000000000000000000000000..93f78032e737146813f1cb24ff19aff6b2605a48 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/readfrom_linux_test.go @@ -0,0 +1,825 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "bytes" + "errors" + "internal/poll" + "internal/testpty" + "io" + "math/rand" + "net" + . 
"os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "testing" + "time" + + "golang.org/x/net/nettest" +) + +func TestCopyFileRange(t *testing.T) { + sizes := []int{ + 1, + 42, + 1025, + syscall.Getpagesize() + 1, + 32769, + } + t.Run("Basic", func(t *testing.T) { + for _, size := range sizes { + t.Run(strconv.Itoa(size), func(t *testing.T) { + testCopyFileRange(t, int64(size), -1) + }) + } + }) + t.Run("Limited", func(t *testing.T) { + t.Run("OneLess", func(t *testing.T) { + for _, size := range sizes { + t.Run(strconv.Itoa(size), func(t *testing.T) { + testCopyFileRange(t, int64(size), int64(size)-1) + }) + } + }) + t.Run("Half", func(t *testing.T) { + for _, size := range sizes { + t.Run(strconv.Itoa(size), func(t *testing.T) { + testCopyFileRange(t, int64(size), int64(size)/2) + }) + } + }) + t.Run("More", func(t *testing.T) { + for _, size := range sizes { + t.Run(strconv.Itoa(size), func(t *testing.T) { + testCopyFileRange(t, int64(size), int64(size)+7) + }) + } + }) + }) + t.Run("DoesntTryInAppendMode", func(t *testing.T) { + dst, src, data, hook := newCopyFileRangeTest(t, 42) + + dst2, err := OpenFile(dst.Name(), O_RDWR|O_APPEND, 0755) + if err != nil { + t.Fatal(err) + } + defer dst2.Close() + + if _, err := io.Copy(dst2, src); err != nil { + t.Fatal(err) + } + if hook.called { + t.Fatal("called poll.CopyFileRange for destination in O_APPEND mode") + } + mustSeekStart(t, dst2) + mustContainData(t, dst2, data) // through traditional means + }) + t.Run("CopyFileItself", func(t *testing.T) { + hook := hookCopyFileRange(t) + + f, err := CreateTemp("", "file-readfrom-itself-test") + if err != nil { + t.Fatalf("failed to create tmp file: %v", err) + } + t.Cleanup(func() { + f.Close() + Remove(f.Name()) + }) + + data := []byte("hello world!") + if _, err := f.Write(data); err != nil { + t.Fatalf("failed to create and feed the file: %v", err) + } + + if err := f.Sync(); err != nil { + t.Fatalf("failed to save the file: %v", err) + } + + // Rewind it. + if _, err := f.Seek(0, io.SeekStart); err != nil { + t.Fatalf("failed to rewind the file: %v", err) + } + + // Read data from the file itself. + if _, err := io.Copy(f, f); err != nil { + t.Fatalf("failed to read from the file: %v", err) + } + + if !hook.called || hook.written != 0 || hook.handled || hook.err != nil { + t.Fatalf("poll.CopyFileRange should be called and return the EINVAL error, but got hook.called=%t, hook.err=%v", hook.called, hook.err) + } + + // Rewind it. + if _, err := f.Seek(0, io.SeekStart); err != nil { + t.Fatalf("failed to rewind the file: %v", err) + } + + data2, err := io.ReadAll(f) + if err != nil { + t.Fatalf("failed to read from the file: %v", err) + } + + // It should wind up a double of the original data. + if strings.Repeat(string(data), 2) != string(data2) { + t.Fatalf("data mismatch: %s != %s", string(data), string(data2)) + } + }) + t.Run("NotRegular", func(t *testing.T) { + t.Run("BothPipes", func(t *testing.T) { + hook := hookCopyFileRange(t) + + pr1, pw1, err := Pipe() + if err != nil { + t.Fatal(err) + } + defer pr1.Close() + defer pw1.Close() + + pr2, pw2, err := Pipe() + if err != nil { + t.Fatal(err) + } + defer pr2.Close() + defer pw2.Close() + + // The pipe is empty, and PIPE_BUF is large enough + // for this, by (POSIX) definition, so there is no + // need for an additional goroutine. 
+ data := []byte("hello") + if _, err := pw1.Write(data); err != nil { + t.Fatal(err) + } + pw1.Close() + + n, err := io.Copy(pw2, pr1) + if err != nil { + t.Fatal(err) + } + if n != int64(len(data)) { + t.Fatalf("transferred %d, want %d", n, len(data)) + } + if !hook.called { + t.Fatalf("should have called poll.CopyFileRange") + } + pw2.Close() + mustContainData(t, pr2, data) + }) + t.Run("DstPipe", func(t *testing.T) { + dst, src, data, hook := newCopyFileRangeTest(t, 255) + dst.Close() + + pr, pw, err := Pipe() + if err != nil { + t.Fatal(err) + } + defer pr.Close() + defer pw.Close() + + n, err := io.Copy(pw, src) + if err != nil { + t.Fatal(err) + } + if n != int64(len(data)) { + t.Fatalf("transferred %d, want %d", n, len(data)) + } + if !hook.called { + t.Fatalf("should have called poll.CopyFileRange") + } + pw.Close() + mustContainData(t, pr, data) + }) + t.Run("SrcPipe", func(t *testing.T) { + dst, src, data, hook := newCopyFileRangeTest(t, 255) + src.Close() + + pr, pw, err := Pipe() + if err != nil { + t.Fatal(err) + } + defer pr.Close() + defer pw.Close() + + // The pipe is empty, and PIPE_BUF is large enough + // for this, by (POSIX) definition, so there is no + // need for an additional goroutine. + if _, err := pw.Write(data); err != nil { + t.Fatal(err) + } + pw.Close() + + n, err := io.Copy(dst, pr) + if err != nil { + t.Fatal(err) + } + if n != int64(len(data)) { + t.Fatalf("transferred %d, want %d", n, len(data)) + } + if !hook.called { + t.Fatalf("should have called poll.CopyFileRange") + } + mustSeekStart(t, dst) + mustContainData(t, dst, data) + }) + }) + t.Run("Nil", func(t *testing.T) { + var nilFile *File + anyFile, err := CreateTemp("", "") + if err != nil { + t.Fatal(err) + } + defer Remove(anyFile.Name()) + defer anyFile.Close() + + if _, err := io.Copy(nilFile, nilFile); err != ErrInvalid { + t.Errorf("io.Copy(nilFile, nilFile) = %v, want %v", err, ErrInvalid) + } + if _, err := io.Copy(anyFile, nilFile); err != ErrInvalid { + t.Errorf("io.Copy(anyFile, nilFile) = %v, want %v", err, ErrInvalid) + } + if _, err := io.Copy(nilFile, anyFile); err != ErrInvalid { + t.Errorf("io.Copy(nilFile, anyFile) = %v, want %v", err, ErrInvalid) + } + + if _, err := nilFile.ReadFrom(nilFile); err != ErrInvalid { + t.Errorf("nilFile.ReadFrom(nilFile) = %v, want %v", err, ErrInvalid) + } + if _, err := anyFile.ReadFrom(nilFile); err != ErrInvalid { + t.Errorf("anyFile.ReadFrom(nilFile) = %v, want %v", err, ErrInvalid) + } + if _, err := nilFile.ReadFrom(anyFile); err != ErrInvalid { + t.Errorf("nilFile.ReadFrom(anyFile) = %v, want %v", err, ErrInvalid) + } + }) +} + +func TestSpliceFile(t *testing.T) { + sizes := []int{ + 1, + 42, + 1025, + syscall.Getpagesize() + 1, + 32769, + } + t.Run("Basic-TCP", func(t *testing.T) { + for _, size := range sizes { + t.Run(strconv.Itoa(size), func(t *testing.T) { + testSpliceFile(t, "tcp", int64(size), -1) + }) + } + }) + t.Run("Basic-Unix", func(t *testing.T) { + for _, size := range sizes { + t.Run(strconv.Itoa(size), func(t *testing.T) { + testSpliceFile(t, "unix", int64(size), -1) + }) + } + }) + t.Run("TCP-To-TTY", func(t *testing.T) { + testSpliceToTTY(t, "tcp", 32768) + }) + t.Run("Unix-To-TTY", func(t *testing.T) { + testSpliceToTTY(t, "unix", 32768) + }) + t.Run("Limited", func(t *testing.T) { + t.Run("OneLess-TCP", func(t *testing.T) { + for _, size := range sizes { + t.Run(strconv.Itoa(size), func(t *testing.T) { + testSpliceFile(t, "tcp", int64(size), int64(size)-1) + }) + } + }) + t.Run("OneLess-Unix", func(t *testing.T) { + for _, 
size := range sizes {
+				t.Run(strconv.Itoa(size), func(t *testing.T) {
+					testSpliceFile(t, "unix", int64(size), int64(size)-1)
+				})
+			}
+		})
+		t.Run("Half-TCP", func(t *testing.T) {
+			for _, size := range sizes {
+				t.Run(strconv.Itoa(size), func(t *testing.T) {
+					testSpliceFile(t, "tcp", int64(size), int64(size)/2)
+				})
+			}
+		})
+		t.Run("Half-Unix", func(t *testing.T) {
+			for _, size := range sizes {
+				t.Run(strconv.Itoa(size), func(t *testing.T) {
+					testSpliceFile(t, "unix", int64(size), int64(size)/2)
+				})
+			}
+		})
+		t.Run("More-TCP", func(t *testing.T) {
+			for _, size := range sizes {
+				t.Run(strconv.Itoa(size), func(t *testing.T) {
+					testSpliceFile(t, "tcp", int64(size), int64(size)+1)
+				})
+			}
+		})
+		t.Run("More-Unix", func(t *testing.T) {
+			for _, size := range sizes {
+				t.Run(strconv.Itoa(size), func(t *testing.T) {
+					testSpliceFile(t, "unix", int64(size), int64(size)+1)
+				})
+			}
+		})
+	})
+}
+
+func testSpliceFile(t *testing.T, proto string, size, limit int64) {
+	dst, src, data, hook, cleanup := newSpliceFileTest(t, proto, size)
+	defer cleanup()
+
+	// If we have a limit, wrap the reader.
+	var (
+		r  io.Reader
+		lr *io.LimitedReader
+	)
+	if limit >= 0 {
+		lr = &io.LimitedReader{N: limit, R: src}
+		r = lr
+		if limit < int64(len(data)) {
+			data = data[:limit]
+		}
+	} else {
+		r = src
+	}
+	// Now call ReadFrom (through io.Copy), which will hopefully call poll.Splice
+	n, err := io.Copy(dst, r)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// We should have called poll.Splice with the right file descriptor arguments.
+	if n > 0 && !hook.called {
+		t.Fatal("expected to have called poll.Splice")
+	}
+	if hook.called && hook.dstfd != int(dst.Fd()) {
+		t.Fatalf("wrong destination file descriptor: got %d, want %d", hook.dstfd, dst.Fd())
+	}
+	sc, ok := src.(syscall.Conn)
+	if !ok {
+		t.Fatalf("server Conn is not a syscall.Conn")
+	}
+	rc, err := sc.SyscallConn()
+	if err != nil {
+		t.Fatalf("server Conn SyscallConn error: %v", err)
+	}
+	if err = rc.Control(func(fd uintptr) {
+		if hook.called && hook.srcfd != int(fd) {
+			t.Fatalf("wrong source file descriptor: got %d, want %d", hook.srcfd, int(fd))
+		}
+	}); err != nil {
+		t.Fatalf("server Conn Control error: %v", err)
+	}
+
+	// Check that the offsets after the transfer make sense, that the size
+	// of the transfer was reported correctly, and that the destination
+	// file contains exactly the bytes we expect it to contain.
+	dstoff, err := dst.Seek(0, io.SeekCurrent)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if dstoff != int64(len(data)) {
+		t.Errorf("dstoff = %d, want %d", dstoff, len(data))
+	}
+	if n != int64(len(data)) {
+		t.Errorf("short ReadFrom: wrote %d bytes, want %d", n, len(data))
+	}
+	mustSeekStart(t, dst)
+	mustContainData(t, dst, data)
+
+	// If we had a limit, check that it was updated.
+	if lr != nil {
+		if want := limit - n; lr.N != want {
+			t.Fatalf("didn't update limit correctly: got %d, want %d", lr.N, want)
+		}
+	}
+}
+
+// Issue #59041.
+func testSpliceToTTY(t *testing.T, proto string, size int64) {
+	var wg sync.WaitGroup
+
+	// Call wg.Wait as the final deferred function,
+	// because the goroutines may block until some of
+	// the deferred Close calls run.
+	defer wg.Wait()
+
+	pty, ttyName, err := testpty.Open()
+	if err != nil {
+		t.Skipf("skipping test because pty open failed: %v", err)
+	}
+	defer pty.Close()
+
+	// Open the tty directly, rather than via OpenFile.
+	// This bypasses the non-blocking support and is required
+	// to recreate the problem in the issue (#59041).
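// (The contrast being set up here, as a sketch: os.OpenFile puts supported
// descriptors into non-blocking mode and registers them with the runtime
// poller, while a raw open wrapped with os.NewFile keeps a blocking
// descriptor that bypasses the poller:
//
//	f, _ := os.OpenFile(ttyName, os.O_RDWR, 0)        // pollable, non-blocking
//	fd, _ := syscall.Open(ttyName, syscall.O_RDWR, 0) // raw descriptor
//	g := os.NewFile(uintptr(fd), ttyName)             // blocking, no poller
//
// The blocking variant is what reproduced the hang in issue #59041.)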
+	ttyFD, err := syscall.Open(ttyName, syscall.O_RDWR, 0)
+	if err != nil {
+		t.Skipf("skipping test because opening the tty failed: %v", err)
+	}
+	defer syscall.Close(ttyFD)
+
+	tty := NewFile(uintptr(ttyFD), "tty")
+	defer tty.Close()
+
+	client, server := createSocketPair(t, proto)
+
+	data := bytes.Repeat([]byte{'a'}, int(size))
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		// The problem (issue #59041) occurs when writing
+		// a series of blocks of data. It does not occur
+		// when all the data is written at once.
+		for i := 0; i < len(data); i += 1024 {
+			if _, err := client.Write(data[i : i+1024]); err != nil {
+				// If we get here because the client was
+				// closed, skip the error.
+				if !errors.Is(err, net.ErrClosed) {
+					t.Errorf("error writing to socket: %v", err)
+				}
+				return
+			}
+		}
+		client.Close()
+	}()
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		buf := make([]byte, 32)
+		for {
+			if _, err := pty.Read(buf); err != nil {
+				if err != io.EOF && !errors.Is(err, ErrClosed) {
+					// An error here doesn't matter for
+					// our test.
+					t.Logf("error reading from pty: %v", err)
+				}
+				return
+			}
+		}
+	}()
+
+	// Close client to wake up the writing goroutine if necessary.
+	defer client.Close()
+
+	_, err = io.Copy(tty, server)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func testCopyFileRange(t *testing.T, size int64, limit int64) {
+	dst, src, data, hook := newCopyFileRangeTest(t, size)
+
+	// If we have a limit, wrap the reader.
+	var (
+		realsrc io.Reader
+		lr      *io.LimitedReader
+	)
+	if limit >= 0 {
+		lr = &io.LimitedReader{N: limit, R: src}
+		realsrc = lr
+		if limit < int64(len(data)) {
+			data = data[:limit]
+		}
+	} else {
+		realsrc = src
+	}
+
+	// Now call ReadFrom (through io.Copy), which will hopefully call
+	// poll.CopyFileRange.
+	n, err := io.Copy(dst, realsrc)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// If we didn't have a limit, we should have called poll.CopyFileRange
+	// with the right file descriptor arguments.
+	if limit > 0 && !hook.called {
+		t.Fatal("never called poll.CopyFileRange")
+	}
+	if hook.called && hook.dstfd != int(dst.Fd()) {
+		t.Fatalf("wrong destination file descriptor: got %d, want %d", hook.dstfd, dst.Fd())
+	}
+	if hook.called && hook.srcfd != int(src.Fd()) {
+		t.Fatalf("wrong source file descriptor: got %d, want %d", hook.srcfd, src.Fd())
+	}
+
+	// Check that the offsets after the transfer make sense, that the size
+	// of the transfer was reported correctly, and that the destination
+	// file contains exactly the bytes we expect it to contain.
+	dstoff, err := dst.Seek(0, io.SeekCurrent)
+	if err != nil {
+		t.Fatal(err)
+	}
+	srcoff, err := src.Seek(0, io.SeekCurrent)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if dstoff != srcoff {
+		t.Errorf("offsets differ: dstoff = %d, srcoff = %d", dstoff, srcoff)
+	}
+	if dstoff != int64(len(data)) {
+		t.Errorf("dstoff = %d, want %d", dstoff, len(data))
+	}
+	if n != int64(len(data)) {
+		t.Errorf("short ReadFrom: wrote %d bytes, want %d", n, len(data))
+	}
+	mustSeekStart(t, dst)
+	mustContainData(t, dst, data)
+
+	// If we had a limit, check that it was updated.
+	if lr != nil {
+		if want := limit - n; lr.N != want {
+			t.Fatalf("didn't update limit correctly: got %d, want %d", lr.N, want)
+		}
+	}
+}
+
+// newCopyFileRangeTest initializes a new test for copy_file_range.
+//
+// It creates source and destination files, and populates the source file
+// with random data of the specified size. It also hooks package os' call
+// to poll.CopyFileRange and returns the hook so it can be inspected.
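// The hook mechanism itself (defined later in this file) is a plain
// function-pointer swap through a test-only export, restored via Cleanup;
// as a sketch of the shape:
//
//	orig := *PollCopyFileRangeP // test export of os's fast-path pointer
//	*PollCopyFileRangeP = func(dst, src *poll.FD, remain int64) (int64, bool, error) {
//		// record that the fast path ran, then delegate to the real implementation
//		return orig(dst, src, remain)
//	}
//	t.Cleanup(func() { *PollCopyFileRangeP = orig })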
+func newCopyFileRangeTest(t *testing.T, size int64) (dst, src *File, data []byte, hook *copyFileRangeHook) { + t.Helper() + + hook = hookCopyFileRange(t) + tmp := t.TempDir() + + src, err := Create(filepath.Join(tmp, "src")) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { src.Close() }) + + dst, err = Create(filepath.Join(tmp, "dst")) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { dst.Close() }) + + // Populate the source file with data, then rewind it, so it can be + // consumed by copy_file_range(2). + prng := rand.New(rand.NewSource(time.Now().Unix())) + data = make([]byte, size) + prng.Read(data) + if _, err := src.Write(data); err != nil { + t.Fatal(err) + } + if _, err := src.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + + return dst, src, data, hook +} + +// newSpliceFileTest initializes a new test for splice. +// +// It creates source sockets and destination file, and populates the source sockets +// with random data of the specified size. It also hooks package os' call +// to poll.Splice and returns the hook so it can be inspected. +func newSpliceFileTest(t *testing.T, proto string, size int64) (*File, net.Conn, []byte, *spliceFileHook, func()) { + t.Helper() + + hook := hookSpliceFile(t) + + client, server := createSocketPair(t, proto) + + dst, err := CreateTemp(t.TempDir(), "dst-splice-file-test") + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { dst.Close() }) + + randSeed := time.Now().Unix() + t.Logf("random data seed: %d\n", randSeed) + prng := rand.New(rand.NewSource(randSeed)) + data := make([]byte, size) + prng.Read(data) + + done := make(chan struct{}) + go func() { + client.Write(data) + client.Close() + close(done) + }() + + return dst, server, data, hook, func() { <-done } +} + +// mustContainData ensures that the specified file contains exactly the +// specified data. 
+func mustContainData(t *testing.T, f *File, data []byte) { + t.Helper() + + got := make([]byte, len(data)) + if _, err := io.ReadFull(f, got); err != nil { + t.Fatal(err) + } + if !bytes.Equal(got, data) { + t.Fatalf("didn't get the same data back from %s", f.Name()) + } + if _, err := f.Read(make([]byte, 1)); err != io.EOF { + t.Fatalf("not at EOF") + } +} + +func mustSeekStart(t *testing.T, f *File) { + if _, err := f.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } +} + +func hookCopyFileRange(t *testing.T) *copyFileRangeHook { + h := new(copyFileRangeHook) + h.install() + t.Cleanup(h.uninstall) + return h +} + +type copyFileRangeHook struct { + called bool + dstfd int + srcfd int + remain int64 + + written int64 + handled bool + err error + + original func(dst, src *poll.FD, remain int64) (int64, bool, error) +} + +func (h *copyFileRangeHook) install() { + h.original = *PollCopyFileRangeP + *PollCopyFileRangeP = func(dst, src *poll.FD, remain int64) (int64, bool, error) { + h.called = true + h.dstfd = dst.Sysfd + h.srcfd = src.Sysfd + h.remain = remain + h.written, h.handled, h.err = h.original(dst, src, remain) + return h.written, h.handled, h.err + } +} + +func (h *copyFileRangeHook) uninstall() { + *PollCopyFileRangeP = h.original +} + +func hookSpliceFile(t *testing.T) *spliceFileHook { + h := new(spliceFileHook) + h.install() + t.Cleanup(h.uninstall) + return h +} + +type spliceFileHook struct { + called bool + dstfd int + srcfd int + remain int64 + + written int64 + handled bool + sc string + err error + + original func(dst, src *poll.FD, remain int64) (int64, bool, string, error) +} + +func (h *spliceFileHook) install() { + h.original = *PollSpliceFile + *PollSpliceFile = func(dst, src *poll.FD, remain int64) (int64, bool, string, error) { + h.called = true + h.dstfd = dst.Sysfd + h.srcfd = src.Sysfd + h.remain = remain + h.written, h.handled, h.sc, h.err = h.original(dst, src, remain) + return h.written, h.handled, h.sc, h.err + } +} + +func (h *spliceFileHook) uninstall() { + *PollSpliceFile = h.original +} + +// On some kernels copy_file_range fails on files in /proc. 
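// Many procfs files report a stat size of 0 even though reads return data,
// so an offset-based kernel copy can legitimately transfer nothing and the
// copy path has to fall back to ordinary read/write. Illustrative check:
//
//	fi, _ := os.Stat("/proc/self/cmdline")
//	_ = fi.Size() // typically 0 despite non-empty content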
+func TestProcCopy(t *testing.T) { + t.Parallel() + + const cmdlineFile = "/proc/self/cmdline" + cmdline, err := ReadFile(cmdlineFile) + if err != nil { + t.Skipf("can't read /proc file: %v", err) + } + in, err := Open(cmdlineFile) + if err != nil { + t.Fatal(err) + } + defer in.Close() + outFile := filepath.Join(t.TempDir(), "cmdline") + out, err := Create(outFile) + if err != nil { + t.Fatal(err) + } + if _, err := io.Copy(out, in); err != nil { + t.Fatal(err) + } + if err := out.Close(); err != nil { + t.Fatal(err) + } + copy, err := ReadFile(outFile) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(cmdline, copy) { + t.Errorf("copy of %q got %q want %q\n", cmdlineFile, copy, cmdline) + } +} + +func TestGetPollFDAndNetwork(t *testing.T) { + t.Run("tcp4", func(t *testing.T) { testGetPollFDAndNetwork(t, "tcp4") }) + t.Run("unix", func(t *testing.T) { testGetPollFDAndNetwork(t, "unix") }) +} + +func testGetPollFDAndNetwork(t *testing.T, proto string) { + _, server := createSocketPair(t, proto) + sc, ok := server.(syscall.Conn) + if !ok { + t.Fatalf("server Conn is not a syscall.Conn") + } + rc, err := sc.SyscallConn() + if err != nil { + t.Fatalf("server SyscallConn error: %v", err) + } + if err = rc.Control(func(fd uintptr) { + pfd, network := GetPollFDAndNetwork(server) + if pfd == nil { + t.Fatalf("GetPollFDAndNetwork didn't return poll.FD") + } + if string(network) != proto { + t.Fatalf("GetPollFDAndNetwork returned wrong network, got: %s, want: %s", network, proto) + } + if pfd.Sysfd != int(fd) { + t.Fatalf("GetPollFDAndNetwork returned wrong poll.FD, got: %d, want: %d", pfd.Sysfd, int(fd)) + } + if !pfd.IsStream { + t.Fatalf("expected IsStream to be true") + } + if err = pfd.Init(proto, true); err == nil { + t.Fatalf("Init should have failed with the initialized poll.FD and return EEXIST error") + } + }); err != nil { + t.Fatalf("server Control error: %v", err) + } +} + +func createSocketPair(t *testing.T, proto string) (client, server net.Conn) { + t.Helper() + if !nettest.TestableNetwork(proto) { + t.Skipf("%s does not support %q", runtime.GOOS, proto) + } + + ln, err := nettest.NewLocalListener(proto) + if err != nil { + t.Fatalf("NewLocalListener error: %v", err) + } + t.Cleanup(func() { + if ln != nil { + ln.Close() + } + if client != nil { + client.Close() + } + if server != nil { + server.Close() + } + }) + ch := make(chan struct{}) + go func() { + var err error + server, err = ln.Accept() + if err != nil { + t.Errorf("Accept new connection error: %v", err) + } + ch <- struct{}{} + }() + client, err = net.Dial(proto, ln.Addr().String()) + <-ch + if err != nil { + t.Fatalf("Dial new connection error: %v", err) + } + return client, server +} diff --git a/platform/dbops/binaries/go/go/src/os/removeall_at.go b/platform/dbops/binaries/go/go/src/os/removeall_at.go new file mode 100644 index 0000000000000000000000000000000000000000..774ca15823bbc141c8c5293de729889065e0902d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/removeall_at.go @@ -0,0 +1,191 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package os + +import ( + "internal/syscall/unix" + "io" + "syscall" +) + +func removeAll(path string) error { + if path == "" { + // fail silently to retain compatibility with previous behavior + // of RemoveAll. See issue 28830. + return nil + } + + // The rmdir system call does not permit removing ".", + // so we don't permit it either. 
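// (Concretely — illustrative calls, matching the EINVAL return just below:
//
//	RemoveAll(".")      // error: invalid argument
//	RemoveAll("dir/.")  // error: final path component is "."
//	RemoveAll("dir/..") // allowed; see TestRemoveAllDotDot
//
// rmdir(2) itself fails with EINVAL when the last component is ".".)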
+ if endsWithDot(path) { + return &PathError{Op: "RemoveAll", Path: path, Err: syscall.EINVAL} + } + + // Simple case: if Remove works, we're done. + err := Remove(path) + if err == nil || IsNotExist(err) { + return nil + } + + // RemoveAll recurses by deleting the path base from + // its parent directory + parentDir, base := splitPath(path) + + parent, err := Open(parentDir) + if IsNotExist(err) { + // If parent does not exist, base cannot exist. Fail silently + return nil + } + if err != nil { + return err + } + defer parent.Close() + + if err := removeAllFrom(parent, base); err != nil { + if pathErr, ok := err.(*PathError); ok { + pathErr.Path = parentDir + string(PathSeparator) + pathErr.Path + err = pathErr + } + return err + } + return nil +} + +func removeAllFrom(parent *File, base string) error { + parentFd := int(parent.Fd()) + // Simple case: if Unlink (aka remove) works, we're done. + err := ignoringEINTR(func() error { + return unix.Unlinkat(parentFd, base, 0) + }) + if err == nil || IsNotExist(err) { + return nil + } + + // EISDIR means that we have a directory, and we need to + // remove its contents. + // EPERM or EACCES means that we don't have write permission on + // the parent directory, but this entry might still be a directory + // whose contents need to be removed. + // Otherwise just return the error. + if err != syscall.EISDIR && err != syscall.EPERM && err != syscall.EACCES { + return &PathError{Op: "unlinkat", Path: base, Err: err} + } + uErr := err + + // Remove the directory's entries. + var recurseErr error + for { + const reqSize = 1024 + var respSize int + + // Open the directory to recurse into + file, err := openDirAt(parentFd, base) + if err != nil { + if IsNotExist(err) { + return nil + } + if err == syscall.ENOTDIR { + // Not a directory; return the error from the unix.Unlinkat. + return &PathError{Op: "unlinkat", Path: base, Err: uErr} + } + recurseErr = &PathError{Op: "openfdat", Path: base, Err: err} + break + } + + for { + numErr := 0 + + names, readErr := file.Readdirnames(reqSize) + // Errors other than EOF should stop us from continuing. + if readErr != nil && readErr != io.EOF { + file.Close() + if IsNotExist(readErr) { + return nil + } + return &PathError{Op: "readdirnames", Path: base, Err: readErr} + } + + respSize = len(names) + for _, name := range names { + err := removeAllFrom(file, name) + if err != nil { + if pathErr, ok := err.(*PathError); ok { + pathErr.Path = base + string(PathSeparator) + pathErr.Path + } + numErr++ + if recurseErr == nil { + recurseErr = err + } + } + } + + // If we can delete any entry, break to start new iteration. + // Otherwise, we discard current names, get next entries and try deleting them. + if numErr != reqSize { + break + } + } + + // Removing files from the directory may have caused + // the OS to reshuffle it. Simply calling Readdirnames + // again may skip some entries. The only reliable way + // to avoid this is to close and re-open the + // directory. See issue 20841. + file.Close() + + // Finish when the end of the directory is reached + if respSize < reqSize { + break + } + } + + // Remove the directory itself. 
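// (Everything above resolves names relative to the already-open parent
// descriptor rather than by full path, so a concurrently renamed or
// symlink-swapped ancestor cannot redirect the walk. The two removal shapes
// used, mirroring unlink(2) and rmdir(2):
//
//	unix.Unlinkat(parentFd, base, 0)                 // non-directories
//	unix.Unlinkat(parentFd, base, unix.AT_REMOVEDIR) // empty directories
//
// The second form is the one issued just below.)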
+ unlinkError := ignoringEINTR(func() error { + return unix.Unlinkat(parentFd, base, unix.AT_REMOVEDIR) + }) + if unlinkError == nil || IsNotExist(unlinkError) { + return nil + } + + if recurseErr != nil { + return recurseErr + } + return &PathError{Op: "unlinkat", Path: base, Err: unlinkError} +} + +// openDirAt opens a directory name relative to the directory referred to by +// the file descriptor dirfd. If name is anything but a directory (this +// includes a symlink to one), it should return an error. Other than that this +// should act like openFileNolog. +// +// This acts like openFileNolog rather than OpenFile because +// we are going to (try to) remove the file. +// The contents of this file are not relevant for test caching. +func openDirAt(dirfd int, name string) (*File, error) { + var r int + for { + var e error + r, e = unix.Openat(dirfd, name, O_RDONLY|syscall.O_CLOEXEC|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0) + if e == nil { + break + } + + // See comment in openFileNolog. + if e == syscall.EINTR { + continue + } + + return nil, e + } + + if !supportsCloseOnExec { + syscall.CloseOnExec(r) + } + + // We use kindNoPoll because we know that this is a directory. + return newFile(r, name, kindNoPoll), nil +} diff --git a/platform/dbops/binaries/go/go/src/os/removeall_noat.go b/platform/dbops/binaries/go/go/src/os/removeall_noat.go new file mode 100644 index 0000000000000000000000000000000000000000..2b8a7727f4f3e1b00644b3a0dfced8e7f8d25043 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/removeall_noat.go @@ -0,0 +1,142 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !unix + +package os + +import ( + "io" + "runtime" + "syscall" +) + +func removeAll(path string) error { + if path == "" { + // fail silently to retain compatibility with previous behavior + // of RemoveAll. See issue 28830. + return nil + } + + // The rmdir system call permits removing "." on Plan 9, + // so we don't permit it to remain consistent with the + // "at" implementation of RemoveAll. + if endsWithDot(path) { + return &PathError{Op: "RemoveAll", Path: path, Err: syscall.EINVAL} + } + + // Simple case: if Remove works, we're done. + err := Remove(path) + if err == nil || IsNotExist(err) { + return nil + } + + // Otherwise, is this a directory we need to recurse into? + dir, serr := Lstat(path) + if serr != nil { + if serr, ok := serr.(*PathError); ok && (IsNotExist(serr.Err) || serr.Err == syscall.ENOTDIR) { + return nil + } + return serr + } + if !dir.IsDir() { + // Not a directory; return the error from Remove. + return err + } + + // Remove contents & return first error. + err = nil + for { + fd, err := Open(path) + if err != nil { + if IsNotExist(err) { + // Already deleted by someone else. + return nil + } + return err + } + + const reqSize = 1024 + var names []string + var readErr error + + for { + numErr := 0 + names, readErr = fd.Readdirnames(reqSize) + + for _, name := range names { + err1 := RemoveAll(path + string(PathSeparator) + name) + if err == nil { + err = err1 + } + if err1 != nil { + numErr++ + } + } + + // If we can delete any entry, break to start new iteration. + // Otherwise, we discard current names, get next entries and try deleting them. + if numErr != reqSize { + break + } + } + + // Removing files from the directory may have caused + // the OS to reshuffle it. Simply calling Readdirnames + // again may skip some entries. 
The only reliable way + // to avoid this is to close and re-open the + // directory. See issue 20841. + fd.Close() + + if readErr == io.EOF { + break + } + // If Readdirnames returned an error, use it. + if err == nil { + err = readErr + } + if len(names) == 0 { + break + } + + // We don't want to re-open unnecessarily, so if we + // got fewer than request names from Readdirnames, try + // simply removing the directory now. If that + // succeeds, we are done. + if len(names) < reqSize { + err1 := Remove(path) + if err1 == nil || IsNotExist(err1) { + return nil + } + + if err != nil { + // We got some error removing the + // directory contents, and since we + // read fewer names than we requested + // there probably aren't more files to + // remove. Don't loop around to read + // the directory again. We'll probably + // just get the same error. + return err + } + } + } + + // Remove directory. + err1 := Remove(path) + if err1 == nil || IsNotExist(err1) { + return nil + } + if runtime.GOOS == "windows" && IsPermission(err1) { + if fs, err := Stat(path); err == nil { + if err = Chmod(path, FileMode(0200|int(fs.Mode()))); err == nil { + err1 = Remove(path) + } + } + } + if err == nil { + err = err1 + } + return err +} diff --git a/platform/dbops/binaries/go/go/src/os/removeall_test.go b/platform/dbops/binaries/go/go/src/os/removeall_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c0b2dd6505608c4f39d1b4827cf771f1b04dfac5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/removeall_test.go @@ -0,0 +1,506 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "bytes" + "fmt" + "internal/testenv" + . "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "testing" +) + +func TestRemoveAll(t *testing.T) { + t.Parallel() + + tmpDir := t.TempDir() + if err := RemoveAll(""); err != nil { + t.Errorf("RemoveAll(\"\"): %v; want nil", err) + } + + file := filepath.Join(tmpDir, "file") + path := filepath.Join(tmpDir, "_TestRemoveAll_") + fpath := filepath.Join(path, "file") + dpath := filepath.Join(path, "dir") + + // Make a regular file and remove + fd, err := Create(file) + if err != nil { + t.Fatalf("create %q: %s", file, err) + } + fd.Close() + if err = RemoveAll(file); err != nil { + t.Fatalf("RemoveAll %q (first): %s", file, err) + } + if _, err = Lstat(file); err == nil { + t.Fatalf("Lstat %q succeeded after RemoveAll (first)", file) + } + + // Make directory with 1 file and remove. + if err := MkdirAll(path, 0777); err != nil { + t.Fatalf("MkdirAll %q: %s", path, err) + } + fd, err = Create(fpath) + if err != nil { + t.Fatalf("create %q: %s", fpath, err) + } + fd.Close() + if err = RemoveAll(path); err != nil { + t.Fatalf("RemoveAll %q (second): %s", path, err) + } + if _, err = Lstat(path); err == nil { + t.Fatalf("Lstat %q succeeded after RemoveAll (second)", path) + } + + // Make directory with file and subdirectory and remove. 
+ if err = MkdirAll(dpath, 0777); err != nil { + t.Fatalf("MkdirAll %q: %s", dpath, err) + } + fd, err = Create(fpath) + if err != nil { + t.Fatalf("create %q: %s", fpath, err) + } + fd.Close() + fd, err = Create(dpath + "/file") + if err != nil { + t.Fatalf("create %q: %s", fpath, err) + } + fd.Close() + if err = RemoveAll(path); err != nil { + t.Fatalf("RemoveAll %q (third): %s", path, err) + } + if _, err := Lstat(path); err == nil { + t.Fatalf("Lstat %q succeeded after RemoveAll (third)", path) + } + + // Chmod is not supported under Windows or wasip1 and test fails as root. + if runtime.GOOS != "windows" && runtime.GOOS != "wasip1" && Getuid() != 0 { + // Make directory with file and subdirectory and trigger error. + if err = MkdirAll(dpath, 0777); err != nil { + t.Fatalf("MkdirAll %q: %s", dpath, err) + } + + for _, s := range []string{fpath, dpath + "/file1", path + "/zzz"} { + fd, err = Create(s) + if err != nil { + t.Fatalf("create %q: %s", s, err) + } + fd.Close() + } + if err = Chmod(dpath, 0); err != nil { + t.Fatalf("Chmod %q 0: %s", dpath, err) + } + + // No error checking here: either RemoveAll + // will or won't be able to remove dpath; + // either way we want to see if it removes fpath + // and path/zzz. Reasons why RemoveAll might + // succeed in removing dpath as well include: + // * running as root + // * running on a file system without permissions (FAT) + RemoveAll(path) + Chmod(dpath, 0777) + + for _, s := range []string{fpath, path + "/zzz"} { + if _, err = Lstat(s); err == nil { + t.Fatalf("Lstat %q succeeded after partial RemoveAll", s) + } + } + } + if err = RemoveAll(path); err != nil { + t.Fatalf("RemoveAll %q after partial RemoveAll: %s", path, err) + } + if _, err = Lstat(path); err == nil { + t.Fatalf("Lstat %q succeeded after RemoveAll (final)", path) + } +} + +// Test RemoveAll on a large directory. +func TestRemoveAllLarge(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + t.Parallel() + + tmpDir := t.TempDir() + path := filepath.Join(tmpDir, "_TestRemoveAllLarge_") + + // Make directory with 1000 files and remove. 
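The Chmod(dpath, 0) step above is how the test injects a failure: with all permission bits stripped, a non-root user on a permission-enforcing filesystem cannot remove the directory's contents. Pulled out as a reusable shape (denyAccess is a hypothetical helper, not part of the patch):

package sketch

import "os"

// denyAccess strips all permissions from dir so that removing its
// contents should fail for non-root users, and returns a function
// (suitable for defer) that restores the original mode.
func denyAccess(dir string) (restore func(), err error) {
	fi, err := os.Stat(dir)
	if err != nil {
		return nil, err
	}
	if err := os.Chmod(dir, 0); err != nil {
		return nil, err
	}
	return func() { os.Chmod(dir, fi.Mode().Perm()) }, nil
}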
+ if err := MkdirAll(path, 0777); err != nil { + t.Fatalf("MkdirAll %q: %s", path, err) + } + for i := 0; i < 1000; i++ { + fpath := fmt.Sprintf("%s/file%d", path, i) + fd, err := Create(fpath) + if err != nil { + t.Fatalf("create %q: %s", fpath, err) + } + fd.Close() + } + if err := RemoveAll(path); err != nil { + t.Fatalf("RemoveAll %q: %s", path, err) + } + if _, err := Lstat(path); err == nil { + t.Fatalf("Lstat %q succeeded after RemoveAll", path) + } +} + +func TestRemoveAllLongPath(t *testing.T) { + switch runtime.GOOS { + case "aix", "darwin", "ios", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "illumos", "solaris": + break + default: + t.Skip("skipping for not implemented platforms") + } + + prevDir, err := Getwd() + if err != nil { + t.Fatalf("Could not get wd: %s", err) + } + + startPath, err := MkdirTemp("", "TestRemoveAllLongPath-") + if err != nil { + t.Fatalf("Could not create TempDir: %s", err) + } + defer RemoveAll(startPath) + + err = Chdir(startPath) + if err != nil { + t.Fatalf("Could not chdir %s: %s", startPath, err) + } + + // Removing paths with over 4096 chars commonly fails + for i := 0; i < 41; i++ { + name := strings.Repeat("a", 100) + + err = Mkdir(name, 0755) + if err != nil { + t.Fatalf("Could not mkdir %s: %s", name, err) + } + + err = Chdir(name) + if err != nil { + t.Fatalf("Could not chdir %s: %s", name, err) + } + } + + err = Chdir(prevDir) + if err != nil { + t.Fatalf("Could not chdir %s: %s", prevDir, err) + } + + err = RemoveAll(startPath) + if err != nil { + t.Errorf("RemoveAll could not remove long file path %s: %s", startPath, err) + } +} + +func TestRemoveAllDot(t *testing.T) { + prevDir, err := Getwd() + if err != nil { + t.Fatalf("Could not get wd: %s", err) + } + tempDir, err := MkdirTemp("", "TestRemoveAllDot-") + if err != nil { + t.Fatalf("Could not create TempDir: %s", err) + } + defer RemoveAll(tempDir) + + err = Chdir(tempDir) + if err != nil { + t.Fatalf("Could not chdir to tempdir: %s", err) + } + + err = RemoveAll(".") + if err == nil { + t.Errorf("RemoveAll succeed to remove .") + } + + err = Chdir(prevDir) + if err != nil { + t.Fatalf("Could not chdir %s: %s", prevDir, err) + } +} + +func TestRemoveAllDotDot(t *testing.T) { + t.Parallel() + + tempDir := t.TempDir() + subdir := filepath.Join(tempDir, "x") + subsubdir := filepath.Join(subdir, "y") + if err := MkdirAll(subsubdir, 0777); err != nil { + t.Fatal(err) + } + if err := RemoveAll(filepath.Join(subsubdir, "..")); err != nil { + t.Error(err) + } + for _, dir := range []string{subsubdir, subdir} { + if _, err := Stat(dir); err == nil { + t.Errorf("%s: exists after RemoveAll", dir) + } + } +} + +// Issue #29178. +func TestRemoveReadOnlyDir(t *testing.T) { + t.Parallel() + + tempDir := t.TempDir() + subdir := filepath.Join(tempDir, "x") + if err := Mkdir(subdir, 0); err != nil { + t.Fatal(err) + } + + // If an error occurs make it more likely that removing the + // temporary directory will succeed. + defer Chmod(subdir, 0777) + + if err := RemoveAll(subdir); err != nil { + t.Fatal(err) + } + + if _, err := Stat(subdir); err == nil { + t.Error("subdirectory was not removed") + } +} + +// Issue #29983. 
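TestRemoveAllLongPath above builds a tree whose full path (41 components of 100 bytes each, over 4096 bytes total) exceeds the PATH_MAX most kernels enforce, by never handing the whole path to a single system call: each Mkdir and Chdir sees only one short component. The same trick in isolation (makeDeep is a hypothetical name; like the test, it leaves the process in the deepest directory, so callers must Chdir back):

package sketch

import (
	"os"
	"strings"
)

// makeDeep creates depth nested directories with width-byte names
// under base. Because it descends with Chdir, no single syscall ever
// sees the full path, which may exceed PATH_MAX.
func makeDeep(base string, depth, width int) error {
	if err := os.Chdir(base); err != nil {
		return err
	}
	name := strings.Repeat("a", width)
	for i := 0; i < depth; i++ {
		if err := os.Mkdir(name, 0o755); err != nil {
			return err
		}
		if err := os.Chdir(name); err != nil {
			return err
		}
	}
	return nil
}

Only a RemoveAll that descends the same way, one component at a time via the "at" syscalls, can unwind such a tree, which appears to be why the test skips platforms without that implementation.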
+func TestRemoveAllButReadOnlyAndPathError(t *testing.T) { + switch runtime.GOOS { + case "js", "wasip1", "windows": + t.Skipf("skipping test on %s", runtime.GOOS) + } + + if Getuid() == 0 { + t.Skip("skipping test when running as root") + } + + t.Parallel() + + tempDir := t.TempDir() + dirs := []string{ + "a", + "a/x", + "a/x/1", + "b", + "b/y", + "b/y/2", + "c", + "c/z", + "c/z/3", + } + readonly := []string{ + "b", + } + inReadonly := func(d string) bool { + for _, ro := range readonly { + if d == ro { + return true + } + dd, _ := filepath.Split(d) + if filepath.Clean(dd) == ro { + return true + } + } + return false + } + + for _, dir := range dirs { + if err := Mkdir(filepath.Join(tempDir, dir), 0777); err != nil { + t.Fatal(err) + } + } + for _, dir := range readonly { + d := filepath.Join(tempDir, dir) + if err := Chmod(d, 0555); err != nil { + t.Fatal(err) + } + + // Defer changing the mode back so that the deferred + // RemoveAll(tempDir) can succeed. + defer Chmod(d, 0777) + } + + err := RemoveAll(tempDir) + if err == nil { + t.Fatal("RemoveAll succeeded unexpectedly") + } + + // The error should be of type *PathError. + // see issue 30491 for details. + if pathErr, ok := err.(*PathError); ok { + want := filepath.Join(tempDir, "b", "y") + if pathErr.Path != want { + t.Errorf("RemoveAll(%q): err.Path=%q, want %q", tempDir, pathErr.Path, want) + } + } else { + t.Errorf("RemoveAll(%q): error has type %T, want *fs.PathError", tempDir, err) + } + + for _, dir := range dirs { + _, err := Stat(filepath.Join(tempDir, dir)) + if inReadonly(dir) { + if err != nil { + t.Errorf("file %q was deleted but should still exist", dir) + } + } else { + if err == nil { + t.Errorf("file %q still exists but should have been deleted", dir) + } + } + } +} + +func TestRemoveUnreadableDir(t *testing.T) { + switch runtime.GOOS { + case "js": + t.Skipf("skipping test on %s", runtime.GOOS) + } + + if Getuid() == 0 { + t.Skip("skipping test when running as root") + } + + t.Parallel() + + tempDir := t.TempDir() + target := filepath.Join(tempDir, "d0", "d1", "d2") + if err := MkdirAll(target, 0755); err != nil { + t.Fatal(err) + } + if err := Chmod(target, 0300); err != nil { + t.Fatal(err) + } + if err := RemoveAll(filepath.Join(tempDir, "d0")); err != nil { + t.Fatal(err) + } +} + +// Issue 29921 +func TestRemoveAllWithMoreErrorThanReqSize(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + t.Parallel() + + tmpDir := t.TempDir() + path := filepath.Join(tmpDir, "_TestRemoveAllWithMoreErrorThanReqSize_") + + // Make directory with 1025 read-only files. + if err := MkdirAll(path, 0777); err != nil { + t.Fatalf("MkdirAll %q: %s", path, err) + } + for i := 0; i < 1025; i++ { + fpath := filepath.Join(path, fmt.Sprintf("file%d", i)) + fd, err := Create(fpath) + if err != nil { + t.Fatalf("create %q: %s", fpath, err) + } + fd.Close() + } + + // Make the parent directory read-only. On some platforms, this is what + // prevents Remove from removing the files within that directory. + if err := Chmod(path, 0555); err != nil { + t.Fatal(err) + } + defer Chmod(path, 0755) + + // This call should not hang, even on a platform that disallows file deletion + // from read-only directories. + err := RemoveAll(path) + + if Getuid() == 0 { + // On many platforms, root can remove files from read-only directories. 
+ return + } + if err == nil { + if runtime.GOOS == "windows" || runtime.GOOS == "wasip1" { + // Marking a directory as read-only in Windows does not prevent the RemoveAll + // from creating or removing files within it. + // + // For wasip1, there is no support for file permissions so we cannot prevent + // RemoveAll from removing the files. + return + } + t.Fatal("RemoveAll() = nil; want error") + } + + dir, err := Open(path) + if err != nil { + t.Fatal(err) + } + defer dir.Close() + + names, _ := dir.Readdirnames(1025) + if len(names) < 1025 { + t.Fatalf("RemoveAll() unexpectedly removed %d read-only files from that directory", 1025-len(names)) + } +} + +func TestRemoveAllNoFcntl(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + + const env = "GO_TEST_REMOVE_ALL_NO_FCNTL" + if dir := Getenv(env); dir != "" { + if err := RemoveAll(dir); err != nil { + t.Fatal(err) + } + return + } + + // Only test on Linux so that we can assume we have strace. + // The code is OS-independent so if it passes on Linux + // it should pass on other Unix systems. + if runtime.GOOS != "linux" { + t.Skipf("skipping test on %s", runtime.GOOS) + } + if _, err := Stat("/bin/strace"); err != nil { + t.Skipf("skipping test because /bin/strace not found: %v", err) + } + me, err := Executable() + if err != nil { + t.Skipf("skipping because Executable failed: %v", err) + } + + // Create 100 directories. + // The test is that we can remove them without calling fcntl + // on each one. + tmpdir := t.TempDir() + subdir := filepath.Join(tmpdir, "subdir") + if err := Mkdir(subdir, 0o755); err != nil { + t.Fatal(err) + } + for i := 0; i < 100; i++ { + subsubdir := filepath.Join(subdir, strconv.Itoa(i)) + if err := Mkdir(filepath.Join(subdir, strconv.Itoa(i)), 0o755); err != nil { + t.Fatal(err) + } + if err := WriteFile(filepath.Join(subsubdir, "file"), nil, 0o644); err != nil { + t.Fatal(err) + } + } + + cmd := testenv.Command(t, "/bin/strace", "-f", "-e", "fcntl", me, "-test.run=^TestRemoveAllNoFcntl$") + cmd = testenv.CleanCmdEnv(cmd) + cmd.Env = append(cmd.Env, env+"="+subdir) + out, err := cmd.CombinedOutput() + if len(out) > 0 { + t.Logf("%s", out) + } + if err != nil { + t.Fatal(err) + } + + if got := bytes.Count(out, []byte("fcntl")); got >= 100 { + t.Errorf("found %d fcntl calls, want < 100", got) + } +} diff --git a/platform/dbops/binaries/go/go/src/os/stat.go b/platform/dbops/binaries/go/go/src/os/stat.go new file mode 100644 index 0000000000000000000000000000000000000000..11d9efa4573f02d935e27b23466a35b13f8f6729 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import "internal/testlog" + +// Stat returns a FileInfo describing the named file. +// If there is an error, it will be of type *PathError. +func Stat(name string) (FileInfo, error) { + testlog.Stat(name) + return statNolog(name) +} + +// Lstat returns a FileInfo describing the named file. +// If the file is a symbolic link, the returned FileInfo +// describes the symbolic link. Lstat makes no attempt to follow the link. +// If there is an error, it will be of type *PathError. +// +// On Windows, if the file is a reparse point that is a surrogate for another +// named entity (such as a symbolic link or mounted folder), the returned +// FileInfo describes the reparse point, and makes no attempt to resolve it. 
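The doc comments above draw the one distinction that matters between the two functions: Stat follows a symbolic link and describes its target, while Lstat describes the link itself. A small demonstration (statVsLstat and its arguments are hypothetical; error handling kept minimal):

package sketch

import (
	"fmt"
	"os"
)

// statVsLstat makes link point at target, then shows how Stat and
// Lstat disagree about what link is.
func statVsLstat(target, link string) error {
	if err := os.Symlink(target, link); err != nil {
		return err
	}
	si, err := os.Stat(link) // follows the link: describes target
	if err != nil {
		return err
	}
	li, err := os.Lstat(link) // does not follow: describes the link
	if err != nil {
		return err
	}
	fmt.Println(si.Mode()&os.ModeSymlink != 0) // false
	fmt.Println(li.Mode()&os.ModeSymlink != 0) // true
	return nil
}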
+func Lstat(name string) (FileInfo, error) { + testlog.Stat(name) + return lstatNolog(name) +} diff --git a/platform/dbops/binaries/go/go/src/os/stat_aix.go b/platform/dbops/binaries/go/go/src/os/stat_aix.go new file mode 100644 index 0000000000000000000000000000000000000000..a37c9fdae41aee2933450f47519d4c01f22fca2c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat_aix.go @@ -0,0 +1,51 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "time" +) + +func fillFileStatFromSys(fs *fileStat, name string) { + fs.name = basename(name) + fs.size = int64(fs.sys.Size) + fs.modTime = stTimespecToTime(fs.sys.Mtim) + fs.mode = FileMode(fs.sys.Mode & 0777) + switch fs.sys.Mode & syscall.S_IFMT { + case syscall.S_IFBLK: + fs.mode |= ModeDevice + case syscall.S_IFCHR: + fs.mode |= ModeDevice | ModeCharDevice + case syscall.S_IFDIR: + fs.mode |= ModeDir + case syscall.S_IFIFO: + fs.mode |= ModeNamedPipe + case syscall.S_IFLNK: + fs.mode |= ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fs.mode |= ModeSocket + } + if fs.sys.Mode&syscall.S_ISGID != 0 { + fs.mode |= ModeSetgid + } + if fs.sys.Mode&syscall.S_ISUID != 0 { + fs.mode |= ModeSetuid + } + if fs.sys.Mode&syscall.S_ISVTX != 0 { + fs.mode |= ModeSticky + } +} + +func stTimespecToTime(ts syscall.StTimespec_t) time.Time { + return time.Unix(int64(ts.Sec), int64(ts.Nsec)) +} + +// For testing. +func atime(fi FileInfo) time.Time { + return stTimespecToTime(fi.Sys().(*syscall.Stat_t).Atim) +} diff --git a/platform/dbops/binaries/go/go/src/os/stat_darwin.go b/platform/dbops/binaries/go/go/src/os/stat_darwin.go new file mode 100644 index 0000000000000000000000000000000000000000..b92ffd4a0a6ff4a0b51e048832da7a7d998579f4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat_darwin.go @@ -0,0 +1,47 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "time" +) + +func fillFileStatFromSys(fs *fileStat, name string) { + fs.name = basename(name) + fs.size = fs.sys.Size + fs.modTime = time.Unix(fs.sys.Mtimespec.Unix()) + fs.mode = FileMode(fs.sys.Mode & 0777) + switch fs.sys.Mode & syscall.S_IFMT { + case syscall.S_IFBLK, syscall.S_IFWHT: + fs.mode |= ModeDevice + case syscall.S_IFCHR: + fs.mode |= ModeDevice | ModeCharDevice + case syscall.S_IFDIR: + fs.mode |= ModeDir + case syscall.S_IFIFO: + fs.mode |= ModeNamedPipe + case syscall.S_IFLNK: + fs.mode |= ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fs.mode |= ModeSocket + } + if fs.sys.Mode&syscall.S_ISGID != 0 { + fs.mode |= ModeSetgid + } + if fs.sys.Mode&syscall.S_ISUID != 0 { + fs.mode |= ModeSetuid + } + if fs.sys.Mode&syscall.S_ISVTX != 0 { + fs.mode |= ModeSticky + } +} + +// For testing. +func atime(fi FileInfo) time.Time { + return time.Unix(fi.Sys().(*syscall.Stat_t).Atimespec.Unix()) +} diff --git a/platform/dbops/binaries/go/go/src/os/stat_dragonfly.go b/platform/dbops/binaries/go/go/src/os/stat_dragonfly.go new file mode 100644 index 0000000000000000000000000000000000000000..316c26c7cab7af1e10ed3be1ace47d51d01c2b9e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat_dragonfly.go @@ -0,0 +1,47 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "time" +) + +func fillFileStatFromSys(fs *fileStat, name string) { + fs.name = basename(name) + fs.size = fs.sys.Size + fs.modTime = time.Unix(fs.sys.Mtim.Unix()) + fs.mode = FileMode(fs.sys.Mode & 0777) + switch fs.sys.Mode & syscall.S_IFMT { + case syscall.S_IFBLK: + fs.mode |= ModeDevice + case syscall.S_IFCHR: + fs.mode |= ModeDevice | ModeCharDevice + case syscall.S_IFDIR: + fs.mode |= ModeDir + case syscall.S_IFIFO: + fs.mode |= ModeNamedPipe + case syscall.S_IFLNK: + fs.mode |= ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fs.mode |= ModeSocket + } + if fs.sys.Mode&syscall.S_ISGID != 0 { + fs.mode |= ModeSetgid + } + if fs.sys.Mode&syscall.S_ISUID != 0 { + fs.mode |= ModeSetuid + } + if fs.sys.Mode&syscall.S_ISVTX != 0 { + fs.mode |= ModeSticky + } +} + +// For testing. +func atime(fi FileInfo) time.Time { + return time.Unix(fi.Sys().(*syscall.Stat_t).Atim.Unix()) +} diff --git a/platform/dbops/binaries/go/go/src/os/stat_freebsd.go b/platform/dbops/binaries/go/go/src/os/stat_freebsd.go new file mode 100644 index 0000000000000000000000000000000000000000..919ee44dd6b503b2c39690251b7a5e597ebc354c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat_freebsd.go @@ -0,0 +1,47 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "time" +) + +func fillFileStatFromSys(fs *fileStat, name string) { + fs.name = basename(name) + fs.size = fs.sys.Size + fs.modTime = time.Unix(fs.sys.Mtimespec.Unix()) + fs.mode = FileMode(fs.sys.Mode & 0777) + switch fs.sys.Mode & syscall.S_IFMT { + case syscall.S_IFBLK: + fs.mode |= ModeDevice + case syscall.S_IFCHR: + fs.mode |= ModeDevice | ModeCharDevice + case syscall.S_IFDIR: + fs.mode |= ModeDir + case syscall.S_IFIFO: + fs.mode |= ModeNamedPipe + case syscall.S_IFLNK: + fs.mode |= ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fs.mode |= ModeSocket + } + if fs.sys.Mode&syscall.S_ISGID != 0 { + fs.mode |= ModeSetgid + } + if fs.sys.Mode&syscall.S_ISUID != 0 { + fs.mode |= ModeSetuid + } + if fs.sys.Mode&syscall.S_ISVTX != 0 { + fs.mode |= ModeSticky + } +} + +// For testing. +func atime(fi FileInfo) time.Time { + return time.Unix(fi.Sys().(*syscall.Stat_t).Atimespec.Unix()) +} diff --git a/platform/dbops/binaries/go/go/src/os/stat_js.go b/platform/dbops/binaries/go/go/src/os/stat_js.go new file mode 100644 index 0000000000000000000000000000000000000000..a137172e66de6f019089ba974371bdc67ab523ac --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat_js.go @@ -0,0 +1,50 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
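Every one of these stat_*.go files repeats the same translation from the POSIX mode word to an os.FileMode: the low nine permission bits pass through, the S_IFMT type nibble becomes a switch, and setuid/setgid/sticky map bit by bit. Condensed into one sketch, with the usual POSIX constant values written out as plain numbers (an assumption here, since the real code takes them from each platform's syscall package):

package sketch

import "io/fs"

// Common POSIX values for the file-type field of st_mode.
const (
	sIFMT  = 0xf000 // mask for the type bits
	sIFDIR = 0x4000
	sIFLNK = 0xa000
	sIFREG = 0x8000
)

// modeFromUnix mirrors the shape of fillFileStatFromSys above for
// the three most common file types.
func modeFromUnix(m uint32) fs.FileMode {
	mode := fs.FileMode(m & 0o777)
	switch m & sIFMT {
	case sIFDIR:
		mode |= fs.ModeDir
	case sIFLNK:
		mode |= fs.ModeSymlink
	case sIFREG:
		// regular file: no type bit set
	}
	if m&0o4000 != 0 { // S_ISUID
		mode |= fs.ModeSetuid
	}
	if m&0o2000 != 0 { // S_ISGID
		mode |= fs.ModeSetgid
	}
	if m&0o1000 != 0 { // S_ISVTX
		mode |= fs.ModeSticky
	}
	return mode
}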
+ +//go:build js && wasm + +package os + +import ( + "syscall" + "time" +) + +func fillFileStatFromSys(fs *fileStat, name string) { + fs.name = basename(name) + fs.size = fs.sys.Size + fs.modTime = time.Unix(fs.sys.Mtime, fs.sys.MtimeNsec) + fs.mode = FileMode(fs.sys.Mode & 0777) + switch fs.sys.Mode & syscall.S_IFMT { + case syscall.S_IFBLK: + fs.mode |= ModeDevice + case syscall.S_IFCHR: + fs.mode |= ModeDevice | ModeCharDevice + case syscall.S_IFDIR: + fs.mode |= ModeDir + case syscall.S_IFIFO: + fs.mode |= ModeNamedPipe + case syscall.S_IFLNK: + fs.mode |= ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fs.mode |= ModeSocket + } + if fs.sys.Mode&syscall.S_ISGID != 0 { + fs.mode |= ModeSetgid + } + if fs.sys.Mode&syscall.S_ISUID != 0 { + fs.mode |= ModeSetuid + } + if fs.sys.Mode&syscall.S_ISVTX != 0 { + fs.mode |= ModeSticky + } +} + +// For testing. +func atime(fi FileInfo) time.Time { + st := fi.Sys().(*syscall.Stat_t) + return time.Unix(st.Atime, st.AtimeNsec) +} diff --git a/platform/dbops/binaries/go/go/src/os/stat_linux.go b/platform/dbops/binaries/go/go/src/os/stat_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..316c26c7cab7af1e10ed3be1ace47d51d01c2b9e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat_linux.go @@ -0,0 +1,47 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "time" +) + +func fillFileStatFromSys(fs *fileStat, name string) { + fs.name = basename(name) + fs.size = fs.sys.Size + fs.modTime = time.Unix(fs.sys.Mtim.Unix()) + fs.mode = FileMode(fs.sys.Mode & 0777) + switch fs.sys.Mode & syscall.S_IFMT { + case syscall.S_IFBLK: + fs.mode |= ModeDevice + case syscall.S_IFCHR: + fs.mode |= ModeDevice | ModeCharDevice + case syscall.S_IFDIR: + fs.mode |= ModeDir + case syscall.S_IFIFO: + fs.mode |= ModeNamedPipe + case syscall.S_IFLNK: + fs.mode |= ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fs.mode |= ModeSocket + } + if fs.sys.Mode&syscall.S_ISGID != 0 { + fs.mode |= ModeSetgid + } + if fs.sys.Mode&syscall.S_ISUID != 0 { + fs.mode |= ModeSetuid + } + if fs.sys.Mode&syscall.S_ISVTX != 0 { + fs.mode |= ModeSticky + } +} + +// For testing. +func atime(fi FileInfo) time.Time { + return time.Unix(fi.Sys().(*syscall.Stat_t).Atim.Unix()) +} diff --git a/platform/dbops/binaries/go/go/src/os/stat_netbsd.go b/platform/dbops/binaries/go/go/src/os/stat_netbsd.go new file mode 100644 index 0000000000000000000000000000000000000000..919ee44dd6b503b2c39690251b7a5e597ebc354c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat_netbsd.go @@ -0,0 +1,47 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package os + +import ( + "syscall" + "time" +) + +func fillFileStatFromSys(fs *fileStat, name string) { + fs.name = basename(name) + fs.size = fs.sys.Size + fs.modTime = time.Unix(fs.sys.Mtimespec.Unix()) + fs.mode = FileMode(fs.sys.Mode & 0777) + switch fs.sys.Mode & syscall.S_IFMT { + case syscall.S_IFBLK: + fs.mode |= ModeDevice + case syscall.S_IFCHR: + fs.mode |= ModeDevice | ModeCharDevice + case syscall.S_IFDIR: + fs.mode |= ModeDir + case syscall.S_IFIFO: + fs.mode |= ModeNamedPipe + case syscall.S_IFLNK: + fs.mode |= ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fs.mode |= ModeSocket + } + if fs.sys.Mode&syscall.S_ISGID != 0 { + fs.mode |= ModeSetgid + } + if fs.sys.Mode&syscall.S_ISUID != 0 { + fs.mode |= ModeSetuid + } + if fs.sys.Mode&syscall.S_ISVTX != 0 { + fs.mode |= ModeSticky + } +} + +// For testing. +func atime(fi FileInfo) time.Time { + return time.Unix(fi.Sys().(*syscall.Stat_t).Atimespec.Unix()) +} diff --git a/platform/dbops/binaries/go/go/src/os/stat_openbsd.go b/platform/dbops/binaries/go/go/src/os/stat_openbsd.go new file mode 100644 index 0000000000000000000000000000000000000000..316c26c7cab7af1e10ed3be1ace47d51d01c2b9e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat_openbsd.go @@ -0,0 +1,47 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "time" +) + +func fillFileStatFromSys(fs *fileStat, name string) { + fs.name = basename(name) + fs.size = fs.sys.Size + fs.modTime = time.Unix(fs.sys.Mtim.Unix()) + fs.mode = FileMode(fs.sys.Mode & 0777) + switch fs.sys.Mode & syscall.S_IFMT { + case syscall.S_IFBLK: + fs.mode |= ModeDevice + case syscall.S_IFCHR: + fs.mode |= ModeDevice | ModeCharDevice + case syscall.S_IFDIR: + fs.mode |= ModeDir + case syscall.S_IFIFO: + fs.mode |= ModeNamedPipe + case syscall.S_IFLNK: + fs.mode |= ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fs.mode |= ModeSocket + } + if fs.sys.Mode&syscall.S_ISGID != 0 { + fs.mode |= ModeSetgid + } + if fs.sys.Mode&syscall.S_ISUID != 0 { + fs.mode |= ModeSetuid + } + if fs.sys.Mode&syscall.S_ISVTX != 0 { + fs.mode |= ModeSticky + } +} + +// For testing. +func atime(fi FileInfo) time.Time { + return time.Unix(fi.Sys().(*syscall.Stat_t).Atim.Unix()) +} diff --git a/platform/dbops/binaries/go/go/src/os/stat_plan9.go b/platform/dbops/binaries/go/go/src/os/stat_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..a5e9901379aeac52c68c94682c5f903945bac0e2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat_plan9.go @@ -0,0 +1,114 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "time" +) + +const bitSize16 = 2 + +func fileInfoFromStat(d *syscall.Dir) *fileStat { + fs := &fileStat{ + name: d.Name, + size: d.Length, + modTime: time.Unix(int64(d.Mtime), 0), + sys: d, + } + fs.mode = FileMode(d.Mode & 0777) + if d.Mode&syscall.DMDIR != 0 { + fs.mode |= ModeDir + } + if d.Mode&syscall.DMAPPEND != 0 { + fs.mode |= ModeAppend + } + if d.Mode&syscall.DMEXCL != 0 { + fs.mode |= ModeExclusive + } + if d.Mode&syscall.DMTMP != 0 { + fs.mode |= ModeTemporary + } + // Consider all files not served by #M as device files. 
+ if d.Type != 'M' { + fs.mode |= ModeDevice + } + // Consider all files served by #c as character device files. + if d.Type == 'c' { + fs.mode |= ModeCharDevice + } + return fs +} + +// arg is an open *File or a path string. +func dirstat(arg any) (*syscall.Dir, error) { + var name string + var err error + + size := syscall.STATFIXLEN + 16*4 + + for i := 0; i < 2; i++ { + buf := make([]byte, bitSize16+size) + + var n int + switch a := arg.(type) { + case *File: + name = a.name + if err := a.incref("fstat"); err != nil { + return nil, err + } + n, err = syscall.Fstat(a.fd, buf) + a.decref() + case string: + name = a + n, err = syscall.Stat(a, buf) + default: + panic("phase error in dirstat") + } + + if n < bitSize16 { + return nil, &PathError{Op: "stat", Path: name, Err: err} + } + + // Pull the real size out of the stat message. + size = int(uint16(buf[0]) | uint16(buf[1])<<8) + + // If the stat message is larger than our buffer we will + // go around the loop and allocate one that is big enough. + if size <= n { + d, err := syscall.UnmarshalDir(buf[:n]) + if err != nil { + return nil, &PathError{Op: "stat", Path: name, Err: err} + } + return d, nil + } + + } + + if err == nil { + err = syscall.ErrBadStat + } + + return nil, &PathError{Op: "stat", Path: name, Err: err} +} + +// statNolog implements Stat for Plan 9. +func statNolog(name string) (FileInfo, error) { + d, err := dirstat(name) + if err != nil { + return nil, err + } + return fileInfoFromStat(d), nil +} + +// lstatNolog implements Lstat for Plan 9. +func lstatNolog(name string) (FileInfo, error) { + return statNolog(name) +} + +// For testing. +func atime(fi FileInfo) time.Time { + return time.Unix(int64(fi.Sys().(*syscall.Dir).Atime), 0) +} diff --git a/platform/dbops/binaries/go/go/src/os/stat_solaris.go b/platform/dbops/binaries/go/go/src/os/stat_solaris.go new file mode 100644 index 0000000000000000000000000000000000000000..4e00ecb075f3a5ac157ae7503d3cc707a1b4147d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat_solaris.go @@ -0,0 +1,57 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "time" +) + +// These constants aren't in the syscall package, which is frozen. +// Values taken from golang.org/x/sys/unix. +const ( + _S_IFNAM = 0x5000 + _S_IFDOOR = 0xd000 + _S_IFPORT = 0xe000 +) + +func fillFileStatFromSys(fs *fileStat, name string) { + fs.name = basename(name) + fs.size = fs.sys.Size + fs.modTime = time.Unix(fs.sys.Mtim.Unix()) + fs.mode = FileMode(fs.sys.Mode & 0777) + switch fs.sys.Mode & syscall.S_IFMT { + case syscall.S_IFBLK: + fs.mode |= ModeDevice + case syscall.S_IFCHR: + fs.mode |= ModeDevice | ModeCharDevice + case syscall.S_IFDIR: + fs.mode |= ModeDir + case syscall.S_IFIFO: + fs.mode |= ModeNamedPipe + case syscall.S_IFLNK: + fs.mode |= ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fs.mode |= ModeSocket + case _S_IFNAM, _S_IFDOOR, _S_IFPORT: + fs.mode |= ModeIrregular + } + if fs.sys.Mode&syscall.S_ISGID != 0 { + fs.mode |= ModeSetgid + } + if fs.sys.Mode&syscall.S_ISUID != 0 { + fs.mode |= ModeSetuid + } + if fs.sys.Mode&syscall.S_ISVTX != 0 { + fs.mode |= ModeSticky + } +} + +// For testing. 
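dirstat above copes with Plan 9's variable-length stat messages by a two-pass pattern: call with a guessed buffer, read the message's true size out of its 16-bit little-endian length prefix, and go around once more with a buffer that large. The pattern separated from the Plan 9 details (readMsg stands in for the Fstat/Stat call; all names here are illustrative):

package sketch

import "errors"

// growingRead calls readMsg with an ever-larger buffer until the
// length-prefixed message fits, giving up after a second try just
// as dirstat does.
func growingRead(readMsg func(buf []byte) (int, error)) ([]byte, error) {
	size := 128 // initial guess
	for i := 0; i < 2; i++ {
		buf := make([]byte, 2+size)
		n, err := readMsg(buf)
		if n < 2 {
			return nil, err
		}
		// The first two bytes carry the message's own size, little-endian.
		size = int(uint16(buf[0]) | uint16(buf[1])<<8)
		if size <= n {
			return buf[:n], nil
		}
	}
	return nil, errors.New("message did not fit after resizing")
}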
+func atime(fi FileInfo) time.Time { + return time.Unix(fi.Sys().(*syscall.Stat_t).Atim.Unix()) +} diff --git a/platform/dbops/binaries/go/go/src/os/stat_test.go b/platform/dbops/binaries/go/go/src/os/stat_test.go new file mode 100644 index 0000000000000000000000000000000000000000..96019699aa7bb59c3618b3b5a1fcf6e8a699f598 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat_test.go @@ -0,0 +1,296 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "internal/testenv" + "io/fs" + "os" + "path/filepath" + "testing" +) + +// testStatAndLstat verifies that all os.Stat, os.Lstat os.File.Stat and os.Readdir work. +func testStatAndLstat(t *testing.T, path string, isLink bool, statCheck, lstatCheck func(*testing.T, string, fs.FileInfo)) { + // test os.Stat + sfi, err := os.Stat(path) + if err != nil { + t.Error(err) + return + } + statCheck(t, path, sfi) + + // test os.Lstat + lsfi, err := os.Lstat(path) + if err != nil { + t.Error(err) + return + } + lstatCheck(t, path, lsfi) + + if isLink { + if os.SameFile(sfi, lsfi) { + t.Errorf("stat and lstat of %q should not be the same", path) + } + } else { + if !os.SameFile(sfi, lsfi) { + t.Errorf("stat and lstat of %q should be the same", path) + } + } + + // test os.File.Stat + f, err := os.Open(path) + if err != nil { + t.Error(err) + return + } + defer f.Close() + + sfi2, err := f.Stat() + if err != nil { + t.Error(err) + return + } + statCheck(t, path, sfi2) + + if !os.SameFile(sfi, sfi2) { + t.Errorf("stat of open %q file and stat of %q should be the same", path, path) + } + + if isLink { + if os.SameFile(sfi2, lsfi) { + t.Errorf("stat of opened %q file and lstat of %q should not be the same", path, path) + } + } else { + if !os.SameFile(sfi2, lsfi) { + t.Errorf("stat of opened %q file and lstat of %q should be the same", path, path) + } + } + + // test fs.FileInfo returned by os.Readdir + if len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) { + // skip os.Readdir test of directories with slash at the end + return + } + parentdir := filepath.Dir(path) + parent, err := os.Open(parentdir) + if err != nil { + t.Error(err) + return + } + defer parent.Close() + + fis, err := parent.Readdir(-1) + if err != nil { + t.Error(err) + return + } + var lsfi2 fs.FileInfo + base := filepath.Base(path) + for _, fi2 := range fis { + if fi2.Name() == base { + lsfi2 = fi2 + break + } + } + if lsfi2 == nil { + t.Errorf("failed to find %q in its parent", path) + return + } + lstatCheck(t, path, lsfi2) + + if !os.SameFile(lsfi, lsfi2) { + t.Errorf("lstat of %q file in %q directory and %q should be the same", lsfi2.Name(), parentdir, path) + } +} + +// testIsDir verifies that fi refers to directory. +func testIsDir(t *testing.T, path string, fi fs.FileInfo) { + t.Helper() + if !fi.IsDir() { + t.Errorf("%q should be a directory", path) + } + if fi.Mode()&fs.ModeSymlink != 0 { + t.Errorf("%q should not be a symlink", path) + } +} + +// testIsSymlink verifies that fi refers to symlink. +func testIsSymlink(t *testing.T, path string, fi fs.FileInfo) { + t.Helper() + if fi.IsDir() { + t.Errorf("%q should not be a directory", path) + } + if fi.Mode()&fs.ModeSymlink == 0 { + t.Errorf("%q should be a symlink", path) + } +} + +// testIsFile verifies that fi refers to file. 
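testStatAndLstat above leans on os.SameFile, which compares file identity (device and inode on Unix, volume and file index on Windows) rather than paths or contents. For a symlink, Stat of the link and Stat of its target resolve to the same file, while Lstat of the link does not; roughly (sameFileDemo is a hypothetical name):

package sketch

import (
	"fmt"
	"os"
)

// sameFileDemo shows the identity comparisons the tests above rely
// on. path must already exist; link is created here.
func sameFileDemo(path, link string) error {
	if err := os.Symlink(path, link); err != nil {
		return err
	}
	pfi, err := os.Stat(path)
	if err != nil {
		return err
	}
	sfi, err := os.Stat(link) // resolves to path
	if err != nil {
		return err
	}
	lfi, err := os.Lstat(link) // the link itself
	if err != nil {
		return err
	}
	fmt.Println(os.SameFile(pfi, sfi)) // true
	fmt.Println(os.SameFile(pfi, lfi)) // false
	return nil
}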
+func testIsFile(t *testing.T, path string, fi fs.FileInfo) { + t.Helper() + if fi.IsDir() { + t.Errorf("%q should not be a directory", path) + } + if fi.Mode()&fs.ModeSymlink != 0 { + t.Errorf("%q should not be a symlink", path) + } +} + +func testDirStats(t *testing.T, path string) { + testStatAndLstat(t, path, false, testIsDir, testIsDir) +} + +func testFileStats(t *testing.T, path string) { + testStatAndLstat(t, path, false, testIsFile, testIsFile) +} + +func testSymlinkStats(t *testing.T, path string, isdir bool) { + if isdir { + testStatAndLstat(t, path, true, testIsDir, testIsSymlink) + } else { + testStatAndLstat(t, path, true, testIsFile, testIsSymlink) + } +} + +func testSymlinkSameFile(t *testing.T, path, link string) { + pathfi, err := os.Stat(path) + if err != nil { + t.Error(err) + return + } + + linkfi, err := os.Stat(link) + if err != nil { + t.Error(err) + return + } + if !os.SameFile(pathfi, linkfi) { + t.Errorf("os.Stat(%q) and os.Stat(%q) are not the same file", path, link) + } + + linkfi, err = os.Lstat(link) + if err != nil { + t.Error(err) + return + } + if os.SameFile(pathfi, linkfi) { + t.Errorf("os.Stat(%q) and os.Lstat(%q) are the same file", path, link) + } +} + +func testSymlinkSameFileOpen(t *testing.T, link string) { + f, err := os.Open(link) + if err != nil { + t.Error(err) + return + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + t.Error(err) + return + } + + fi2, err := os.Stat(link) + if err != nil { + t.Error(err) + return + } + + if !os.SameFile(fi, fi2) { + t.Errorf("os.Open(%q).Stat() and os.Stat(%q) are not the same file", link, link) + } +} + +func TestDirAndSymlinkStats(t *testing.T) { + testenv.MustHaveSymlink(t) + t.Parallel() + + tmpdir := t.TempDir() + dir := filepath.Join(tmpdir, "dir") + if err := os.Mkdir(dir, 0777); err != nil { + t.Fatal(err) + } + testDirStats(t, dir) + + dirlink := filepath.Join(tmpdir, "link") + if err := os.Symlink(dir, dirlink); err != nil { + t.Fatal(err) + } + testSymlinkStats(t, dirlink, true) + testSymlinkSameFile(t, dir, dirlink) + testSymlinkSameFileOpen(t, dirlink) + + linklink := filepath.Join(tmpdir, "linklink") + if err := os.Symlink(dirlink, linklink); err != nil { + t.Fatal(err) + } + testSymlinkStats(t, linklink, true) + testSymlinkSameFile(t, dir, linklink) + testSymlinkSameFileOpen(t, linklink) +} + +func TestFileAndSymlinkStats(t *testing.T) { + testenv.MustHaveSymlink(t) + t.Parallel() + + tmpdir := t.TempDir() + file := filepath.Join(tmpdir, "file") + if err := os.WriteFile(file, []byte(""), 0644); err != nil { + t.Fatal(err) + } + testFileStats(t, file) + + filelink := filepath.Join(tmpdir, "link") + if err := os.Symlink(file, filelink); err != nil { + t.Fatal(err) + } + testSymlinkStats(t, filelink, false) + testSymlinkSameFile(t, file, filelink) + testSymlinkSameFileOpen(t, filelink) + + linklink := filepath.Join(tmpdir, "linklink") + if err := os.Symlink(filelink, linklink); err != nil { + t.Fatal(err) + } + testSymlinkStats(t, linklink, false) + testSymlinkSameFile(t, file, linklink) + testSymlinkSameFileOpen(t, linklink) +} + +// see issue 27225 for details +func TestSymlinkWithTrailingSlash(t *testing.T) { + testenv.MustHaveSymlink(t) + t.Parallel() + + tmpdir := t.TempDir() + dir := filepath.Join(tmpdir, "dir") + if err := os.Mkdir(dir, 0777); err != nil { + t.Fatal(err) + } + dirlink := filepath.Join(tmpdir, "link") + if err := os.Symlink(dir, dirlink); err != nil { + t.Fatal(err) + } + dirlinkWithSlash := dirlink + string(os.PathSeparator) + + testDirStats(t, 
dirlinkWithSlash) + + fi1, err := os.Stat(dir) + if err != nil { + t.Error(err) + return + } + fi2, err := os.Stat(dirlinkWithSlash) + if err != nil { + t.Error(err) + return + } + if !os.SameFile(fi1, fi2) { + t.Errorf("os.Stat(%q) and os.Stat(%q) are not the same file", dir, dirlinkWithSlash) + } +} diff --git a/platform/dbops/binaries/go/go/src/os/stat_unix.go b/platform/dbops/binaries/go/go/src/os/stat_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..431df33faef97d30f8d441817e1f1f00348535aa --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat_unix.go @@ -0,0 +1,52 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || (js && wasm) || wasip1 + +package os + +import ( + "syscall" +) + +// Stat returns the FileInfo structure describing file. +// If there is an error, it will be of type *PathError. +func (f *File) Stat() (FileInfo, error) { + if f == nil { + return nil, ErrInvalid + } + var fs fileStat + err := f.pfd.Fstat(&fs.sys) + if err != nil { + return nil, &PathError{Op: "stat", Path: f.name, Err: err} + } + fillFileStatFromSys(&fs, f.name) + return &fs, nil +} + +// statNolog stats a file with no test logging. +func statNolog(name string) (FileInfo, error) { + var fs fileStat + err := ignoringEINTR(func() error { + return syscall.Stat(name, &fs.sys) + }) + if err != nil { + return nil, &PathError{Op: "stat", Path: name, Err: err} + } + fillFileStatFromSys(&fs, name) + return &fs, nil +} + +// lstatNolog lstats a file with no test logging. +func lstatNolog(name string) (FileInfo, error) { + var fs fileStat + err := ignoringEINTR(func() error { + return syscall.Lstat(name, &fs.sys) + }) + if err != nil { + return nil, &PathError{Op: "lstat", Path: name, Err: err} + } + fillFileStatFromSys(&fs, name) + return &fs, nil +} diff --git a/platform/dbops/binaries/go/go/src/os/stat_wasip1.go b/platform/dbops/binaries/go/go/src/os/stat_wasip1.go new file mode 100644 index 0000000000000000000000000000000000000000..a4f0a20430b5ff5d7f8ca5c6b94e308bb5a5a722 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat_wasip1.go @@ -0,0 +1,40 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasip1 + +package os + +import ( + "syscall" + "time" +) + +func fillFileStatFromSys(fs *fileStat, name string) { + fs.name = basename(name) + fs.size = int64(fs.sys.Size) + fs.mode = FileMode(fs.sys.Mode) + fs.modTime = time.Unix(0, int64(fs.sys.Mtime)) + + switch fs.sys.Filetype { + case syscall.FILETYPE_BLOCK_DEVICE: + fs.mode |= ModeDevice + case syscall.FILETYPE_CHARACTER_DEVICE: + fs.mode |= ModeDevice | ModeCharDevice + case syscall.FILETYPE_DIRECTORY: + fs.mode |= ModeDir + case syscall.FILETYPE_SOCKET_DGRAM: + fs.mode |= ModeSocket + case syscall.FILETYPE_SOCKET_STREAM: + fs.mode |= ModeSocket + case syscall.FILETYPE_SYMBOLIC_LINK: + fs.mode |= ModeSymlink + } +} + +// For testing. 
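statNolog and lstatNolog above wrap the raw syscalls in ignoringEINTR, the package's retry helper for calls that are safe to restart after a signal interrupts them. The helper is defined elsewhere in the package, but its shape is essentially:

package sketch

import "syscall"

// ignoringEINTR repeats fn until it returns anything other than
// EINTR, which the kernel may return when a signal arrives while a
// slow system call is in flight.
func ignoringEINTR(fn func() error) error {
	for {
		err := fn()
		if err != syscall.EINTR {
			return err
		}
	}
}

Calls that produce a result besides the error, like Openat in openDirAt earlier, spell the same loop out by hand, because the helper only threads an error value through.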
+func atime(fi FileInfo) time.Time { + st := fi.Sys().(*syscall.Stat_t) + return time.Unix(0, int64(st.Atime)) +} diff --git a/platform/dbops/binaries/go/go/src/os/stat_windows.go b/platform/dbops/binaries/go/go/src/os/stat_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..668255f74adf3f0b82b8f846fbd27fedb01d7b51 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/stat_windows.go @@ -0,0 +1,136 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "internal/syscall/windows" + "syscall" + "unsafe" +) + +// Stat returns the FileInfo structure describing file. +// If there is an error, it will be of type *PathError. +func (file *File) Stat() (FileInfo, error) { + if file == nil { + return nil, ErrInvalid + } + return statHandle(file.name, file.pfd.Sysfd) +} + +// stat implements both Stat and Lstat of a file. +func stat(funcname, name string, followSurrogates bool) (FileInfo, error) { + if len(name) == 0 { + return nil, &PathError{Op: funcname, Path: name, Err: syscall.Errno(syscall.ERROR_PATH_NOT_FOUND)} + } + namep, err := syscall.UTF16PtrFromString(fixLongPath(name)) + if err != nil { + return nil, &PathError{Op: funcname, Path: name, Err: err} + } + + // Try GetFileAttributesEx first, because it is faster than CreateFile. + // See https://golang.org/issues/19922#issuecomment-300031421 for details. + var fa syscall.Win32FileAttributeData + err = syscall.GetFileAttributesEx(namep, syscall.GetFileExInfoStandard, (*byte)(unsafe.Pointer(&fa))) + + // GetFileAttributesEx fails with ERROR_SHARING_VIOLATION error for + // files like c:\pagefile.sys. Use FindFirstFile for such files. + if err == windows.ERROR_SHARING_VIOLATION { + var fd syscall.Win32finddata + sh, err := syscall.FindFirstFile(namep, &fd) + if err != nil { + return nil, &PathError{Op: "FindFirstFile", Path: name, Err: err} + } + syscall.FindClose(sh) + if fd.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { + // Not a surrogate for another named entity. FindFirstFile is good enough. + fs := newFileStatFromWin32finddata(&fd) + if err := fs.saveInfoFromPath(name); err != nil { + return nil, err + } + return fs, nil + } + } + + if err == nil && fa.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { + // Not a surrogate for another named entity, because it isn't any kind of reparse point. + // The information we got from GetFileAttributesEx is good enough for now. + fs := &fileStat{ + FileAttributes: fa.FileAttributes, + CreationTime: fa.CreationTime, + LastAccessTime: fa.LastAccessTime, + LastWriteTime: fa.LastWriteTime, + FileSizeHigh: fa.FileSizeHigh, + FileSizeLow: fa.FileSizeLow, + } + if err := fs.saveInfoFromPath(name); err != nil { + return nil, err + } + return fs, nil + } + + // Use CreateFile to determine whether the file is a name surrogate and, if so, + // save information about the link target. + // Set FILE_FLAG_BACKUP_SEMANTICS so that CreateFile will create the handle + // even if name refers to a directory. + h, err := syscall.CreateFile(namep, 0, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + if err != nil { + // Since CreateFile failed, we can't determine whether name refers to a + // name surrogate, or some other kind of reparse point. Since we can't return a + // FileInfo with a known-accurate Mode, we must return an error. 
+ return nil, &PathError{Op: "CreateFile", Path: name, Err: err} + } + + fi, err := statHandle(name, h) + syscall.CloseHandle(h) + if err == nil && followSurrogates && fi.(*fileStat).isReparseTagNameSurrogate() { + // To obtain information about the link target, we reopen the file without + // FILE_FLAG_OPEN_REPARSE_POINT and examine the resulting handle. + // (See https://devblogs.microsoft.com/oldnewthing/20100212-00/?p=14963.) + h, err = syscall.CreateFile(namep, 0, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + // name refers to a symlink, but we couldn't resolve the symlink target. + return nil, &PathError{Op: "CreateFile", Path: name, Err: err} + } + defer syscall.CloseHandle(h) + return statHandle(name, h) + } + return fi, err +} + +func statHandle(name string, h syscall.Handle) (FileInfo, error) { + ft, err := syscall.GetFileType(h) + if err != nil { + return nil, &PathError{Op: "GetFileType", Path: name, Err: err} + } + switch ft { + case syscall.FILE_TYPE_PIPE, syscall.FILE_TYPE_CHAR: + return &fileStat{name: basename(name), filetype: ft}, nil + } + fs, err := newFileStatFromGetFileInformationByHandle(name, h) + if err != nil { + return nil, err + } + fs.filetype = ft + return fs, err +} + +// statNolog implements Stat for Windows. +func statNolog(name string) (FileInfo, error) { + return stat("Stat", name, true) +} + +// lstatNolog implements Lstat for Windows. +func lstatNolog(name string) (FileInfo, error) { + followSurrogates := false + if name != "" && IsPathSeparator(name[len(name)-1]) { + // We try to implement POSIX semantics for Lstat path resolution + // (per https://pubs.opengroup.org/onlinepubs/9699919799.2013edition/basedefs/V1_chap04.html#tag_04_12): + // symlinks before the last separator in the path must be resolved. Since + // the last separator in this case follows the last path element, we should + // follow symlinks in the last path element. + followSurrogates = true + } + return stat("Lstat", name, followSurrogates) +} diff --git a/platform/dbops/binaries/go/go/src/os/sticky_bsd.go b/platform/dbops/binaries/go/go/src/os/sticky_bsd.go new file mode 100644 index 0000000000000000000000000000000000000000..a6d933950584c87cc7d45c90075c077630005c0f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/sticky_bsd.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || netbsd || openbsd || solaris || wasip1 + +package os + +// According to sticky(8), neither open(2) nor mkdir(2) will create +// a file with the sticky bit set. +const supportsCreateWithStickyBit = false diff --git a/platform/dbops/binaries/go/go/src/os/sticky_notbsd.go b/platform/dbops/binaries/go/go/src/os/sticky_notbsd.go new file mode 100644 index 0000000000000000000000000000000000000000..1d289b0fe317d99f1252fc979153d035f9b163d9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/sticky_notbsd.go @@ -0,0 +1,9 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
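sticky_bsd.go above and sticky_notbsd.go just below illustrate the package's pattern for platform capabilities: one untyped boolean constant, defined once per build-tag partition, so generic code can branch on it with zero runtime cost and the compiler can drop the dead arm. A hypothetical consumer of the sticky-bit constant (redefined locally so the sketch stands alone; the real package makes an equivalent decision inside OpenFile):

package sketch

import "os"

const supportsCreateWithStickyBit = false // per-platform in the real package

// createMaybeSticky creates name exclusively, applying the sticky
// bit with a follow-up Chmod on platforms where open/mkdir ignore it.
func createMaybeSticky(name string, perm os.FileMode) (*os.File, error) {
	if !supportsCreateWithStickyBit && perm&os.ModeSticky != 0 {
		f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm&^os.ModeSticky)
		if err != nil {
			return nil, err
		}
		return f, os.Chmod(name, perm)
	}
	return os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm)
}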
+ +//go:build !aix && !darwin && !dragonfly && !freebsd && !js && !netbsd && !openbsd && !solaris && !wasip1 + +package os + +const supportsCreateWithStickyBit = true diff --git a/platform/dbops/binaries/go/go/src/os/sys.go b/platform/dbops/binaries/go/go/src/os/sys.go new file mode 100644 index 0000000000000000000000000000000000000000..28b0f6bab028ecf3d86b6a024c87084be8415ba1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/sys.go @@ -0,0 +1,10 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +// Hostname returns the host name reported by the kernel. +func Hostname() (name string, err error) { + return hostname() +} diff --git a/platform/dbops/binaries/go/go/src/os/sys_aix.go b/platform/dbops/binaries/go/go/src/os/sys_aix.go new file mode 100644 index 0000000000000000000000000000000000000000..53a40f26772fcc8d6242ffb81cc4151e4639b342 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/sys_aix.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import "syscall" + +// gethostname syscall cannot be used because it also returns the domain. +// Therefore, hostname is retrieve with uname syscall and the Nodename field. + +func hostname() (name string, err error) { + var u syscall.Utsname + if errno := syscall.Uname(&u); errno != nil { + return "", NewSyscallError("uname", errno) + } + b := make([]byte, len(u.Nodename)) + i := 0 + for ; i < len(u.Nodename); i++ { + if u.Nodename[i] == 0 { + break + } + b[i] = byte(u.Nodename[i]) + } + return string(b[:i]), nil +} diff --git a/platform/dbops/binaries/go/go/src/os/sys_bsd.go b/platform/dbops/binaries/go/go/src/os/sys_bsd.go new file mode 100644 index 0000000000000000000000000000000000000000..63120fb9b4ddda9bf3912163bde5bf93697941b2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/sys_bsd.go @@ -0,0 +1,17 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || (js && wasm) || netbsd || openbsd || wasip1 + +package os + +import "syscall" + +func hostname() (name string, err error) { + name, err = syscall.Sysctl("kern.hostname") + if err != nil { + return "", NewSyscallError("sysctl kern.hostname", err) + } + return name, nil +} diff --git a/platform/dbops/binaries/go/go/src/os/sys_js.go b/platform/dbops/binaries/go/go/src/os/sys_js.go new file mode 100644 index 0000000000000000000000000000000000000000..4fd0e2d7c7dd128fc2a07efa39b2c9651a52afe2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/sys_js.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build js && wasm + +package os + +// supportsCloseOnExec reports whether the platform supports the +// O_CLOEXEC flag. +const supportsCloseOnExec = false diff --git a/platform/dbops/binaries/go/go/src/os/sys_linux.go b/platform/dbops/binaries/go/go/src/os/sys_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..36a8a244555aa71e41978c2778190735c4bfca84 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/sys_linux.go @@ -0,0 +1,53 @@ +// Copyright 2009 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "runtime" + "syscall" +) + +func hostname() (name string, err error) { + // Try uname first, as it's only one system call and reading + // from /proc is not allowed on Android. + var un syscall.Utsname + err = syscall.Uname(&un) + + var buf [512]byte // Enough for a DNS name. + for i, b := range un.Nodename[:] { + buf[i] = uint8(b) + if b == 0 { + name = string(buf[:i]) + break + } + } + // If we got a name and it's not potentially truncated + // (Nodename is 65 bytes), return it. + if err == nil && len(name) > 0 && len(name) < 64 { + return name, nil + } + if runtime.GOOS == "android" { + if name != "" { + return name, nil + } + return "localhost", nil + } + + f, err := Open("/proc/sys/kernel/hostname") + if err != nil { + return "", err + } + defer f.Close() + + n, err := f.Read(buf[:]) + if err != nil { + return "", err + } + + if n > 0 && buf[n-1] == '\n' { + n-- + } + return string(buf[:n]), nil +} diff --git a/platform/dbops/binaries/go/go/src/os/sys_plan9.go b/platform/dbops/binaries/go/go/src/os/sys_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..40374eb242a39c2c242fc794370ca6a4ee291482 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/sys_plan9.go @@ -0,0 +1,24 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +func hostname() (name string, err error) { + f, err := Open("#c/sysname") + if err != nil { + return "", err + } + defer f.Close() + + var buf [128]byte + n, err := f.Read(buf[:len(buf)-1]) + + if err != nil { + return "", err + } + if n > 0 { + buf[n] = 0 + } + return string(buf[0:n]), nil +} diff --git a/platform/dbops/binaries/go/go/src/os/sys_solaris.go b/platform/dbops/binaries/go/go/src/os/sys_solaris.go new file mode 100644 index 0000000000000000000000000000000000000000..917e8f2b0d6660b08e0a69c0ca0c9c41fc442086 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/sys_solaris.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import "syscall" + +func hostname() (name string, err error) { + return syscall.Gethostname() +} diff --git a/platform/dbops/binaries/go/go/src/os/sys_unix.go b/platform/dbops/binaries/go/go/src/os/sys_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..79005c2cbd204128c94d3bbae0c3ac2f8e935426 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/sys_unix.go @@ -0,0 +1,14 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package os + +// supportsCloseOnExec reports whether the platform supports the +// O_CLOEXEC flag. +// On Darwin, the O_CLOEXEC flag was introduced in OS X 10.7 (Darwin 11.0.0). +// See https://support.apple.com/kb/HT1633. +// On FreeBSD, the O_CLOEXEC flag was introduced in version 8.3. 
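Both hostname implementations above (AIX and the Linux uname path) face the same small chore: syscall.Utsname.Nodename is a fixed-size, NUL-terminated C array whose element type differs between platforms, and it must be copied into a Go string up to the first zero byte. Factored out as a generic sketch (cString is a hypothetical name):

package sketch

// cString converts a fixed-size NUL-terminated buffer, such as a
// syscall.Utsname field, into a Go string. Generic because platforms
// disagree on whether the elements are int8 or uint8.
func cString[T int8 | uint8](buf []T) string {
	b := make([]byte, 0, len(buf))
	for _, c := range buf {
		if c == 0 {
			break
		}
		b = append(b, byte(c))
	}
	return string(b)
}

With such a helper, the AIX version would reduce to name := cString(u.Nodename[:]).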
+const supportsCloseOnExec = true diff --git a/platform/dbops/binaries/go/go/src/os/sys_wasip1.go b/platform/dbops/binaries/go/go/src/os/sys_wasip1.go new file mode 100644 index 0000000000000000000000000000000000000000..5a29aa53cb8f7bd8bf8f80b829907260b3354114 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/sys_wasip1.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasip1 + +package os + +// supportsCloseOnExec reports whether the platform supports the +// O_CLOEXEC flag. +const supportsCloseOnExec = false diff --git a/platform/dbops/binaries/go/go/src/os/sys_windows.go b/platform/dbops/binaries/go/go/src/os/sys_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..72ad90b924ad8a714670572444d46e912df2c044 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/sys_windows.go @@ -0,0 +1,33 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "internal/syscall/windows" + "syscall" +) + +func hostname() (name string, err error) { + // Use PhysicalDnsHostname to uniquely identify host in a cluster + const format = windows.ComputerNamePhysicalDnsHostname + + n := uint32(64) + for { + b := make([]uint16, n) + err := windows.GetComputerNameEx(format, &b[0], &n) + if err == nil { + return syscall.UTF16ToString(b[:n]), nil + } + if err != syscall.ERROR_MORE_DATA { + return "", NewSyscallError("ComputerNameEx", err) + } + + // If we received an ERROR_MORE_DATA, but n doesn't get larger, + // something has gone wrong and we may be in an infinite loop + if n <= uint32(len(b)) { + return "", NewSyscallError("ComputerNameEx", err) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/os/tempfile.go b/platform/dbops/binaries/go/go/src/os/tempfile.go new file mode 100644 index 0000000000000000000000000000000000000000..66c65e6c783c745a98e1fbca3b318fa642bfaa3f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/tempfile.go @@ -0,0 +1,121 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "errors" + "internal/bytealg" + "internal/itoa" + _ "unsafe" // for go:linkname +) + +// random number source provided by runtime. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +//go:linkname runtime_rand runtime.rand +func runtime_rand() uint64 + +func nextRandom() string { + return itoa.Uitoa(uint(uint32(runtime_rand()))) +} + +// CreateTemp creates a new temporary file in the directory dir, +// opens the file for reading and writing, and returns the resulting file. +// The filename is generated by taking pattern and adding a random string to the end. +// If pattern includes a "*", the random string replaces the last "*". +// If dir is the empty string, CreateTemp uses the default directory for temporary files, as returned by TempDir. +// Multiple programs or goroutines calling CreateTemp simultaneously will not choose the same file. +// The caller can use the file's Name method to find the pathname of the file. +// It is the caller's responsibility to remove the file when it is no longer needed. 
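Since CreateTemp, defined below, returns an open *File and leaves both cleanup steps to the caller, typical use pairs it with two defers, for example:

package main

import (
	"log"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "example-*.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name()) // the caller's responsibility, per the doc comment above
	defer f.Close()

	if _, err := f.Write([]byte("scratch data")); err != nil {
		log.Fatal(err)
	}
}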
+func CreateTemp(dir, pattern string) (*File, error) { + if dir == "" { + dir = TempDir() + } + + prefix, suffix, err := prefixAndSuffix(pattern) + if err != nil { + return nil, &PathError{Op: "createtemp", Path: pattern, Err: err} + } + prefix = joinPath(dir, prefix) + + try := 0 + for { + name := prefix + nextRandom() + suffix + f, err := OpenFile(name, O_RDWR|O_CREATE|O_EXCL, 0600) + if IsExist(err) { + if try++; try < 10000 { + continue + } + return nil, &PathError{Op: "createtemp", Path: prefix + "*" + suffix, Err: ErrExist} + } + return f, err + } +} + +var errPatternHasSeparator = errors.New("pattern contains path separator") + +// prefixAndSuffix splits pattern by the last wildcard "*", if applicable, +// returning prefix as the part before "*" and suffix as the part after "*". +func prefixAndSuffix(pattern string) (prefix, suffix string, err error) { + for i := 0; i < len(pattern); i++ { + if IsPathSeparator(pattern[i]) { + return "", "", errPatternHasSeparator + } + } + if pos := bytealg.LastIndexByteString(pattern, '*'); pos != -1 { + prefix, suffix = pattern[:pos], pattern[pos+1:] + } else { + prefix = pattern + } + return prefix, suffix, nil +} + +// MkdirTemp creates a new temporary directory in the directory dir +// and returns the pathname of the new directory. +// The new directory's name is generated by adding a random string to the end of pattern. +// If pattern includes a "*", the random string replaces the last "*" instead. +// If dir is the empty string, MkdirTemp uses the default directory for temporary files, as returned by TempDir. +// Multiple programs or goroutines calling MkdirTemp simultaneously will not choose the same directory. +// It is the caller's responsibility to remove the directory when it is no longer needed. +func MkdirTemp(dir, pattern string) (string, error) { + if dir == "" { + dir = TempDir() + } + + prefix, suffix, err := prefixAndSuffix(pattern) + if err != nil { + return "", &PathError{Op: "mkdirtemp", Path: pattern, Err: err} + } + prefix = joinPath(dir, prefix) + + try := 0 + for { + name := prefix + nextRandom() + suffix + err := Mkdir(name, 0700) + if err == nil { + return name, nil + } + if IsExist(err) { + if try++; try < 10000 { + continue + } + return "", &PathError{Op: "mkdirtemp", Path: dir + string(PathSeparator) + prefix + "*" + suffix, Err: ErrExist} + } + if IsNotExist(err) { + if _, err := Stat(dir); IsNotExist(err) { + return "", err + } + } + return "", err + } +} + +func joinPath(dir, name string) string { + if len(dir) > 0 && IsPathSeparator(dir[len(dir)-1]) { + return dir + name + } + return dir + string(PathSeparator) + name +} diff --git a/platform/dbops/binaries/go/go/src/os/tempfile_test.go b/platform/dbops/binaries/go/go/src/os/tempfile_test.go new file mode 100644 index 0000000000000000000000000000000000000000..82f0aabda07fdc6acc9dc40c8d659a6423691368 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/tempfile_test.go @@ -0,0 +1,205 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "errors" + "io/fs" + . 
"os" + "path/filepath" + "regexp" + "strings" + "testing" +) + +func TestCreateTemp(t *testing.T) { + t.Parallel() + + dir, err := MkdirTemp("", "TestCreateTempBadDir") + if err != nil { + t.Fatal(err) + } + defer RemoveAll(dir) + + nonexistentDir := filepath.Join(dir, "_not_exists_") + f, err := CreateTemp(nonexistentDir, "foo") + if f != nil || err == nil { + t.Errorf("CreateTemp(%q, `foo`) = %v, %v", nonexistentDir, f, err) + } +} + +func TestCreateTempPattern(t *testing.T) { + t.Parallel() + + tests := []struct{ pattern, prefix, suffix string }{ + {"tempfile_test", "tempfile_test", ""}, + {"tempfile_test*", "tempfile_test", ""}, + {"tempfile_test*xyz", "tempfile_test", "xyz"}, + } + for _, test := range tests { + f, err := CreateTemp("", test.pattern) + if err != nil { + t.Errorf("CreateTemp(..., %q) error: %v", test.pattern, err) + continue + } + defer Remove(f.Name()) + base := filepath.Base(f.Name()) + f.Close() + if !(strings.HasPrefix(base, test.prefix) && strings.HasSuffix(base, test.suffix)) { + t.Errorf("CreateTemp pattern %q created bad name %q; want prefix %q & suffix %q", + test.pattern, base, test.prefix, test.suffix) + } + } +} + +func TestCreateTempBadPattern(t *testing.T) { + t.Parallel() + + tmpDir, err := MkdirTemp("", t.Name()) + if err != nil { + t.Fatal(err) + } + defer RemoveAll(tmpDir) + + const sep = string(PathSeparator) + tests := []struct { + pattern string + wantErr bool + }{ + {"ioutil*test", false}, + {"tempfile_test*foo", false}, + {"tempfile_test" + sep + "foo", true}, + {"tempfile_test*" + sep + "foo", true}, + {"tempfile_test" + sep + "*foo", true}, + {sep + "tempfile_test" + sep + "*foo", true}, + {"tempfile_test*foo" + sep, true}, + } + for _, tt := range tests { + t.Run(tt.pattern, func(t *testing.T) { + tmpfile, err := CreateTemp(tmpDir, tt.pattern) + if tmpfile != nil { + defer tmpfile.Close() + } + if tt.wantErr { + if err == nil { + t.Errorf("CreateTemp(..., %#q) succeeded, expected error", tt.pattern) + } + if !errors.Is(err, ErrPatternHasSeparator) { + t.Errorf("CreateTemp(..., %#q): %v, expected ErrPatternHasSeparator", tt.pattern, err) + } + } else if err != nil { + t.Errorf("CreateTemp(..., %#q): %v", tt.pattern, err) + } + }) + } +} + +func TestMkdirTemp(t *testing.T) { + t.Parallel() + + name, err := MkdirTemp("/_not_exists_", "foo") + if name != "" || err == nil { + t.Errorf("MkdirTemp(`/_not_exists_`, `foo`) = %v, %v", name, err) + } + + tests := []struct { + pattern string + wantPrefix, wantSuffix string + }{ + {"tempfile_test", "tempfile_test", ""}, + {"tempfile_test*", "tempfile_test", ""}, + {"tempfile_test*xyz", "tempfile_test", "xyz"}, + } + + dir := filepath.Clean(TempDir()) + + runTestMkdirTemp := func(t *testing.T, pattern, wantRePat string) { + name, err := MkdirTemp(dir, pattern) + if name == "" || err != nil { + t.Fatalf("MkdirTemp(dir, `tempfile_test`) = %v, %v", name, err) + } + defer Remove(name) + + re := regexp.MustCompile(wantRePat) + if !re.MatchString(name) { + t.Errorf("MkdirTemp(%q, %q) created bad name\n\t%q\ndid not match pattern\n\t%q", dir, pattern, name, wantRePat) + } + } + + for _, tt := range tests { + t.Run(tt.pattern, func(t *testing.T) { + wantRePat := "^" + regexp.QuoteMeta(filepath.Join(dir, tt.wantPrefix)) + "[0-9]+" + regexp.QuoteMeta(tt.wantSuffix) + "$" + runTestMkdirTemp(t, tt.pattern, wantRePat) + }) + } + + // Separately testing "*xyz" (which has no prefix). 
That is when constructing the + // pattern to assert on, as in the previous loop, using filepath.Join for an empty + // prefix filepath.Join(dir, ""), produces the pattern: + // ^[0-9]+xyz$ + // yet we just want to match + // "^/[0-9]+xyz" + t.Run("*xyz", func(t *testing.T) { + wantRePat := "^" + regexp.QuoteMeta(filepath.Join(dir)) + regexp.QuoteMeta(string(filepath.Separator)) + "[0-9]+xyz$" + runTestMkdirTemp(t, "*xyz", wantRePat) + }) +} + +// test that we return a nice error message if the dir argument to TempDir doesn't +// exist (or that it's empty and TempDir doesn't exist) +func TestMkdirTempBadDir(t *testing.T) { + t.Parallel() + + dir, err := MkdirTemp("", "MkdirTempBadDir") + if err != nil { + t.Fatal(err) + } + defer RemoveAll(dir) + + badDir := filepath.Join(dir, "not-exist") + _, err = MkdirTemp(badDir, "foo") + if pe, ok := err.(*fs.PathError); !ok || !IsNotExist(err) || pe.Path != badDir { + t.Errorf("TempDir error = %#v; want PathError for path %q satisfying IsNotExist", err, badDir) + } +} + +func TestMkdirTempBadPattern(t *testing.T) { + t.Parallel() + + tmpDir, err := MkdirTemp("", t.Name()) + if err != nil { + t.Fatal(err) + } + defer RemoveAll(tmpDir) + + const sep = string(PathSeparator) + tests := []struct { + pattern string + wantErr bool + }{ + {"ioutil*test", false}, + {"tempfile_test*foo", false}, + {"tempfile_test" + sep + "foo", true}, + {"tempfile_test*" + sep + "foo", true}, + {"tempfile_test" + sep + "*foo", true}, + {sep + "tempfile_test" + sep + "*foo", true}, + {"tempfile_test*foo" + sep, true}, + } + for _, tt := range tests { + t.Run(tt.pattern, func(t *testing.T) { + _, err := MkdirTemp(tmpDir, tt.pattern) + if tt.wantErr { + if err == nil { + t.Errorf("MkdirTemp(..., %#q) succeeded, expected error", tt.pattern) + } + if !errors.Is(err, ErrPatternHasSeparator) { + t.Errorf("MkdirTemp(..., %#q): %v, expected ErrPatternHasSeparator", tt.pattern, err) + } + } else if err != nil { + t.Errorf("MkdirTemp(..., %#q): %v", tt.pattern, err) + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/os/timeout_test.go b/platform/dbops/binaries/go/go/src/os/timeout_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e0d2328ba148986397c0a0334a783e86f5f548f0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/timeout_test.go @@ -0,0 +1,708 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !js && !plan9 && !wasip1 && !windows + +package os_test + +import ( + "fmt" + "io" + "math/rand" + "os" + "os/signal" + "runtime" + "sync" + "syscall" + "testing" + "time" +) + +func TestNonpollableDeadline(t *testing.T) { + // On BSD systems regular files seem to be pollable, + // so just run this test on Linux. 
+ if runtime.GOOS != "linux" { + t.Skipf("skipping on %s", runtime.GOOS) + } + t.Parallel() + + f, err := os.CreateTemp("", "ostest") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + defer f.Close() + deadline := time.Now().Add(10 * time.Second) + if err := f.SetDeadline(deadline); err != os.ErrNoDeadline { + t.Errorf("SetDeadline on file returned %v, wanted %v", err, os.ErrNoDeadline) + } + if err := f.SetReadDeadline(deadline); err != os.ErrNoDeadline { + t.Errorf("SetReadDeadline on file returned %v, wanted %v", err, os.ErrNoDeadline) + } + if err := f.SetWriteDeadline(deadline); err != os.ErrNoDeadline { + t.Errorf("SetWriteDeadline on file returned %v, wanted %v", err, os.ErrNoDeadline) + } +} + +// noDeadline is a zero time.Time value, which cancels a deadline. +var noDeadline time.Time + +var readTimeoutTests = []struct { + timeout time.Duration + xerrs [2]error // expected errors in transition +}{ + // Tests that read deadlines work, even if there's data ready + // to be read. + {-5 * time.Second, [2]error{os.ErrDeadlineExceeded, os.ErrDeadlineExceeded}}, + + {50 * time.Millisecond, [2]error{nil, os.ErrDeadlineExceeded}}, +} + +// There is a very similar copy of this in net/timeout_test.go. +func TestReadTimeout(t *testing.T) { + t.Parallel() + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + defer w.Close() + + if _, err := w.Write([]byte("READ TIMEOUT TEST")); err != nil { + t.Fatal(err) + } + + for i, tt := range readTimeoutTests { + if err := r.SetReadDeadline(time.Now().Add(tt.timeout)); err != nil { + t.Fatalf("#%d: %v", i, err) + } + var b [1]byte + for j, xerr := range tt.xerrs { + for { + n, err := r.Read(b[:]) + if xerr != nil { + if !isDeadlineExceeded(err) { + t.Fatalf("#%d/%d: %v", i, j, err) + } + } + if err == nil { + time.Sleep(tt.timeout / 3) + continue + } + if n != 0 { + t.Fatalf("#%d/%d: read %d; want 0", i, j, n) + } + break + } + } + } +} + +// There is a very similar copy of this in net/timeout_test.go. +func TestReadTimeoutMustNotReturn(t *testing.T) { + t.Parallel() + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + defer w.Close() + + max := time.NewTimer(100 * time.Millisecond) + defer max.Stop() + ch := make(chan error) + go func() { + if err := r.SetDeadline(time.Now().Add(-5 * time.Second)); err != nil { + t.Error(err) + } + if err := r.SetWriteDeadline(time.Now().Add(-5 * time.Second)); err != nil { + t.Error(err) + } + if err := r.SetReadDeadline(noDeadline); err != nil { + t.Error(err) + } + var b [1]byte + _, err := r.Read(b[:]) + ch <- err + }() + + select { + case err := <-ch: + t.Fatalf("expected Read to not return, but it returned with %v", err) + case <-max.C: + w.Close() + err := <-ch // wait for tester goroutine to stop + if os.IsTimeout(err) { + t.Fatal(err) + } + } +} + +var writeTimeoutTests = []struct { + timeout time.Duration + xerrs [2]error // expected errors in transition +}{ + // Tests that write deadlines work, even if there's buffer + // space available to write. + {-5 * time.Second, [2]error{os.ErrDeadlineExceeded, os.ErrDeadlineExceeded}}, + + {10 * time.Millisecond, [2]error{nil, os.ErrDeadlineExceeded}}, +} + +// There is a very similar copy of this in net/timeout_test.go. 
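Before the write-deadline table below, a stand-alone sketch of the behavior these tables encode, assuming a platform such as Linux where pipes are pollable (illustrative only, not part of the test file):

package main

import (
	"errors"
	"fmt"
	"os"
	"time"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	defer r.Close()
	defer w.Close()

	// A deadline already in the past makes the next Write fail with
	// os.ErrDeadlineExceeded instead of blocking, exactly as the
	// -5s entries in the test tables expect.
	if err := w.SetWriteDeadline(time.Now().Add(-5 * time.Second)); err != nil {
		panic(err) // e.g. os.ErrNoDeadline on non-pollable descriptors
	}
	_, err = w.Write([]byte("x"))
	fmt.Println(errors.Is(err, os.ErrDeadlineExceeded)) // true
}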
+func TestWriteTimeout(t *testing.T) { + t.Parallel() + + for i, tt := range writeTimeoutTests { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + defer w.Close() + + if err := w.SetWriteDeadline(time.Now().Add(tt.timeout)); err != nil { + t.Fatalf("%v", err) + } + for j, xerr := range tt.xerrs { + for { + n, err := w.Write([]byte("WRITE TIMEOUT TEST")) + if xerr != nil { + if !isDeadlineExceeded(err) { + t.Fatalf("%d: %v", j, err) + } + } + if err == nil { + time.Sleep(tt.timeout / 3) + continue + } + if n != 0 { + t.Fatalf("%d: wrote %d; want 0", j, n) + } + break + } + } + }) + } +} + +// There is a very similar copy of this in net/timeout_test.go. +func TestWriteTimeoutMustNotReturn(t *testing.T) { + t.Parallel() + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + defer w.Close() + + max := time.NewTimer(100 * time.Millisecond) + defer max.Stop() + ch := make(chan error) + go func() { + if err := w.SetDeadline(time.Now().Add(-5 * time.Second)); err != nil { + t.Error(err) + } + if err := w.SetReadDeadline(time.Now().Add(-5 * time.Second)); err != nil { + t.Error(err) + } + if err := w.SetWriteDeadline(noDeadline); err != nil { + t.Error(err) + } + var b [1]byte + for { + if _, err := w.Write(b[:]); err != nil { + ch <- err + break + } + } + }() + + select { + case err := <-ch: + t.Fatalf("expected Write to not return, but it returned with %v", err) + case <-max.C: + r.Close() + err := <-ch // wait for tester goroutine to stop + if os.IsTimeout(err) { + t.Fatal(err) + } + } +} + +const ( + // minDynamicTimeout is the minimum timeout to attempt for + // tests that automatically increase timeouts until success. + // + // Lower values may allow tests to succeed more quickly if the value is close + // to the true minimum, but may require more iterations (and waste more time + // and CPU power on failed attempts) if the timeout is too low. + minDynamicTimeout = 1 * time.Millisecond + + // maxDynamicTimeout is the maximum timeout to attempt for + // tests that automatically increase timeouts until success. + // + // This should be a strict upper bound on the latency required to hit a + // timeout accurately, even on a slow or heavily-loaded machine. If a test + // would increase the timeout beyond this value, the test fails. + maxDynamicTimeout = 4 * time.Second +) + +// timeoutUpperBound returns the maximum time that we expect a timeout of +// duration d to take to return the caller. +func timeoutUpperBound(d time.Duration) time.Duration { + switch runtime.GOOS { + case "openbsd", "netbsd": + // NetBSD and OpenBSD seem to be unable to reliably hit deadlines even when + // the absolute durations are long. + // In https://build.golang.org/log/c34f8685d020b98377dd4988cd38f0c5bd72267e, + // we observed that an openbsd-amd64-68 builder took 4.090948779s for a + // 2.983020682s timeout (37.1% overhead). + // (See https://go.dev/issue/50189 for further detail.) + // Give them lots of slop to compensate. + return d * 3 / 2 + } + // Other platforms seem to hit their deadlines more reliably, + // at least when they are long enough to cover scheduling jitter. + return d * 11 / 10 +} + +// nextTimeout returns the next timeout to try after an operation took the given +// actual duration with a timeout shorter than that duration. 
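The growth schedule that nextTimeout (below) implements is easier to see numerically; a small stand-alone sketch of the same 5/4 step and cap, with constants mirroring minDynamicTimeout and maxDynamicTimeout (an illustration, not the test's code):

package main

import (
	"fmt"
	"time"
)

func main() {
	const max = 4 * time.Second // mirrors maxDynamicTimeout
	d := 1 * time.Millisecond   // mirrors minDynamicTimeout
	for i := 0; i < 6; i++ {
		fmt.Println(d) // 1ms, 1.25ms, 1.5625ms, ... (at-least-exponential growth)
		if d = d * 5 / 4; d > max {
			d = max
		}
	}
}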
+func nextTimeout(actual time.Duration) (next time.Duration, ok bool) { + if actual >= maxDynamicTimeout { + return maxDynamicTimeout, false + } + // Since the previous attempt took actual, we can't expect to beat that + // duration by any significant margin. Try the next attempt with an arbitrary + // factor above that, so that our growth curve is at least exponential. + next = actual * 5 / 4 + if next > maxDynamicTimeout { + return maxDynamicTimeout, true + } + return next, true +} + +// There is a very similar copy of this in net/timeout_test.go. +func TestReadTimeoutFluctuation(t *testing.T) { + t.Parallel() + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + defer w.Close() + + d := minDynamicTimeout + b := make([]byte, 256) + for { + t.Logf("SetReadDeadline(+%v)", d) + t0 := time.Now() + deadline := t0.Add(d) + if err = r.SetReadDeadline(deadline); err != nil { + t.Fatalf("SetReadDeadline(%v): %v", deadline, err) + } + var n int + n, err = r.Read(b) + t1 := time.Now() + + if n != 0 || err == nil || !isDeadlineExceeded(err) { + t.Errorf("Read did not return (0, timeout): (%d, %v)", n, err) + } + + actual := t1.Sub(t0) + if t1.Before(deadline) { + t.Errorf("Read took %s; expected at least %s", actual, d) + } + if t.Failed() { + return + } + if want := timeoutUpperBound(d); actual > want { + next, ok := nextTimeout(actual) + if !ok { + t.Fatalf("Read took %s; expected at most %v", actual, want) + } + // Maybe this machine is too slow to reliably schedule goroutines within + // the requested duration. Increase the timeout and try again. + t.Logf("Read took %s (expected %s); trying with longer timeout", actual, d) + d = next + continue + } + + break + } +} + +// There is a very similar copy of this in net/timeout_test.go. +func TestWriteTimeoutFluctuation(t *testing.T) { + t.Parallel() + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + defer w.Close() + + d := minDynamicTimeout + for { + t.Logf("SetWriteDeadline(+%v)", d) + t0 := time.Now() + deadline := t0.Add(d) + if err := w.SetWriteDeadline(deadline); err != nil { + t.Fatalf("SetWriteDeadline(%v): %v", deadline, err) + } + var n int64 + var err error + for { + var dn int + dn, err = w.Write([]byte("TIMEOUT TRANSMITTER")) + n += int64(dn) + if err != nil { + break + } + } + t1 := time.Now() + // Inv: err != nil + if !isDeadlineExceeded(err) { + t.Fatalf("Write did not return (any, timeout): (%d, %v)", n, err) + } + + actual := t1.Sub(t0) + if t1.Before(deadline) { + t.Errorf("Write took %s; expected at least %s", actual, d) + } + if t.Failed() { + return + } + if want := timeoutUpperBound(d); actual > want { + if n > 0 { + // SetWriteDeadline specifies a time “after which I/O operations fail + // instead of blocking”. However, the kernel's send buffer is not yet + // full, we may be able to write some arbitrary (but finite) number of + // bytes to it without blocking. + t.Logf("Wrote %d bytes into send buffer; retrying until buffer is full", n) + if d <= maxDynamicTimeout/2 { + // We don't know how long the actual write loop would have taken if + // the buffer were full, so just guess and double the duration so that + // the next attempt can make twice as much progress toward filling it. + d *= 2 + } + } else if next, ok := nextTimeout(actual); !ok { + t.Fatalf("Write took %s; expected at most %s", actual, want) + } else { + // Maybe this machine is too slow to reliably schedule goroutines within + // the requested duration. Increase the timeout and try again. 
+ t.Logf("Write took %s (expected %s); trying with longer timeout", actual, d) + d = next + } + continue + } + + break + } +} + +// There is a very similar copy of this in net/timeout_test.go. +func TestVariousDeadlines(t *testing.T) { + t.Parallel() + testVariousDeadlines(t) +} + +// There is a very similar copy of this in net/timeout_test.go. +func TestVariousDeadlines1Proc(t *testing.T) { + // Cannot use t.Parallel - modifies global GOMAXPROCS. + if testing.Short() { + t.Skip("skipping in short mode") + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) + testVariousDeadlines(t) +} + +// There is a very similar copy of this in net/timeout_test.go. +func TestVariousDeadlines4Proc(t *testing.T) { + // Cannot use t.Parallel - modifies global GOMAXPROCS. + if testing.Short() { + t.Skip("skipping in short mode") + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + testVariousDeadlines(t) +} + +type neverEnding byte + +func (b neverEnding) Read(p []byte) (int, error) { + for i := range p { + p[i] = byte(b) + } + return len(p), nil +} + +func testVariousDeadlines(t *testing.T) { + type result struct { + n int64 + err error + d time.Duration + } + + handler := func(w *os.File, pasvch chan result) { + // The writer, with no timeouts of its own, + // sending bytes to clients as fast as it can. + t0 := time.Now() + n, err := io.Copy(w, neverEnding('a')) + dt := time.Since(t0) + pasvch <- result{n, err, dt} + } + + for _, timeout := range []time.Duration{ + 1 * time.Nanosecond, + 2 * time.Nanosecond, + 5 * time.Nanosecond, + 50 * time.Nanosecond, + 100 * time.Nanosecond, + 200 * time.Nanosecond, + 500 * time.Nanosecond, + 750 * time.Nanosecond, + 1 * time.Microsecond, + 5 * time.Microsecond, + 25 * time.Microsecond, + 250 * time.Microsecond, + 500 * time.Microsecond, + 1 * time.Millisecond, + 5 * time.Millisecond, + 100 * time.Millisecond, + 250 * time.Millisecond, + 500 * time.Millisecond, + 1 * time.Second, + } { + numRuns := 3 + if testing.Short() { + numRuns = 1 + if timeout > 500*time.Microsecond { + continue + } + } + for run := 0; run < numRuns; run++ { + t.Run(fmt.Sprintf("%v-%d", timeout, run+1), func(t *testing.T) { + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + defer w.Close() + + pasvch := make(chan result) + go handler(w, pasvch) + + tooLong := 5 * time.Second + max := time.NewTimer(tooLong) + defer max.Stop() + actvch := make(chan result) + go func() { + t0 := time.Now() + if err := r.SetDeadline(t0.Add(timeout)); err != nil { + t.Error(err) + } + n, err := io.Copy(io.Discard, r) + dt := time.Since(t0) + r.Close() + actvch <- result{n, err, dt} + }() + + select { + case res := <-actvch: + if !isDeadlineExceeded(err) { + t.Logf("good client timeout after %v, reading %d bytes", res.d, res.n) + } else { + t.Fatalf("client Copy = %d, %v; want timeout", res.n, res.err) + } + case <-max.C: + t.Fatalf("timeout (%v) waiting for client to timeout (%v) reading", tooLong, timeout) + } + + select { + case res := <-pasvch: + t.Logf("writer in %v wrote %d: %v", res.d, res.n, res.err) + case <-max.C: + t.Fatalf("timeout waiting for writer to finish writing") + } + }) + } + } +} + +// There is a very similar copy of this in net/timeout_test.go. 
+func TestReadWriteDeadlineRace(t *testing.T) { + t.Parallel() + + N := 1000 + if testing.Short() { + N = 50 + } + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + defer w.Close() + + var wg sync.WaitGroup + wg.Add(3) + go func() { + defer wg.Done() + tic := time.NewTicker(2 * time.Microsecond) + defer tic.Stop() + for i := 0; i < N; i++ { + if err := r.SetReadDeadline(time.Now().Add(2 * time.Microsecond)); err != nil { + break + } + if err := w.SetWriteDeadline(time.Now().Add(2 * time.Microsecond)); err != nil { + break + } + <-tic.C + } + }() + go func() { + defer wg.Done() + var b [1]byte + for i := 0; i < N; i++ { + _, err := r.Read(b[:]) + if err != nil && !isDeadlineExceeded(err) { + t.Error("Read returned non-timeout error", err) + } + } + }() + go func() { + defer wg.Done() + var b [1]byte + for i := 0; i < N; i++ { + _, err := w.Write(b[:]) + if err != nil && !isDeadlineExceeded(err) { + t.Error("Write returned non-timeout error", err) + } + } + }() + wg.Wait() // wait for tester goroutine to stop +} + +// TestRacyRead tests that it is safe to mutate the input Read buffer +// immediately after cancellation has occurred. +func TestRacyRead(t *testing.T) { + t.Parallel() + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + defer w.Close() + + var wg sync.WaitGroup + defer wg.Wait() + + go io.Copy(w, rand.New(rand.NewSource(0))) + + r.SetReadDeadline(time.Now().Add(time.Millisecond)) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + b1 := make([]byte, 1024) + b2 := make([]byte, 1024) + for j := 0; j < 100; j++ { + _, err := r.Read(b1) + copy(b1, b2) // Mutate b1 to trigger potential race + if err != nil { + if !isDeadlineExceeded(err) { + t.Error(err) + } + r.SetReadDeadline(time.Now().Add(time.Millisecond)) + } + } + }() + } +} + +// TestRacyWrite tests that it is safe to mutate the input Write buffer +// immediately after cancellation has occurred. +func TestRacyWrite(t *testing.T) { + t.Parallel() + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + defer w.Close() + + var wg sync.WaitGroup + defer wg.Wait() + + go io.Copy(io.Discard, r) + + w.SetWriteDeadline(time.Now().Add(time.Millisecond)) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + b1 := make([]byte, 1024) + b2 := make([]byte, 1024) + for j := 0; j < 100; j++ { + _, err := w.Write(b1) + copy(b1, b2) // Mutate b1 to trigger potential race + if err != nil { + if !isDeadlineExceeded(err) { + t.Error(err) + } + w.SetWriteDeadline(time.Now().Add(time.Millisecond)) + } + } + }() + } +} + +// Closing a TTY while reading from it should not hang. Issue 23943. +func TestTTYClose(t *testing.T) { + // Ignore SIGTTIN in case we are running in the background. + signal.Ignore(syscall.SIGTTIN) + defer signal.Reset(syscall.SIGTTIN) + + f, err := os.Open("/dev/tty") + if err != nil { + t.Skipf("skipping because opening /dev/tty failed: %v", err) + } + + go func() { + var buf [1]byte + f.Read(buf[:]) + }() + + // Give the goroutine a chance to enter the read. + // It doesn't matter much if it occasionally fails to do so, + // we won't be testing what we want to test but the test will pass. + time.Sleep(time.Millisecond) + + c := make(chan bool) + go func() { + defer close(c) + f.Close() + }() + + select { + case <-c: + case <-time.After(time.Second): + t.Error("timed out waiting for close") + } + + // On some systems the goroutines may now be hanging. + // There's not much we can do about that. 
+} diff --git a/platform/dbops/binaries/go/go/src/os/types.go b/platform/dbops/binaries/go/go/src/os/types.go new file mode 100644 index 0000000000000000000000000000000000000000..d8edd98b68d76b8849356068b27e9bd9a7d48764 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/types.go @@ -0,0 +1,74 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "io/fs" + "syscall" +) + +// Getpagesize returns the underlying system's memory page size. +func Getpagesize() int { return syscall.Getpagesize() } + +// File represents an open file descriptor. +type File struct { + *file // os specific +} + +// A FileInfo describes a file and is returned by Stat and Lstat. +type FileInfo = fs.FileInfo + +// A FileMode represents a file's mode and permission bits. +// The bits have the same definition on all systems, so that +// information about files can be moved from one system +// to another portably. Not all bits apply to all systems. +// The only required bit is ModeDir for directories. +type FileMode = fs.FileMode + +// The defined file mode bits are the most significant bits of the FileMode. +// The nine least-significant bits are the standard Unix rwxrwxrwx permissions. +// The values of these bits should be considered part of the public API and +// may be used in wire protocols or disk representations: they must not be +// changed, although new bits might be added. +const ( + // The single letters are the abbreviations + // used by the String method's formatting. + ModeDir = fs.ModeDir // d: is a directory + ModeAppend = fs.ModeAppend // a: append-only + ModeExclusive = fs.ModeExclusive // l: exclusive use + ModeTemporary = fs.ModeTemporary // T: temporary file; Plan 9 only + ModeSymlink = fs.ModeSymlink // L: symbolic link + ModeDevice = fs.ModeDevice // D: device file + ModeNamedPipe = fs.ModeNamedPipe // p: named pipe (FIFO) + ModeSocket = fs.ModeSocket // S: Unix domain socket + ModeSetuid = fs.ModeSetuid // u: setuid + ModeSetgid = fs.ModeSetgid // g: setgid + ModeCharDevice = fs.ModeCharDevice // c: Unix character device, when ModeDevice is set + ModeSticky = fs.ModeSticky // t: sticky + ModeIrregular = fs.ModeIrregular // ?: non-regular file; nothing else is known about this file + + // Mask for the type bits. For regular files, none will be set. + ModeType = fs.ModeType + + ModePerm = fs.ModePerm // Unix permission bits, 0o777 +) + +func (fs *fileStat) Name() string { return fs.name } +func (fs *fileStat) IsDir() bool { return fs.Mode().IsDir() } + +// SameFile reports whether fi1 and fi2 describe the same file. +// For example, on Unix this means that the device and inode fields +// of the two underlying structures are identical; on other systems +// the decision may be based on the path names. +// SameFile only applies to results returned by this package's Stat. +// It returns false in other cases. +func SameFile(fi1, fi2 FileInfo) bool { + fs1, ok1 := fi1.(*fileStat) + fs2, ok2 := fi2.(*fileStat) + if !ok1 || !ok2 { + return false + } + return sameFile(fs1, fs2) +} diff --git a/platform/dbops/binaries/go/go/src/os/types_plan9.go b/platform/dbops/binaries/go/go/src/os/types_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..adb40130855a82abab4c8392cc6da17c5348a928 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/types_plan9.go @@ -0,0 +1,30 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "time" +) + +// A fileStat is the implementation of FileInfo returned by Stat and Lstat. +type fileStat struct { + name string + size int64 + mode FileMode + modTime time.Time + sys any +} + +func (fs *fileStat) Size() int64 { return fs.size } +func (fs *fileStat) Mode() FileMode { return fs.mode } +func (fs *fileStat) ModTime() time.Time { return fs.modTime } +func (fs *fileStat) Sys() any { return fs.sys } + +func sameFile(fs1, fs2 *fileStat) bool { + a := fs1.sys.(*syscall.Dir) + b := fs2.sys.(*syscall.Dir) + return a.Qid.Path == b.Qid.Path && a.Type == b.Type && a.Dev == b.Dev +} diff --git a/platform/dbops/binaries/go/go/src/os/types_unix.go b/platform/dbops/binaries/go/go/src/os/types_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..1b90a5a14157a9b6d8f03b6eff98ea166bd59abf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/types_unix.go @@ -0,0 +1,30 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !plan9 + +package os + +import ( + "syscall" + "time" +) + +// A fileStat is the implementation of FileInfo returned by Stat and Lstat. +type fileStat struct { + name string + size int64 + mode FileMode + modTime time.Time + sys syscall.Stat_t +} + +func (fs *fileStat) Size() int64 { return fs.size } +func (fs *fileStat) Mode() FileMode { return fs.mode } +func (fs *fileStat) ModTime() time.Time { return fs.modTime } +func (fs *fileStat) Sys() any { return &fs.sys } + +func sameFile(fs1, fs2 *fileStat) bool { + return fs1.sys.Dev == fs2.sys.Dev && fs1.sys.Ino == fs2.sys.Ino +} diff --git a/platform/dbops/binaries/go/go/src/os/types_windows.go b/platform/dbops/binaries/go/go/src/os/types_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..6b9fef6c123f61e125c6200a60b0a59f9a498fc6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/types_windows.go @@ -0,0 +1,306 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "internal/syscall/windows" + "sync" + "syscall" + "time" + "unsafe" +) + +// A fileStat is the implementation of FileInfo returned by Stat and Lstat. +type fileStat struct { + name string + + // from ByHandleFileInformation, Win32FileAttributeData, Win32finddata, and GetFileInformationByHandleEx + FileAttributes uint32 + CreationTime syscall.Filetime + LastAccessTime syscall.Filetime + LastWriteTime syscall.Filetime + FileSizeHigh uint32 + FileSizeLow uint32 + + // from Win32finddata and GetFileInformationByHandleEx + ReparseTag uint32 + + // what syscall.GetFileType returns + filetype uint32 + + // used to implement SameFile + sync.Mutex + path string + vol uint32 + idxhi uint32 + idxlo uint32 + appendNameToPath bool +} + +// newFileStatFromGetFileInformationByHandle calls GetFileInformationByHandle +// to gather all required information about the file handle h. 
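The fileStat fields above feed os.SameFile from types.go earlier in this diff; a short, hypothetical sketch of that API from the caller's side (paths are illustrative):

package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// Two hypothetical spellings of the same path.
	fi1, err := os.Stat("data.txt")
	if err != nil {
		log.Fatal(err)
	}
	fi2, err := os.Stat("./data.txt")
	if err != nil {
		log.Fatal(err)
	}
	// Unix compares device and inode; Windows compares the volume serial
	// number and file index that loadFileId fetches lazily.
	fmt.Println(os.SameFile(fi1, fi2)) // true
}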
+func newFileStatFromGetFileInformationByHandle(path string, h syscall.Handle) (fs *fileStat, err error) { + var d syscall.ByHandleFileInformation + err = syscall.GetFileInformationByHandle(h, &d) + if err != nil { + return nil, &PathError{Op: "GetFileInformationByHandle", Path: path, Err: err} + } + + var ti windows.FILE_ATTRIBUTE_TAG_INFO + err = windows.GetFileInformationByHandleEx(h, windows.FileAttributeTagInfo, (*byte)(unsafe.Pointer(&ti)), uint32(unsafe.Sizeof(ti))) + if err != nil { + if errno, ok := err.(syscall.Errno); ok && errno == windows.ERROR_INVALID_PARAMETER { + // It appears calling GetFileInformationByHandleEx with + // FILE_ATTRIBUTE_TAG_INFO fails on FAT file system with + // ERROR_INVALID_PARAMETER. Clear ti.ReparseTag in that + // instance to indicate no symlinks are possible. + ti.ReparseTag = 0 + } else { + return nil, &PathError{Op: "GetFileInformationByHandleEx", Path: path, Err: err} + } + } + + return &fileStat{ + name: basename(path), + FileAttributes: d.FileAttributes, + CreationTime: d.CreationTime, + LastAccessTime: d.LastAccessTime, + LastWriteTime: d.LastWriteTime, + FileSizeHigh: d.FileSizeHigh, + FileSizeLow: d.FileSizeLow, + vol: d.VolumeSerialNumber, + idxhi: d.FileIndexHigh, + idxlo: d.FileIndexLow, + ReparseTag: ti.ReparseTag, + // fileStat.path is used by os.SameFile to decide if it needs + // to fetch vol, idxhi and idxlo. But these are already set, + // so set fileStat.path to "" to prevent os.SameFile doing it again. + }, nil +} + +// newFileStatFromFileIDBothDirInfo copies all required information +// from windows.FILE_ID_BOTH_DIR_INFO d into the newly created fileStat. +func newFileStatFromFileIDBothDirInfo(d *windows.FILE_ID_BOTH_DIR_INFO) *fileStat { + // The FILE_ID_BOTH_DIR_INFO MSDN documentations isn't completely correct. + // FileAttributes can contain any file attributes that is currently set on the file, + // not just the ones documented. + // EaSize contains the reparse tag if the file is a reparse point. + return &fileStat{ + FileAttributes: d.FileAttributes, + CreationTime: d.CreationTime, + LastAccessTime: d.LastAccessTime, + LastWriteTime: d.LastWriteTime, + FileSizeHigh: uint32(d.EndOfFile >> 32), + FileSizeLow: uint32(d.EndOfFile), + ReparseTag: d.EaSize, + idxhi: uint32(d.FileID >> 32), + idxlo: uint32(d.FileID), + } +} + +// newFileStatFromFileFullDirInfo copies all required information +// from windows.FILE_FULL_DIR_INFO d into the newly created fileStat. +func newFileStatFromFileFullDirInfo(d *windows.FILE_FULL_DIR_INFO) *fileStat { + return &fileStat{ + FileAttributes: d.FileAttributes, + CreationTime: d.CreationTime, + LastAccessTime: d.LastAccessTime, + LastWriteTime: d.LastWriteTime, + FileSizeHigh: uint32(d.EndOfFile >> 32), + FileSizeLow: uint32(d.EndOfFile), + ReparseTag: d.EaSize, + } +} + +// newFileStatFromWin32finddata copies all required information +// from syscall.Win32finddata d into the newly created fileStat. +func newFileStatFromWin32finddata(d *syscall.Win32finddata) *fileStat { + fs := &fileStat{ + FileAttributes: d.FileAttributes, + CreationTime: d.CreationTime, + LastAccessTime: d.LastAccessTime, + LastWriteTime: d.LastWriteTime, + FileSizeHigh: d.FileSizeHigh, + FileSizeLow: d.FileSizeLow, + } + if d.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 { + // Per https://learn.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-win32_find_dataw: + // “If the dwFileAttributes member includes the FILE_ATTRIBUTE_REPARSE_POINT + // attribute, this member specifies the reparse point tag. 
Otherwise, this + // value is undefined and should not be used.” + fs.ReparseTag = d.Reserved0 + } + return fs +} + +// isReparseTagNameSurrogate determines whether a tag's associated +// reparse point is a surrogate for another named entity (for example, a mounted folder). +// +// See https://learn.microsoft.com/en-us/windows/win32/api/winnt/nf-winnt-isreparsetagnamesurrogate +// and https://learn.microsoft.com/en-us/windows/win32/fileio/reparse-point-tags. +func (fs *fileStat) isReparseTagNameSurrogate() bool { + // True for IO_REPARSE_TAG_SYMLINK and IO_REPARSE_TAG_MOUNT_POINT. + return fs.ReparseTag&0x20000000 != 0 +} + +func (fs *fileStat) isSymlink() bool { + // As of https://go.dev/cl/86556, we treat MOUNT_POINT reparse points as + // symlinks because otherwise certain directory junction tests in the + // path/filepath package would fail. + // + // However, + // https://learn.microsoft.com/en-us/windows/win32/fileio/hard-links-and-junctions + // seems to suggest that directory junctions should be treated like hard + // links, not symlinks. + // + // TODO(bcmills): Get more input from Microsoft on what the behavior ought to + // be for MOUNT_POINT reparse points. + + return fs.ReparseTag == syscall.IO_REPARSE_TAG_SYMLINK || + fs.ReparseTag == windows.IO_REPARSE_TAG_MOUNT_POINT +} + +func (fs *fileStat) Size() int64 { + return int64(fs.FileSizeHigh)<<32 + int64(fs.FileSizeLow) +} + +func (fs *fileStat) Mode() (m FileMode) { + if fs.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY != 0 { + m |= 0444 + } else { + m |= 0666 + } + if fs.isSymlink() { + return m | ModeSymlink + } + if fs.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + m |= ModeDir | 0111 + } + switch fs.filetype { + case syscall.FILE_TYPE_PIPE: + m |= ModeNamedPipe + case syscall.FILE_TYPE_CHAR: + m |= ModeDevice | ModeCharDevice + } + if fs.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 && m&ModeType == 0 { + if fs.ReparseTag == windows.IO_REPARSE_TAG_DEDUP { + // If the Data Deduplication service is enabled on Windows Server, its + // Optimization job may convert regular files to IO_REPARSE_TAG_DEDUP + // whenever that job runs. + // + // However, DEDUP reparse points remain similar in most respects to + // regular files: they continue to support random-access reads and writes + // of persistent data, and they shouldn't add unexpected latency or + // unavailability in the way that a network filesystem might. + // + // Go programs may use ModeIrregular to filter out unusual files (such as + // raw device files on Linux, POSIX FIFO special files, and so on), so + // to avoid files changing unpredictably from regular to irregular we will + // consider DEDUP files to be close enough to regular to treat as such. + } else { + m |= ModeIrregular + } + } + return m +} + +func (fs *fileStat) ModTime() time.Time { + return time.Unix(0, fs.LastWriteTime.Nanoseconds()) +} + +// Sys returns syscall.Win32FileAttributeData for file fs. 
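Mode, shown above, flattens the Windows attribute bits into a portable FileMode; from the caller's side that reduces to a few bit tests, sketched here with a hypothetical path:

package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	fi, err := os.Lstat(`C:\some\path`) // hypothetical; Lstat does not follow symlinks
	if err != nil {
		log.Fatal(err)
	}
	m := fi.Mode()
	fmt.Println("dir:    ", m.IsDir())
	fmt.Println("symlink:", m&os.ModeSymlink != 0) // also set for mount points, per isSymlink above
	fmt.Println("perm:   ", m.Perm())              // 0444 or 0666, plus 0111 for directories
}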
+func (fs *fileStat) Sys() any { + return &syscall.Win32FileAttributeData{ + FileAttributes: fs.FileAttributes, + CreationTime: fs.CreationTime, + LastAccessTime: fs.LastAccessTime, + LastWriteTime: fs.LastWriteTime, + FileSizeHigh: fs.FileSizeHigh, + FileSizeLow: fs.FileSizeLow, + } +} + +func (fs *fileStat) loadFileId() error { + fs.Lock() + defer fs.Unlock() + if fs.path == "" { + // already done + return nil + } + var path string + if fs.appendNameToPath { + path = fixLongPath(fs.path + `\` + fs.name) + } else { + path = fs.path + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return err + } + + // Per https://learn.microsoft.com/en-us/windows/win32/fileio/reparse-points-and-file-operations, + // “Applications that use the CreateFile function should specify the + // FILE_FLAG_OPEN_REPARSE_POINT flag when opening the file if it is a reparse + // point.” + // + // And per https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew, + // “If the file is not a reparse point, then this flag is ignored.” + // + // So we set FILE_FLAG_OPEN_REPARSE_POINT unconditionally, since we want + // information about the reparse point itself. + // + // If the file is a symlink, the symlink target should have already been + // resolved when the fileStat was created, so we don't need to worry about + // resolving symlink reparse points again here. + attrs := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS | syscall.FILE_FLAG_OPEN_REPARSE_POINT) + + h, err := syscall.CreateFile(pathp, 0, 0, nil, syscall.OPEN_EXISTING, attrs, 0) + if err != nil { + return err + } + defer syscall.CloseHandle(h) + var i syscall.ByHandleFileInformation + err = syscall.GetFileInformationByHandle(h, &i) + if err != nil { + return err + } + fs.path = "" + fs.vol = i.VolumeSerialNumber + fs.idxhi = i.FileIndexHigh + fs.idxlo = i.FileIndexLow + return nil +} + +// saveInfoFromPath saves full path of the file to be used by os.SameFile later, +// and set name from path. +func (fs *fileStat) saveInfoFromPath(path string) error { + fs.path = path + if !isAbs(fs.path) { + var err error + fs.path, err = syscall.FullPath(fs.path) + if err != nil { + return &PathError{Op: "FullPath", Path: path, Err: err} + } + } + fs.name = basename(path) + return nil +} + +func sameFile(fs1, fs2 *fileStat) bool { + e := fs1.loadFileId() + if e != nil { + return false + } + e = fs2.loadFileId() + if e != nil { + return false + } + return fs1.vol == fs2.vol && fs1.idxhi == fs2.idxhi && fs1.idxlo == fs2.idxlo +} + +// For testing. +func atime(fi FileInfo) time.Time { + return time.Unix(0, fi.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) +} diff --git a/platform/dbops/binaries/go/go/src/os/wait6_dragonfly.go b/platform/dbops/binaries/go/go/src/os/wait6_dragonfly.go new file mode 100644 index 0000000000000000000000000000000000000000..cc3af39a237513051ee18eab8fcd161fca925c02 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/wait6_dragonfly.go @@ -0,0 +1,18 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package os + +import ( + "syscall" + "unsafe" +) + +const _P_PID = 0 + +func wait6(idtype, id, options int) (status int, errno syscall.Errno) { + var status32 int32 // C.int + _, _, errno = syscall.Syscall6(syscall.SYS_WAIT6, uintptr(idtype), uintptr(id), uintptr(unsafe.Pointer(&status32)), uintptr(options), 0, 0) + return int(status32), errno +} diff --git a/platform/dbops/binaries/go/go/src/os/wait6_freebsd64.go b/platform/dbops/binaries/go/go/src/os/wait6_freebsd64.go new file mode 100644 index 0000000000000000000000000000000000000000..b2677c533a4954f27287641d878a186a80fc86f9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/wait6_freebsd64.go @@ -0,0 +1,20 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build freebsd && (amd64 || arm64 || riscv64) + +package os + +import ( + "syscall" + "unsafe" +) + +const _P_PID = 0 + +func wait6(idtype, id, options int) (status int, errno syscall.Errno) { + var status32 int32 // C.int + _, _, errno = syscall.Syscall6(syscall.SYS_WAIT6, uintptr(idtype), uintptr(id), uintptr(unsafe.Pointer(&status32)), uintptr(options), 0, 0) + return int(status32), errno +} diff --git a/platform/dbops/binaries/go/go/src/os/wait6_freebsd_386.go b/platform/dbops/binaries/go/go/src/os/wait6_freebsd_386.go new file mode 100644 index 0000000000000000000000000000000000000000..30b01c564dbae8c9bae8e144b8f067804ea9cf2c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/wait6_freebsd_386.go @@ -0,0 +1,18 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "unsafe" +) + +const _P_PID = 0 + +func wait6(idtype, id, options int) (status int, errno syscall.Errno) { + // freebsd32_wait6_args{ idtype, id1, id2, status, options, wrusage, info } + _, _, errno = syscall.Syscall9(syscall.SYS_WAIT6, uintptr(idtype), uintptr(id), 0, uintptr(unsafe.Pointer(&status)), uintptr(options), 0, 0, 0, 0) + return status, errno +} diff --git a/platform/dbops/binaries/go/go/src/os/wait6_freebsd_arm.go b/platform/dbops/binaries/go/go/src/os/wait6_freebsd_arm.go new file mode 100644 index 0000000000000000000000000000000000000000..0fd8af012ed0a66a5be354ac22140e6b56afd154 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/wait6_freebsd_arm.go @@ -0,0 +1,18 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "syscall" + "unsafe" +) + +const _P_PID = 0 + +func wait6(idtype, id, options int) (status int, errno syscall.Errno) { + // freebsd32_wait6_args{ idtype, pad, id1, id2, status, options, wrusage, info } + _, _, errno = syscall.Syscall9(syscall.SYS_WAIT6, uintptr(idtype), 0, uintptr(id), 0, uintptr(unsafe.Pointer(&status)), uintptr(options), 0, 0, 0) + return status, errno +} diff --git a/platform/dbops/binaries/go/go/src/os/wait6_netbsd.go b/platform/dbops/binaries/go/go/src/os/wait6_netbsd.go new file mode 100644 index 0000000000000000000000000000000000000000..0bbb73d490e04e1c3e206f3dcba5d1f95aa98294 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/wait6_netbsd.go @@ -0,0 +1,18 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package os + +import ( + "syscall" + "unsafe" +) + +const _P_PID = 1 // not 0 as on FreeBSD and Dragonfly! + +func wait6(idtype, id, options int) (status int, errno syscall.Errno) { + var status32 int32 // C.int + _, _, errno = syscall.Syscall6(syscall.SYS_WAIT6, uintptr(idtype), uintptr(id), uintptr(unsafe.Pointer(&status32)), uintptr(options), 0, 0) + return int(status32), errno +} diff --git a/platform/dbops/binaries/go/go/src/os/wait_unimp.go b/platform/dbops/binaries/go/go/src/os/wait_unimp.go new file mode 100644 index 0000000000000000000000000000000000000000..810e35da63815b54ed540c46ca77ebd95fb5f828 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/wait_unimp.go @@ -0,0 +1,21 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// aix, darwin, js/wasm, openbsd, solaris and wasip1/wasm don't implement +// waitid/wait6. + +//go:build aix || darwin || (js && wasm) || openbsd || solaris || wasip1 + +package os + +// blockUntilWaitable attempts to block until a call to p.Wait will +// succeed immediately, and reports whether it has done so. +// It does not actually call p.Wait. +// This version is used on systems that do not implement waitid, +// or where we have not implemented it yet. Note that this is racy: +// a call to Process.Signal can in an extremely unlikely case send a +// signal to the wrong process, see issue #13987. +func (p *Process) blockUntilWaitable() (bool, error) { + return false, nil +} diff --git a/platform/dbops/binaries/go/go/src/os/wait_wait6.go b/platform/dbops/binaries/go/go/src/os/wait_wait6.go new file mode 100644 index 0000000000000000000000000000000000000000..1031428826ca667478fbe868b96582f1e3eedbf4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/wait_wait6.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build dragonfly || freebsd || netbsd + +package os + +import ( + "runtime" + "syscall" +) + +// blockUntilWaitable attempts to block until a call to p.Wait will +// succeed immediately, and reports whether it has done so. +// It does not actually call p.Wait. +func (p *Process) blockUntilWaitable() (bool, error) { + var errno syscall.Errno + for { + _, errno = wait6(_P_PID, p.Pid, syscall.WEXITED|syscall.WNOWAIT) + if errno != syscall.EINTR { + break + } + } + runtime.KeepAlive(p) + if errno == syscall.ENOSYS { + return false, nil + } else if errno != 0 { + return false, NewSyscallError("wait6", errno) + } + return true, nil +} diff --git a/platform/dbops/binaries/go/go/src/os/wait_waitid.go b/platform/dbops/binaries/go/go/src/os/wait_waitid.go new file mode 100644 index 0000000000000000000000000000000000000000..cd078f35220676591c09b2fc2fb205c1567629d8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/wait_waitid.go @@ -0,0 +1,48 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// We used to use this code for Darwin, but according to issue #19314 +// waitid returns if the process is stopped, even when using WEXITED. + +//go:build linux + +package os + +import ( + "runtime" + "syscall" + "unsafe" +) + +const _P_PID = 1 + +// blockUntilWaitable attempts to block until a call to p.Wait will +// succeed immediately, and reports whether it has done so. 
+// It does not actually call p.Wait. +func (p *Process) blockUntilWaitable() (bool, error) { + // The waitid system call expects a pointer to a siginfo_t, + // which is 128 bytes on all Linux systems. + // On darwin/amd64, it requires 104 bytes. + // We don't care about the values it returns. + var siginfo [16]uint64 + psig := &siginfo[0] + var e syscall.Errno + for { + _, _, e = syscall.Syscall6(syscall.SYS_WAITID, _P_PID, uintptr(p.Pid), uintptr(unsafe.Pointer(psig)), syscall.WEXITED|syscall.WNOWAIT, 0, 0) + if e != syscall.EINTR { + break + } + } + runtime.KeepAlive(p) + if e != 0 { + // waitid has been available since Linux 2.6.9, but + // reportedly is not available in Ubuntu on Windows. + // See issue 16610. + if e == syscall.ENOSYS { + return false, nil + } + return false, NewSyscallError("waitid", e) + } + return true, nil +} diff --git a/platform/dbops/binaries/go/go/src/os/writeto_linux_test.go b/platform/dbops/binaries/go/go/src/os/writeto_linux_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5ffab88a2ab52630ba24c9855d4cda8c1515df2c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/writeto_linux_test.go @@ -0,0 +1,171 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "bytes" + "internal/poll" + "io" + "math/rand" + "net" + . "os" + "strconv" + "syscall" + "testing" + "time" +) + +func TestSendFile(t *testing.T) { + sizes := []int{ + 1, + 42, + 1025, + syscall.Getpagesize() + 1, + 32769, + } + t.Run("sendfile-to-unix", func(t *testing.T) { + for _, size := range sizes { + t.Run(strconv.Itoa(size), func(t *testing.T) { + testSendFile(t, "unix", int64(size)) + }) + } + }) + t.Run("sendfile-to-tcp", func(t *testing.T) { + for _, size := range sizes { + t.Run(strconv.Itoa(size), func(t *testing.T) { + testSendFile(t, "tcp", int64(size)) + }) + } + }) +} + +func testSendFile(t *testing.T, proto string, size int64) { + dst, src, recv, data, hook := newSendFileTest(t, proto, size) + + // Now call WriteTo (through io.Copy), which will hopefully call poll.SendFile. + n, err := io.Copy(dst, src) + if err != nil { + t.Fatalf("io.Copy error: %v", err) + } + + // We should have called poll.SendFile with the right file descriptor arguments. + if n > 0 && !hook.called { + t.Fatal("expected to have called poll.SendFile") + } + if hook.called && hook.srcfd != int(src.Fd()) { + t.Fatalf("wrong source file descriptor: got %d, want %d", hook.srcfd, src.Fd()) + } + sc, ok := dst.(syscall.Conn) + if !ok { + t.Fatalf("destination is not a syscall.Conn") + } + rc, err := sc.SyscallConn() + if err != nil { + t.Fatalf("destination SyscallConn error: %v", err) + } + if err = rc.Control(func(fd uintptr) { + if hook.called && hook.dstfd != int(fd) { + t.Fatalf("wrong destination file descriptor: got %d, want %d", hook.dstfd, int(fd)) + } + }); err != nil { + t.Fatalf("destination Conn Control error: %v", err) + } + + // Verify the data size and content.
+ dataSize := len(data) + dstData := make([]byte, dataSize) + m, err := io.ReadFull(recv, dstData) + if err != nil { + t.Fatalf("server Conn Read error: %v", err) + } + if n != int64(dataSize) { + t.Fatalf("data length mismatch for io.Copy, got %d, want %d", n, dataSize) + } + if m != dataSize { + t.Fatalf("data length mismatch for net.Conn.Read, got %d, want %d", m, dataSize) + } + if !bytes.Equal(dstData, data) { + t.Errorf("data mismatch, got %s, want %s", dstData, data) + } +} + +// newSendFileTest initializes a new test for sendfile. +// +// It creates source file and destination sockets, and populates the source file +// with random data of the specified size. It also hooks package os' call +// to poll.Sendfile and returns the hook so it can be inspected. +func newSendFileTest(t *testing.T, proto string, size int64) (net.Conn, *File, net.Conn, []byte, *sendFileHook) { + t.Helper() + + hook := hookSendFile(t) + + client, server := createSocketPair(t, proto) + tempFile, data := createTempFile(t, size) + + return client, tempFile, server, data, hook +} + +func hookSendFile(t *testing.T) *sendFileHook { + h := new(sendFileHook) + h.install() + t.Cleanup(h.uninstall) + return h +} + +type sendFileHook struct { + called bool + dstfd int + srcfd int + remain int64 + + written int64 + handled bool + err error + + original func(dst *poll.FD, src int, remain int64) (int64, error, bool) +} + +func (h *sendFileHook) install() { + h.original = *PollSendFile + *PollSendFile = func(dst *poll.FD, src int, remain int64) (int64, error, bool) { + h.called = true + h.dstfd = dst.Sysfd + h.srcfd = src + h.remain = remain + h.written, h.err, h.handled = h.original(dst, src, remain) + return h.written, h.err, h.handled + } +} + +func (h *sendFileHook) uninstall() { + *PollSendFile = h.original +} + +func createTempFile(t *testing.T, size int64) (*File, []byte) { + f, err := CreateTemp(t.TempDir(), "writeto-sendfile-to-socket") + if err != nil { + t.Fatalf("failed to create temporary file: %v", err) + } + t.Cleanup(func() { + f.Close() + }) + + randSeed := time.Now().Unix() + t.Logf("random data seed: %d\n", randSeed) + prng := rand.New(rand.NewSource(randSeed)) + data := make([]byte, size) + prng.Read(data) + if _, err := f.Write(data); err != nil { + t.Fatalf("failed to create and feed the file: %v", err) + } + if err := f.Sync(); err != nil { + t.Fatalf("failed to save the file: %v", err) + } + if _, err := f.Seek(0, io.SeekStart); err != nil { + t.Fatalf("failed to rewind the file: %v", err) + } + + return f, data +} diff --git a/platform/dbops/binaries/go/go/src/os/zero_copy_linux.go b/platform/dbops/binaries/go/go/src/os/zero_copy_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..7c45aefeee8621b4585f39998558ab9316da19d7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/zero_copy_linux.go @@ -0,0 +1,167 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import ( + "internal/poll" + "io" + "syscall" +) + +var ( + pollCopyFileRange = poll.CopyFileRange + pollSplice = poll.Splice + pollSendFile = poll.SendFile +) + +func (f *File) writeTo(w io.Writer) (written int64, handled bool, err error) { + pfd, network := getPollFDAndNetwork(w) + // TODO(panjf2000): same as File.spliceToFile. 
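	// Note: getPollFDAndNetwork (defined later in this file) yields a nil
	// *poll.FD for any destination that does not expose a *net.rawConn, so
	// regular files and non-stream sockets fall through to the generic
	// io.Copy path via the early return below.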
+ if pfd == nil || !pfd.IsStream || !isUnixOrTCP(string(network)) { + return + } + + sc, err := f.SyscallConn() + if err != nil { + return + } + + rerr := sc.Read(func(fd uintptr) (done bool) { + written, err, handled = pollSendFile(pfd, int(fd), 1<<63-1) + return true + }) + + if err == nil { + err = rerr + } + + return written, handled, wrapSyscallError("sendfile", err) +} + +func (f *File) readFrom(r io.Reader) (written int64, handled bool, err error) { + // Neither copy_file_range(2) nor splice(2) supports destinations opened with + // O_APPEND, so don't bother to try zero-copy with these system calls. + // + // Visit https://man7.org/linux/man-pages/man2/copy_file_range.2.html#ERRORS and + // https://man7.org/linux/man-pages/man2/splice.2.html#ERRORS for details. + if f.appendMode { + return 0, false, nil + } + + written, handled, err = f.copyFileRange(r) + if handled { + return + } + return f.spliceToFile(r) +} + +func (f *File) spliceToFile(r io.Reader) (written int64, handled bool, err error) { + var ( + remain int64 + lr *io.LimitedReader + ) + if lr, r, remain = tryLimitedReader(r); remain <= 0 { + return 0, true, nil + } + + pfd, _ := getPollFDAndNetwork(r) + // TODO(panjf2000): run some tests to see if we should unlock the non-streams for splice. + // Streams benefit the most from the splice(2), non-streams are not even supported in old kernels + // where splice(2) will just return EINVAL; newer kernels support non-streams like UDP, but I really + // doubt that splice(2) could help non-streams, cuz they usually send small frames respectively + // and one splice call would result in one frame. + // splice(2) is suitable for large data but the generation of fragments defeats its edge here. + // Therefore, don't bother to try splice if the r is not a streaming descriptor. + if pfd == nil || !pfd.IsStream { + return + } + + var syscallName string + written, handled, syscallName, err = pollSplice(&f.pfd, pfd, remain) + + if lr != nil { + lr.N = remain - written + } + + return written, handled, wrapSyscallError(syscallName, err) +} + +func (f *File) copyFileRange(r io.Reader) (written int64, handled bool, err error) { + var ( + remain int64 + lr *io.LimitedReader + ) + if lr, r, remain = tryLimitedReader(r); remain <= 0 { + return 0, true, nil + } + + var src *File + switch v := r.(type) { + case *File: + src = v + case fileWithoutWriteTo: + src = v.File + default: + return 0, false, nil + } + + if src.checkValid("ReadFrom") != nil { + // Avoid returning the error as we report handled as false, + // leave further error handling as the responsibility of the caller. + return 0, false, nil + } + + written, handled, err = pollCopyFileRange(&f.pfd, &src.pfd, remain) + if lr != nil { + lr.N -= written + } + return written, handled, wrapSyscallError("copy_file_range", err) +} + +// getPollFDAndNetwork tries to get the poll.FD and network type from the given interface +// by expecting the underlying type of i to be the implementation of syscall.Conn +// that contains a *net.rawConn. 
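For orientation, a hypothetical sketch of how this zero-copy path is reached from user code: io.Copy from an *os.File into a stream socket dispatches through File.WriteTo and hence to writeTo above (the endpoint and file names are made up):

package main

import (
	"io"
	"log"
	"net"
	"os"
)

func send(conn net.Conn, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	// io.Copy sees that *os.File implements io.WriterTo; on Linux,
	// writeTo can then hand the whole transfer to sendfile(2) when
	// conn is a "tcp" or "unix" stream.
	_, err = io.Copy(conn, f)
	return err
}

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:8080") // hypothetical endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	if err := send(conn, "payload.bin"); err != nil { // hypothetical file
		log.Fatal(err)
	}
}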
+func getPollFDAndNetwork(i any) (*poll.FD, poll.String) { + sc, ok := i.(syscall.Conn) + if !ok { + return nil, "" + } + rc, err := sc.SyscallConn() + if err != nil { + return nil, "" + } + irc, ok := rc.(interface { + PollFD() *poll.FD + Network() poll.String + }) + if !ok { + return nil, "" + } + return irc.PollFD(), irc.Network() +} + +// tryLimitedReader tries to assert the io.Reader to io.LimitedReader, it returns the io.LimitedReader, +// the underlying io.Reader and the remaining amount of bytes if the assertion succeeds, +// otherwise it just returns the original io.Reader and the theoretical unlimited remaining amount of bytes. +func tryLimitedReader(r io.Reader) (*io.LimitedReader, io.Reader, int64) { + var remain int64 = 1<<63 - 1 // by default, copy until EOF + + lr, ok := r.(*io.LimitedReader) + if !ok { + return nil, r, remain + } + + remain = lr.N + return lr, lr.R, remain +} + +func isUnixOrTCP(network string) bool { + switch network { + case "tcp", "tcp4", "tcp6", "unix": + return true + default: + return false + } +} diff --git a/platform/dbops/binaries/go/go/src/os/zero_copy_stub.go b/platform/dbops/binaries/go/go/src/os/zero_copy_stub.go new file mode 100644 index 0000000000000000000000000000000000000000..9ec5808101889d7f90879fd41e3216349acef8f7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/os/zero_copy_stub.go @@ -0,0 +1,17 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux + +package os + +import "io" + +func (f *File) writeTo(w io.Writer) (written int64, handled bool, err error) { + return 0, false, nil +} + +func (f *File) readFrom(r io.Reader) (n int64, handled bool, err error) { + return 0, false, nil +} diff --git a/platform/dbops/binaries/go/go/src/path/example_test.go b/platform/dbops/binaries/go/go/src/path/example_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e30ebd13dcd739c32657e8a8b482d1d6503822e1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/path/example_test.go @@ -0,0 +1,121 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package path_test + +import ( + "fmt" + "path" +) + +func ExampleBase() { + fmt.Println(path.Base("/a/b")) + fmt.Println(path.Base("/")) + fmt.Println(path.Base("")) + // Output: + // b + // / + // . +} + +func ExampleClean() { + paths := []string{ + "a/c", + "a//c", + "a/c/.", + "a/c/b/..", + "/../a/c", + "/../a/b/../././/c", + "", + } + + for _, p := range paths { + fmt.Printf("Clean(%q) = %q\n", p, path.Clean(p)) + } + + // Output: + // Clean("a/c") = "a/c" + // Clean("a//c") = "a/c" + // Clean("a/c/.") = "a/c" + // Clean("a/c/b/..") = "a/c" + // Clean("/../a/c") = "/a/c" + // Clean("/../a/b/../././/c") = "/a/c" + // Clean("") = "." +} + +func ExampleDir() { + fmt.Println(path.Dir("/a/b/c")) + fmt.Println(path.Dir("a/b/c")) + fmt.Println(path.Dir("/a/")) + fmt.Println(path.Dir("a/")) + fmt.Println(path.Dir("/")) + fmt.Println(path.Dir("")) + // Output: + // /a/b + // a/b + // /a + // a + // / + // . 
+} + +func ExampleExt() { + fmt.Println(path.Ext("/a/b/c/bar.css")) + fmt.Println(path.Ext("/")) + fmt.Println(path.Ext("")) + // Output: + // .css + // + // +} + +func ExampleIsAbs() { + fmt.Println(path.IsAbs("/dev/null")) + // Output: true +} + +func ExampleJoin() { + fmt.Println(path.Join("a", "b", "c")) + fmt.Println(path.Join("a", "b/c")) + fmt.Println(path.Join("a/b", "c")) + + fmt.Println(path.Join("a/b", "../../../xyz")) + + fmt.Println(path.Join("", "")) + fmt.Println(path.Join("a", "")) + fmt.Println(path.Join("", "a")) + + // Output: + // a/b/c + // a/b/c + // a/b/c + // ../xyz + // + // a + // a +} + +func ExampleMatch() { + fmt.Println(path.Match("abc", "abc")) + fmt.Println(path.Match("a*", "abc")) + fmt.Println(path.Match("a*/b", "a/c/b")) + // Output: + // true + // true + // false +} + +func ExampleSplit() { + split := func(s string) { + dir, file := path.Split(s) + fmt.Printf("path.Split(%q) = dir: %q, file: %q\n", s, dir, file) + } + split("static/myfile.css") + split("myfile.css") + split("") + // Output: + // path.Split("static/myfile.css") = dir: "static/", file: "myfile.css" + // path.Split("myfile.css") = dir: "", file: "myfile.css" + // path.Split("") = dir: "", file: "" +} diff --git a/platform/dbops/binaries/go/go/src/path/match.go b/platform/dbops/binaries/go/go/src/path/match.go new file mode 100644 index 0000000000000000000000000000000000000000..d8b6809568fbdcbebca6d4d648b3d014f0fc1360 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/path/match.go @@ -0,0 +1,230 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package path + +import ( + "errors" + "internal/bytealg" + "unicode/utf8" +) + +// ErrBadPattern indicates a pattern was malformed. +var ErrBadPattern = errors.New("syntax error in pattern") + +// Match reports whether name matches the shell pattern. +// The pattern syntax is: +// +// pattern: +// { term } +// term: +// '*' matches any sequence of non-/ characters +// '?' matches any single non-/ character +// '[' [ '^' ] { character-range } ']' +// character class (must be non-empty) +// c matches character c (c != '*', '?', '\\', '[') +// '\\' c matches character c +// +// character-range: +// c matches character c (c != '\\', '-', ']') +// '\\' c matches character c +// lo '-' hi matches character c for lo <= c <= hi +// +// Match requires pattern to match all of name, not just a substring. +// The only possible returned error is [ErrBadPattern], when pattern +// is malformed. +func Match(pattern, name string) (matched bool, err error) { +Pattern: + for len(pattern) > 0 { + var star bool + var chunk string + star, chunk, pattern = scanChunk(pattern) + if star && chunk == "" { + // Trailing * matches rest of string unless it has a /. + return bytealg.IndexByteString(name, '/') < 0, nil + } + // Look for match at current position. + t, ok, err := matchChunk(chunk, name) + // if we're the last chunk, make sure we've exhausted the name + // otherwise we'll give a false result even if we could still match + // using the star + if ok && (len(t) == 0 || len(pattern) > 0) { + name = t + continue + } + if err != nil { + return false, err + } + if star { + // Look for match skipping i+1 bytes. + // Cannot skip /. 
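+			// For example, matching "a*b" against "axxb": the chunk after
+			// the star is "b", the attempt at the current position "xxb"
+			// has already failed, and this loop retries at "xb" and then
+			// "b", which succeeds and exhausts the name.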
+ for i := 0; i < len(name) && name[i] != '/'; i++ { + t, ok, err := matchChunk(chunk, name[i+1:]) + if ok { + // if we're the last chunk, make sure we exhausted the name + if len(pattern) == 0 && len(t) > 0 { + continue + } + name = t + continue Pattern + } + if err != nil { + return false, err + } + } + } + // Before returning false with no error, + // check that the remainder of the pattern is syntactically valid. + for len(pattern) > 0 { + _, chunk, pattern = scanChunk(pattern) + if _, _, err := matchChunk(chunk, ""); err != nil { + return false, err + } + } + return false, nil + } + return len(name) == 0, nil +} + +// scanChunk gets the next segment of pattern, which is a non-star string +// possibly preceded by a star. +func scanChunk(pattern string) (star bool, chunk, rest string) { + for len(pattern) > 0 && pattern[0] == '*' { + pattern = pattern[1:] + star = true + } + inrange := false + var i int +Scan: + for i = 0; i < len(pattern); i++ { + switch pattern[i] { + case '\\': + // error check handled in matchChunk: bad pattern. + if i+1 < len(pattern) { + i++ + } + case '[': + inrange = true + case ']': + inrange = false + case '*': + if !inrange { + break Scan + } + } + } + return star, pattern[0:i], pattern[i:] +} + +// matchChunk checks whether chunk matches the beginning of s. +// If so, it returns the remainder of s (after the match). +// Chunk is all single-character operators: literals, char classes, and ?. +func matchChunk(chunk, s string) (rest string, ok bool, err error) { + // failed records whether the match has failed. + // After the match fails, the loop continues on processing chunk, + // checking that the pattern is well-formed but no longer reading s. + failed := false + for len(chunk) > 0 { + if !failed && len(s) == 0 { + failed = true + } + switch chunk[0] { + case '[': + // character class + var r rune + if !failed { + var n int + r, n = utf8.DecodeRuneInString(s) + s = s[n:] + } + chunk = chunk[1:] + // possibly negated + negated := false + if len(chunk) > 0 && chunk[0] == '^' { + negated = true + chunk = chunk[1:] + } + // parse all ranges + match := false + nrange := 0 + for { + if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { + chunk = chunk[1:] + break + } + var lo, hi rune + if lo, chunk, err = getEsc(chunk); err != nil { + return "", false, err + } + hi = lo + if chunk[0] == '-' { + if hi, chunk, err = getEsc(chunk[1:]); err != nil { + return "", false, err + } + } + if lo <= r && r <= hi { + match = true + } + nrange++ + } + if match == negated { + failed = true + } + + case '?': + if !failed { + if s[0] == '/' { + failed = true + } + _, n := utf8.DecodeRuneInString(s) + s = s[n:] + } + chunk = chunk[1:] + + case '\\': + chunk = chunk[1:] + if len(chunk) == 0 { + return "", false, ErrBadPattern + } + fallthrough + + default: + if !failed { + if chunk[0] != s[0] { + failed = true + } + s = s[1:] + } + chunk = chunk[1:] + } + } + if failed { + return "", false, nil + } + return s, true, nil +} + +// getEsc gets a possibly-escaped character from chunk, for a character class. 
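+// For example, getEsc("a-z]") returns 'a' with nchunk "-z]", while
+// getEsc(`\-z]`) consumes the escape and returns the literal '-' with
+// nchunk "z]".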
+func getEsc(chunk string) (r rune, nchunk string, err error) { + if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { + err = ErrBadPattern + return + } + if chunk[0] == '\\' { + chunk = chunk[1:] + if len(chunk) == 0 { + err = ErrBadPattern + return + } + } + r, n := utf8.DecodeRuneInString(chunk) + if r == utf8.RuneError && n == 1 { + err = ErrBadPattern + } + nchunk = chunk[n:] + if len(nchunk) == 0 { + err = ErrBadPattern + } + return +} diff --git a/platform/dbops/binaries/go/go/src/path/match_test.go b/platform/dbops/binaries/go/go/src/path/match_test.go new file mode 100644 index 0000000000000000000000000000000000000000..996bd691eb82b60c8ae875dddcbffedb98da9c91 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/path/match_test.go @@ -0,0 +1,84 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package path_test + +import ( + . "path" + "testing" +) + +type MatchTest struct { + pattern, s string + match bool + err error +} + +var matchTests = []MatchTest{ + {"abc", "abc", true, nil}, + {"*", "abc", true, nil}, + {"*c", "abc", true, nil}, + {"a*", "a", true, nil}, + {"a*", "abc", true, nil}, + {"a*", "ab/c", false, nil}, + {"a*/b", "abc/b", true, nil}, + {"a*/b", "a/c/b", false, nil}, + {"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil}, + {"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil}, + {"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil}, + {"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil}, + {"a*b?c*x", "abxbbxdbxebxczzx", true, nil}, + {"a*b?c*x", "abxbbxdbxebxczzy", false, nil}, + {"ab[c]", "abc", true, nil}, + {"ab[b-d]", "abc", true, nil}, + {"ab[e-g]", "abc", false, nil}, + {"ab[^c]", "abc", false, nil}, + {"ab[^b-d]", "abc", false, nil}, + {"ab[^e-g]", "abc", true, nil}, + {"a\\*b", "a*b", true, nil}, + {"a\\*b", "ab", false, nil}, + {"a?b", "a☺b", true, nil}, + {"a[^a]b", "a☺b", true, nil}, + {"a???b", "a☺b", false, nil}, + {"a[^a][^a][^a]b", "a☺b", false, nil}, + {"[a-ζ]*", "α", true, nil}, + {"*[a-ζ]", "A", false, nil}, + {"a?b", "a/b", false, nil}, + {"a*b", "a/b", false, nil}, + {"[\\]a]", "]", true, nil}, + {"[\\-]", "-", true, nil}, + {"[x\\-]", "x", true, nil}, + {"[x\\-]", "-", true, nil}, + {"[x\\-]", "z", false, nil}, + {"[\\-x]", "x", true, nil}, + {"[\\-x]", "-", true, nil}, + {"[\\-x]", "a", false, nil}, + {"[]a]", "]", false, ErrBadPattern}, + {"[-]", "-", false, ErrBadPattern}, + {"[x-]", "x", false, ErrBadPattern}, + {"[x-]", "-", false, ErrBadPattern}, + {"[x-]", "z", false, ErrBadPattern}, + {"[-x]", "x", false, ErrBadPattern}, + {"[-x]", "-", false, ErrBadPattern}, + {"[-x]", "a", false, ErrBadPattern}, + {"\\", "a", false, ErrBadPattern}, + {"[a-b-c]", "a", false, ErrBadPattern}, + {"[", "a", false, ErrBadPattern}, + {"[^", "a", false, ErrBadPattern}, + {"[^bc", "a", false, ErrBadPattern}, + {"a[", "a", false, ErrBadPattern}, + {"a[", "ab", false, ErrBadPattern}, + {"a[", "x", false, ErrBadPattern}, + {"a/b[", "x", false, ErrBadPattern}, + {"*x", "xxx", true, nil}, +} + +func TestMatch(t *testing.T) { + for _, tt := range matchTests { + ok, err := Match(tt.pattern, tt.s) + if ok != tt.match || err != tt.err { + t.Errorf("Match(%#q, %#q) = %v, %v want %v, %v", tt.pattern, tt.s, ok, err, tt.match, tt.err) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/path/path.go b/platform/dbops/binaries/go/go/src/path/path.go new file mode 100644 index 0000000000000000000000000000000000000000..5149a92c4fb9daa7b0001421be2d9d37def29310 --- /dev/null 
+++ b/platform/dbops/binaries/go/go/src/path/path.go @@ -0,0 +1,226 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package path implements utility routines for manipulating slash-separated +// paths. +// +// The path package should only be used for paths separated by forward +// slashes, such as the paths in URLs. This package does not deal with +// Windows paths with drive letters or backslashes; to manipulate +// operating system paths, use the [path/filepath] package. +package path + +import "internal/bytealg" + +// A lazybuf is a lazily constructed path buffer. +// It supports append, reading previously appended bytes, +// and retrieving the final string. It does not allocate a buffer +// to hold the output until that output diverges from s. +type lazybuf struct { + s string + buf []byte + w int +} + +func (b *lazybuf) index(i int) byte { + if b.buf != nil { + return b.buf[i] + } + return b.s[i] +} + +func (b *lazybuf) append(c byte) { + if b.buf == nil { + if b.w < len(b.s) && b.s[b.w] == c { + b.w++ + return + } + b.buf = make([]byte, len(b.s)) + copy(b.buf, b.s[:b.w]) + } + b.buf[b.w] = c + b.w++ +} + +func (b *lazybuf) string() string { + if b.buf == nil { + return b.s[:b.w] + } + return string(b.buf[:b.w]) +} + +// Clean returns the shortest path name equivalent to path +// by purely lexical processing. It applies the following rules +// iteratively until no further processing can be done: +// +// 1. Replace multiple slashes with a single slash. +// 2. Eliminate each . path name element (the current directory). +// 3. Eliminate each inner .. path name element (the parent directory) +// along with the non-.. element that precedes it. +// 4. Eliminate .. elements that begin a rooted path: +// that is, replace "/.." by "/" at the beginning of a path. +// +// The returned path ends in a slash only if it is the root "/". +// +// If the result of this process is an empty string, Clean +// returns the string ".". +// +// See also Rob Pike, “Lexical File Names in Plan 9 or +// Getting Dot-Dot Right,” +// https://9p.io/sys/doc/lexnames.html +func Clean(path string) string { + if path == "" { + return "." + } + + rooted := path[0] == '/' + n := len(path) + + // Invariants: + // reading from path; r is index of next byte to process. + // writing to buf; w is index of next byte to write. + // dotdot is index in buf where .. must stop, either because + // it is the leading slash or it is a leading ../../.. prefix. + out := lazybuf{s: path} + r, dotdot := 0, 0 + if rooted { + out.append('/') + r, dotdot = 1, 1 + } + + for r < n { + switch { + case path[r] == '/': + // empty path element + r++ + case path[r] == '.' && (r+1 == n || path[r+1] == '/'): + // . element + r++ + case path[r] == '.' && path[r+1] == '.' && (r+2 == n || path[r+2] == '/'): + // .. element: remove to last / + r += 2 + switch { + case out.w > dotdot: + // can backtrack + out.w-- + for out.w > dotdot && out.index(out.w) != '/' { + out.w-- + } + case !rooted: + // cannot backtrack, but not rooted, so append .. element. + if out.w > 0 { + out.append('/') + } + out.append('.') + out.append('.') + dotdot = out.w + } + default: + // real path element. + // add slash if needed + if rooted && out.w != 1 || !rooted && out.w != 0 { + out.append('/') + } + // copy element + for ; r < n && path[r] != '/'; r++ { + out.append(path[r]) + } + } + } + + // Turn empty string into "." + if out.w == 0 { + return "." 
+ } + + return out.string() +} + +// Split splits path immediately following the final slash, +// separating it into a directory and file name component. +// If there is no slash in path, Split returns an empty dir and +// file set to path. +// The returned values have the property that path = dir+file. +func Split(path string) (dir, file string) { + i := bytealg.LastIndexByteString(path, '/') + return path[:i+1], path[i+1:] +} + +// Join joins any number of path elements into a single path, +// separating them with slashes. Empty elements are ignored. +// The result is Cleaned. However, if the argument list is +// empty or all its elements are empty, Join returns +// an empty string. +func Join(elem ...string) string { + size := 0 + for _, e := range elem { + size += len(e) + } + if size == 0 { + return "" + } + buf := make([]byte, 0, size+len(elem)-1) + for _, e := range elem { + if len(buf) > 0 || e != "" { + if len(buf) > 0 { + buf = append(buf, '/') + } + buf = append(buf, e...) + } + } + return Clean(string(buf)) +} + +// Ext returns the file name extension used by path. +// The extension is the suffix beginning at the final dot +// in the final slash-separated element of path; +// it is empty if there is no dot. +func Ext(path string) string { + for i := len(path) - 1; i >= 0 && path[i] != '/'; i-- { + if path[i] == '.' { + return path[i:] + } + } + return "" +} + +// Base returns the last element of path. +// Trailing slashes are removed before extracting the last element. +// If the path is empty, Base returns ".". +// If the path consists entirely of slashes, Base returns "/". +func Base(path string) string { + if path == "" { + return "." + } + // Strip trailing slashes. + for len(path) > 0 && path[len(path)-1] == '/' { + path = path[0 : len(path)-1] + } + // Find the last element + if i := bytealg.LastIndexByteString(path, '/'); i >= 0 { + path = path[i+1:] + } + // If empty now, it had only slashes. + if path == "" { + return "/" + } + return path +} + +// IsAbs reports whether the path is absolute. +func IsAbs(path string) bool { + return len(path) > 0 && path[0] == '/' +} + +// Dir returns all but the last element of path, typically the path's directory. +// After dropping the final element using [Split], the path is Cleaned and trailing +// slashes are removed. +// If the path is empty, Dir returns ".". +// If the path consists entirely of slashes followed by non-slash bytes, Dir +// returns a single slash. In any other case, the returned path does not end in a +// slash. +func Dir(path string) string { + dir, _ := Split(path) + return Clean(dir) +} diff --git a/platform/dbops/binaries/go/go/src/path/path_test.go b/platform/dbops/binaries/go/go/src/path/path_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a57286f6b8468b17de80e4ca3ffa78d90820dfac --- /dev/null +++ b/platform/dbops/binaries/go/go/src/path/path_test.go @@ -0,0 +1,236 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package path_test + +import ( + . 
"path" + "runtime" + "testing" +) + +type PathTest struct { + path, result string +} + +var cleantests = []PathTest{ + // Already clean + {"", "."}, + {"abc", "abc"}, + {"abc/def", "abc/def"}, + {"a/b/c", "a/b/c"}, + {".", "."}, + {"..", ".."}, + {"../..", "../.."}, + {"../../abc", "../../abc"}, + {"/abc", "/abc"}, + {"/", "/"}, + + // Remove trailing slash + {"abc/", "abc"}, + {"abc/def/", "abc/def"}, + {"a/b/c/", "a/b/c"}, + {"./", "."}, + {"../", ".."}, + {"../../", "../.."}, + {"/abc/", "/abc"}, + + // Remove doubled slash + {"abc//def//ghi", "abc/def/ghi"}, + {"//abc", "/abc"}, + {"///abc", "/abc"}, + {"//abc//", "/abc"}, + {"abc//", "abc"}, + + // Remove . elements + {"abc/./def", "abc/def"}, + {"/./abc/def", "/abc/def"}, + {"abc/.", "abc"}, + + // Remove .. elements + {"abc/def/ghi/../jkl", "abc/def/jkl"}, + {"abc/def/../ghi/../jkl", "abc/jkl"}, + {"abc/def/..", "abc"}, + {"abc/def/../..", "."}, + {"/abc/def/../..", "/"}, + {"abc/def/../../..", ".."}, + {"/abc/def/../../..", "/"}, + {"abc/def/../../../ghi/jkl/../../../mno", "../../mno"}, + + // Combinations + {"abc/./../def", "def"}, + {"abc//./../def", "def"}, + {"abc/../../././../def", "../../def"}, +} + +func TestClean(t *testing.T) { + for _, test := range cleantests { + if s := Clean(test.path); s != test.result { + t.Errorf("Clean(%q) = %q, want %q", test.path, s, test.result) + } + if s := Clean(test.result); s != test.result { + t.Errorf("Clean(%q) = %q, want %q", test.result, s, test.result) + } + } +} + +func TestCleanMallocs(t *testing.T) { + if testing.Short() { + t.Skip("skipping malloc count in short mode") + } + if runtime.GOMAXPROCS(0) > 1 { + t.Log("skipping AllocsPerRun checks; GOMAXPROCS>1") + return + } + + for _, test := range cleantests { + allocs := testing.AllocsPerRun(100, func() { Clean(test.result) }) + if allocs > 0 { + t.Errorf("Clean(%q): %v allocs, want zero", test.result, allocs) + } + } +} + +type SplitTest struct { + path, dir, file string +} + +var splittests = []SplitTest{ + {"a/b", "a/", "b"}, + {"a/b/", "a/b/", ""}, + {"a/", "a/", ""}, + {"a", "", "a"}, + {"/", "/", ""}, +} + +func TestSplit(t *testing.T) { + for _, test := range splittests { + if d, f := Split(test.path); d != test.dir || f != test.file { + t.Errorf("Split(%q) = %q, %q, want %q, %q", test.path, d, f, test.dir, test.file) + } + } +} + +type JoinTest struct { + elem []string + path string +} + +var jointests = []JoinTest{ + // zero parameters + {[]string{}, ""}, + + // one parameter + {[]string{""}, ""}, + {[]string{"a"}, "a"}, + + // two parameters + {[]string{"a", "b"}, "a/b"}, + {[]string{"a", ""}, "a"}, + {[]string{"", "b"}, "b"}, + {[]string{"/", "a"}, "/a"}, + {[]string{"/", ""}, "/"}, + {[]string{"a/", "b"}, "a/b"}, + {[]string{"a/", ""}, "a"}, + {[]string{"", ""}, ""}, +} + +func TestJoin(t *testing.T) { + for _, test := range jointests { + if p := Join(test.elem...); p != test.path { + t.Errorf("Join(%q) = %q, want %q", test.elem, p, test.path) + } + } +} + +type ExtTest struct { + path, ext string +} + +var exttests = []ExtTest{ + {"path.go", ".go"}, + {"path.pb.go", ".go"}, + {"a.dir/b", ""}, + {"a.dir/b.go", ".go"}, + {"a.dir/", ""}, +} + +func TestExt(t *testing.T) { + for _, test := range exttests { + if x := Ext(test.path); x != test.ext { + t.Errorf("Ext(%q) = %q, want %q", test.path, x, test.ext) + } + } +} + +var basetests = []PathTest{ + // Already clean + {"", "."}, + {".", "."}, + {"/.", "."}, + {"/", "/"}, + {"////", "/"}, + {"x/", "x"}, + {"abc", "abc"}, + {"abc/def", "def"}, + {"a/b/.x", ".x"}, + 
{"a/b/c.", "c."}, + {"a/b/c.x", "c.x"}, +} + +func TestBase(t *testing.T) { + for _, test := range basetests { + if s := Base(test.path); s != test.result { + t.Errorf("Base(%q) = %q, want %q", test.path, s, test.result) + } + } +} + +var dirtests = []PathTest{ + {"", "."}, + {".", "."}, + {"/.", "/"}, + {"/", "/"}, + {"////", "/"}, + {"/foo", "/"}, + {"x/", "x"}, + {"abc", "."}, + {"abc/def", "abc"}, + {"abc////def", "abc"}, + {"a/b/.x", "a/b"}, + {"a/b/c.", "a/b"}, + {"a/b/c.x", "a/b"}, +} + +func TestDir(t *testing.T) { + for _, test := range dirtests { + if s := Dir(test.path); s != test.result { + t.Errorf("Dir(%q) = %q, want %q", test.path, s, test.result) + } + } +} + +type IsAbsTest struct { + path string + isAbs bool +} + +var isAbsTests = []IsAbsTest{ + {"", false}, + {"/", true}, + {"/usr/bin/gcc", true}, + {"..", false}, + {"/a/../bb", true}, + {".", false}, + {"./", false}, + {"lala", false}, +} + +func TestIsAbs(t *testing.T) { + for _, test := range isAbsTests { + if r := IsAbs(test.path); r != test.isAbs { + t.Errorf("IsAbs(%q) = %v, want %v", test.path, r, test.isAbs) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/plugin/plugin.go b/platform/dbops/binaries/go/go/src/plugin/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..b4b1697b52c82dc0cd59beaab6d38d3551118f9a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/plugin/plugin.go @@ -0,0 +1,120 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package plugin implements loading and symbol resolution of Go plugins. +// +// A plugin is a Go main package with exported functions and variables that +// has been built with: +// +// go build -buildmode=plugin +// +// When a plugin is first opened, the init functions of all packages not +// already part of the program are called. The main function is not run. +// A plugin is only initialized once, and cannot be closed. +// +// # Warnings +// +// The ability to dynamically load parts of an application during +// execution, perhaps based on user-defined configuration, may be a +// useful building block in some designs. In particular, because +// applications and dynamically loaded functions can share data +// structures directly, plugins may enable very high-performance +// integration of separate parts. +// +// However, the plugin mechanism has many significant drawbacks that +// should be considered carefully during the design. For example: +// +// - Plugins are currently supported only on Linux, FreeBSD, and +// macOS, making them unsuitable for applications intended to be +// portable. +// +// - Applications that use plugins may require careful configuration +// to ensure that the various parts of the program be made available +// in the correct location in the file system (or container image). +// By contrast, deploying an application consisting of a single static +// executable is straightforward. +// +// - Reasoning about program initialization is more difficult when +// some packages may not be initialized until long after the +// application has started running. +// +// - Bugs in applications that load plugins could be exploited by +// an attacker to load dangerous or untrusted libraries. 
+// +// - Runtime crashes are likely to occur unless all parts of the +// program (the application and all its plugins) are compiled +// using exactly the same version of the toolchain, the same build +// tags, and the same values of certain flags and environment +// variables. +// +// - Similar crashing problems are likely to arise unless all common +// dependencies of the application and its plugins are built from +// exactly the same source code. +// +// - Together, these restrictions mean that, in practice, the +// application and its plugins must all be built together by a +// single person or component of a system. In that case, it may +// be simpler for that person or component to generate Go source +// files that blank-import the desired set of plugins and then +// compile a static executable in the usual way. +// +// For these reasons, many users decide that traditional interprocess +// communication (IPC) mechanisms such as sockets, pipes, remote +// procedure call (RPC), shared memory mappings, or file system +// operations may be more suitable despite the performance overheads. +package plugin + +// Plugin is a loaded Go plugin. +type Plugin struct { + pluginpath string + err string // set if plugin failed to load + loaded chan struct{} // closed when loaded + syms map[string]any +} + +// Open opens a Go plugin. +// If a path has already been opened, then the existing *[Plugin] is returned. +// It is safe for concurrent use by multiple goroutines. +func Open(path string) (*Plugin, error) { + return open(path) +} + +// Lookup searches for a symbol named symName in plugin p. +// A symbol is any exported variable or function. +// It reports an error if the symbol is not found. +// It is safe for concurrent use by multiple goroutines. +func (p *Plugin) Lookup(symName string) (Symbol, error) { + return lookup(p, symName) +} + +// A Symbol is a pointer to a variable or function. +// +// For example, a plugin defined as +// +// package main +// +// import "fmt" +// +// var V int +// +// func F() { fmt.Printf("Hello, number %d\n", V) } +// +// may be loaded with the [Open] function and then the exported package +// symbols V and F can be accessed +// +// p, err := plugin.Open("plugin_name.so") +// if err != nil { +// panic(err) +// } +// v, err := p.Lookup("V") +// if err != nil { +// panic(err) +// } +// f, err := p.Lookup("F") +// if err != nil { +// panic(err) +// } +// *v.(*int) = 7 +// f.(func())() // prints "Hello, number 7" +type Symbol any diff --git a/platform/dbops/binaries/go/go/src/plugin/plugin_dlopen.go b/platform/dbops/binaries/go/go/src/plugin/plugin_dlopen.go new file mode 100644 index 0000000000000000000000000000000000000000..f6ae219d95efc738c561021627959586eeadc358 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/plugin/plugin_dlopen.go @@ -0,0 +1,153 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+//go:build (linux && cgo) || (darwin && cgo) || (freebsd && cgo)
+
+package plugin
+
+/*
+#cgo linux LDFLAGS: -ldl
+#include <dlfcn.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <stdint.h>
+
+#include <stdio.h>
+
+static uintptr_t pluginOpen(const char* path, char** err) {
+	void* h = dlopen(path, RTLD_NOW|RTLD_GLOBAL);
+	if (h == NULL) {
+		*err = (char*)dlerror();
+	}
+	return (uintptr_t)h;
+}
+
+static void* pluginLookup(uintptr_t h, const char* name, char** err) {
+	void* r = dlsym((void*)h, name);
+	if (r == NULL) {
+		*err = (char*)dlerror();
+	}
+	return r;
+}
+*/
+import "C"
+
+import (
+	"errors"
+	"sync"
+	"unsafe"
+)
+
+func open(name string) (*Plugin, error) {
+	cPath := make([]byte, C.PATH_MAX+1)
+	cRelName := make([]byte, len(name)+1)
+	copy(cRelName, name)
+	if C.realpath(
+		(*C.char)(unsafe.Pointer(&cRelName[0])),
+		(*C.char)(unsafe.Pointer(&cPath[0]))) == nil {
+		return nil, errors.New(`plugin.Open("` + name + `"): realpath failed`)
+	}
+
+	filepath := C.GoString((*C.char)(unsafe.Pointer(&cPath[0])))
+
+	pluginsMu.Lock()
+	if p := plugins[filepath]; p != nil {
+		pluginsMu.Unlock()
+		if p.err != "" {
+			return nil, errors.New(`plugin.Open("` + name + `"): ` + p.err + ` (previous failure)`)
+		}
+		<-p.loaded
+		return p, nil
+	}
+	var cErr *C.char
+	h := C.pluginOpen((*C.char)(unsafe.Pointer(&cPath[0])), &cErr)
+	if h == 0 {
+		pluginsMu.Unlock()
+		return nil, errors.New(`plugin.Open("` + name + `"): ` + C.GoString(cErr))
+	}
+	// TODO(crawshaw): look for plugin note, confirm it is a Go plugin
+	// and it was built with the correct toolchain.
+	if len(name) > 3 && name[len(name)-3:] == ".so" {
+		name = name[:len(name)-3]
+	}
+	if plugins == nil {
+		plugins = make(map[string]*Plugin)
+	}
+	pluginpath, syms, initTasks, errstr := lastmoduleinit()
+	if errstr != "" {
+		plugins[filepath] = &Plugin{
+			pluginpath: pluginpath,
+			err:        errstr,
+		}
+		pluginsMu.Unlock()
+		return nil, errors.New(`plugin.Open("` + name + `"): ` + errstr)
+	}
+	// This function can be called from the init function of a plugin.
+	// Drop a placeholder in the map so subsequent opens can wait on it.
+	p := &Plugin{
+		pluginpath: pluginpath,
+		loaded:     make(chan struct{}),
+	}
+	plugins[filepath] = p
+	pluginsMu.Unlock()
+
+	doInit(initTasks)
+
+	// Fill out the value of each plugin symbol.
+	updatedSyms := map[string]any{}
+	for symName, sym := range syms {
+		isFunc := symName[0] == '.'
+		if isFunc {
+			delete(syms, symName)
+			symName = symName[1:]
+		}
+
+		fullName := pluginpath + "." + symName
+		cname := make([]byte, len(fullName)+1)
+		copy(cname, fullName)
+
+		p := C.pluginLookup(h, (*C.char)(unsafe.Pointer(&cname[0])), &cErr)
+		if p == nil {
+			return nil, errors.New(`plugin.Open("` + name + `"): could not find symbol ` + symName + `: ` + C.GoString(cErr))
+		}
+		valp := (*[2]unsafe.Pointer)(unsafe.Pointer(&sym))
+		if isFunc {
+			(*valp)[1] = unsafe.Pointer(&p)
+		} else {
+			(*valp)[1] = p
+		}
+		// we can't add to syms during iteration as we'll end up processing
+		// some symbols twice with the inability to tell if the symbol is a function
+		updatedSyms[symName] = sym
+	}
+	p.syms = updatedSyms
+
+	close(p.loaded)
+	return p, nil
+}
+
+func lookup(p *Plugin, symName string) (Symbol, error) {
+	if s := p.syms[symName]; s != nil {
+		return s, nil
+	}
+	return nil, errors.New("plugin: symbol " + symName + " not found in plugin " + p.pluginpath)
+}
+
+var (
+	pluginsMu sync.Mutex
+	plugins   map[string]*Plugin
+)
+
+// lastmoduleinit is defined in package runtime.
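+// In upstream Go the implementation lives in the runtime (as
+// plugin_lastmoduleinit, connected by a go:linkname), which is why the
+// declaration below has no body.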
+func lastmoduleinit() (pluginpath string, syms map[string]any, inittasks []*initTask, errstr string) + +// doInit is defined in package runtime. +// +//go:linkname doInit runtime.doInit +func doInit(t []*initTask) + +type initTask struct { + // fields defined in runtime.initTask. We only handle pointers to an initTask + // in this package, so the contents are irrelevant. +} diff --git a/platform/dbops/binaries/go/go/src/plugin/plugin_stubs.go b/platform/dbops/binaries/go/go/src/plugin/plugin_stubs.go new file mode 100644 index 0000000000000000000000000000000000000000..2e9492e7c657e66d5a4c8483902095d495ff0a13 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/plugin/plugin_stubs.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!linux && !freebsd && !darwin) || !cgo + +package plugin + +import "errors" + +func lookup(p *Plugin, symName string) (Symbol, error) { + return nil, errors.New("plugin: not implemented") +} + +func open(name string) (*Plugin, error) { + return nil, errors.New("plugin: not implemented") +} diff --git a/platform/dbops/binaries/go/go/src/plugin/plugin_test.go b/platform/dbops/binaries/go/go/src/plugin/plugin_test.go new file mode 100644 index 0000000000000000000000000000000000000000..557987cfa6c94b140da71ab5bc760d6e57d26a1a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/plugin/plugin_test.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package plugin_test + +import ( + _ "plugin" + "testing" +) + +func TestPlugin(t *testing.T) { + // This test makes sure that executable that imports plugin + // package can actually run. See issue #28789 for details. +} diff --git a/platform/dbops/binaries/go/go/src/reflect/abi.go b/platform/dbops/binaries/go/go/src/reflect/abi.go new file mode 100644 index 0000000000000000000000000000000000000000..2b5f40538057a727bdede3083da61b882c7f1db5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/abi.go @@ -0,0 +1,510 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflect + +import ( + "internal/abi" + "internal/goarch" + "unsafe" +) + +// These variables are used by the register assignment +// algorithm in this file. +// +// They should be modified with care (no other reflect code +// may be executing) and are generally only modified +// when testing this package. +// +// They should never be set higher than their internal/abi +// constant counterparts, because the system relies on a +// structure that is at least large enough to hold the +// registers the system supports. +// +// Currently they're set to zero because using the actual +// constants will break every part of the toolchain that +// uses reflect to call functions (e.g. go test, or anything +// that uses text/template). The values that are currently +// commented out there should be the actual values once +// we're ready to use the register ABI everywhere. +var ( + intArgRegs = abi.IntArgRegs + floatArgRegs = abi.FloatArgRegs + floatRegSize = uintptr(abi.EffectiveFloatRegSize) +) + +// abiStep represents an ABI "instruction." Each instruction +// describes one part of how to translate between a Go value +// in memory and a call frame. 
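+//
+// For instance (an illustrative value, assuming a 64-bit target): an int
+// argument assigned to the first integer register would be described by
+//
+//	abiStep{kind: abiStepIntReg, offset: 0, size: 8, ireg: 0}
+//
+// whereas the same argument stack-assigned at the start of the frame would
+// be abiStep{kind: abiStepStack, offset: 0, size: 8, stkOff: 0}.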
+type abiStep struct { + kind abiStepKind + + // offset and size together describe a part of a Go value + // in memory. + offset uintptr + size uintptr // size in bytes of the part + + // These fields describe the ABI side of the translation. + stkOff uintptr // stack offset, used if kind == abiStepStack + ireg int // integer register index, used if kind == abiStepIntReg or kind == abiStepPointer + freg int // FP register index, used if kind == abiStepFloatReg +} + +// abiStepKind is the "op-code" for an abiStep instruction. +type abiStepKind int + +const ( + abiStepBad abiStepKind = iota + abiStepStack // copy to/from stack + abiStepIntReg // copy to/from integer register + abiStepPointer // copy pointer to/from integer register + abiStepFloatReg // copy to/from FP register +) + +// abiSeq represents a sequence of ABI instructions for copying +// from a series of reflect.Values to a call frame (for call arguments) +// or vice-versa (for call results). +// +// An abiSeq should be populated by calling its addArg method. +type abiSeq struct { + // steps is the set of instructions. + // + // The instructions are grouped together by whole arguments, + // with the starting index for the instructions + // of the i'th Go value available in valueStart. + // + // For instance, if this abiSeq represents 3 arguments + // passed to a function, then the 2nd argument's steps + // begin at steps[valueStart[1]]. + // + // Because reflect accepts Go arguments in distinct + // Values and each Value is stored separately, each abiStep + // that begins a new argument will have its offset + // field == 0. + steps []abiStep + valueStart []int + + stackBytes uintptr // stack space used + iregs, fregs int // registers used +} + +func (a *abiSeq) dump() { + for i, p := range a.steps { + println("part", i, p.kind, p.offset, p.size, p.stkOff, p.ireg, p.freg) + } + print("values ") + for _, i := range a.valueStart { + print(i, " ") + } + println() + println("stack", a.stackBytes) + println("iregs", a.iregs) + println("fregs", a.fregs) +} + +// stepsForValue returns the ABI instructions for translating +// the i'th Go argument or return value represented by this +// abiSeq to the Go ABI. +func (a *abiSeq) stepsForValue(i int) []abiStep { + s := a.valueStart[i] + var e int + if i == len(a.valueStart)-1 { + e = len(a.steps) + } else { + e = a.valueStart[i+1] + } + return a.steps[s:e] +} + +// addArg extends the abiSeq with a new Go value of type t. +// +// If the value was stack-assigned, returns the single +// abiStep describing that translation, and nil otherwise. +func (a *abiSeq) addArg(t *abi.Type) *abiStep { + // We'll always be adding a new value, so do that first. + pStart := len(a.steps) + a.valueStart = append(a.valueStart, pStart) + if t.Size() == 0 { + // If the size of the argument type is zero, then + // in order to degrade gracefully into ABI0, we need + // to stack-assign this type. The reason is that + // although zero-sized types take up no space on the + // stack, they do cause the next argument to be aligned. + // So just do that here, but don't bother actually + // generating a new ABI step for it (there's nothing to + // actually copy). + // + // We cannot handle this in the recursive case of + // regAssign because zero-sized *fields* of a + // non-zero-sized struct do not cause it to be + // stack-assigned. So we need a special case here + // at the top. 
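+		// (Illustrative: a zero-sized [0]int64 argument has 8-byte
+		// alignment, so the align call below rounds stackBytes up even
+		// though no abiStep is emitted for the argument itself.)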
+		a.stackBytes = align(a.stackBytes, uintptr(t.Align()))
+		return nil
+	}
+	// Hold a copy of "a" so that we can roll back if
+	// register assignment fails.
+	aOld := *a
+	if !a.regAssign(t, 0) {
+		// Register assignment failed. Roll back any changes
+		// and stack-assign.
+		*a = aOld
+		a.stackAssign(t.Size(), uintptr(t.Align()))
+		return &a.steps[len(a.steps)-1]
+	}
+	return nil
+}
+
+// addRcvr extends the abiSeq with a new method call
+// receiver according to the interface calling convention.
+//
+// If the receiver was stack-assigned, returns the single
+// abiStep describing that translation, and nil otherwise.
+// Returns true if the receiver is a pointer.
+func (a *abiSeq) addRcvr(rcvr *abi.Type) (*abiStep, bool) {
+	// The receiver is always one word.
+	a.valueStart = append(a.valueStart, len(a.steps))
+	var ok, ptr bool
+	if ifaceIndir(rcvr) || rcvr.Pointers() {
+		ok = a.assignIntN(0, goarch.PtrSize, 1, 0b1)
+		ptr = true
+	} else {
+		// TODO(mknyszek): Is this case even possible?
+		// The interface data word never contains a non-pointer
+		// value. This case was copied over from older code
+		// in the reflect package which only conditionally added
+		// a pointer bit to the reflect.(Value).Call stack frame's
+		// GC bitmap.
+		ok = a.assignIntN(0, goarch.PtrSize, 1, 0b0)
+		ptr = false
+	}
+	if !ok {
+		a.stackAssign(goarch.PtrSize, goarch.PtrSize)
+		return &a.steps[len(a.steps)-1], ptr
+	}
+	return nil, ptr
+}
+
+// regAssign attempts to reserve argument registers for a value of
+// type t, stored at some offset.
+//
+// It returns whether or not the assignment succeeded, but
+// leaves any changes it made to a.steps behind, so the caller
+// must undo that work by adjusting a.steps if it fails.
+//
+// This method along with the assign* methods represent the
+// complete register-assignment algorithm for the Go ABI.
+func (a *abiSeq) regAssign(t *abi.Type, offset uintptr) bool {
+	switch Kind(t.Kind()) {
+	case UnsafePointer, Pointer, Chan, Map, Func:
+		return a.assignIntN(offset, t.Size(), 1, 0b1)
+	case Bool, Int, Uint, Int8, Uint8, Int16, Uint16, Int32, Uint32, Uintptr:
+		return a.assignIntN(offset, t.Size(), 1, 0b0)
+	case Int64, Uint64:
+		switch goarch.PtrSize {
+		case 4:
+			return a.assignIntN(offset, 4, 2, 0b0)
+		case 8:
+			return a.assignIntN(offset, 8, 1, 0b0)
+		}
+	case Float32, Float64:
+		return a.assignFloatN(offset, t.Size(), 1)
+	case Complex64:
+		return a.assignFloatN(offset, 4, 2)
+	case Complex128:
+		return a.assignFloatN(offset, 8, 2)
+	case String:
+		return a.assignIntN(offset, goarch.PtrSize, 2, 0b01)
+	case Interface:
+		return a.assignIntN(offset, goarch.PtrSize, 2, 0b10)
+	case Slice:
+		return a.assignIntN(offset, goarch.PtrSize, 3, 0b001)
+	case Array:
+		tt := (*arrayType)(unsafe.Pointer(t))
+		switch tt.Len {
+		case 0:
+			// There's nothing to assign, so don't modify
+			// a.steps but succeed so the caller doesn't
+			// try to stack-assign this value.
+			return true
+		case 1:
+			return a.regAssign(tt.Elem, offset)
+		default:
+			return false
+		}
+	case Struct:
+		st := (*structType)(unsafe.Pointer(t))
+		for i := range st.Fields {
+			f := &st.Fields[i]
+			if !a.regAssign(f.Typ, offset+f.Offset) {
+				return false
+			}
+		}
+		return true
+	default:
+		print("t.Kind == ", t.Kind(), "\n")
+		panic("unknown type kind")
+	}
+	panic("unhandled register assignment path")
+}
+
+// assignIntN assigns n values to registers, each "size" bytes large,
+// from the data at [offset, offset+n*size) in memory. Each value at
+// [offset+i*size, offset+(i+1)*size) for i < n is assigned to the
+// next n integer registers.
+//
+// Bit i in ptrMap indicates whether the i'th value is a pointer.
+// n must be <= 8.
+//
+// Returns whether assignment succeeded.
+func (a *abiSeq) assignIntN(offset, size uintptr, n int, ptrMap uint8) bool {
+	if n > 8 || n < 0 {
+		panic("invalid n")
+	}
+	if ptrMap != 0 && size != goarch.PtrSize {
+		panic("non-empty pointer map passed for non-pointer-size values")
+	}
+	if a.iregs+n > intArgRegs {
+		return false
+	}
+	for i := 0; i < n; i++ {
+		kind := abiStepIntReg
+		if ptrMap&(uint8(1)<<i) != 0 {
+			kind = abiStepPointer
+		}
+		a.steps = append(a.steps, abiStep{
+			kind:   kind,
+			offset: offset + uintptr(i)*size,
+			size:   size,
+			ireg:   a.iregs,
+		})
+		a.iregs++
+	}
+	return true
+}
+
+// assignFloatN assigns n values to registers, each "size" bytes large,
+// from the data at [offset, offset+n*size) in memory. Each value at
+// [offset+i*size, offset+(i+1)*size) for i < n is assigned to the
+// next n floating-point registers.
+//
+// Returns whether assignment succeeded.
+func (a *abiSeq) assignFloatN(offset, size uintptr, n int) bool {
+	if n < 0 {
+		panic("invalid n")
+	}
+	if a.fregs+n > floatArgRegs || floatRegSize < size {
+		return false
+	}
+	for i := 0; i < n; i++ {
+		a.steps = append(a.steps, abiStep{
+			kind:   abiStepFloatReg,
+			offset: offset + uintptr(i)*size,
+			size:   size,
+			freg:   a.fregs,
+		})
+		a.fregs++
+	}
+	return true
+}
+
+// stackAssign reserves space for one value that is "size" bytes
+// large with alignment "alignment" to the stack.
+//
+// Should not be called directly; use addArg instead.
+func (a *abiSeq) stackAssign(size, alignment uintptr) {
+	a.stackBytes = align(a.stackBytes, alignment)
+	a.steps = append(a.steps, abiStep{
+		kind:   abiStepStack,
+		offset: 0, // Only used for whole arguments, so the memory offset is 0.
+		size:   size,
+		stkOff: a.stackBytes,
+	})
+	a.stackBytes += size
+}
+
+// abiDesc describes the ABI for a function or method.
+type abiDesc struct {
+	// call and ret represent the translation steps for
+	// the call and return paths of a Go function.
+	call, ret abiSeq
+
+	// These fields describe the stack space allocated
+	// for the call. stackCallArgsSize is the amount of space
+	// reserved for arguments but not return values. retOffset
+	// is the offset at which return values begin, and
+	// spill is the size in bytes of additional space reserved
+	// to spill argument registers into in case of preemption in
+	// reflectcall's stack frame.
+	stackCallArgsSize, retOffset, spill uintptr
+
+	// stackPtrs is a bitmap that indicates whether
+	// each word in the ABI stack space (stack-assigned
+	// args + return values) is a pointer. Used
+	// as the heap pointer bitmap for stack space
+	// passed to reflectcall.
+	stackPtrs *bitVector
+
+	// inRegPtrs is a bitmap whose i'th bit indicates
+	// whether the i'th integer argument register contains
+	// a pointer. Used by makeFuncStub and methodValueCall
+	// to make result pointers visible to the GC.
+	//
+	// outRegPtrs is the same, but for result values.
+	// Used by reflectcall to make result pointers visible
+	// to the GC.
+	inRegPtrs, outRegPtrs abi.IntArgRegBitmap
+}
+
+func (a *abiDesc) dump() {
+	println("ABI")
+	println("call")
+	a.call.dump()
+	println("ret")
+	a.ret.dump()
+	println("stackCallArgsSize", a.stackCallArgsSize)
+	println("retOffset", a.retOffset)
+	println("spill", a.spill)
+	print("inRegPtrs:")
+	dumpPtrBitMap(a.inRegPtrs)
+	println()
+	print("outRegPtrs:")
+	dumpPtrBitMap(a.outRegPtrs)
+	println()
+}
+
+func dumpPtrBitMap(b abi.IntArgRegBitmap) {
+	for i := 0; i < intArgRegs; i++ {
+		x := 0
+		if b.Get(i) {
+			x = 1
+		}
+		print(" ", x)
+	}
+}
+
+func newAbiDesc(t *funcType, rcvr *abi.Type) abiDesc {
+	// We need to add space for this argument to
+	// the frame so that it can spill args into it.
+	//
+	// The size of this space is just the sum of the sizes
+	// of each register-allocated type.
+	//
+	// TODO(mknyszek): Remove this when we no longer have
+	// caller reserved spill space.
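+	//
+	// (Illustrative arithmetic: for func(p *int, f float64) with both
+	// arguments register-assigned, the loop below accumulates
+	// spill = 8 (pointer) + 8 (float64) = 16 bytes on a 64-bit target.)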
+ spill := uintptr(0) + + // Compute gc program & stack bitmap for stack arguments + stackPtrs := new(bitVector) + + // Compute the stack frame pointer bitmap and register + // pointer bitmap for arguments. + inRegPtrs := abi.IntArgRegBitmap{} + + // Compute abiSeq for input parameters. + var in abiSeq + if rcvr != nil { + stkStep, isPtr := in.addRcvr(rcvr) + if stkStep != nil { + if isPtr { + stackPtrs.append(1) + } else { + stackPtrs.append(0) + } + } else { + spill += goarch.PtrSize + } + } + for i, arg := range t.InSlice() { + stkStep := in.addArg(arg) + if stkStep != nil { + addTypeBits(stackPtrs, stkStep.stkOff, arg) + } else { + spill = align(spill, uintptr(arg.Align())) + spill += arg.Size() + for _, st := range in.stepsForValue(i) { + if st.kind == abiStepPointer { + inRegPtrs.Set(st.ireg) + } + } + } + } + spill = align(spill, goarch.PtrSize) + + // From the input parameters alone, we now know + // the stackCallArgsSize and retOffset. + stackCallArgsSize := in.stackBytes + retOffset := align(in.stackBytes, goarch.PtrSize) + + // Compute the stack frame pointer bitmap and register + // pointer bitmap for return values. + outRegPtrs := abi.IntArgRegBitmap{} + + // Compute abiSeq for output parameters. + var out abiSeq + // Stack-assigned return values do not share + // space with arguments like they do with registers, + // so we need to inject a stack offset here. + // Fake it by artificially extending stackBytes by + // the return offset. + out.stackBytes = retOffset + for i, res := range t.OutSlice() { + stkStep := out.addArg(res) + if stkStep != nil { + addTypeBits(stackPtrs, stkStep.stkOff, res) + } else { + for _, st := range out.stepsForValue(i) { + if st.kind == abiStepPointer { + outRegPtrs.Set(st.ireg) + } + } + } + } + // Undo the faking from earlier so that stackBytes + // is accurate. + out.stackBytes -= retOffset + return abiDesc{in, out, stackCallArgsSize, retOffset, spill, stackPtrs, inRegPtrs, outRegPtrs} +} + +// intFromReg loads an argSize sized integer from reg and places it at to. +// +// argSize must be non-zero, fit in a register, and a power-of-two. +func intFromReg(r *abi.RegArgs, reg int, argSize uintptr, to unsafe.Pointer) { + memmove(to, r.IntRegArgAddr(reg, argSize), argSize) +} + +// intToReg loads an argSize sized integer and stores it into reg. +// +// argSize must be non-zero, fit in a register, and a power-of-two. +func intToReg(r *abi.RegArgs, reg int, argSize uintptr, from unsafe.Pointer) { + memmove(r.IntRegArgAddr(reg, argSize), from, argSize) +} + +// floatFromReg loads a float value from its register representation in r. +// +// argSize must be 4 or 8. +func floatFromReg(r *abi.RegArgs, reg int, argSize uintptr, to unsafe.Pointer) { + switch argSize { + case 4: + *(*float32)(to) = archFloat32FromReg(r.Floats[reg]) + case 8: + *(*float64)(to) = *(*float64)(unsafe.Pointer(&r.Floats[reg])) + default: + panic("bad argSize") + } +} + +// floatToReg stores a float value in its register representation in r. +// +// argSize must be either 4 or 8. 
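+//
+// (Illustrative round trip, using the generic implementation where a
+// float32 is zero-extended into the low 4 bytes of the register slot:
+//
+//	var regs abi.RegArgs
+//	x := float32(1.5)
+//	floatToReg(&regs, 0, 4, unsafe.Pointer(&x))
+//	var y float32
+//	floatFromReg(&regs, 0, 4, unsafe.Pointer(&y)) // y == 1.5
+//
+// The archFloat32ToReg/archFloat32FromReg hooks exist because some
+// architectures, such as riscv64, keep float32 values NaN-boxed in
+// registers rather than zero-extended.)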
+func floatToReg(r *abi.RegArgs, reg int, argSize uintptr, from unsafe.Pointer) { + switch argSize { + case 4: + r.Floats[reg] = archFloat32ToReg(*(*float32)(from)) + case 8: + r.Floats[reg] = *(*uint64)(from) + default: + panic("bad argSize") + } +} diff --git a/platform/dbops/binaries/go/go/src/reflect/abi_test.go b/platform/dbops/binaries/go/go/src/reflect/abi_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9d93472779012ba5a1d2a5694a2872787e0b789a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/abi_test.go @@ -0,0 +1,989 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.regabiargs + +package reflect_test + +import ( + "internal/abi" + "math" + "math/rand" + "reflect" + "runtime" + "testing" + "testing/quick" +) + +// As of early May 2021 this is no longer necessary for amd64, +// but it remains in case this is needed for the next register abi port. +// TODO (1.18) If enabling register ABI on additional architectures turns out not to need this, remove it. +type MagicLastTypeNameForTestingRegisterABI struct{} + +func TestMethodValueCallABI(t *testing.T) { + // Enable register-based reflect.Call and ensure we don't + // use potentially incorrect cached versions by clearing + // the cache before we start and after we're done. + defer reflect.SetArgRegs(reflect.SetArgRegs(abi.IntArgRegs, abi.FloatArgRegs, abi.EffectiveFloatRegSize)) + + // This test is simple. Calling a method value involves + // pretty much just plumbing whatever arguments in whichever + // location through to reflectcall. They're already set up + // for us, so there isn't a whole lot to do. Let's just + // make sure that we can pass register and stack arguments + // through. The exact combination is not super important. 
+ makeMethodValue := func(method string) (*StructWithMethods, any) { + s := new(StructWithMethods) + v := reflect.ValueOf(s).MethodByName(method) + return s, v.Interface() + } + + a0 := StructFewRegs{ + 10, 11, 12, 13, + 20.0, 21.0, 22.0, 23.0, + } + a1 := [4]uint64{100, 101, 102, 103} + a2 := StructFillRegs{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, + } + + s, i := makeMethodValue("AllRegsCall") + f0 := i.(func(StructFewRegs, MagicLastTypeNameForTestingRegisterABI) StructFewRegs) + r0 := f0(a0, MagicLastTypeNameForTestingRegisterABI{}) + if r0 != a0 { + t.Errorf("bad method value call: got %#v, want %#v", r0, a0) + } + if s.Value != 1 { + t.Errorf("bad method value call: failed to set s.Value: got %d, want %d", s.Value, 1) + } + + s, i = makeMethodValue("RegsAndStackCall") + f1 := i.(func(StructFewRegs, [4]uint64, MagicLastTypeNameForTestingRegisterABI) (StructFewRegs, [4]uint64)) + r0, r1 := f1(a0, a1, MagicLastTypeNameForTestingRegisterABI{}) + if r0 != a0 { + t.Errorf("bad method value call: got %#v, want %#v", r0, a0) + } + if r1 != a1 { + t.Errorf("bad method value call: got %#v, want %#v", r1, a1) + } + if s.Value != 2 { + t.Errorf("bad method value call: failed to set s.Value: got %d, want %d", s.Value, 2) + } + + s, i = makeMethodValue("SpillStructCall") + f2 := i.(func(StructFillRegs, MagicLastTypeNameForTestingRegisterABI) StructFillRegs) + r2 := f2(a2, MagicLastTypeNameForTestingRegisterABI{}) + if r2 != a2 { + t.Errorf("bad method value call: got %#v, want %#v", r2, a2) + } + if s.Value != 3 { + t.Errorf("bad method value call: failed to set s.Value: got %d, want %d", s.Value, 3) + } + + s, i = makeMethodValue("ValueRegMethodSpillInt") + f3 := i.(func(StructFillRegs, int, MagicLastTypeNameForTestingRegisterABI) (StructFillRegs, int)) + r3a, r3b := f3(a2, 42, MagicLastTypeNameForTestingRegisterABI{}) + if r3a != a2 { + t.Errorf("bad method value call: got %#v, want %#v", r3a, a2) + } + if r3b != 42 { + t.Errorf("bad method value call: got %#v, want %#v", r3b, 42) + } + if s.Value != 4 { + t.Errorf("bad method value call: failed to set s.Value: got %d, want %d", s.Value, 4) + } + + s, i = makeMethodValue("ValueRegMethodSpillPtr") + f4 := i.(func(StructFillRegs, *byte, MagicLastTypeNameForTestingRegisterABI) (StructFillRegs, *byte)) + vb := byte(10) + r4a, r4b := f4(a2, &vb, MagicLastTypeNameForTestingRegisterABI{}) + if r4a != a2 { + t.Errorf("bad method value call: got %#v, want %#v", r4a, a2) + } + if r4b != &vb { + t.Errorf("bad method value call: got %#v, want %#v", r4b, &vb) + } + if s.Value != 5 { + t.Errorf("bad method value call: failed to set s.Value: got %d, want %d", s.Value, 5) + } +} + +type StructWithMethods struct { + Value int +} + +type StructFewRegs struct { + a0, a1, a2, a3 int + f0, f1, f2, f3 float64 +} + +type StructFillRegs struct { + a0, a1, a2, a3, a4, a5, a6, a7, a8 int + f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14 float64 +} + +func (m *StructWithMethods) AllRegsCall(s StructFewRegs, _ MagicLastTypeNameForTestingRegisterABI) StructFewRegs { + m.Value = 1 + return s +} + +func (m *StructWithMethods) RegsAndStackCall(s StructFewRegs, a [4]uint64, _ MagicLastTypeNameForTestingRegisterABI) (StructFewRegs, [4]uint64) { + m.Value = 2 + return s, a +} + +func (m *StructWithMethods) SpillStructCall(s StructFillRegs, _ MagicLastTypeNameForTestingRegisterABI) StructFillRegs { + m.Value = 3 + return s +} + +// When called as a method value, i is passed on the stack. 
+// When called as a method, i is passed in a register. +func (m *StructWithMethods) ValueRegMethodSpillInt(s StructFillRegs, i int, _ MagicLastTypeNameForTestingRegisterABI) (StructFillRegs, int) { + m.Value = 4 + return s, i +} + +// When called as a method value, i is passed on the stack. +// When called as a method, i is passed in a register. +func (m *StructWithMethods) ValueRegMethodSpillPtr(s StructFillRegs, i *byte, _ MagicLastTypeNameForTestingRegisterABI) (StructFillRegs, *byte) { + m.Value = 5 + return s, i +} + +func TestReflectCallABI(t *testing.T) { + // Enable register-based reflect.Call and ensure we don't + // use potentially incorrect cached versions by clearing + // the cache before we start and after we're done. + defer reflect.SetArgRegs(reflect.SetArgRegs(abi.IntArgRegs, abi.FloatArgRegs, abi.EffectiveFloatRegSize)) + + // Execute the functions defined below which all have the + // same form and perform the same function: pass all arguments + // to return values. The purpose is to test the call boundary + // and make sure it works. + r := rand.New(rand.NewSource(genValueRandSeed)) + for _, fn := range abiCallTestCases { + fn := reflect.ValueOf(fn) + t.Run(runtime.FuncForPC(fn.Pointer()).Name(), func(t *testing.T) { + typ := fn.Type() + if typ.Kind() != reflect.Func { + t.Fatalf("test case is not a function, has type: %s", typ.String()) + } + if typ.NumIn() != typ.NumOut() { + t.Fatalf("test case has different number of inputs and outputs: %d in, %d out", typ.NumIn(), typ.NumOut()) + } + var args []reflect.Value + for i := 0; i < typ.NumIn(); i++ { + args = append(args, genValue(t, typ.In(i), r)) + } + results := fn.Call(args) + for i := range results { + x, y := args[i].Interface(), results[i].Interface() + if reflect.DeepEqual(x, y) { + continue + } + t.Errorf("arg and result %d differ: got %+v, want %+v", i, y, x) + } + }) + } +} + +func TestReflectMakeFuncCallABI(t *testing.T) { + // Enable register-based reflect.MakeFunc and ensure we don't + // use potentially incorrect cached versions by clearing + // the cache before we start and after we're done. + defer reflect.SetArgRegs(reflect.SetArgRegs(abi.IntArgRegs, abi.FloatArgRegs, abi.EffectiveFloatRegSize)) + + // Execute the functions defined below which all have the + // same form and perform the same function: pass all arguments + // to return values. The purpose is to test the call boundary + // and make sure it works. + r := rand.New(rand.NewSource(genValueRandSeed)) + makeFuncHandler := func(args []reflect.Value) []reflect.Value { + if len(args) == 0 { + return []reflect.Value{} + } + return args[:len(args)-1] // The last Value is an empty magic value. + } + for _, callFn := range abiMakeFuncTestCases { + fnTyp := reflect.TypeOf(callFn).In(0) + fn := reflect.MakeFunc(fnTyp, makeFuncHandler) + callFn := reflect.ValueOf(callFn) + t.Run(runtime.FuncForPC(callFn.Pointer()).Name(), func(t *testing.T) { + args := []reflect.Value{fn} + for i := 0; i < fnTyp.NumIn()-1; /* last one is magic type */ i++ { + args = append(args, genValue(t, fnTyp.In(i), r)) + } + results := callFn.Call(args) + for i := range results { + x, y := args[i+1].Interface(), results[i].Interface() + if reflect.DeepEqual(x, y) { + continue + } + t.Errorf("arg and result %d differ: got %+v, want %+v", i, y, x) + } + }) + } + t.Run("OnlyPointerInRegisterGC", func(t *testing.T) { + // This test attempts to induce a failure wherein + // the last pointer to an object is passed via registers. 
+ // If makeFuncStub doesn't successfully store the pointer + // to a location visible to the GC, the object should be + // freed and then the next GC should notice that an object + // was inexplicably revived. + var f func(b *uint64, _ MagicLastTypeNameForTestingRegisterABI) *uint64 + mkfn := reflect.MakeFunc(reflect.TypeOf(f), func(args []reflect.Value) []reflect.Value { + *(args[0].Interface().(*uint64)) = 5 + return args[:1] + }) + fn := mkfn.Interface().(func(*uint64, MagicLastTypeNameForTestingRegisterABI) *uint64) + + // Call the MakeFunc'd function while trying pass the only pointer + // to a new heap-allocated uint64. + *reflect.CallGC = true + x := fn(new(uint64), MagicLastTypeNameForTestingRegisterABI{}) + *reflect.CallGC = false + + // Check for bad pointers (which should be x if things went wrong). + runtime.GC() + + // Sanity check x. + if *x != 5 { + t.Fatalf("failed to set value in object") + } + }) +} + +var abiCallTestCases = []any{ + passNone, + passInt, + passInt8, + passInt16, + passInt32, + passInt64, + passUint, + passUint8, + passUint16, + passUint32, + passUint64, + passFloat32, + passFloat64, + passComplex64, + passComplex128, + passManyInt, + passManyFloat64, + passArray1, + passArray, + passArray1Mix, + passString, + // TODO(mknyszek): Test passing interface values. + passSlice, + passPointer, + passStruct1, + passStruct2, + passStruct3, + passStruct4, + passStruct5, + passStruct6, + passStruct7, + passStruct8, + passStruct9, + passStruct10, + // TODO(mknyszek): Test passing unsafe.Pointer values. + // TODO(mknyszek): Test passing chan values. + passStruct11, + passStruct12, + passStruct13, + passStruct14, + passStruct15, + pass2Struct1, + passEmptyStruct, + passStruct10AndSmall, +} + +// Functions for testing reflect function call functionality. 
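+//
+// Each helper below is marked //go:registerparams, which forces the
+// register-based calling convention for the function regardless of the
+// default ABI, and //go:noinline, which preserves a real call boundary so
+// the reflectcall machinery is actually exercised.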
+ +//go:registerparams +//go:noinline +func passNone() {} + +//go:registerparams +//go:noinline +func passInt(a int) int { + return a +} + +//go:registerparams +//go:noinline +func passInt8(a int8) int8 { + return a +} + +//go:registerparams +//go:noinline +func passInt16(a int16) int16 { + return a +} + +//go:registerparams +//go:noinline +func passInt32(a int32) int32 { + return a +} + +//go:registerparams +//go:noinline +func passInt64(a int64) int64 { + return a +} + +//go:registerparams +//go:noinline +func passUint(a uint) uint { + return a +} + +//go:registerparams +//go:noinline +func passUint8(a uint8) uint8 { + return a +} + +//go:registerparams +//go:noinline +func passUint16(a uint16) uint16 { + return a +} + +//go:registerparams +//go:noinline +func passUint32(a uint32) uint32 { + return a +} + +//go:registerparams +//go:noinline +func passUint64(a uint64) uint64 { + return a +} + +//go:registerparams +//go:noinline +func passFloat32(a float32) float32 { + return a +} + +//go:registerparams +//go:noinline +func passFloat64(a float64) float64 { + return a +} + +//go:registerparams +//go:noinline +func passComplex64(a complex64) complex64 { + return a +} + +//go:registerparams +//go:noinline +func passComplex128(a complex128) complex128 { + return a +} + +//go:registerparams +//go:noinline +func passArray1(a [1]uint32) [1]uint32 { + return a +} + +//go:registerparams +//go:noinline +func passArray(a [2]uintptr) [2]uintptr { + return a +} + +//go:registerparams +//go:noinline +func passArray1Mix(a int, b [1]uint32, c float64) (int, [1]uint32, float64) { + return a, b, c +} + +//go:registerparams +//go:noinline +func passString(a string) string { + return a +} + +//go:registerparams +//go:noinline +func passSlice(a []byte) []byte { + return a +} + +//go:registerparams +//go:noinline +func passPointer(a *byte) *byte { + return a +} + +//go:registerparams +//go:noinline +func passManyInt(a, b, c, d, e, f, g, h, i, j int) (int, int, int, int, int, int, int, int, int, int) { + return a, b, c, d, e, f, g, h, i, j +} + +//go:registerparams +//go:noinline +func passManyFloat64(a, b, c, d, e, f, g, h, i, j, l, m, n, o, p, q, r, s, t float64) (float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64) { + return a, b, c, d, e, f, g, h, i, j, l, m, n, o, p, q, r, s, t +} + +//go:registerparams +//go:noinline +func passStruct1(a Struct1) Struct1 { + return a +} + +//go:registerparams +//go:noinline +func passStruct2(a Struct2) Struct2 { + return a +} + +//go:registerparams +//go:noinline +func passStruct3(a Struct3) Struct3 { + return a +} + +//go:registerparams +//go:noinline +func passStruct4(a Struct4) Struct4 { + return a +} + +//go:registerparams +//go:noinline +func passStruct5(a Struct5) Struct5 { + return a +} + +//go:registerparams +//go:noinline +func passStruct6(a Struct6) Struct6 { + return a +} + +//go:registerparams +//go:noinline +func passStruct7(a Struct7) Struct7 { + return a +} + +//go:registerparams +//go:noinline +func passStruct8(a Struct8) Struct8 { + return a +} + +//go:registerparams +//go:noinline +func passStruct9(a Struct9) Struct9 { + return a +} + +//go:registerparams +//go:noinline +func passStruct10(a Struct10) Struct10 { + return a +} + +//go:registerparams +//go:noinline +func passStruct11(a Struct11) Struct11 { + return a +} + +//go:registerparams +//go:noinline +func passStruct12(a Struct12) Struct12 { + return a +} + +//go:registerparams 
+//go:noinline
+func passStruct13(a Struct13) Struct13 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct14(a Struct14) Struct14 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct15(a Struct15) Struct15 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func pass2Struct1(a, b Struct1) (x, y Struct1) {
+ return a, b
+}
+
+//go:registerparams
+//go:noinline
+func passEmptyStruct(a int, b struct{}, c float64) (int, struct{}, float64) {
+ return a, b, c
+}
+
+// This test case forces a large argument to the stack followed by more
+// in-register arguments.
+//
+//go:registerparams
+//go:noinline
+func passStruct10AndSmall(a Struct10, b byte, c uint) (Struct10, byte, uint) {
+ return a, b, c
+}
+
+var abiMakeFuncTestCases = []any{
+ callArgsNone,
+ callArgsInt,
+ callArgsInt8,
+ callArgsInt16,
+ callArgsInt32,
+ callArgsInt64,
+ callArgsUint,
+ callArgsUint8,
+ callArgsUint16,
+ callArgsUint32,
+ callArgsUint64,
+ callArgsFloat32,
+ callArgsFloat64,
+ callArgsComplex64,
+ callArgsComplex128,
+ callArgsManyInt,
+ callArgsManyFloat64,
+ callArgsArray1,
+ callArgsArray,
+ callArgsArray1Mix,
+ callArgsString,
+ // TODO(mknyszek): Test passing interface values.
+ callArgsSlice,
+ callArgsPointer,
+ callArgsStruct1,
+ callArgsStruct2,
+ callArgsStruct3,
+ callArgsStruct4,
+ callArgsStruct5,
+ callArgsStruct6,
+ callArgsStruct7,
+ callArgsStruct8,
+ callArgsStruct9,
+ callArgsStruct10,
+ // TODO(mknyszek): Test passing unsafe.Pointer values.
+ // TODO(mknyszek): Test passing chan values.
+ callArgsStruct11,
+ callArgsStruct12,
+ callArgsStruct13,
+ callArgsStruct14,
+ callArgsStruct15,
+ callArgs2Struct1,
+ callArgsEmptyStruct,
+}
+
+//go:registerparams
+//go:noinline
+func callArgsNone(f func(MagicLastTypeNameForTestingRegisterABI)) {
+ f(MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsInt(f func(int, MagicLastTypeNameForTestingRegisterABI) int, a0 int) int {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsInt8(f func(int8, MagicLastTypeNameForTestingRegisterABI) int8, a0 int8) int8 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsInt16(f func(int16, MagicLastTypeNameForTestingRegisterABI) int16, a0 int16) int16 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsInt32(f func(int32, MagicLastTypeNameForTestingRegisterABI) int32, a0 int32) int32 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsInt64(f func(int64, MagicLastTypeNameForTestingRegisterABI) int64, a0 int64) int64 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsUint(f func(uint, MagicLastTypeNameForTestingRegisterABI) uint, a0 uint) uint {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsUint8(f func(uint8, MagicLastTypeNameForTestingRegisterABI) uint8, a0 uint8) uint8 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsUint16(f func(uint16, MagicLastTypeNameForTestingRegisterABI) uint16, a0 uint16) uint16 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsUint32(f func(uint32, MagicLastTypeNameForTestingRegisterABI) uint32, a0 uint32) 
uint32 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsUint64(f func(uint64, MagicLastTypeNameForTestingRegisterABI) uint64, a0 uint64) uint64 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsFloat32(f func(float32, MagicLastTypeNameForTestingRegisterABI) float32, a0 float32) float32 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsFloat64(f func(float64, MagicLastTypeNameForTestingRegisterABI) float64, a0 float64) float64 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsComplex64(f func(complex64, MagicLastTypeNameForTestingRegisterABI) complex64, a0 complex64) complex64 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsComplex128(f func(complex128, MagicLastTypeNameForTestingRegisterABI) complex128, a0 complex128) complex128 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsArray1(f func([1]uint32, MagicLastTypeNameForTestingRegisterABI) [1]uint32, a0 [1]uint32) [1]uint32 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsArray(f func([2]uintptr, MagicLastTypeNameForTestingRegisterABI) [2]uintptr, a0 [2]uintptr) [2]uintptr { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsArray1Mix(f func(int, [1]uint32, float64, MagicLastTypeNameForTestingRegisterABI) (int, [1]uint32, float64), a0 int, a1 [1]uint32, a2 float64) (int, [1]uint32, float64) { + return f(a0, a1, a2, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsString(f func(string, MagicLastTypeNameForTestingRegisterABI) string, a0 string) string { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsSlice(f func([]byte, MagicLastTypeNameForTestingRegisterABI) []byte, a0 []byte) []byte { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsPointer(f func(*byte, MagicLastTypeNameForTestingRegisterABI) *byte, a0 *byte) *byte { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsManyInt(f func(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9 int, x MagicLastTypeNameForTestingRegisterABI) (r0, r1, r2, r3, r4, r5, r6, r7, r8, r9 int), a0, a1, a2, a3, a4, a5, a6, a7, a8, a9 int) (int, int, int, int, int, int, int, int, int, int) { + return f(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsManyFloat64(f func(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 float64, x MagicLastTypeNameForTestingRegisterABI) (r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15, r16, r17, r18 float64), a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 float64) (r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15, r16, r17, r18 float64) { + return f(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct1(f func(Struct1, 
MagicLastTypeNameForTestingRegisterABI) Struct1, a0 Struct1) Struct1 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct2(f func(Struct2, MagicLastTypeNameForTestingRegisterABI) Struct2, a0 Struct2) Struct2 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct3(f func(Struct3, MagicLastTypeNameForTestingRegisterABI) Struct3, a0 Struct3) Struct3 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct4(f func(Struct4, MagicLastTypeNameForTestingRegisterABI) Struct4, a0 Struct4) Struct4 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct5(f func(Struct5, MagicLastTypeNameForTestingRegisterABI) Struct5, a0 Struct5) Struct5 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct6(f func(Struct6, MagicLastTypeNameForTestingRegisterABI) Struct6, a0 Struct6) Struct6 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct7(f func(Struct7, MagicLastTypeNameForTestingRegisterABI) Struct7, a0 Struct7) Struct7 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct8(f func(Struct8, MagicLastTypeNameForTestingRegisterABI) Struct8, a0 Struct8) Struct8 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct9(f func(Struct9, MagicLastTypeNameForTestingRegisterABI) Struct9, a0 Struct9) Struct9 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct10(f func(Struct10, MagicLastTypeNameForTestingRegisterABI) Struct10, a0 Struct10) Struct10 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct11(f func(Struct11, MagicLastTypeNameForTestingRegisterABI) Struct11, a0 Struct11) Struct11 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct12(f func(Struct12, MagicLastTypeNameForTestingRegisterABI) Struct12, a0 Struct12) Struct12 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct13(f func(Struct13, MagicLastTypeNameForTestingRegisterABI) Struct13, a0 Struct13) Struct13 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct14(f func(Struct14, MagicLastTypeNameForTestingRegisterABI) Struct14, a0 Struct14) Struct14 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsStruct15(f func(Struct15, MagicLastTypeNameForTestingRegisterABI) Struct15, a0 Struct15) Struct15 { + return f(a0, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgs2Struct1(f func(Struct1, Struct1, MagicLastTypeNameForTestingRegisterABI) (Struct1, Struct1), a0, a1 Struct1) (r0, r1 Struct1) { + return f(a0, a1, MagicLastTypeNameForTestingRegisterABI{}) +} + +//go:registerparams +//go:noinline +func callArgsEmptyStruct(f func(int, struct{}, float64, MagicLastTypeNameForTestingRegisterABI) (int, struct{}, float64), a0 int, a1 struct{}, a2 float64) (int, struct{}, float64) { + return f(a0, a1, a2, 
MagicLastTypeNameForTestingRegisterABI{})
+}
+
+// Struct1 is a simple integer-only aggregate struct.
+type Struct1 struct {
+ A, B, C uint
+}
+
+// Struct2 is Struct1 but with an array-typed field that will
+// force it to get passed on the stack.
+type Struct2 struct {
+ A, B, C uint
+ D [2]uint32
+}
+
+// Struct3 is Struct2 but with an anonymous array-typed field.
+// This should act identically to Struct2.
+type Struct3 struct {
+ A, B, C uint
+ D [2]uint32
+}
+
+// Struct4 has byte-length fields that should
+// each use up a whole register.
+type Struct4 struct {
+ A, B int8
+ C, D uint8
+ E bool
+}
+
+// Struct5 is a relatively large struct
+// with both integer and floating point values.
+type Struct5 struct {
+ A uint16
+ B int16
+ C, D uint32
+ E int32
+ F, G, H, I, J float32
+}
+
+// Struct6 has a nested struct.
+type Struct6 struct {
+ Struct1
+}
+
+// Struct7 is a struct with a nested array-typed field
+// that cannot be passed in registers as a result.
+type Struct7 struct {
+ Struct1
+ Struct2
+}
+
+// Struct8 is a large aggregate struct type that may be
+// passed in registers.
+type Struct8 struct {
+ Struct5
+ Struct1
+}
+
+// Struct9 is a type that has an array type nested
+// 2 layers deep, and as a result needs to be passed
+// on the stack.
+type Struct9 struct {
+ Struct1
+ Struct7
+}
+
+// Struct10 is a struct type that is too large to be
+// passed in registers.
+type Struct10 struct {
+ Struct5
+ Struct8
+}
+
+// Struct11 is a struct type that has several reference
+// types in it.
+type Struct11 struct {
+ X map[string]int
+}
+
+// Struct12 has Struct11 embedded into it to test more
+// paths.
+type Struct12 struct {
+ A int
+ Struct11
+}
+
+// Struct13 tests an empty field.
+type Struct13 struct {
+ A int
+ X struct{}
+ B int
+}
+
+// Struct14 tests a non-zero-sized (and otherwise register-assignable)
+// struct with a field that is a non-zero length array with zero-sized members.
+type Struct14 struct {
+ A uintptr
+ X [3]struct{}
+ B float64
+}
+
+// Struct15 tests a non-zero-sized (and otherwise register-assignable)
+// struct with a struct field that is zero-sized but contains a
+// non-zero length array with zero-sized members.
+type Struct15 struct {
+ A uintptr
+ X struct {
+ Y [3]struct{}
+ }
+ B float64
+}
+
+const genValueRandSeed = 0
+
+// genValue generates a pseudorandom reflect.Value with type typ.
+// The reflect.Value produced by this function is always the same
+// for the same type.
+func genValue(t *testing.T, typ reflect.Type, r *rand.Rand) reflect.Value {
+ // Re-seed and reset the PRNG because we want each value with the
+ // same type to be the same random value.
+ r.Seed(genValueRandSeed)
+ v, ok := quick.Value(typ, r)
+ if !ok {
+ t.Fatal("failed to generate value")
+ }
+ return v
+}
+
+func TestSignalingNaNArgument(t *testing.T) {
+ v := reflect.ValueOf(func(x float32) {
+ // make sure x is a signaling NaN.
+ u := math.Float32bits(x)
+ if u != snan {
+ t.Fatalf("signaling NaN not correct: %x\n", u)
+ }
+ })
+ v.Call([]reflect.Value{reflect.ValueOf(math.Float32frombits(snan))})
+}
+
+func TestSignalingNaNReturn(t *testing.T) {
+ v := reflect.ValueOf(func() float32 {
+ return math.Float32frombits(snan)
+ })
+ var x float32
+ reflect.ValueOf(&x).Elem().Set(v.Call(nil)[0])
+ // make sure x is a signaling NaN.
+ u := math.Float32bits(x) + if u != snan { + t.Fatalf("signaling NaN not correct: %x\n", u) + } +} diff --git a/platform/dbops/binaries/go/go/src/reflect/all_test.go b/platform/dbops/binaries/go/go/src/reflect/all_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e77537c9a513a5c6413d7e5d956f0aaf778e8c57 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/all_test.go @@ -0,0 +1,8469 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflect_test + +import ( + "bytes" + "encoding/base64" + "flag" + "fmt" + "go/token" + "internal/abi" + "internal/goarch" + "internal/testenv" + "io" + "math" + "math/rand" + "net" + "os" + . "reflect" + "reflect/internal/example1" + "reflect/internal/example2" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + "unsafe" +) + +const bucketCount = abi.MapBucketCount + +var sink any + +func TestBool(t *testing.T) { + v := ValueOf(true) + if v.Bool() != true { + t.Fatal("ValueOf(true).Bool() = false") + } +} + +type integer int +type T struct { + a int + b float64 + c string + d *int +} + +var _ = T{} == T{} // tests depend on T being comparable + +type pair struct { + i any + s string +} + +func assert(t *testing.T, s, want string) { + if s != want { + t.Errorf("have %#q want %#q", s, want) + } +} + +var typeTests = []pair{ + {struct{ x int }{}, "int"}, + {struct{ x int8 }{}, "int8"}, + {struct{ x int16 }{}, "int16"}, + {struct{ x int32 }{}, "int32"}, + {struct{ x int64 }{}, "int64"}, + {struct{ x uint }{}, "uint"}, + {struct{ x uint8 }{}, "uint8"}, + {struct{ x uint16 }{}, "uint16"}, + {struct{ x uint32 }{}, "uint32"}, + {struct{ x uint64 }{}, "uint64"}, + {struct{ x float32 }{}, "float32"}, + {struct{ x float64 }{}, "float64"}, + {struct{ x int8 }{}, "int8"}, + {struct{ x (**int8) }{}, "**int8"}, + {struct{ x (**integer) }{}, "**reflect_test.integer"}, + {struct{ x ([32]int32) }{}, "[32]int32"}, + {struct{ x ([]int8) }{}, "[]int8"}, + {struct{ x (map[string]int32) }{}, "map[string]int32"}, + {struct{ x (chan<- string) }{}, "chan<- string"}, + {struct{ x (chan<- chan string) }{}, "chan<- chan string"}, + {struct{ x (chan<- <-chan string) }{}, "chan<- <-chan string"}, + {struct{ x (<-chan <-chan string) }{}, "<-chan <-chan string"}, + {struct{ x (chan (<-chan string)) }{}, "chan (<-chan string)"}, + {struct { + x struct { + c chan *int32 + d float32 + } + }{}, + "struct { c chan *int32; d float32 }", + }, + {struct{ x (func(a int8, b int32)) }{}, "func(int8, int32)"}, + {struct { + x struct { + c func(chan *integer, *int8) + } + }{}, + "struct { c func(chan *reflect_test.integer, *int8) }", + }, + {struct { + x struct { + a int8 + b int32 + } + }{}, + "struct { a int8; b int32 }", + }, + {struct { + x struct { + a int8 + b int8 + c int32 + } + }{}, + "struct { a int8; b int8; c int32 }", + }, + {struct { + x struct { + a int8 + b int8 + c int8 + d int32 + } + }{}, + "struct { a int8; b int8; c int8; d int32 }", + }, + {struct { + x struct { + a int8 + b int8 + c int8 + d int8 + e int32 + } + }{}, + "struct { a int8; b int8; c int8; d int8; e int32 }", + }, + {struct { + x struct { + a int8 + b int8 + c int8 + d int8 + e int8 + f int32 + } + }{}, + "struct { a int8; b int8; c int8; d int8; e int8; f int32 }", + }, + {struct { + x struct { + a int8 `reflect:"hi there"` + } + }{}, + `struct { a int8 "reflect:\"hi there\"" }`, + }, + {struct { + x struct { + a int8 
`reflect:"hi \x00there\t\n\"\\"` + } + }{}, + `struct { a int8 "reflect:\"hi \\x00there\\t\\n\\\"\\\\\"" }`, + }, + {struct { + x struct { + f func(args ...int) + } + }{}, + "struct { f func(...int) }", + }, + {struct { + x (interface { + a(func(func(int) int) func(func(int)) int) + b() + }) + }{}, + "interface { reflect_test.a(func(func(int) int) func(func(int)) int); reflect_test.b() }", + }, + {struct { + x struct { + int32 + int64 + } + }{}, + "struct { int32; int64 }", + }, +} + +var valueTests = []pair{ + {new(int), "132"}, + {new(int8), "8"}, + {new(int16), "16"}, + {new(int32), "32"}, + {new(int64), "64"}, + {new(uint), "132"}, + {new(uint8), "8"}, + {new(uint16), "16"}, + {new(uint32), "32"}, + {new(uint64), "64"}, + {new(float32), "256.25"}, + {new(float64), "512.125"}, + {new(complex64), "532.125+10i"}, + {new(complex128), "564.25+1i"}, + {new(string), "stringy cheese"}, + {new(bool), "true"}, + {new(*int8), "*int8(0)"}, + {new(**int8), "**int8(0)"}, + {new([5]int32), "[5]int32{0, 0, 0, 0, 0}"}, + {new(**integer), "**reflect_test.integer(0)"}, + {new(map[string]int32), "map[string]int32{}"}, + {new(chan<- string), "chan<- string"}, + {new(func(a int8, b int32)), "func(int8, int32)(0)"}, + {new(struct { + c chan *int32 + d float32 + }), + "struct { c chan *int32; d float32 }{chan *int32, 0}", + }, + {new(struct{ c func(chan *integer, *int8) }), + "struct { c func(chan *reflect_test.integer, *int8) }{func(chan *reflect_test.integer, *int8)(0)}", + }, + {new(struct { + a int8 + b int32 + }), + "struct { a int8; b int32 }{0, 0}", + }, + {new(struct { + a int8 + b int8 + c int32 + }), + "struct { a int8; b int8; c int32 }{0, 0, 0}", + }, +} + +func testType(t *testing.T, i int, typ Type, want string) { + s := typ.String() + if s != want { + t.Errorf("#%d: have %#q, want %#q", i, s, want) + } +} + +func TestTypes(t *testing.T) { + for i, tt := range typeTests { + testType(t, i, ValueOf(tt.i).Field(0).Type(), tt.s) + } +} + +func TestSet(t *testing.T) { + for i, tt := range valueTests { + v := ValueOf(tt.i) + v = v.Elem() + switch v.Kind() { + case Int: + v.SetInt(132) + case Int8: + v.SetInt(8) + case Int16: + v.SetInt(16) + case Int32: + v.SetInt(32) + case Int64: + v.SetInt(64) + case Uint: + v.SetUint(132) + case Uint8: + v.SetUint(8) + case Uint16: + v.SetUint(16) + case Uint32: + v.SetUint(32) + case Uint64: + v.SetUint(64) + case Float32: + v.SetFloat(256.25) + case Float64: + v.SetFloat(512.125) + case Complex64: + v.SetComplex(532.125 + 10i) + case Complex128: + v.SetComplex(564.25 + 1i) + case String: + v.SetString("stringy cheese") + case Bool: + v.SetBool(true) + } + s := valueToString(v) + if s != tt.s { + t.Errorf("#%d: have %#q, want %#q", i, s, tt.s) + } + } +} + +func TestSetValue(t *testing.T) { + for i, tt := range valueTests { + v := ValueOf(tt.i).Elem() + switch v.Kind() { + case Int: + v.Set(ValueOf(int(132))) + case Int8: + v.Set(ValueOf(int8(8))) + case Int16: + v.Set(ValueOf(int16(16))) + case Int32: + v.Set(ValueOf(int32(32))) + case Int64: + v.Set(ValueOf(int64(64))) + case Uint: + v.Set(ValueOf(uint(132))) + case Uint8: + v.Set(ValueOf(uint8(8))) + case Uint16: + v.Set(ValueOf(uint16(16))) + case Uint32: + v.Set(ValueOf(uint32(32))) + case Uint64: + v.Set(ValueOf(uint64(64))) + case Float32: + v.Set(ValueOf(float32(256.25))) + case Float64: + v.Set(ValueOf(512.125)) + case Complex64: + v.Set(ValueOf(complex64(532.125 + 10i))) + case Complex128: + v.Set(ValueOf(complex128(564.25 + 1i))) + case String: + v.Set(ValueOf("stringy cheese")) + case Bool: + 
v.Set(ValueOf(true)) + } + s := valueToString(v) + if s != tt.s { + t.Errorf("#%d: have %#q, want %#q", i, s, tt.s) + } + } +} + +func TestMapIterSet(t *testing.T) { + m := make(map[string]any, len(valueTests)) + for _, tt := range valueTests { + m[tt.s] = tt.i + } + v := ValueOf(m) + + k := New(v.Type().Key()).Elem() + e := New(v.Type().Elem()).Elem() + + iter := v.MapRange() + for iter.Next() { + k.SetIterKey(iter) + e.SetIterValue(iter) + want := m[k.String()] + got := e.Interface() + if got != want { + t.Errorf("%q: want (%T) %v, got (%T) %v", k.String(), want, want, got, got) + } + if setkey, key := valueToString(k), valueToString(iter.Key()); setkey != key { + t.Errorf("MapIter.Key() = %q, MapIter.SetKey() = %q", key, setkey) + } + if setval, val := valueToString(e), valueToString(iter.Value()); setval != val { + t.Errorf("MapIter.Value() = %q, MapIter.SetValue() = %q", val, setval) + } + } + + if testenv.OptimizationOff() { + return // no inlining with the noopt builder + } + + got := int(testing.AllocsPerRun(10, func() { + iter := v.MapRange() + for iter.Next() { + k.SetIterKey(iter) + e.SetIterValue(iter) + } + })) + // Calling MapRange should not allocate even though it returns a *MapIter. + // The function is inlineable, so if the local usage does not escape + // the *MapIter, it can remain stack allocated. + want := 0 + if got != want { + t.Errorf("wanted %d alloc, got %d", want, got) + } +} + +func TestCanIntUintFloatComplex(t *testing.T) { + type integer int + type uinteger uint + type float float64 + type complex complex128 + + var ops = [...]string{"CanInt", "CanUint", "CanFloat", "CanComplex"} + + var testCases = []struct { + i any + want [4]bool + }{ + // signed integer + {132, [...]bool{true, false, false, false}}, + {int8(8), [...]bool{true, false, false, false}}, + {int16(16), [...]bool{true, false, false, false}}, + {int32(32), [...]bool{true, false, false, false}}, + {int64(64), [...]bool{true, false, false, false}}, + // unsigned integer + {uint(132), [...]bool{false, true, false, false}}, + {uint8(8), [...]bool{false, true, false, false}}, + {uint16(16), [...]bool{false, true, false, false}}, + {uint32(32), [...]bool{false, true, false, false}}, + {uint64(64), [...]bool{false, true, false, false}}, + {uintptr(0xABCD), [...]bool{false, true, false, false}}, + // floating-point + {float32(256.25), [...]bool{false, false, true, false}}, + {float64(512.125), [...]bool{false, false, true, false}}, + // complex + {complex64(532.125 + 10i), [...]bool{false, false, false, true}}, + {complex128(564.25 + 1i), [...]bool{false, false, false, true}}, + // underlying + {integer(-132), [...]bool{true, false, false, false}}, + {uinteger(132), [...]bool{false, true, false, false}}, + {float(256.25), [...]bool{false, false, true, false}}, + {complex(532.125 + 10i), [...]bool{false, false, false, true}}, + // not-acceptable + {"hello world", [...]bool{false, false, false, false}}, + {new(int), [...]bool{false, false, false, false}}, + {new(uint), [...]bool{false, false, false, false}}, + {new(float64), [...]bool{false, false, false, false}}, + {new(complex64), [...]bool{false, false, false, false}}, + {new([5]int), [...]bool{false, false, false, false}}, + {new(integer), [...]bool{false, false, false, false}}, + {new(map[int]int), [...]bool{false, false, false, false}}, + {new(chan<- int), [...]bool{false, false, false, false}}, + {new(func(a int8)), [...]bool{false, false, false, false}}, + {new(struct{ i int }), [...]bool{false, false, false, false}}, + } + + for i, tc := range 
testCases { + v := ValueOf(tc.i) + got := [...]bool{v.CanInt(), v.CanUint(), v.CanFloat(), v.CanComplex()} + + for j := range tc.want { + if got[j] != tc.want[j] { + t.Errorf( + "#%d: v.%s() returned %t for type %T, want %t", + i, + ops[j], + got[j], + tc.i, + tc.want[j], + ) + } + } + } +} + +func TestCanSetField(t *testing.T) { + type embed struct{ x, X int } + type Embed struct{ x, X int } + type S1 struct { + embed + x, X int + } + type S2 struct { + *embed + x, X int + } + type S3 struct { + Embed + x, X int + } + type S4 struct { + *Embed + x, X int + } + + type testCase struct { + // -1 means Addr().Elem() of current value + index []int + canSet bool + } + tests := []struct { + val Value + cases []testCase + }{{ + val: ValueOf(&S1{}), + cases: []testCase{ + {[]int{0}, false}, + {[]int{0, -1}, false}, + {[]int{0, 0}, false}, + {[]int{0, 0, -1}, false}, + {[]int{0, -1, 0}, false}, + {[]int{0, -1, 0, -1}, false}, + {[]int{0, 1}, true}, + {[]int{0, 1, -1}, true}, + {[]int{0, -1, 1}, true}, + {[]int{0, -1, 1, -1}, true}, + {[]int{1}, false}, + {[]int{1, -1}, false}, + {[]int{2}, true}, + {[]int{2, -1}, true}, + }, + }, { + val: ValueOf(&S2{embed: &embed{}}), + cases: []testCase{ + {[]int{0}, false}, + {[]int{0, -1}, false}, + {[]int{0, 0}, false}, + {[]int{0, 0, -1}, false}, + {[]int{0, -1, 0}, false}, + {[]int{0, -1, 0, -1}, false}, + {[]int{0, 1}, true}, + {[]int{0, 1, -1}, true}, + {[]int{0, -1, 1}, true}, + {[]int{0, -1, 1, -1}, true}, + {[]int{1}, false}, + {[]int{2}, true}, + }, + }, { + val: ValueOf(&S3{}), + cases: []testCase{ + {[]int{0}, true}, + {[]int{0, -1}, true}, + {[]int{0, 0}, false}, + {[]int{0, 0, -1}, false}, + {[]int{0, -1, 0}, false}, + {[]int{0, -1, 0, -1}, false}, + {[]int{0, 1}, true}, + {[]int{0, 1, -1}, true}, + {[]int{0, -1, 1}, true}, + {[]int{0, -1, 1, -1}, true}, + {[]int{1}, false}, + {[]int{2}, true}, + }, + }, { + val: ValueOf(&S4{Embed: &Embed{}}), + cases: []testCase{ + {[]int{0}, true}, + {[]int{0, -1}, true}, + {[]int{0, 0}, false}, + {[]int{0, 0, -1}, false}, + {[]int{0, -1, 0}, false}, + {[]int{0, -1, 0, -1}, false}, + {[]int{0, 1}, true}, + {[]int{0, 1, -1}, true}, + {[]int{0, -1, 1}, true}, + {[]int{0, -1, 1, -1}, true}, + {[]int{1}, false}, + {[]int{2}, true}, + }, + }} + + for _, tt := range tests { + t.Run(tt.val.Type().Name(), func(t *testing.T) { + for _, tc := range tt.cases { + f := tt.val + for _, i := range tc.index { + if f.Kind() == Pointer { + f = f.Elem() + } + if i == -1 { + f = f.Addr().Elem() + } else { + f = f.Field(i) + } + } + if got := f.CanSet(); got != tc.canSet { + t.Errorf("CanSet() = %v, want %v", got, tc.canSet) + } + } + }) + } +} + +var _i = 7 + +var valueToStringTests = []pair{ + {123, "123"}, + {123.5, "123.5"}, + {byte(123), "123"}, + {"abc", "abc"}, + {T{123, 456.75, "hello", &_i}, "reflect_test.T{123, 456.75, hello, *int(&7)}"}, + {new(chan *T), "*chan *reflect_test.T(&chan *reflect_test.T)"}, + {[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}"}, + {&[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "*[10]int(&[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})"}, + {[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}"}, + {&[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "*[]int(&[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})"}, +} + +func TestValueToString(t *testing.T) { + for i, test := range valueToStringTests { + s := valueToString(ValueOf(test.i)) + if s != test.s { + t.Errorf("#%d: have %#q, want %#q", i, s, test.s) + } + } +} + +func TestArrayElemSet(t *testing.T) { + v := 
ValueOf(&[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}).Elem() + v.Index(4).SetInt(123) + s := valueToString(v) + const want = "[10]int{1, 2, 3, 4, 123, 6, 7, 8, 9, 10}" + if s != want { + t.Errorf("[10]int: have %#q want %#q", s, want) + } + + v = ValueOf([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + v.Index(4).SetInt(123) + s = valueToString(v) + const want1 = "[]int{1, 2, 3, 4, 123, 6, 7, 8, 9, 10}" + if s != want1 { + t.Errorf("[]int: have %#q want %#q", s, want1) + } +} + +func TestPtrPointTo(t *testing.T) { + var ip *int32 + var i int32 = 1234 + vip := ValueOf(&ip) + vi := ValueOf(&i).Elem() + vip.Elem().Set(vi.Addr()) + if *ip != 1234 { + t.Errorf("got %d, want 1234", *ip) + } + + ip = nil + vp := ValueOf(&ip).Elem() + vp.Set(Zero(vp.Type())) + if ip != nil { + t.Errorf("got non-nil (%p), want nil", ip) + } +} + +func TestPtrSetNil(t *testing.T) { + var i int32 = 1234 + ip := &i + vip := ValueOf(&ip) + vip.Elem().Set(Zero(vip.Elem().Type())) + if ip != nil { + t.Errorf("got non-nil (%d), want nil", *ip) + } +} + +func TestMapSetNil(t *testing.T) { + m := make(map[string]int) + vm := ValueOf(&m) + vm.Elem().Set(Zero(vm.Elem().Type())) + if m != nil { + t.Errorf("got non-nil (%p), want nil", m) + } +} + +func TestAll(t *testing.T) { + testType(t, 1, TypeOf((int8)(0)), "int8") + testType(t, 2, TypeOf((*int8)(nil)).Elem(), "int8") + + typ := TypeOf((*struct { + c chan *int32 + d float32 + })(nil)) + testType(t, 3, typ, "*struct { c chan *int32; d float32 }") + etyp := typ.Elem() + testType(t, 4, etyp, "struct { c chan *int32; d float32 }") + styp := etyp + f := styp.Field(0) + testType(t, 5, f.Type, "chan *int32") + + f, present := styp.FieldByName("d") + if !present { + t.Errorf("FieldByName says present field is absent") + } + testType(t, 6, f.Type, "float32") + + f, present = styp.FieldByName("absent") + if present { + t.Errorf("FieldByName says absent field is present") + } + + typ = TypeOf([32]int32{}) + testType(t, 7, typ, "[32]int32") + testType(t, 8, typ.Elem(), "int32") + + typ = TypeOf((map[string]*int32)(nil)) + testType(t, 9, typ, "map[string]*int32") + mtyp := typ + testType(t, 10, mtyp.Key(), "string") + testType(t, 11, mtyp.Elem(), "*int32") + + typ = TypeOf((chan<- string)(nil)) + testType(t, 12, typ, "chan<- string") + testType(t, 13, typ.Elem(), "string") + + // make sure tag strings are not part of element type + typ = TypeOf(struct { + d []uint32 `reflect:"TAG"` + }{}).Field(0).Type + testType(t, 14, typ, "[]uint32") +} + +func TestInterfaceGet(t *testing.T) { + var inter struct { + E any + } + inter.E = 123.456 + v1 := ValueOf(&inter) + v2 := v1.Elem().Field(0) + assert(t, v2.Type().String(), "interface {}") + i2 := v2.Interface() + v3 := ValueOf(i2) + assert(t, v3.Type().String(), "float64") +} + +func TestInterfaceValue(t *testing.T) { + var inter struct { + E any + } + inter.E = 123.456 + v1 := ValueOf(&inter) + v2 := v1.Elem().Field(0) + assert(t, v2.Type().String(), "interface {}") + v3 := v2.Elem() + assert(t, v3.Type().String(), "float64") + + i3 := v2.Interface() + if _, ok := i3.(float64); !ok { + t.Error("v2.Interface() did not return float64, got ", TypeOf(i3)) + } +} + +func TestFunctionValue(t *testing.T) { + var x any = func() {} + v := ValueOf(x) + if fmt.Sprint(v.Interface()) != fmt.Sprint(x) { + t.Fatalf("TestFunction returned wrong pointer") + } + assert(t, v.Type().String(), "func()") +} + +func TestGrow(t *testing.T) { + v := ValueOf([]int(nil)) + shouldPanic("reflect.Value.Grow using unaddressable value", func() { v.Grow(0) }) + v = 
ValueOf(new([]int)).Elem()
+ v.Grow(0)
+ if !v.IsNil() {
+ t.Errorf("v.Grow(0) should still be nil")
+ }
+ v.Grow(1)
+ if v.Cap() == 0 {
+ t.Errorf("v.Cap = %v, want non-zero", v.Cap())
+ }
+ want := v.UnsafePointer()
+ v.Grow(1)
+ got := v.UnsafePointer()
+ if got != want {
+ t.Errorf("noop v.Grow should not change pointers")
+ }
+
+ t.Run("Append", func(t *testing.T) {
+ var got, want []T
+ v := ValueOf(&got).Elem()
+ appendValue := func(vt T) {
+ v.Grow(1)
+ v.SetLen(v.Len() + 1)
+ v.Index(v.Len() - 1).Set(ValueOf(vt))
+ }
+ for i := 0; i < 10; i++ {
+ vt := T{i, float64(i), strconv.Itoa(i), &i}
+ appendValue(vt)
+ want = append(want, vt)
+ }
+ if !DeepEqual(got, want) {
+ t.Errorf("value mismatch:\ngot %v\nwant %v", got, want)
+ }
+ })
+
+ t.Run("Rate", func(t *testing.T) {
+ var b []byte
+ v := ValueOf(new([]byte)).Elem()
+ for i := 0; i < 10; i++ {
+ b = append(b[:cap(b)], make([]byte, 1)...)
+ v.SetLen(v.Cap())
+ v.Grow(1)
+ if v.Cap() != cap(b) {
+ t.Errorf("v.Cap = %v, want %v", v.Cap(), cap(b))
+ }
+ }
+ })
+
+ t.Run("ZeroCapacity", func(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ v := ValueOf(new([]byte)).Elem()
+ v.Grow(61)
+ b := v.Bytes()
+ b = b[:cap(b)]
+ for i, c := range b {
+ if c != 0 {
+ t.Fatalf("Value.Bytes[%d] = 0x%02x, want 0x00", i, c)
+ }
+ b[i] = 0xff
+ }
+ runtime.GC()
+ }
+ })
+}
+
+var appendTests = []struct {
+ orig, extra []int
+}{
+ {nil, nil},
+ {[]int{}, nil},
+ {nil, []int{}},
+ {[]int{}, []int{}},
+ {nil, []int{22}},
+ {[]int{}, []int{22}},
+ {make([]int, 2, 4), nil},
+ {make([]int, 2, 4), []int{}},
+ {make([]int, 2, 4), []int{22}},
+ {make([]int, 2, 4), []int{22, 33, 44}},
+}
+
+func TestAppend(t *testing.T) {
+ for i, test := range appendTests {
+ origLen, extraLen := len(test.orig), len(test.extra)
+ want := append(test.orig, test.extra...)
+ // Convert extra from []int to []Value.
+ e0 := make([]Value, len(test.extra))
+ for j, e := range test.extra {
+ e0[j] = ValueOf(e)
+ }
+ // Convert extra from []int to *SliceValue.
+ e1 := ValueOf(test.extra)
+
+ // Test Append.
+ a0 := ValueOf(&test.orig).Elem()
+ have0 := Append(a0, e0...)
+ if have0.CanAddr() {
+ t.Errorf("Append #%d: have slice should not be addressable", i)
+ }
+ if !DeepEqual(have0.Interface(), want) {
+ t.Errorf("Append #%d: have %v, want %v (%p %p)", i, have0, want, test.orig, have0.Interface())
+ }
+ // Check that the orig and extra slices were not modified.
+ if a0.Len() != len(test.orig) {
+ t.Errorf("Append #%d: a0.Len: have %d, want %d", i, a0.Len(), origLen)
+ }
+ if len(test.orig) != origLen {
+ t.Errorf("Append #%d origLen: have %v, want %v", i, len(test.orig), origLen)
+ }
+ if len(test.extra) != extraLen {
+ t.Errorf("Append #%d extraLen: have %v, want %v", i, len(test.extra), extraLen)
+ }
+
+ // Test AppendSlice.
+ a1 := ValueOf(&test.orig).Elem()
+ have1 := AppendSlice(a1, e1)
+ if have1.CanAddr() {
+ t.Errorf("AppendSlice #%d: have slice should not be addressable", i)
+ }
+ if !DeepEqual(have1.Interface(), want) {
+ t.Errorf("AppendSlice #%d: have %v, want %v", i, have1, want)
+ }
+ // Check that the orig and extra slices were not modified.
+ if a1.Len() != len(test.orig) {
+ t.Errorf("AppendSlice #%d: a1.Len: have %d, want %d", i, a1.Len(), origLen)
+ }
+ if len(test.orig) != origLen {
+ t.Errorf("AppendSlice #%d origLen: have %v, want %v", i, len(test.orig), origLen)
+ }
+ if len(test.extra) != extraLen {
+ t.Errorf("AppendSlice #%d extraLen: have %v, want %v", i, len(test.extra), extraLen)
+ }
+
+ // Test Append and AppendSlice with unexported value.
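+ // A Value read through an unexported field is marked read-only, so
+ // both Append and AppendSlice should panic instead of mutating it.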
+ ax := ValueOf(struct{ x []int }{test.orig}).Field(0) + shouldPanic("using unexported field", func() { Append(ax, e0...) }) + shouldPanic("using unexported field", func() { AppendSlice(ax, e1) }) + } +} + +func TestCopy(t *testing.T) { + a := []int{1, 2, 3, 4, 10, 9, 8, 7} + b := []int{11, 22, 33, 44, 1010, 99, 88, 77, 66, 55, 44} + c := []int{11, 22, 33, 44, 1010, 99, 88, 77, 66, 55, 44} + for i := 0; i < len(b); i++ { + if b[i] != c[i] { + t.Fatalf("b != c before test") + } + } + a1 := a + b1 := b + aa := ValueOf(&a1).Elem() + ab := ValueOf(&b1).Elem() + for tocopy := 1; tocopy <= 7; tocopy++ { + aa.SetLen(tocopy) + Copy(ab, aa) + aa.SetLen(8) + for i := 0; i < tocopy; i++ { + if a[i] != b[i] { + t.Errorf("(i) tocopy=%d a[%d]=%d, b[%d]=%d", + tocopy, i, a[i], i, b[i]) + } + } + for i := tocopy; i < len(b); i++ { + if b[i] != c[i] { + if i < len(a) { + t.Errorf("(ii) tocopy=%d a[%d]=%d, b[%d]=%d, c[%d]=%d", + tocopy, i, a[i], i, b[i], i, c[i]) + } else { + t.Errorf("(iii) tocopy=%d b[%d]=%d, c[%d]=%d", + tocopy, i, b[i], i, c[i]) + } + } else { + t.Logf("tocopy=%d elem %d is okay\n", tocopy, i) + } + } + } +} + +func TestCopyString(t *testing.T) { + t.Run("Slice", func(t *testing.T) { + s := bytes.Repeat([]byte{'_'}, 8) + val := ValueOf(s) + + n := Copy(val, ValueOf("")) + if expecting := []byte("________"); n != 0 || !bytes.Equal(s, expecting) { + t.Errorf("got n = %d, s = %s, expecting n = 0, s = %s", n, s, expecting) + } + + n = Copy(val, ValueOf("hello")) + if expecting := []byte("hello___"); n != 5 || !bytes.Equal(s, expecting) { + t.Errorf("got n = %d, s = %s, expecting n = 5, s = %s", n, s, expecting) + } + + n = Copy(val, ValueOf("helloworld")) + if expecting := []byte("hellowor"); n != 8 || !bytes.Equal(s, expecting) { + t.Errorf("got n = %d, s = %s, expecting n = 8, s = %s", n, s, expecting) + } + }) + t.Run("Array", func(t *testing.T) { + s := [...]byte{'_', '_', '_', '_', '_', '_', '_', '_'} + val := ValueOf(&s).Elem() + + n := Copy(val, ValueOf("")) + if expecting := []byte("________"); n != 0 || !bytes.Equal(s[:], expecting) { + t.Errorf("got n = %d, s = %s, expecting n = 0, s = %s", n, s[:], expecting) + } + + n = Copy(val, ValueOf("hello")) + if expecting := []byte("hello___"); n != 5 || !bytes.Equal(s[:], expecting) { + t.Errorf("got n = %d, s = %s, expecting n = 5, s = %s", n, s[:], expecting) + } + + n = Copy(val, ValueOf("helloworld")) + if expecting := []byte("hellowor"); n != 8 || !bytes.Equal(s[:], expecting) { + t.Errorf("got n = %d, s = %s, expecting n = 8, s = %s", n, s[:], expecting) + } + }) +} + +func TestCopyArray(t *testing.T) { + a := [8]int{1, 2, 3, 4, 10, 9, 8, 7} + b := [11]int{11, 22, 33, 44, 1010, 99, 88, 77, 66, 55, 44} + c := b + aa := ValueOf(&a).Elem() + ab := ValueOf(&b).Elem() + Copy(ab, aa) + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + t.Errorf("(i) a[%d]=%d, b[%d]=%d", i, a[i], i, b[i]) + } + } + for i := len(a); i < len(b); i++ { + if b[i] != c[i] { + t.Errorf("(ii) b[%d]=%d, c[%d]=%d", i, b[i], i, c[i]) + } else { + t.Logf("elem %d is okay\n", i) + } + } +} + +func TestBigUnnamedStruct(t *testing.T) { + b := struct{ a, b, c, d int64 }{1, 2, 3, 4} + v := ValueOf(b) + b1 := v.Interface().(struct { + a, b, c, d int64 + }) + if b1.a != b.a || b1.b != b.b || b1.c != b.c || b1.d != b.d { + t.Errorf("ValueOf(%v).Interface().(*Big) = %v", b, b1) + } +} + +type big struct { + a, b, c, d, e int64 +} + +func TestBigStruct(t *testing.T) { + b := big{1, 2, 3, 4, 5} + v := ValueOf(b) + b1 := v.Interface().(big) + if b1.a != b.a || b1.b != b.b || 
b1.c != b.c || b1.d != b.d || b1.e != b.e { + t.Errorf("ValueOf(%v).Interface().(big) = %v", b, b1) + } +} + +type Basic struct { + x int + y float32 +} + +type NotBasic Basic + +type DeepEqualTest struct { + a, b any + eq bool +} + +// Simple functions for DeepEqual tests. +var ( + fn1 func() // nil. + fn2 func() // nil. + fn3 = func() { fn1() } // Not nil. +) + +type self struct{} + +type Loop *Loop +type Loopy any + +var loop1, loop2 Loop +var loopy1, loopy2 Loopy +var cycleMap1, cycleMap2, cycleMap3 map[string]any + +type structWithSelfPtr struct { + p *structWithSelfPtr + s string +} + +func init() { + loop1 = &loop2 + loop2 = &loop1 + + loopy1 = &loopy2 + loopy2 = &loopy1 + + cycleMap1 = map[string]any{} + cycleMap1["cycle"] = cycleMap1 + cycleMap2 = map[string]any{} + cycleMap2["cycle"] = cycleMap2 + cycleMap3 = map[string]any{} + cycleMap3["different"] = cycleMap3 +} + +var deepEqualTests = []DeepEqualTest{ + // Equalities + {nil, nil, true}, + {1, 1, true}, + {int32(1), int32(1), true}, + {0.5, 0.5, true}, + {float32(0.5), float32(0.5), true}, + {"hello", "hello", true}, + {make([]int, 10), make([]int, 10), true}, + {&[3]int{1, 2, 3}, &[3]int{1, 2, 3}, true}, + {Basic{1, 0.5}, Basic{1, 0.5}, true}, + {error(nil), error(nil), true}, + {map[int]string{1: "one", 2: "two"}, map[int]string{2: "two", 1: "one"}, true}, + {fn1, fn2, true}, + {[]byte{1, 2, 3}, []byte{1, 2, 3}, true}, + {[]MyByte{1, 2, 3}, []MyByte{1, 2, 3}, true}, + {MyBytes{1, 2, 3}, MyBytes{1, 2, 3}, true}, + + // Inequalities + {1, 2, false}, + {int32(1), int32(2), false}, + {0.5, 0.6, false}, + {float32(0.5), float32(0.6), false}, + {"hello", "hey", false}, + {make([]int, 10), make([]int, 11), false}, + {&[3]int{1, 2, 3}, &[3]int{1, 2, 4}, false}, + {Basic{1, 0.5}, Basic{1, 0.6}, false}, + {Basic{1, 0}, Basic{2, 0}, false}, + {map[int]string{1: "one", 3: "two"}, map[int]string{2: "two", 1: "one"}, false}, + {map[int]string{1: "one", 2: "txo"}, map[int]string{2: "two", 1: "one"}, false}, + {map[int]string{1: "one"}, map[int]string{2: "two", 1: "one"}, false}, + {map[int]string{2: "two", 1: "one"}, map[int]string{1: "one"}, false}, + {nil, 1, false}, + {1, nil, false}, + {fn1, fn3, false}, + {fn3, fn3, false}, + {[][]int{{1}}, [][]int{{2}}, false}, + {&structWithSelfPtr{p: &structWithSelfPtr{s: "a"}}, &structWithSelfPtr{p: &structWithSelfPtr{s: "b"}}, false}, + + // Fun with floating point. + {math.NaN(), math.NaN(), false}, + {&[1]float64{math.NaN()}, &[1]float64{math.NaN()}, false}, + {&[1]float64{math.NaN()}, self{}, true}, + {[]float64{math.NaN()}, []float64{math.NaN()}, false}, + {[]float64{math.NaN()}, self{}, true}, + {map[float64]float64{math.NaN(): 1}, map[float64]float64{1: 2}, false}, + {map[float64]float64{math.NaN(): 1}, self{}, true}, + + // Nil vs empty: not the same. + {[]int{}, []int(nil), false}, + {[]int{}, []int{}, true}, + {[]int(nil), []int(nil), true}, + {map[int]int{}, map[int]int(nil), false}, + {map[int]int{}, map[int]int{}, true}, + {map[int]int(nil), map[int]int(nil), true}, + + // Mismatched types + {1, 1.0, false}, + {int32(1), int64(1), false}, + {0.5, "hello", false}, + {[]int{1, 2, 3}, [3]int{1, 2, 3}, false}, + {&[3]any{1, 2, 4}, &[3]any{1, 2, "s"}, false}, + {Basic{1, 0.5}, NotBasic{1, 0.5}, false}, + {map[uint]string{1: "one", 2: "two"}, map[int]string{2: "two", 1: "one"}, false}, + {[]byte{1, 2, 3}, []MyByte{1, 2, 3}, false}, + {[]MyByte{1, 2, 3}, MyBytes{1, 2, 3}, false}, + {[]byte{1, 2, 3}, MyBytes{1, 2, 3}, false}, + + // Possible loops. 
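+ // DeepEqual tracks pointer pairs it has already visited, so these
+ // cyclic values terminate; two cycles compare equal when they unfold
+ // identically (cycleMap3 differs from cycleMap1 only in its key).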
+ {&loop1, &loop1, true}, + {&loop1, &loop2, true}, + {&loopy1, &loopy1, true}, + {&loopy1, &loopy2, true}, + {&cycleMap1, &cycleMap2, true}, + {&cycleMap1, &cycleMap3, false}, +} + +func TestDeepEqual(t *testing.T) { + for _, test := range deepEqualTests { + if test.b == (self{}) { + test.b = test.a + } + if r := DeepEqual(test.a, test.b); r != test.eq { + t.Errorf("DeepEqual(%#v, %#v) = %v, want %v", test.a, test.b, r, test.eq) + } + } +} + +func TestTypeOf(t *testing.T) { + // Special case for nil + if typ := TypeOf(nil); typ != nil { + t.Errorf("expected nil type for nil value; got %v", typ) + } + for _, test := range deepEqualTests { + v := ValueOf(test.a) + if !v.IsValid() { + continue + } + typ := TypeOf(test.a) + if typ != v.Type() { + t.Errorf("TypeOf(%v) = %v, but ValueOf(%v).Type() = %v", test.a, typ, test.a, v.Type()) + } + } +} + +type Recursive struct { + x int + r *Recursive +} + +func TestDeepEqualRecursiveStruct(t *testing.T) { + a, b := new(Recursive), new(Recursive) + *a = Recursive{12, a} + *b = Recursive{12, b} + if !DeepEqual(a, b) { + t.Error("DeepEqual(recursive same) = false, want true") + } +} + +type _Complex struct { + a int + b [3]*_Complex + c *string + d map[float64]float64 +} + +func TestDeepEqualComplexStruct(t *testing.T) { + m := make(map[float64]float64) + stra, strb := "hello", "hello" + a, b := new(_Complex), new(_Complex) + *a = _Complex{5, [3]*_Complex{a, b, a}, &stra, m} + *b = _Complex{5, [3]*_Complex{b, a, a}, &strb, m} + if !DeepEqual(a, b) { + t.Error("DeepEqual(complex same) = false, want true") + } +} + +func TestDeepEqualComplexStructInequality(t *testing.T) { + m := make(map[float64]float64) + stra, strb := "hello", "helloo" // Difference is here + a, b := new(_Complex), new(_Complex) + *a = _Complex{5, [3]*_Complex{a, b, a}, &stra, m} + *b = _Complex{5, [3]*_Complex{b, a, a}, &strb, m} + if DeepEqual(a, b) { + t.Error("DeepEqual(complex different) = true, want false") + } +} + +type UnexpT struct { + m map[int]int +} + +func TestDeepEqualUnexportedMap(t *testing.T) { + // Check that DeepEqual can look at unexported fields. 
+ x1 := UnexpT{map[int]int{1: 2}} + x2 := UnexpT{map[int]int{1: 2}} + if !DeepEqual(&x1, &x2) { + t.Error("DeepEqual(x1, x2) = false, want true") + } + + y1 := UnexpT{map[int]int{2: 3}} + if DeepEqual(&x1, &y1) { + t.Error("DeepEqual(x1, y1) = true, want false") + } +} + +var deepEqualPerfTests = []struct { + x, y any +}{ + {x: int8(99), y: int8(99)}, + {x: []int8{99}, y: []int8{99}}, + {x: int16(99), y: int16(99)}, + {x: []int16{99}, y: []int16{99}}, + {x: int32(99), y: int32(99)}, + {x: []int32{99}, y: []int32{99}}, + {x: int64(99), y: int64(99)}, + {x: []int64{99}, y: []int64{99}}, + {x: int(999999), y: int(999999)}, + {x: []int{999999}, y: []int{999999}}, + + {x: uint8(99), y: uint8(99)}, + {x: []uint8{99}, y: []uint8{99}}, + {x: uint16(99), y: uint16(99)}, + {x: []uint16{99}, y: []uint16{99}}, + {x: uint32(99), y: uint32(99)}, + {x: []uint32{99}, y: []uint32{99}}, + {x: uint64(99), y: uint64(99)}, + {x: []uint64{99}, y: []uint64{99}}, + {x: uint(999999), y: uint(999999)}, + {x: []uint{999999}, y: []uint{999999}}, + {x: uintptr(999999), y: uintptr(999999)}, + {x: []uintptr{999999}, y: []uintptr{999999}}, + + {x: float32(1.414), y: float32(1.414)}, + {x: []float32{1.414}, y: []float32{1.414}}, + {x: float64(1.414), y: float64(1.414)}, + {x: []float64{1.414}, y: []float64{1.414}}, + + {x: complex64(1.414), y: complex64(1.414)}, + {x: []complex64{1.414}, y: []complex64{1.414}}, + {x: complex128(1.414), y: complex128(1.414)}, + {x: []complex128{1.414}, y: []complex128{1.414}}, + + {x: true, y: true}, + {x: []bool{true}, y: []bool{true}}, + + {x: "abcdef", y: "abcdef"}, + {x: []string{"abcdef"}, y: []string{"abcdef"}}, + + {x: []byte("abcdef"), y: []byte("abcdef")}, + {x: [][]byte{[]byte("abcdef")}, y: [][]byte{[]byte("abcdef")}}, + + {x: [6]byte{'a', 'b', 'c', 'a', 'b', 'c'}, y: [6]byte{'a', 'b', 'c', 'a', 'b', 'c'}}, + {x: [][6]byte{[6]byte{'a', 'b', 'c', 'a', 'b', 'c'}}, y: [][6]byte{[6]byte{'a', 'b', 'c', 'a', 'b', 'c'}}}, +} + +func TestDeepEqualAllocs(t *testing.T) { + for _, tt := range deepEqualPerfTests { + t.Run(ValueOf(tt.x).Type().String(), func(t *testing.T) { + got := testing.AllocsPerRun(100, func() { + if !DeepEqual(tt.x, tt.y) { + t.Errorf("DeepEqual(%v, %v)=false", tt.x, tt.y) + } + }) + if int(got) != 0 { + t.Errorf("DeepEqual(%v, %v) allocated %d times", tt.x, tt.y, int(got)) + } + }) + } +} + +func check2ndField(x any, offs uintptr, t *testing.T) { + s := ValueOf(x) + f := s.Type().Field(1) + if f.Offset != offs { + t.Error("mismatched offsets in structure alignment:", f.Offset, offs) + } +} + +// Check that structure alignment & offsets viewed through reflect agree with those +// from the compiler itself. +func TestAlignment(t *testing.T) { + type T1inner struct { + a int + } + type T1 struct { + T1inner + f int + } + type T2inner struct { + a, b int + } + type T2 struct { + T2inner + f int + } + + x := T1{T1inner{2}, 17} + check2ndField(x, uintptr(unsafe.Pointer(&x.f))-uintptr(unsafe.Pointer(&x)), t) + + x1 := T2{T2inner{2, 3}, 17} + check2ndField(x1, uintptr(unsafe.Pointer(&x1.f))-uintptr(unsafe.Pointer(&x1)), t) +} + +func Nil(a any, t *testing.T) { + n := ValueOf(a).Field(0) + if !n.IsNil() { + t.Errorf("%v should be nil", a) + } +} + +func NotNil(a any, t *testing.T) { + n := ValueOf(a).Field(0) + if n.IsNil() { + t.Errorf("value of type %v should not be nil", ValueOf(a).Type().String()) + } +} + +func TestIsNil(t *testing.T) { + // These implement IsNil. + // Wrap in extra struct to hide interface type. 
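+ // Field(0) preserves each field's static type, so IsNil is exercised
+ // for every nilable kind; on non-nilable kinds it would panic.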
+ doNil := []any{ + struct{ x *int }{}, + struct{ x any }{}, + struct{ x map[string]int }{}, + struct{ x func() bool }{}, + struct{ x chan int }{}, + struct{ x []string }{}, + struct{ x unsafe.Pointer }{}, + } + for _, ts := range doNil { + ty := TypeOf(ts).Field(0).Type + v := Zero(ty) + v.IsNil() // panics if not okay to call + } + + // Check the implementations + var pi struct { + x *int + } + Nil(pi, t) + pi.x = new(int) + NotNil(pi, t) + + var si struct { + x []int + } + Nil(si, t) + si.x = make([]int, 10) + NotNil(si, t) + + var ci struct { + x chan int + } + Nil(ci, t) + ci.x = make(chan int) + NotNil(ci, t) + + var mi struct { + x map[int]int + } + Nil(mi, t) + mi.x = make(map[int]int) + NotNil(mi, t) + + var ii struct { + x any + } + Nil(ii, t) + ii.x = 2 + NotNil(ii, t) + + var fi struct { + x func(t *testing.T) + } + Nil(fi, t) + fi.x = TestIsNil + NotNil(fi, t) +} + +func setField[S, V any](in S, offset uintptr, value V) (out S) { + *(*V)(unsafe.Add(unsafe.Pointer(&in), offset)) = value + return in +} + +func TestIsZero(t *testing.T) { + for i, tt := range []struct { + x any + want bool + }{ + // Booleans + {true, false}, + {false, true}, + // Numeric types + {int(0), true}, + {int(1), false}, + {int8(0), true}, + {int8(1), false}, + {int16(0), true}, + {int16(1), false}, + {int32(0), true}, + {int32(1), false}, + {int64(0), true}, + {int64(1), false}, + {uint(0), true}, + {uint(1), false}, + {uint8(0), true}, + {uint8(1), false}, + {uint16(0), true}, + {uint16(1), false}, + {uint32(0), true}, + {uint32(1), false}, + {uint64(0), true}, + {uint64(1), false}, + {float32(0), true}, + {float32(1.2), false}, + {float64(0), true}, + {float64(1.2), false}, + {math.Copysign(0, -1), true}, + {complex64(0), true}, + {complex64(1.2), false}, + {complex128(0), true}, + {complex128(1.2), false}, + {complex(math.Copysign(0, -1), 0), true}, + {complex(0, math.Copysign(0, -1)), true}, + {complex(math.Copysign(0, -1), math.Copysign(0, -1)), true}, + {uintptr(0), true}, + {uintptr(128), false}, + // Array + {Zero(TypeOf([5]string{})).Interface(), true}, + {[5]string{}, true}, // comparable array + {[5]string{"", "", "", "a", ""}, false}, // comparable array + {[1]*int{}, true}, // direct pointer array + {[1]*int{new(int)}, false}, // direct pointer array + {[3][]int{}, true}, // incomparable array + {[3][]int{{1}}, false}, // incomparable array + {[1 << 12]byte{}, true}, + {[1 << 12]byte{1}, false}, + {[1]struct{ p *int }{}, true}, + {[1]struct{ p *int }{{new(int)}}, false}, + {[3]Value{}, true}, + {[3]Value{{}, ValueOf(0), {}}, false}, + // Chan + {(chan string)(nil), true}, + {make(chan string), false}, + {time.After(1), false}, + // Func + {(func())(nil), true}, + {New, false}, + // Interface + {New(TypeOf(new(error)).Elem()).Elem(), true}, + {(io.Reader)(strings.NewReader("")), false}, + // Map + {(map[string]string)(nil), true}, + {map[string]string{}, false}, + {make(map[string]string), false}, + // Pointer + {(*func())(nil), true}, + {(*int)(nil), true}, + {new(int), false}, + // Slice + {[]string{}, false}, + {([]string)(nil), true}, + {make([]string, 0), false}, + // Strings + {"", true}, + {"not-zero", false}, + // Structs + {T{}, true}, // comparable struct + {T{123, 456.75, "hello", &_i}, false}, // comparable struct + {struct{ p *int }{}, true}, // direct pointer struct + {struct{ p *int }{new(int)}, false}, // direct pointer struct + {struct{ s []int }{}, true}, // incomparable struct + {struct{ s []int }{[]int{1}}, false}, // incomparable struct + {struct{ Value }{}, true}, + 
{struct{ Value }{ValueOf(0)}, false}, + {struct{ _, a, _ uintptr }{}, true}, // comparable struct with blank fields + {setField(struct{ _, a, _ uintptr }{}, 0*unsafe.Sizeof(uintptr(0)), 1), true}, + {setField(struct{ _, a, _ uintptr }{}, 1*unsafe.Sizeof(uintptr(0)), 1), false}, + {setField(struct{ _, a, _ uintptr }{}, 2*unsafe.Sizeof(uintptr(0)), 1), true}, + {struct{ _, a, _ func() }{}, true}, // incomparable struct with blank fields + {setField(struct{ _, a, _ func() }{}, 0*unsafe.Sizeof((func())(nil)), func() {}), true}, + {setField(struct{ _, a, _ func() }{}, 1*unsafe.Sizeof((func())(nil)), func() {}), false}, + {setField(struct{ _, a, _ func() }{}, 2*unsafe.Sizeof((func())(nil)), func() {}), true}, + {struct{ a [256]S }{}, true}, + {struct{ a [256]S }{a: [256]S{2: {i1: 1}}}, false}, + {struct{ a [256]float32 }{}, true}, + {struct{ a [256]float32 }{a: [256]float32{2: 1.0}}, false}, + {struct{ _, a [256]S }{}, true}, + {setField(struct{ _, a [256]S }{}, 0*unsafe.Sizeof(int64(0)), int64(1)), true}, + // UnsafePointer + {(unsafe.Pointer)(nil), true}, + {(unsafe.Pointer)(new(int)), false}, + } { + var x Value + if v, ok := tt.x.(Value); ok { + x = v + } else { + x = ValueOf(tt.x) + } + + b := x.IsZero() + if b != tt.want { + t.Errorf("%d: IsZero((%s)(%+v)) = %t, want %t", i, x.Kind(), tt.x, b, tt.want) + } + + if !Zero(TypeOf(tt.x)).IsZero() { + t.Errorf("%d: IsZero(Zero(TypeOf((%s)(%+v)))) is false", i, x.Kind(), tt.x) + } + + p := New(x.Type()).Elem() + p.Set(x) + p.SetZero() + if !p.IsZero() { + t.Errorf("%d: IsZero((%s)(%+v)) is true after SetZero", i, p.Kind(), tt.x) + } + } + + func() { + defer func() { + if r := recover(); r == nil { + t.Error("should panic for invalid value") + } + }() + (Value{}).IsZero() + }() +} + +func TestInternalIsZero(t *testing.T) { + b := make([]byte, 512) + for a := 0; a < 8; a++ { + for i := 1; i <= 512-a; i++ { + InternalIsZero(b[a : a+i]) + } + } +} + +func TestInterfaceExtraction(t *testing.T) { + var s struct { + W io.Writer + } + + s.W = os.Stdout + v := Indirect(ValueOf(&s)).Field(0).Interface() + if v != s.W.(any) { + t.Error("Interface() on interface: ", v, s.W) + } +} + +func TestNilPtrValueSub(t *testing.T) { + var pi *int + if pv := ValueOf(pi); pv.Elem().IsValid() { + t.Error("ValueOf((*int)(nil)).Elem().IsValid()") + } +} + +func TestMap(t *testing.T) { + m := map[string]int{"a": 1, "b": 2} + mv := ValueOf(m) + if n := mv.Len(); n != len(m) { + t.Errorf("Len = %d, want %d", n, len(m)) + } + keys := mv.MapKeys() + newmap := MakeMap(mv.Type()) + for k, v := range m { + // Check that returned Keys match keys in range. + // These aren't required to be in the same order. + seen := false + for _, kv := range keys { + if kv.String() == k { + seen = true + break + } + } + if !seen { + t.Errorf("Missing key %q", k) + } + + // Check that value lookup is correct. + vv := mv.MapIndex(ValueOf(k)) + if vi := vv.Int(); vi != int64(v) { + t.Errorf("Key %q: have value %d, want %d", k, vi, v) + } + + // Copy into new map. 
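+ // SetMapIndex with a valid value inserts or updates the key; passing
+ // the zero Value (as done for "a" below) deletes the key instead.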
+ newmap.SetMapIndex(ValueOf(k), ValueOf(v)) + } + vv := mv.MapIndex(ValueOf("not-present")) + if vv.IsValid() { + t.Errorf("Invalid key: got non-nil value %s", valueToString(vv)) + } + + newm := newmap.Interface().(map[string]int) + if len(newm) != len(m) { + t.Errorf("length after copy: newm=%d, m=%d", len(newm), len(m)) + } + + for k, v := range newm { + mv, ok := m[k] + if mv != v { + t.Errorf("newm[%q] = %d, but m[%q] = %d, %v", k, v, k, mv, ok) + } + } + + newmap.SetMapIndex(ValueOf("a"), Value{}) + v, ok := newm["a"] + if ok { + t.Errorf("newm[\"a\"] = %d after delete", v) + } + + mv = ValueOf(&m).Elem() + mv.Set(Zero(mv.Type())) + if m != nil { + t.Errorf("mv.Set(nil) failed") + } + + type S string + shouldPanic("not assignable", func() { mv.MapIndex(ValueOf(S("key"))) }) + shouldPanic("not assignable", func() { mv.SetMapIndex(ValueOf(S("key")), ValueOf(0)) }) +} + +func TestNilMap(t *testing.T) { + var m map[string]int + mv := ValueOf(m) + keys := mv.MapKeys() + if len(keys) != 0 { + t.Errorf(">0 keys for nil map: %v", keys) + } + + // Check that value for missing key is zero. + x := mv.MapIndex(ValueOf("hello")) + if x.Kind() != Invalid { + t.Errorf("m.MapIndex(\"hello\") for nil map = %v, want Invalid Value", x) + } + + // Check big value too. + var mbig map[string][10 << 20]byte + x = ValueOf(mbig).MapIndex(ValueOf("hello")) + if x.Kind() != Invalid { + t.Errorf("mbig.MapIndex(\"hello\") for nil map = %v, want Invalid Value", x) + } + + // Test that deletes from a nil map succeed. + mv.SetMapIndex(ValueOf("hi"), Value{}) +} + +func TestChan(t *testing.T) { + for loop := 0; loop < 2; loop++ { + var c chan int + var cv Value + + // check both ways to allocate channels + switch loop { + case 1: + c = make(chan int, 1) + cv = ValueOf(c) + case 0: + cv = MakeChan(TypeOf(c), 1) + c = cv.Interface().(chan int) + } + + // Send + cv.Send(ValueOf(2)) + if i := <-c; i != 2 { + t.Errorf("reflect Send 2, native recv %d", i) + } + + // Recv + c <- 3 + if i, ok := cv.Recv(); i.Int() != 3 || !ok { + t.Errorf("native send 3, reflect Recv %d, %t", i.Int(), ok) + } + + // TryRecv fail + val, ok := cv.TryRecv() + if val.IsValid() || ok { + t.Errorf("TryRecv on empty chan: %s, %t", valueToString(val), ok) + } + + // TryRecv success + c <- 4 + val, ok = cv.TryRecv() + if !val.IsValid() { + t.Errorf("TryRecv on ready chan got nil") + } else if i := val.Int(); i != 4 || !ok { + t.Errorf("native send 4, TryRecv %d, %t", i, ok) + } + + // TrySend fail + c <- 100 + ok = cv.TrySend(ValueOf(5)) + i := <-c + if ok { + t.Errorf("TrySend on full chan succeeded: value %d", i) + } + + // TrySend success + ok = cv.TrySend(ValueOf(6)) + if !ok { + t.Errorf("TrySend on empty chan failed") + select { + case x := <-c: + t.Errorf("TrySend failed but it did send %d", x) + default: + } + } else { + if i = <-c; i != 6 { + t.Errorf("TrySend 6, recv %d", i) + } + } + + // Close + c <- 123 + cv.Close() + if i, ok := cv.Recv(); i.Int() != 123 || !ok { + t.Errorf("send 123 then close; Recv %d, %t", i.Int(), ok) + } + if i, ok := cv.Recv(); i.Int() != 0 || ok { + t.Errorf("after close Recv %d, %t", i.Int(), ok) + } + // Closing a read-only channel + shouldPanic("", func() { + c := make(<-chan int, 1) + cv := ValueOf(c) + cv.Close() + }) + } + + // check creation of unbuffered channel + var c chan int + cv := MakeChan(TypeOf(c), 0) + c = cv.Interface().(chan int) + if cv.TrySend(ValueOf(7)) { + t.Errorf("TrySend on sync chan succeeded") + } + if v, ok := cv.TryRecv(); v.IsValid() || ok { + t.Errorf("TryRecv on sync chan 
succeeded: isvalid=%v ok=%v", v.IsValid(), ok) + } + + // len/cap + cv = MakeChan(TypeOf(c), 10) + c = cv.Interface().(chan int) + for i := 0; i < 3; i++ { + c <- i + } + if l, m := cv.Len(), cv.Cap(); l != len(c) || m != cap(c) { + t.Errorf("Len/Cap = %d/%d want %d/%d", l, m, len(c), cap(c)) + } +} + +// caseInfo describes a single case in a select test. +type caseInfo struct { + desc string + canSelect bool + recv Value + closed bool + helper func() + panic bool +} + +var allselect = flag.Bool("allselect", false, "exhaustive select test") + +func TestSelect(t *testing.T) { + selectWatch.once.Do(func() { go selectWatcher() }) + + var x exhaustive + nch := 0 + newop := func(n int, cap int) (ch, val Value) { + nch++ + if nch%101%2 == 1 { + c := make(chan int, cap) + ch = ValueOf(c) + val = ValueOf(n) + } else { + c := make(chan string, cap) + ch = ValueOf(c) + val = ValueOf(fmt.Sprint(n)) + } + return + } + + for n := 0; x.Next(); n++ { + if testing.Short() && n >= 1000 { + break + } + if n >= 100000 && !*allselect { + break + } + if n%100000 == 0 && testing.Verbose() { + println("TestSelect", n) + } + var cases []SelectCase + var info []caseInfo + + // Ready send. + if x.Maybe() { + ch, val := newop(len(cases), 1) + cases = append(cases, SelectCase{ + Dir: SelectSend, + Chan: ch, + Send: val, + }) + info = append(info, caseInfo{desc: "ready send", canSelect: true}) + } + + // Ready recv. + if x.Maybe() { + ch, val := newop(len(cases), 1) + ch.Send(val) + cases = append(cases, SelectCase{ + Dir: SelectRecv, + Chan: ch, + }) + info = append(info, caseInfo{desc: "ready recv", canSelect: true, recv: val}) + } + + // Blocking send. + if x.Maybe() { + ch, val := newop(len(cases), 0) + cases = append(cases, SelectCase{ + Dir: SelectSend, + Chan: ch, + Send: val, + }) + // Let it execute? + if x.Maybe() { + f := func() { ch.Recv() } + info = append(info, caseInfo{desc: "blocking send", helper: f}) + } else { + info = append(info, caseInfo{desc: "blocking send"}) + } + } + + // Blocking recv. + if x.Maybe() { + ch, val := newop(len(cases), 0) + cases = append(cases, SelectCase{ + Dir: SelectRecv, + Chan: ch, + }) + // Let it execute? + if x.Maybe() { + f := func() { ch.Send(val) } + info = append(info, caseInfo{desc: "blocking recv", recv: val, helper: f}) + } else { + info = append(info, caseInfo{desc: "blocking recv"}) + } + } + + // Zero Chan send. + if x.Maybe() { + // Maybe include value to send. + var val Value + if x.Maybe() { + val = ValueOf(100) + } + cases = append(cases, SelectCase{ + Dir: SelectSend, + Send: val, + }) + info = append(info, caseInfo{desc: "zero Chan send"}) + } + + // Zero Chan receive. + if x.Maybe() { + cases = append(cases, SelectCase{ + Dir: SelectRecv, + }) + info = append(info, caseInfo{desc: "zero Chan recv"}) + } + + // nil Chan send. + if x.Maybe() { + cases = append(cases, SelectCase{ + Dir: SelectSend, + Chan: ValueOf((chan int)(nil)), + Send: ValueOf(101), + }) + info = append(info, caseInfo{desc: "nil Chan send"}) + } + + // nil Chan recv. + if x.Maybe() { + cases = append(cases, SelectCase{ + Dir: SelectRecv, + Chan: ValueOf((chan int)(nil)), + }) + info = append(info, caseInfo{desc: "nil Chan recv"}) + } + + // closed Chan send. + if x.Maybe() { + ch := make(chan int) + close(ch) + cases = append(cases, SelectCase{ + Dir: SelectSend, + Chan: ValueOf(ch), + Send: ValueOf(101), + }) + info = append(info, caseInfo{desc: "closed Chan send", canSelect: true, panic: true}) + } + + // closed Chan recv. 
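+		// A receive from a closed channel never blocks: it yields the element
+		// type's zero value with ok == false, which is what recv records below.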
+		if x.Maybe() {
+			ch, val := newop(len(cases), 0)
+			ch.Close()
+			val = Zero(val.Type())
+			cases = append(cases, SelectCase{
+				Dir: SelectRecv,
+				Chan: ch,
+			})
+			info = append(info, caseInfo{desc: "closed Chan recv", canSelect: true, closed: true, recv: val})
+		}
+
+		var helper func() // goroutine to help the select complete
+
+		// Add default? Must be last case here, but will permute.
+		// Add the default if the select would otherwise
+		// block forever, and maybe add it anyway.
+		numCanSelect := 0
+		canProceed := false
+		canBlock := true
+		canPanic := false
+		helpers := []int{}
+		for i, c := range info {
+			if c.canSelect {
+				canProceed = true
+				canBlock = false
+				numCanSelect++
+				if c.panic {
+					canPanic = true
+				}
+			} else if c.helper != nil {
+				canProceed = true
+				helpers = append(helpers, i)
+			}
+		}
+		if !canProceed || x.Maybe() {
+			cases = append(cases, SelectCase{
+				Dir: SelectDefault,
+			})
+			info = append(info, caseInfo{desc: "default", canSelect: canBlock})
+			numCanSelect++
+		} else if canBlock {
+			// Select needs to communicate with another goroutine.
+			cas := &info[helpers[x.Choose(len(helpers))]]
+			helper = cas.helper
+			cas.canSelect = true
+			numCanSelect++
+		}
+
+		// Permute cases and case info.
+		// Doing too much here makes the exhaustive loop
+		// too exhausting, so just do two swaps.
+		for loop := 0; loop < 2; loop++ {
+			i := x.Choose(len(cases))
+			j := x.Choose(len(cases))
+			cases[i], cases[j] = cases[j], cases[i]
+			info[i], info[j] = info[j], info[i]
+		}
+
+		if helper != nil {
+			// We wait before kicking off a goroutine to satisfy a blocked select.
+			// The pause needs to be big enough to let the select block before
+			// we run the helper, but if we lose that race once in a while it's okay: the
+			// select will just proceed immediately. Not a big deal.
+			// For short tests we can grow the timeout a bit without fear of taking too long.
+			pause := 10 * time.Microsecond
+			if testing.Short() {
+				pause = 100 * time.Microsecond
+			}
+			time.AfterFunc(pause, helper)
+		}
+
+		// Run select.
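+		// runSelect reports the chosen case, any received value, and a recovered
+		// panic value (nil if the Select completed normally).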
+		i, recv, recvOK, panicErr := runSelect(cases, info)
+		if panicErr != nil && !canPanic {
+			t.Fatalf("%s\npanicked unexpectedly: %v", fmtSelect(info), panicErr)
+		}
+		if panicErr == nil && canPanic && numCanSelect == 1 {
+			t.Fatalf("%s\nselected #%d incorrectly (should panic)", fmtSelect(info), i)
+		}
+		if panicErr != nil {
+			continue
+		}
+
+		cas := info[i]
+		if !cas.canSelect {
+			recvStr := ""
+			if recv.IsValid() {
+				recvStr = fmt.Sprintf(", received %v, %v", recv.Interface(), recvOK)
+			}
+			t.Fatalf("%s\nselected #%d incorrectly%s", fmtSelect(info), i, recvStr)
+		}
+		if cas.panic {
+			t.Fatalf("%s\nselected #%d incorrectly (case should panic)", fmtSelect(info), i)
+		}
+
+		if cases[i].Dir == SelectRecv {
+			if !recv.IsValid() {
+				t.Fatalf("%s\nselected #%d but got %v, %v, want %v, %v", fmtSelect(info), i, recv, recvOK, cas.recv.Interface(), !cas.closed)
+			}
+			if !cas.recv.IsValid() {
+				t.Fatalf("%s\nselected #%d but internal error: missing recv value", fmtSelect(info), i)
+			}
+			if recv.Interface() != cas.recv.Interface() || recvOK != !cas.closed {
+				if recv.Interface() == cas.recv.Interface() && recvOK == !cas.closed {
+					t.Fatalf("%s\nselected #%d, got %#v, %v, and DeepEqual is broken on %T", fmtSelect(info), i, recv.Interface(), recvOK, recv.Interface())
+				}
+				t.Fatalf("%s\nselected #%d but got %#v, %v, want %#v, %v", fmtSelect(info), i, recv.Interface(), recvOK, cas.recv.Interface(), !cas.closed)
+			}
+		} else {
+			if recv.IsValid() || recvOK {
+				t.Fatalf("%s\nselected #%d but got %v, %v, want %v, %v", fmtSelect(info), i, recv, recvOK, Value{}, false)
+			}
+		}
+	}
+}
+
+func TestSelectMaxCases(t *testing.T) {
+	var sCases []SelectCase
+	channel := make(chan int)
+	close(channel)
+	for i := 0; i < 65536; i++ {
+		sCases = append(sCases, SelectCase{
+			Dir: SelectRecv,
+			Chan: ValueOf(channel),
+		})
+	}
+	// Should not panic
+	_, _, _ = Select(sCases)
+	sCases = append(sCases, SelectCase{
+		Dir: SelectRecv,
+		Chan: ValueOf(channel),
+	})
+	defer func() {
+		if err := recover(); err != nil {
+			if err.(string) != "reflect.Select: too many cases (max 65536)" {
+				t.Fatalf("unexpected error from select call with greater than max supported cases")
+			}
+		} else {
+			t.Fatalf("expected select call to panic with greater than max supported cases")
+		}
+	}()
+	// Should panic
+	_, _, _ = Select(sCases)
+}
+
+func TestSelectNop(t *testing.T) {
+	// "select { default: }" should always return the default case.
+	chosen, _, _ := Select([]SelectCase{{Dir: SelectDefault}})
+	if chosen != 0 {
+		t.Fatalf("expected Select to return 0, but got %#v", chosen)
+	}
+}
+
+// selectWatch and the selectWatcher are a watchdog mechanism for running Select.
+// If the selectWatcher notices that a select has been blocked for more than 10 seconds, it prints
+// an error describing the select and panics the entire test binary.
+var selectWatch struct {
+	sync.Mutex
+	once sync.Once
+	now time.Time
+	info []caseInfo
+}
+
+func selectWatcher() {
+	for {
+		time.Sleep(1 * time.Second)
+		selectWatch.Lock()
+		if selectWatch.info != nil && time.Since(selectWatch.now) > 10*time.Second {
+			fmt.Fprintf(os.Stderr, "TestSelect:\n%s blocked indefinitely\n", fmtSelect(selectWatch.info))
+			panic("select stuck")
+		}
+		selectWatch.Unlock()
+	}
+}
+
+// runSelect runs a single select test.
+// It returns the values returned by Select but also returns
+// a panic value if the Select panics.
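+// It also registers the case info with the selectWatch watchdog for the
+// duration of the call, so a stuck Select gets reported instead of hanging silently.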
+func runSelect(cases []SelectCase, info []caseInfo) (chosen int, recv Value, recvOK bool, panicErr any) { + defer func() { + panicErr = recover() + + selectWatch.Lock() + selectWatch.info = nil + selectWatch.Unlock() + }() + + selectWatch.Lock() + selectWatch.now = time.Now() + selectWatch.info = info + selectWatch.Unlock() + + chosen, recv, recvOK = Select(cases) + return +} + +// fmtSelect formats the information about a single select test. +func fmtSelect(info []caseInfo) string { + var buf strings.Builder + fmt.Fprintf(&buf, "\nselect {\n") + for i, cas := range info { + fmt.Fprintf(&buf, "%d: %s", i, cas.desc) + if cas.recv.IsValid() { + fmt.Fprintf(&buf, " val=%#v", cas.recv.Interface()) + } + if cas.canSelect { + fmt.Fprintf(&buf, " canselect") + } + if cas.panic { + fmt.Fprintf(&buf, " panic") + } + fmt.Fprintf(&buf, "\n") + } + fmt.Fprintf(&buf, "}") + return buf.String() +} + +type two [2]uintptr + +// Difficult test for function call because of +// implicit padding between arguments. +func dummy(b byte, c int, d byte, e two, f byte, g float32, h byte) (i byte, j int, k byte, l two, m byte, n float32, o byte) { + return b, c, d, e, f, g, h +} + +func TestFunc(t *testing.T) { + ret := ValueOf(dummy).Call([]Value{ + ValueOf(byte(10)), + ValueOf(20), + ValueOf(byte(30)), + ValueOf(two{40, 50}), + ValueOf(byte(60)), + ValueOf(float32(70)), + ValueOf(byte(80)), + }) + if len(ret) != 7 { + t.Fatalf("Call returned %d values, want 7", len(ret)) + } + + i := byte(ret[0].Uint()) + j := int(ret[1].Int()) + k := byte(ret[2].Uint()) + l := ret[3].Interface().(two) + m := byte(ret[4].Uint()) + n := float32(ret[5].Float()) + o := byte(ret[6].Uint()) + + if i != 10 || j != 20 || k != 30 || l != (two{40, 50}) || m != 60 || n != 70 || o != 80 { + t.Errorf("Call returned %d, %d, %d, %v, %d, %g, %d; want 10, 20, 30, [40, 50], 60, 70, 80", i, j, k, l, m, n, o) + } + + for i, v := range ret { + if v.CanAddr() { + t.Errorf("result %d is addressable", i) + } + } +} + +func TestCallConvert(t *testing.T) { + v := ValueOf(new(io.ReadWriter)).Elem() + f := ValueOf(func(r io.Reader) io.Reader { return r }) + out := f.Call([]Value{v}) + if len(out) != 1 || out[0].Type() != TypeOf(new(io.Reader)).Elem() || !out[0].IsNil() { + t.Errorf("expected [nil], got %v", out) + } +} + +type emptyStruct struct{} + +type nonEmptyStruct struct { + member int +} + +func returnEmpty() emptyStruct { + return emptyStruct{} +} + +func takesEmpty(e emptyStruct) { +} + +func returnNonEmpty(i int) nonEmptyStruct { + return nonEmptyStruct{member: i} +} + +func takesNonEmpty(n nonEmptyStruct) int { + return n.member +} + +func TestCallWithStruct(t *testing.T) { + r := ValueOf(returnEmpty).Call(nil) + if len(r) != 1 || r[0].Type() != TypeOf(emptyStruct{}) { + t.Errorf("returning empty struct returned %#v instead", r) + } + r = ValueOf(takesEmpty).Call([]Value{ValueOf(emptyStruct{})}) + if len(r) != 0 { + t.Errorf("takesEmpty returned values: %#v", r) + } + r = ValueOf(returnNonEmpty).Call([]Value{ValueOf(42)}) + if len(r) != 1 || r[0].Type() != TypeOf(nonEmptyStruct{}) || r[0].Field(0).Int() != 42 { + t.Errorf("returnNonEmpty returned %#v", r) + } + r = ValueOf(takesNonEmpty).Call([]Value{ValueOf(nonEmptyStruct{member: 42})}) + if len(r) != 1 || r[0].Type() != TypeOf(1) || r[0].Int() != 42 { + t.Errorf("takesNonEmpty returned %#v", r) + } +} + +func TestCallReturnsEmpty(t *testing.T) { + // Issue 21717: past-the-end pointer write in Call with + // nonzero-sized frame and zero-sized return value. 
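+	// A past-the-end write for the zero-sized result could alias the adjacent
+	// *[2]int64 and keep it reachable; the finalizer below proves it is collected.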
+ runtime.GC() + var finalized uint32 + f := func() (emptyStruct, *[2]int64) { + i := new([2]int64) // big enough to not be tinyalloc'd, so finalizer always runs when i dies + runtime.SetFinalizer(i, func(*[2]int64) { atomic.StoreUint32(&finalized, 1) }) + return emptyStruct{}, i + } + v := ValueOf(f).Call(nil)[0] // out[0] should not alias out[1]'s memory, so the finalizer should run. + timeout := time.After(5 * time.Second) + for atomic.LoadUint32(&finalized) == 0 { + select { + case <-timeout: + t.Fatal("finalizer did not run") + default: + } + runtime.Gosched() + runtime.GC() + } + runtime.KeepAlive(v) +} + +func TestMakeFunc(t *testing.T) { + f := dummy + fv := MakeFunc(TypeOf(f), func(in []Value) []Value { return in }) + ValueOf(&f).Elem().Set(fv) + + // Call g with small arguments so that there is + // something predictable (and different from the + // correct results) in those positions on the stack. + g := dummy + g(1, 2, 3, two{4, 5}, 6, 7, 8) + + // Call constructed function f. + i, j, k, l, m, n, o := f(10, 20, 30, two{40, 50}, 60, 70, 80) + if i != 10 || j != 20 || k != 30 || l != (two{40, 50}) || m != 60 || n != 70 || o != 80 { + t.Errorf("Call returned %d, %d, %d, %v, %d, %g, %d; want 10, 20, 30, [40, 50], 60, 70, 80", i, j, k, l, m, n, o) + } +} + +func TestMakeFuncInterface(t *testing.T) { + fn := func(i int) int { return i } + incr := func(in []Value) []Value { + return []Value{ValueOf(int(in[0].Int() + 1))} + } + fv := MakeFunc(TypeOf(fn), incr) + ValueOf(&fn).Elem().Set(fv) + if r := fn(2); r != 3 { + t.Errorf("Call returned %d, want 3", r) + } + if r := fv.Call([]Value{ValueOf(14)})[0].Int(); r != 15 { + t.Errorf("Call returned %d, want 15", r) + } + if r := fv.Interface().(func(int) int)(26); r != 27 { + t.Errorf("Call returned %d, want 27", r) + } +} + +func TestMakeFuncVariadic(t *testing.T) { + // Test that variadic arguments are packed into a slice and passed as last arg + fn := func(_ int, is ...int) []int { return nil } + fv := MakeFunc(TypeOf(fn), func(in []Value) []Value { return in[1:2] }) + ValueOf(&fn).Elem().Set(fv) + + r := fn(1, 2, 3) + if r[0] != 2 || r[1] != 3 { + t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1]) + } + + r = fn(1, []int{2, 3}...) + if r[0] != 2 || r[1] != 3 { + t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1]) + } + + r = fv.Call([]Value{ValueOf(1), ValueOf(2), ValueOf(3)})[0].Interface().([]int) + if r[0] != 2 || r[1] != 3 { + t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1]) + } + + r = fv.CallSlice([]Value{ValueOf(1), ValueOf([]int{2, 3})})[0].Interface().([]int) + if r[0] != 2 || r[1] != 3 { + t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1]) + } + + f := fv.Interface().(func(int, ...int) []int) + + r = f(1, 2, 3) + if r[0] != 2 || r[1] != 3 { + t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1]) + } + r = f(1, []int{2, 3}...) + if r[0] != 2 || r[1] != 3 { + t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1]) + } +} + +// Dummy type that implements io.WriteCloser +type WC struct { +} + +func (w *WC) Write(p []byte) (n int, err error) { + return 0, nil +} +func (w *WC) Close() error { + return nil +} + +func TestMakeFuncValidReturnAssignments(t *testing.T) { + // reflect.Values returned from the wrapped function should be assignment-converted + // to the types returned by the result of MakeFunc. + + // Concrete types should be promotable to interfaces they implement. 
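+	// io.EOF is a concrete error value assigned to the declared error result.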
+ var f func() error + f = MakeFunc(TypeOf(f), func([]Value) []Value { + return []Value{ValueOf(io.EOF)} + }).Interface().(func() error) + f() + + // Super-interfaces should be promotable to simpler interfaces. + var g func() io.Writer + g = MakeFunc(TypeOf(g), func([]Value) []Value { + var w io.WriteCloser = &WC{} + return []Value{ValueOf(&w).Elem()} + }).Interface().(func() io.Writer) + g() + + // Channels should be promotable to directional channels. + var h func() <-chan int + h = MakeFunc(TypeOf(h), func([]Value) []Value { + return []Value{ValueOf(make(chan int))} + }).Interface().(func() <-chan int) + h() + + // Unnamed types should be promotable to named types. + type T struct{ a, b, c int } + var i func() T + i = MakeFunc(TypeOf(i), func([]Value) []Value { + return []Value{ValueOf(struct{ a, b, c int }{a: 1, b: 2, c: 3})} + }).Interface().(func() T) + i() +} + +func TestMakeFuncInvalidReturnAssignments(t *testing.T) { + // Type doesn't implement the required interface. + shouldPanic("", func() { + var f func() error + f = MakeFunc(TypeOf(f), func([]Value) []Value { + return []Value{ValueOf(int(7))} + }).Interface().(func() error) + f() + }) + // Assigning to an interface with additional methods. + shouldPanic("", func() { + var f func() io.ReadWriteCloser + f = MakeFunc(TypeOf(f), func([]Value) []Value { + var w io.WriteCloser = &WC{} + return []Value{ValueOf(&w).Elem()} + }).Interface().(func() io.ReadWriteCloser) + f() + }) + // Directional channels can't be assigned to bidirectional ones. + shouldPanic("", func() { + var f func() chan int + f = MakeFunc(TypeOf(f), func([]Value) []Value { + var c <-chan int = make(chan int) + return []Value{ValueOf(c)} + }).Interface().(func() chan int) + f() + }) + // Two named types which are otherwise identical. + shouldPanic("", func() { + type T struct{ a, b, c int } + type U struct{ a, b, c int } + var f func() T + f = MakeFunc(TypeOf(f), func([]Value) []Value { + return []Value{ValueOf(U{a: 1, b: 2, c: 3})} + }).Interface().(func() T) + f() + }) +} + +type Point struct { + x, y int +} + +// This will be index 0. +func (p Point) AnotherMethod(scale int) int { + return -1 +} + +// This will be index 1. +func (p Point) Dist(scale int) int { + //println("Point.Dist", p.x, p.y, scale) + return p.x*p.x*scale + p.y*p.y*scale +} + +// This will be index 2. +func (p Point) GCMethod(k int) int { + runtime.GC() + return k + p.x +} + +// This will be index 3. +func (p Point) NoArgs() { + // Exercise no-argument/no-result paths. +} + +// This will be index 4. +func (p Point) TotalDist(points ...Point) int { + tot := 0 + for _, q := range points { + dx := q.x - p.x + dy := q.y - p.y + tot += dx*dx + dy*dy // Should call Sqrt, but it's just a test. + + } + return tot +} + +// This will be index 5. +func (p *Point) Int64Method(x int64) int64 { + return x +} + +// This will be index 6. +func (p *Point) Int32Method(x int32) int32 { + return x +} + +func TestMethod(t *testing.T) { + // Non-curried method of type. 
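+	// Methods are sorted by name, so Method(1) is Dist (after AnotherMethod);
+	// the receiver is passed explicitly as the first argument of Func.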
+ p := Point{3, 4} + i := TypeOf(p).Method(1).Func.Call([]Value{ValueOf(p), ValueOf(10)})[0].Int() + if i != 250 { + t.Errorf("Type Method returned %d; want 250", i) + } + + m, ok := TypeOf(p).MethodByName("Dist") + if !ok { + t.Fatalf("method by name failed") + } + i = m.Func.Call([]Value{ValueOf(p), ValueOf(11)})[0].Int() + if i != 275 { + t.Errorf("Type MethodByName returned %d; want 275", i) + } + + m, ok = TypeOf(p).MethodByName("NoArgs") + if !ok { + t.Fatalf("method by name failed") + } + n := len(m.Func.Call([]Value{ValueOf(p)})) + if n != 0 { + t.Errorf("NoArgs returned %d values; want 0", n) + } + + i = TypeOf(&p).Method(1).Func.Call([]Value{ValueOf(&p), ValueOf(12)})[0].Int() + if i != 300 { + t.Errorf("Pointer Type Method returned %d; want 300", i) + } + + m, ok = TypeOf(&p).MethodByName("Dist") + if !ok { + t.Fatalf("ptr method by name failed") + } + i = m.Func.Call([]Value{ValueOf(&p), ValueOf(13)})[0].Int() + if i != 325 { + t.Errorf("Pointer Type MethodByName returned %d; want 325", i) + } + + m, ok = TypeOf(&p).MethodByName("NoArgs") + if !ok { + t.Fatalf("method by name failed") + } + n = len(m.Func.Call([]Value{ValueOf(&p)})) + if n != 0 { + t.Errorf("NoArgs returned %d values; want 0", n) + } + + _, ok = TypeOf(&p).MethodByName("AA") + if ok { + t.Errorf(`MethodByName("AA") should have failed`) + } + + _, ok = TypeOf(&p).MethodByName("ZZ") + if ok { + t.Errorf(`MethodByName("ZZ") should have failed`) + } + + // Curried method of value. + tfunc := TypeOf((func(int) int)(nil)) + v := ValueOf(p).Method(1) + if tt := v.Type(); tt != tfunc { + t.Errorf("Value Method Type is %s; want %s", tt, tfunc) + } + i = v.Call([]Value{ValueOf(14)})[0].Int() + if i != 350 { + t.Errorf("Value Method returned %d; want 350", i) + } + v = ValueOf(p).MethodByName("Dist") + if tt := v.Type(); tt != tfunc { + t.Errorf("Value MethodByName Type is %s; want %s", tt, tfunc) + } + i = v.Call([]Value{ValueOf(15)})[0].Int() + if i != 375 { + t.Errorf("Value MethodByName returned %d; want 375", i) + } + v = ValueOf(p).MethodByName("NoArgs") + v.Call(nil) + + // Curried method of pointer. + v = ValueOf(&p).Method(1) + if tt := v.Type(); tt != tfunc { + t.Errorf("Pointer Value Method Type is %s; want %s", tt, tfunc) + } + i = v.Call([]Value{ValueOf(16)})[0].Int() + if i != 400 { + t.Errorf("Pointer Value Method returned %d; want 400", i) + } + v = ValueOf(&p).MethodByName("Dist") + if tt := v.Type(); tt != tfunc { + t.Errorf("Pointer Value MethodByName Type is %s; want %s", tt, tfunc) + } + i = v.Call([]Value{ValueOf(17)})[0].Int() + if i != 425 { + t.Errorf("Pointer Value MethodByName returned %d; want 425", i) + } + v = ValueOf(&p).MethodByName("NoArgs") + v.Call(nil) + + // Curried method of interface value. + // Have to wrap interface value in a struct to get at it. + // Passing it to ValueOf directly would + // access the underlying Point, not the interface. 
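+	// Below, ValueOf(&x).Elem() keeps the interface kind, so Method(0) binds
+	// the interface method and dispatches dynamically to Point.Dist.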
+ var x interface { + Dist(int) int + } = p + pv := ValueOf(&x).Elem() + v = pv.Method(0) + if tt := v.Type(); tt != tfunc { + t.Errorf("Interface Method Type is %s; want %s", tt, tfunc) + } + i = v.Call([]Value{ValueOf(18)})[0].Int() + if i != 450 { + t.Errorf("Interface Method returned %d; want 450", i) + } + v = pv.MethodByName("Dist") + if tt := v.Type(); tt != tfunc { + t.Errorf("Interface MethodByName Type is %s; want %s", tt, tfunc) + } + i = v.Call([]Value{ValueOf(19)})[0].Int() + if i != 475 { + t.Errorf("Interface MethodByName returned %d; want 475", i) + } +} + +func TestMethodValue(t *testing.T) { + p := Point{3, 4} + var i int64 + + // Check that method value have the same underlying code pointers. + if p1, p2 := ValueOf(Point{1, 1}).Method(1), ValueOf(Point{2, 2}).Method(1); p1.Pointer() != p2.Pointer() { + t.Errorf("methodValueCall mismatched: %v - %v", p1, p2) + } + + // Curried method of value. + tfunc := TypeOf((func(int) int)(nil)) + v := ValueOf(p).Method(1) + if tt := v.Type(); tt != tfunc { + t.Errorf("Value Method Type is %s; want %s", tt, tfunc) + } + i = ValueOf(v.Interface()).Call([]Value{ValueOf(10)})[0].Int() + if i != 250 { + t.Errorf("Value Method returned %d; want 250", i) + } + v = ValueOf(p).MethodByName("Dist") + if tt := v.Type(); tt != tfunc { + t.Errorf("Value MethodByName Type is %s; want %s", tt, tfunc) + } + i = ValueOf(v.Interface()).Call([]Value{ValueOf(11)})[0].Int() + if i != 275 { + t.Errorf("Value MethodByName returned %d; want 275", i) + } + v = ValueOf(p).MethodByName("NoArgs") + ValueOf(v.Interface()).Call(nil) + v.Interface().(func())() + + // Curried method of pointer. + v = ValueOf(&p).Method(1) + if tt := v.Type(); tt != tfunc { + t.Errorf("Pointer Value Method Type is %s; want %s", tt, tfunc) + } + i = ValueOf(v.Interface()).Call([]Value{ValueOf(12)})[0].Int() + if i != 300 { + t.Errorf("Pointer Value Method returned %d; want 300", i) + } + v = ValueOf(&p).MethodByName("Dist") + if tt := v.Type(); tt != tfunc { + t.Errorf("Pointer Value MethodByName Type is %s; want %s", tt, tfunc) + } + i = ValueOf(v.Interface()).Call([]Value{ValueOf(13)})[0].Int() + if i != 325 { + t.Errorf("Pointer Value MethodByName returned %d; want 325", i) + } + v = ValueOf(&p).MethodByName("NoArgs") + ValueOf(v.Interface()).Call(nil) + v.Interface().(func())() + + // Curried method of pointer to pointer. + pp := &p + v = ValueOf(&pp).Elem().Method(1) + if tt := v.Type(); tt != tfunc { + t.Errorf("Pointer Pointer Value Method Type is %s; want %s", tt, tfunc) + } + i = ValueOf(v.Interface()).Call([]Value{ValueOf(14)})[0].Int() + if i != 350 { + t.Errorf("Pointer Pointer Value Method returned %d; want 350", i) + } + v = ValueOf(&pp).Elem().MethodByName("Dist") + if tt := v.Type(); tt != tfunc { + t.Errorf("Pointer Pointer Value MethodByName Type is %s; want %s", tt, tfunc) + } + i = ValueOf(v.Interface()).Call([]Value{ValueOf(15)})[0].Int() + if i != 375 { + t.Errorf("Pointer Pointer Value MethodByName returned %d; want 375", i) + } + + // Curried method of interface value. + // Have to wrap interface value in a struct to get at it. + // Passing it to ValueOf directly would + // access the underlying Point, not the interface. 
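+	// Here the interface value is reached through a struct field instead,
+	// which likewise avoids unwrapping it to the concrete Point.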
+ var s = struct { + X interface { + Dist(int) int + } + }{p} + pv := ValueOf(s).Field(0) + v = pv.Method(0) + if tt := v.Type(); tt != tfunc { + t.Errorf("Interface Method Type is %s; want %s", tt, tfunc) + } + i = ValueOf(v.Interface()).Call([]Value{ValueOf(16)})[0].Int() + if i != 400 { + t.Errorf("Interface Method returned %d; want 400", i) + } + v = pv.MethodByName("Dist") + if tt := v.Type(); tt != tfunc { + t.Errorf("Interface MethodByName Type is %s; want %s", tt, tfunc) + } + i = ValueOf(v.Interface()).Call([]Value{ValueOf(17)})[0].Int() + if i != 425 { + t.Errorf("Interface MethodByName returned %d; want 425", i) + } + + // For issue #33628: method args are not stored at the right offset + // on amd64p32. + m64 := ValueOf(&p).MethodByName("Int64Method").Interface().(func(int64) int64) + if x := m64(123); x != 123 { + t.Errorf("Int64Method returned %d; want 123", x) + } + m32 := ValueOf(&p).MethodByName("Int32Method").Interface().(func(int32) int32) + if x := m32(456); x != 456 { + t.Errorf("Int32Method returned %d; want 456", x) + } +} + +func TestVariadicMethodValue(t *testing.T) { + p := Point{3, 4} + points := []Point{{20, 21}, {22, 23}, {24, 25}} + want := int64(p.TotalDist(points[0], points[1], points[2])) + + // Variadic method of type. + tfunc := TypeOf((func(Point, ...Point) int)(nil)) + if tt := TypeOf(p).Method(4).Type; tt != tfunc { + t.Errorf("Variadic Method Type from TypeOf is %s; want %s", tt, tfunc) + } + + // Curried method of value. + tfunc = TypeOf((func(...Point) int)(nil)) + v := ValueOf(p).Method(4) + if tt := v.Type(); tt != tfunc { + t.Errorf("Variadic Method Type is %s; want %s", tt, tfunc) + } + i := ValueOf(v.Interface()).Call([]Value{ValueOf(points[0]), ValueOf(points[1]), ValueOf(points[2])})[0].Int() + if i != want { + t.Errorf("Variadic Method returned %d; want %d", i, want) + } + i = ValueOf(v.Interface()).CallSlice([]Value{ValueOf(points)})[0].Int() + if i != want { + t.Errorf("Variadic Method CallSlice returned %d; want %d", i, want) + } + + f := v.Interface().(func(...Point) int) + i = int64(f(points[0], points[1], points[2])) + if i != want { + t.Errorf("Variadic Method Interface returned %d; want %d", i, want) + } + i = int64(f(points...)) + if i != want { + t.Errorf("Variadic Method Interface Slice returned %d; want %d", i, want) + } +} + +type DirectIfaceT struct { + p *int +} + +func (d DirectIfaceT) M() int { return *d.p } + +func TestDirectIfaceMethod(t *testing.T) { + x := 42 + v := DirectIfaceT{&x} + typ := TypeOf(v) + m, ok := typ.MethodByName("M") + if !ok { + t.Fatalf("cannot find method M") + } + in := []Value{ValueOf(v)} + out := m.Func.Call(in) + if got := out[0].Int(); got != 42 { + t.Errorf("Call with value receiver got %d, want 42", got) + } + + pv := &v + typ = TypeOf(pv) + m, ok = typ.MethodByName("M") + if !ok { + t.Fatalf("cannot find method M") + } + in = []Value{ValueOf(pv)} + out = m.Func.Call(in) + if got := out[0].Int(); got != 42 { + t.Errorf("Call with pointer receiver got %d, want 42", got) + } +} + +// Reflect version of $GOROOT/test/method5.go + +// Concrete types implementing M method. +// Smaller than a word, word-sized, larger than a word. +// Value and pointer receivers. 
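+// Tsmallv/Tsmallp are smaller than a word, Twordv/Twordp exactly word-sized,
+// and Tbigv/Tbigp larger than a word, exercising each receiver layout.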
+ +type Tinter interface { + M(int, byte) (byte, int) +} + +type Tsmallv byte + +func (v Tsmallv) M(x int, b byte) (byte, int) { return b, x + int(v) } + +type Tsmallp byte + +func (p *Tsmallp) M(x int, b byte) (byte, int) { return b, x + int(*p) } + +type Twordv uintptr + +func (v Twordv) M(x int, b byte) (byte, int) { return b, x + int(v) } + +type Twordp uintptr + +func (p *Twordp) M(x int, b byte) (byte, int) { return b, x + int(*p) } + +type Tbigv [2]uintptr + +func (v Tbigv) M(x int, b byte) (byte, int) { return b, x + int(v[0]) + int(v[1]) } + +type Tbigp [2]uintptr + +func (p *Tbigp) M(x int, b byte) (byte, int) { return b, x + int(p[0]) + int(p[1]) } + +type tinter interface { + m(int, byte) (byte, int) +} + +// Embedding via pointer. + +type Tm1 struct { + Tm2 +} + +type Tm2 struct { + *Tm3 +} + +type Tm3 struct { + *Tm4 +} + +type Tm4 struct { +} + +func (t4 Tm4) M(x int, b byte) (byte, int) { return b, x + 40 } + +func TestMethod5(t *testing.T) { + CheckF := func(name string, f func(int, byte) (byte, int), inc int) { + b, x := f(1000, 99) + if b != 99 || x != 1000+inc { + t.Errorf("%s(1000, 99) = %v, %v, want 99, %v", name, b, x, 1000+inc) + } + } + + CheckV := func(name string, i Value, inc int) { + bx := i.Method(0).Call([]Value{ValueOf(1000), ValueOf(byte(99))}) + b := bx[0].Interface() + x := bx[1].Interface() + if b != byte(99) || x != 1000+inc { + t.Errorf("direct %s.M(1000, 99) = %v, %v, want 99, %v", name, b, x, 1000+inc) + } + + CheckF(name+".M", i.Method(0).Interface().(func(int, byte) (byte, int)), inc) + } + + var TinterType = TypeOf(new(Tinter)).Elem() + + CheckI := func(name string, i any, inc int) { + v := ValueOf(i) + CheckV(name, v, inc) + CheckV("(i="+name+")", v.Convert(TinterType), inc) + } + + sv := Tsmallv(1) + CheckI("sv", sv, 1) + CheckI("&sv", &sv, 1) + + sp := Tsmallp(2) + CheckI("&sp", &sp, 2) + + wv := Twordv(3) + CheckI("wv", wv, 3) + CheckI("&wv", &wv, 3) + + wp := Twordp(4) + CheckI("&wp", &wp, 4) + + bv := Tbigv([2]uintptr{5, 6}) + CheckI("bv", bv, 11) + CheckI("&bv", &bv, 11) + + bp := Tbigp([2]uintptr{7, 8}) + CheckI("&bp", &bp, 15) + + t4 := Tm4{} + t3 := Tm3{&t4} + t2 := Tm2{&t3} + t1 := Tm1{t2} + CheckI("t4", t4, 40) + CheckI("&t4", &t4, 40) + CheckI("t3", t3, 40) + CheckI("&t3", &t3, 40) + CheckI("t2", t2, 40) + CheckI("&t2", &t2, 40) + CheckI("t1", t1, 40) + CheckI("&t1", &t1, 40) + + var tnil Tinter + vnil := ValueOf(&tnil).Elem() + shouldPanic("Method", func() { vnil.Method(0) }) +} + +func TestInterfaceSet(t *testing.T) { + p := &Point{3, 4} + + var s struct { + I any + P interface { + Dist(int) int + } + } + sv := ValueOf(&s).Elem() + sv.Field(0).Set(ValueOf(p)) + if q := s.I.(*Point); q != p { + t.Errorf("i: have %p want %p", q, p) + } + + pv := sv.Field(1) + pv.Set(ValueOf(p)) + if q := s.P.(*Point); q != p { + t.Errorf("i: have %p want %p", q, p) + } + + i := pv.Method(0).Call([]Value{ValueOf(10)})[0].Int() + if i != 250 { + t.Errorf("Interface Method returned %d; want 250", i) + } +} + +type T1 struct { + a string + int +} + +func TestAnonymousFields(t *testing.T) { + var field StructField + var ok bool + var t1 T1 + type1 := TypeOf(t1) + if field, ok = type1.FieldByName("int"); !ok { + t.Fatal("no field 'int'") + } + if field.Index[0] != 1 { + t.Error("field index should be 1; is", field.Index) + } +} + +type FTest struct { + s any + name string + index []int + value int +} + +type D1 struct { + d int +} +type D2 struct { + d int +} + +type S0 struct { + A, B, C int + D1 + D2 +} + +type S1 struct { + B int + S0 +} + +type S2 
struct { + A int + *S1 +} + +type S1x struct { + S1 +} + +type S1y struct { + S1 +} + +type S3 struct { + S1x + S2 + D, E int + *S1y +} + +type S4 struct { + *S4 + A int +} + +// The X in S6 and S7 annihilate, but they also block the X in S8.S9. +type S5 struct { + S6 + S7 + S8 +} + +type S6 struct { + X int +} + +type S7 S6 + +type S8 struct { + S9 +} + +type S9 struct { + X int + Y int +} + +// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9. +type S10 struct { + S11 + S12 + S13 +} + +type S11 struct { + S6 +} + +type S12 struct { + S6 +} + +type S13 struct { + S8 +} + +// The X in S15.S11.S1 and S16.S11.S1 annihilate. +type S14 struct { + S15 + S16 +} + +type S15 struct { + S11 +} + +type S16 struct { + S11 +} + +var fieldTests = []FTest{ + {struct{}{}, "", nil, 0}, + {struct{}{}, "Foo", nil, 0}, + {S0{A: 'a'}, "A", []int{0}, 'a'}, + {S0{}, "D", nil, 0}, + {S1{S0: S0{A: 'a'}}, "A", []int{1, 0}, 'a'}, + {S1{B: 'b'}, "B", []int{0}, 'b'}, + {S1{}, "S0", []int{1}, 0}, + {S1{S0: S0{C: 'c'}}, "C", []int{1, 2}, 'c'}, + {S2{A: 'a'}, "A", []int{0}, 'a'}, + {S2{}, "S1", []int{1}, 0}, + {S2{S1: &S1{B: 'b'}}, "B", []int{1, 0}, 'b'}, + {S2{S1: &S1{S0: S0{C: 'c'}}}, "C", []int{1, 1, 2}, 'c'}, + {S2{}, "D", nil, 0}, + {S3{}, "S1", nil, 0}, + {S3{S2: S2{A: 'a'}}, "A", []int{1, 0}, 'a'}, + {S3{}, "B", nil, 0}, + {S3{D: 'd'}, "D", []int{2}, 0}, + {S3{E: 'e'}, "E", []int{3}, 'e'}, + {S4{A: 'a'}, "A", []int{1}, 'a'}, + {S4{}, "B", nil, 0}, + {S5{}, "X", nil, 0}, + {S5{}, "Y", []int{2, 0, 1}, 0}, + {S10{}, "X", nil, 0}, + {S10{}, "Y", []int{2, 0, 0, 1}, 0}, + {S14{}, "X", nil, 0}, +} + +func TestFieldByIndex(t *testing.T) { + for _, test := range fieldTests { + s := TypeOf(test.s) + f := s.FieldByIndex(test.index) + if f.Name != "" { + if test.index != nil { + if f.Name != test.name { + t.Errorf("%s.%s found; want %s", s.Name(), f.Name, test.name) + } + } else { + t.Errorf("%s.%s found", s.Name(), f.Name) + } + } else if len(test.index) > 0 { + t.Errorf("%s.%s not found", s.Name(), test.name) + } + + if test.value != 0 { + v := ValueOf(test.s).FieldByIndex(test.index) + if v.IsValid() { + if x, ok := v.Interface().(int); ok { + if x != test.value { + t.Errorf("%s%v is %d; want %d", s.Name(), test.index, x, test.value) + } + } else { + t.Errorf("%s%v value not an int", s.Name(), test.index) + } + } else { + t.Errorf("%s%v value not found", s.Name(), test.index) + } + } + } +} + +func TestFieldByName(t *testing.T) { + for _, test := range fieldTests { + s := TypeOf(test.s) + f, found := s.FieldByName(test.name) + if found { + if test.index != nil { + // Verify field depth and index. 
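+				// f.Index records one step per level of embedding, so both its
+				// length (the depth) and each element must match.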
+ if len(f.Index) != len(test.index) { + t.Errorf("%s.%s depth %d; want %d: %v vs %v", s.Name(), test.name, len(f.Index), len(test.index), f.Index, test.index) + } else { + for i, x := range f.Index { + if x != test.index[i] { + t.Errorf("%s.%s.Index[%d] is %d; want %d", s.Name(), test.name, i, x, test.index[i]) + } + } + } + } else { + t.Errorf("%s.%s found", s.Name(), f.Name) + } + } else if len(test.index) > 0 { + t.Errorf("%s.%s not found", s.Name(), test.name) + } + + if test.value != 0 { + v := ValueOf(test.s).FieldByName(test.name) + if v.IsValid() { + if x, ok := v.Interface().(int); ok { + if x != test.value { + t.Errorf("%s.%s is %d; want %d", s.Name(), test.name, x, test.value) + } + } else { + t.Errorf("%s.%s value not an int", s.Name(), test.name) + } + } else { + t.Errorf("%s.%s value not found", s.Name(), test.name) + } + } + } +} + +func TestImportPath(t *testing.T) { + tests := []struct { + t Type + path string + }{ + {TypeOf(&base64.Encoding{}).Elem(), "encoding/base64"}, + {TypeOf(int(0)), ""}, + {TypeOf(int8(0)), ""}, + {TypeOf(int16(0)), ""}, + {TypeOf(int32(0)), ""}, + {TypeOf(int64(0)), ""}, + {TypeOf(uint(0)), ""}, + {TypeOf(uint8(0)), ""}, + {TypeOf(uint16(0)), ""}, + {TypeOf(uint32(0)), ""}, + {TypeOf(uint64(0)), ""}, + {TypeOf(uintptr(0)), ""}, + {TypeOf(float32(0)), ""}, + {TypeOf(float64(0)), ""}, + {TypeOf(complex64(0)), ""}, + {TypeOf(complex128(0)), ""}, + {TypeOf(byte(0)), ""}, + {TypeOf(rune(0)), ""}, + {TypeOf([]byte(nil)), ""}, + {TypeOf([]rune(nil)), ""}, + {TypeOf(string("")), ""}, + {TypeOf((*any)(nil)).Elem(), ""}, + {TypeOf((*byte)(nil)), ""}, + {TypeOf((*rune)(nil)), ""}, + {TypeOf((*int64)(nil)), ""}, + {TypeOf(map[string]int{}), ""}, + {TypeOf((*error)(nil)).Elem(), ""}, + {TypeOf((*Point)(nil)), ""}, + {TypeOf((*Point)(nil)).Elem(), "reflect_test"}, + } + for _, test := range tests { + if path := test.t.PkgPath(); path != test.path { + t.Errorf("%v.PkgPath() = %q, want %q", test.t, path, test.path) + } + } +} + +func TestFieldPkgPath(t *testing.T) { + type x int + typ := TypeOf(struct { + Exported string + unexported string + OtherPkgFields + int // issue 21702 + *x // issue 21122 + }{}) + + type pkgpathTest struct { + index []int + pkgPath string + embedded bool + exported bool + } + + checkPkgPath := func(name string, s []pkgpathTest) { + for _, test := range s { + f := typ.FieldByIndex(test.index) + if got, want := f.PkgPath, test.pkgPath; got != want { + t.Errorf("%s: Field(%d).PkgPath = %q, want %q", name, test.index, got, want) + } + if got, want := f.Anonymous, test.embedded; got != want { + t.Errorf("%s: Field(%d).Anonymous = %v, want %v", name, test.index, got, want) + } + if got, want := f.IsExported(), test.exported; got != want { + t.Errorf("%s: Field(%d).IsExported = %v, want %v", name, test.index, got, want) + } + } + } + + checkPkgPath("testStruct", []pkgpathTest{ + {[]int{0}, "", false, true}, // Exported + {[]int{1}, "reflect_test", false, false}, // unexported + {[]int{2}, "", true, true}, // OtherPkgFields + {[]int{2, 0}, "", false, true}, // OtherExported + {[]int{2, 1}, "reflect", false, false}, // otherUnexported + {[]int{3}, "reflect_test", true, false}, // int + {[]int{4}, "reflect_test", true, false}, // *x + }) + + type localOtherPkgFields OtherPkgFields + typ = TypeOf(localOtherPkgFields{}) + checkPkgPath("localOtherPkgFields", []pkgpathTest{ + {[]int{0}, "", false, true}, // OtherExported + {[]int{1}, "reflect", false, false}, // otherUnexported + }) +} + +func TestMethodPkgPath(t *testing.T) { + type I interface { + 
x() + X() + } + typ := TypeOf((*interface { + I + y() + Y() + })(nil)).Elem() + + tests := []struct { + name string + pkgPath string + exported bool + }{ + {"X", "", true}, + {"Y", "", true}, + {"x", "reflect_test", false}, + {"y", "reflect_test", false}, + } + + for _, test := range tests { + m, _ := typ.MethodByName(test.name) + if got, want := m.PkgPath, test.pkgPath; got != want { + t.Errorf("MethodByName(%q).PkgPath = %q, want %q", test.name, got, want) + } + if got, want := m.IsExported(), test.exported; got != want { + t.Errorf("MethodByName(%q).IsExported = %v, want %v", test.name, got, want) + } + } +} + +func TestVariadicType(t *testing.T) { + // Test example from Type documentation. + var f func(x int, y ...float64) + typ := TypeOf(f) + if typ.NumIn() == 2 && typ.In(0) == TypeOf(int(0)) { + sl := typ.In(1) + if sl.Kind() == Slice { + if sl.Elem() == TypeOf(0.0) { + // ok + return + } + } + } + + // Failed + t.Errorf("want NumIn() = 2, In(0) = int, In(1) = []float64") + s := fmt.Sprintf("have NumIn() = %d", typ.NumIn()) + for i := 0; i < typ.NumIn(); i++ { + s += fmt.Sprintf(", In(%d) = %s", i, typ.In(i)) + } + t.Error(s) +} + +type inner struct { + x int +} + +type outer struct { + y int + inner +} + +func (*inner) M() {} +func (*outer) M() {} + +func TestNestedMethods(t *testing.T) { + typ := TypeOf((*outer)(nil)) + if typ.NumMethod() != 1 || typ.Method(0).Func.UnsafePointer() != ValueOf((*outer).M).UnsafePointer() { + t.Errorf("Wrong method table for outer: (M=%p)", (*outer).M) + for i := 0; i < typ.NumMethod(); i++ { + m := typ.Method(i) + t.Errorf("\t%d: %s %p\n", i, m.Name, m.Func.UnsafePointer()) + } + } +} + +type unexp struct{} + +func (*unexp) f() (int32, int8) { return 7, 7 } +func (*unexp) g() (int64, int8) { return 8, 8 } + +type unexpI interface { + f() (int32, int8) +} + +func TestUnexportedMethods(t *testing.T) { + typ := TypeOf(new(unexp)) + if got := typ.NumMethod(); got != 0 { + t.Errorf("NumMethod=%d, want 0 satisfied methods", got) + } + + typ = TypeOf((*unexpI)(nil)) + if got := typ.Elem().NumMethod(); got != 1 { + t.Errorf("NumMethod=%d, want 1 satisfied methods", got) + } +} + +type InnerInt struct { + X int +} + +type OuterInt struct { + Y int + InnerInt +} + +func (i *InnerInt) M() int { + return i.X +} + +func TestEmbeddedMethods(t *testing.T) { + typ := TypeOf((*OuterInt)(nil)) + if typ.NumMethod() != 1 || typ.Method(0).Func.UnsafePointer() != ValueOf((*OuterInt).M).UnsafePointer() { + t.Errorf("Wrong method table for OuterInt: (m=%p)", (*OuterInt).M) + for i := 0; i < typ.NumMethod(); i++ { + m := typ.Method(i) + t.Errorf("\t%d: %s %p\n", i, m.Name, m.Func.UnsafePointer()) + } + } + + i := &InnerInt{3} + if v := ValueOf(i).Method(0).Call(nil)[0].Int(); v != 3 { + t.Errorf("i.M() = %d, want 3", v) + } + + o := &OuterInt{1, InnerInt{2}} + if v := ValueOf(o).Method(0).Call(nil)[0].Int(); v != 2 { + t.Errorf("i.M() = %d, want 2", v) + } + + f := (*OuterInt).M + if v := f(o); v != 2 { + t.Errorf("f(o) = %d, want 2", v) + } +} + +type FuncDDD func(...any) error + +func (f FuncDDD) M() {} + +func TestNumMethodOnDDD(t *testing.T) { + rv := ValueOf((FuncDDD)(nil)) + if n := rv.NumMethod(); n != 1 { + t.Fatalf("NumMethod()=%d, want 1", n) + } +} + +func TestPtrTo(t *testing.T) { + // This block of code means that the ptrToThis field of the + // reflect data for *unsafe.Pointer is non zero, see + // https://golang.org/issue/19003 + var x unsafe.Pointer + var y = &x + var z = &y + + var i int + + typ := TypeOf(z) + for i = 0; i < 100; i++ { + typ = PointerTo(typ) 
+ } + for i = 0; i < 100; i++ { + typ = typ.Elem() + } + if typ != TypeOf(z) { + t.Errorf("after 100 PointerTo and Elem, have %s, want %s", typ, TypeOf(z)) + } +} + +func TestPtrToGC(t *testing.T) { + type T *uintptr + tt := TypeOf(T(nil)) + pt := PointerTo(tt) + const n = 100 + var x []any + for i := 0; i < n; i++ { + v := New(pt) + p := new(*uintptr) + *p = new(uintptr) + **p = uintptr(i) + v.Elem().Set(ValueOf(p).Convert(pt)) + x = append(x, v.Interface()) + } + runtime.GC() + + for i, xi := range x { + k := ValueOf(xi).Elem().Elem().Elem().Interface().(uintptr) + if k != uintptr(i) { + t.Errorf("lost x[%d] = %d, want %d", i, k, i) + } + } +} + +func TestAddr(t *testing.T) { + var p struct { + X, Y int + } + + v := ValueOf(&p) + v = v.Elem() + v = v.Addr() + v = v.Elem() + v = v.Field(0) + v.SetInt(2) + if p.X != 2 { + t.Errorf("Addr.Elem.Set failed to set value") + } + + // Again but take address of the ValueOf value. + // Exercises generation of PtrTypes not present in the binary. + q := &p + v = ValueOf(&q).Elem() + v = v.Addr() + v = v.Elem() + v = v.Elem() + v = v.Addr() + v = v.Elem() + v = v.Field(0) + v.SetInt(3) + if p.X != 3 { + t.Errorf("Addr.Elem.Set failed to set value") + } + + // Starting without pointer we should get changed value + // in interface. + qq := p + v = ValueOf(&qq).Elem() + v0 := v + v = v.Addr() + v = v.Elem() + v = v.Field(0) + v.SetInt(4) + if p.X != 3 { // should be unchanged from last time + t.Errorf("somehow value Set changed original p") + } + p = v0.Interface().(struct { + X, Y int + }) + if p.X != 4 { + t.Errorf("Addr.Elem.Set valued to set value in top value") + } + + // Verify that taking the address of a type gives us a pointer + // which we can convert back using the usual interface + // notation. + var s struct { + B *bool + } + ps := ValueOf(&s).Elem().Field(0).Addr().Interface() + *(ps.(**bool)) = new(bool) + if s.B == nil { + t.Errorf("Addr.Interface direct assignment failed") + } +} + +func noAlloc(t *testing.T, n int, f func(int)) { + if testing.Short() { + t.Skip("skipping malloc count in short mode") + } + if runtime.GOMAXPROCS(0) > 1 { + t.Skip("skipping; GOMAXPROCS>1") + } + i := -1 + allocs := testing.AllocsPerRun(n, func() { + f(i) + i++ + }) + if allocs > 0 { + t.Errorf("%d iterations: got %v mallocs, want 0", n, allocs) + } +} + +func TestAllocations(t *testing.T) { + noAlloc(t, 100, func(j int) { + var i any + var v Value + + i = 42 + j + v = ValueOf(i) + if int(v.Int()) != 42+j { + panic("wrong int") + } + }) + noAlloc(t, 100, func(j int) { + var i any + var v Value + i = [3]int{j, j, j} + v = ValueOf(i) + if v.Len() != 3 { + panic("wrong length") + } + }) + noAlloc(t, 100, func(j int) { + var i any + var v Value + i = func(j int) int { return j } + v = ValueOf(i) + if v.Interface().(func(int) int)(j) != j { + panic("wrong result") + } + }) +} + +func TestSmallNegativeInt(t *testing.T) { + i := int16(-1) + v := ValueOf(i) + if v.Int() != -1 { + t.Errorf("int16(-1).Int() returned %v", v.Int()) + } +} + +func TestIndex(t *testing.T) { + xs := []byte{1, 2, 3, 4, 5, 6, 7, 8} + v := ValueOf(xs).Index(3).Interface().(byte) + if v != xs[3] { + t.Errorf("xs.Index(3) = %v; expected %v", v, xs[3]) + } + xa := [8]byte{10, 20, 30, 40, 50, 60, 70, 80} + v = ValueOf(xa).Index(2).Interface().(byte) + if v != xa[2] { + t.Errorf("xa.Index(2) = %v; expected %v", v, xa[2]) + } + s := "0123456789" + v = ValueOf(s).Index(3).Interface().(byte) + if v != s[3] { + t.Errorf("s.Index(3) = %v; expected %v", v, s[3]) + } +} + +func TestSlice(t *testing.T) { 
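+	// Slice(3, 5) shares the backing array: len is 5-3 = 2 and cap runs to the
+	// end of the original slice, 8-3 = 5.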
+ xs := []int{1, 2, 3, 4, 5, 6, 7, 8} + v := ValueOf(xs).Slice(3, 5).Interface().([]int) + if len(v) != 2 { + t.Errorf("len(xs.Slice(3, 5)) = %d", len(v)) + } + if cap(v) != 5 { + t.Errorf("cap(xs.Slice(3, 5)) = %d", cap(v)) + } + if !DeepEqual(v[0:5], xs[3:]) { + t.Errorf("xs.Slice(3, 5)[0:5] = %v", v[0:5]) + } + xa := [8]int{10, 20, 30, 40, 50, 60, 70, 80} + v = ValueOf(&xa).Elem().Slice(2, 5).Interface().([]int) + if len(v) != 3 { + t.Errorf("len(xa.Slice(2, 5)) = %d", len(v)) + } + if cap(v) != 6 { + t.Errorf("cap(xa.Slice(2, 5)) = %d", cap(v)) + } + if !DeepEqual(v[0:6], xa[2:]) { + t.Errorf("xs.Slice(2, 5)[0:6] = %v", v[0:6]) + } + s := "0123456789" + vs := ValueOf(s).Slice(3, 5).Interface().(string) + if vs != s[3:5] { + t.Errorf("s.Slice(3, 5) = %q; expected %q", vs, s[3:5]) + } + + rv := ValueOf(&xs).Elem() + rv = rv.Slice(3, 4) + ptr2 := rv.UnsafePointer() + rv = rv.Slice(5, 5) + ptr3 := rv.UnsafePointer() + if ptr3 != ptr2 { + t.Errorf("xs.Slice(3,4).Slice3(5,5).UnsafePointer() = %p, want %p", ptr3, ptr2) + } +} + +func TestSlice3(t *testing.T) { + xs := []int{1, 2, 3, 4, 5, 6, 7, 8} + v := ValueOf(xs).Slice3(3, 5, 7).Interface().([]int) + if len(v) != 2 { + t.Errorf("len(xs.Slice3(3, 5, 7)) = %d", len(v)) + } + if cap(v) != 4 { + t.Errorf("cap(xs.Slice3(3, 5, 7)) = %d", cap(v)) + } + if !DeepEqual(v[0:4], xs[3:7:7]) { + t.Errorf("xs.Slice3(3, 5, 7)[0:4] = %v", v[0:4]) + } + rv := ValueOf(&xs).Elem() + shouldPanic("Slice3", func() { rv.Slice3(1, 2, 1) }) + shouldPanic("Slice3", func() { rv.Slice3(1, 1, 11) }) + shouldPanic("Slice3", func() { rv.Slice3(2, 2, 1) }) + + xa := [8]int{10, 20, 30, 40, 50, 60, 70, 80} + v = ValueOf(&xa).Elem().Slice3(2, 5, 6).Interface().([]int) + if len(v) != 3 { + t.Errorf("len(xa.Slice(2, 5, 6)) = %d", len(v)) + } + if cap(v) != 4 { + t.Errorf("cap(xa.Slice(2, 5, 6)) = %d", cap(v)) + } + if !DeepEqual(v[0:4], xa[2:6:6]) { + t.Errorf("xs.Slice(2, 5, 6)[0:4] = %v", v[0:4]) + } + rv = ValueOf(&xa).Elem() + shouldPanic("Slice3", func() { rv.Slice3(1, 2, 1) }) + shouldPanic("Slice3", func() { rv.Slice3(1, 1, 11) }) + shouldPanic("Slice3", func() { rv.Slice3(2, 2, 1) }) + + s := "hello world" + rv = ValueOf(&s).Elem() + shouldPanic("Slice3", func() { rv.Slice3(1, 2, 3) }) + + rv = ValueOf(&xs).Elem() + rv = rv.Slice3(3, 5, 7) + ptr2 := rv.UnsafePointer() + rv = rv.Slice3(4, 4, 4) + ptr3 := rv.UnsafePointer() + if ptr3 != ptr2 { + t.Errorf("xs.Slice3(3,5,7).Slice3(4,4,4).UnsafePointer() = %p, want %p", ptr3, ptr2) + } +} + +func TestSetLenCap(t *testing.T) { + xs := []int{1, 2, 3, 4, 5, 6, 7, 8} + xa := [8]int{10, 20, 30, 40, 50, 60, 70, 80} + + vs := ValueOf(&xs).Elem() + shouldPanic("SetLen", func() { vs.SetLen(10) }) + shouldPanic("SetCap", func() { vs.SetCap(10) }) + shouldPanic("SetLen", func() { vs.SetLen(-1) }) + shouldPanic("SetCap", func() { vs.SetCap(-1) }) + shouldPanic("SetCap", func() { vs.SetCap(6) }) // smaller than len + vs.SetLen(5) + if len(xs) != 5 || cap(xs) != 8 { + t.Errorf("after SetLen(5), len, cap = %d, %d, want 5, 8", len(xs), cap(xs)) + } + vs.SetCap(6) + if len(xs) != 5 || cap(xs) != 6 { + t.Errorf("after SetCap(6), len, cap = %d, %d, want 5, 6", len(xs), cap(xs)) + } + vs.SetCap(5) + if len(xs) != 5 || cap(xs) != 5 { + t.Errorf("after SetCap(5), len, cap = %d, %d, want 5, 5", len(xs), cap(xs)) + } + shouldPanic("SetCap", func() { vs.SetCap(4) }) // smaller than len + shouldPanic("SetLen", func() { vs.SetLen(6) }) // bigger than cap + + va := ValueOf(&xa).Elem() + shouldPanic("SetLen", func() { va.SetLen(8) }) + 
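+	// va is an array, not a slice, so SetCap panics too, even though 8 matches
+	// the array's fixed size.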
shouldPanic("SetCap", func() { va.SetCap(8) }) +} + +func TestVariadic(t *testing.T) { + var b strings.Builder + V := ValueOf + + b.Reset() + V(fmt.Fprintf).Call([]Value{V(&b), V("%s, %d world"), V("hello"), V(42)}) + if b.String() != "hello, 42 world" { + t.Errorf("after Fprintf Call: %q != %q", b.String(), "hello 42 world") + } + + b.Reset() + V(fmt.Fprintf).CallSlice([]Value{V(&b), V("%s, %d world"), V([]any{"hello", 42})}) + if b.String() != "hello, 42 world" { + t.Errorf("after Fprintf CallSlice: %q != %q", b.String(), "hello 42 world") + } +} + +func TestFuncArg(t *testing.T) { + f1 := func(i int, f func(int) int) int { return f(i) } + f2 := func(i int) int { return i + 1 } + r := ValueOf(f1).Call([]Value{ValueOf(100), ValueOf(f2)}) + if r[0].Int() != 101 { + t.Errorf("function returned %d, want 101", r[0].Int()) + } +} + +func TestStructArg(t *testing.T) { + type padded struct { + B string + C int32 + } + var ( + gotA padded + gotB uint32 + wantA = padded{"3", 4} + wantB = uint32(5) + ) + f := func(a padded, b uint32) { + gotA, gotB = a, b + } + ValueOf(f).Call([]Value{ValueOf(wantA), ValueOf(wantB)}) + if gotA != wantA || gotB != wantB { + t.Errorf("function called with (%v, %v), want (%v, %v)", gotA, gotB, wantA, wantB) + } +} + +var tagGetTests = []struct { + Tag StructTag + Key string + Value string +}{ + {`protobuf:"PB(1,2)"`, `protobuf`, `PB(1,2)`}, + {`protobuf:"PB(1,2)"`, `foo`, ``}, + {`protobuf:"PB(1,2)"`, `rotobuf`, ``}, + {`protobuf:"PB(1,2)" json:"name"`, `json`, `name`}, + {`protobuf:"PB(1,2)" json:"name"`, `protobuf`, `PB(1,2)`}, + {`k0:"values contain spaces" k1:"and\ttabs"`, "k0", "values contain spaces"}, + {`k0:"values contain spaces" k1:"and\ttabs"`, "k1", "and\ttabs"}, +} + +func TestTagGet(t *testing.T) { + for _, tt := range tagGetTests { + if v := tt.Tag.Get(tt.Key); v != tt.Value { + t.Errorf("StructTag(%#q).Get(%#q) = %#q, want %#q", tt.Tag, tt.Key, v, tt.Value) + } + } +} + +func TestBytes(t *testing.T) { + shouldPanic("on int Value", func() { ValueOf(0).Bytes() }) + shouldPanic("of non-byte slice", func() { ValueOf([]string{}).Bytes() }) + + type S []byte + x := S{1, 2, 3, 4} + y := ValueOf(x).Bytes() + if !bytes.Equal(x, y) { + t.Fatalf("ValueOf(%v).Bytes() = %v", x, y) + } + if &x[0] != &y[0] { + t.Errorf("ValueOf(%p).Bytes() = %p", &x[0], &y[0]) + } + + type A [4]byte + a := A{1, 2, 3, 4} + shouldPanic("unaddressable", func() { ValueOf(a).Bytes() }) + shouldPanic("on ptr Value", func() { ValueOf(&a).Bytes() }) + b := ValueOf(&a).Elem().Bytes() + if !bytes.Equal(a[:], y) { + t.Fatalf("ValueOf(%v).Bytes() = %v", a, b) + } + if &a[0] != &b[0] { + t.Errorf("ValueOf(%p).Bytes() = %p", &a[0], &b[0]) + } + + // Per issue #24746, it was decided that Bytes can be called on byte slices + // that normally cannot be converted from per Go language semantics. 
+ type B byte + type SB []B + type AB [4]B + ValueOf([]B{1, 2, 3, 4}).Bytes() // should not panic + ValueOf(new([4]B)).Elem().Bytes() // should not panic + ValueOf(SB{1, 2, 3, 4}).Bytes() // should not panic + ValueOf(new(AB)).Elem().Bytes() // should not panic +} + +func TestSetBytes(t *testing.T) { + type B []byte + var x B + y := []byte{1, 2, 3, 4} + ValueOf(&x).Elem().SetBytes(y) + if !bytes.Equal(x, y) { + t.Fatalf("ValueOf(%v).Bytes() = %v", x, y) + } + if &x[0] != &y[0] { + t.Errorf("ValueOf(%p).Bytes() = %p", &x[0], &y[0]) + } +} + +type Private struct { + x int + y **int + Z int +} + +func (p *Private) m() { +} + +type private struct { + Z int + z int + S string + A [1]Private + T []Private +} + +func (p *private) P() { +} + +type Public struct { + X int + Y **int + private +} + +func (p *Public) M() { +} + +func TestUnexported(t *testing.T) { + var pub Public + pub.S = "S" + pub.T = pub.A[:] + v := ValueOf(&pub) + isValid(v.Elem().Field(0)) + isValid(v.Elem().Field(1)) + isValid(v.Elem().Field(2)) + isValid(v.Elem().FieldByName("X")) + isValid(v.Elem().FieldByName("Y")) + isValid(v.Elem().FieldByName("Z")) + isValid(v.Type().Method(0).Func) + m, _ := v.Type().MethodByName("M") + isValid(m.Func) + m, _ = v.Type().MethodByName("P") + isValid(m.Func) + isNonNil(v.Elem().Field(0).Interface()) + isNonNil(v.Elem().Field(1).Interface()) + isNonNil(v.Elem().Field(2).Field(2).Index(0)) + isNonNil(v.Elem().FieldByName("X").Interface()) + isNonNil(v.Elem().FieldByName("Y").Interface()) + isNonNil(v.Elem().FieldByName("Z").Interface()) + isNonNil(v.Elem().FieldByName("S").Index(0).Interface()) + isNonNil(v.Type().Method(0).Func.Interface()) + m, _ = v.Type().MethodByName("P") + isNonNil(m.Func.Interface()) + + var priv Private + v = ValueOf(&priv) + isValid(v.Elem().Field(0)) + isValid(v.Elem().Field(1)) + isValid(v.Elem().FieldByName("x")) + isValid(v.Elem().FieldByName("y")) + shouldPanic("Interface", func() { v.Elem().Field(0).Interface() }) + shouldPanic("Interface", func() { v.Elem().Field(1).Interface() }) + shouldPanic("Interface", func() { v.Elem().FieldByName("x").Interface() }) + shouldPanic("Interface", func() { v.Elem().FieldByName("y").Interface() }) + shouldPanic("Method", func() { v.Type().Method(0) }) +} + +func TestSetPanic(t *testing.T) { + ok := func(f func()) { f() } + bad := func(f func()) { shouldPanic("Set", f) } + clear := func(v Value) { v.Set(Zero(v.Type())) } + + type t0 struct { + W int + } + + type t1 struct { + Y int + t0 + } + + type T2 struct { + Z int + namedT0 t0 + } + + type T struct { + X int + t1 + T2 + NamedT1 t1 + NamedT2 T2 + namedT1 t1 + namedT2 T2 + } + + // not addressable + v := ValueOf(T{}) + bad(func() { clear(v.Field(0)) }) // .X + bad(func() { clear(v.Field(1)) }) // .t1 + bad(func() { clear(v.Field(1).Field(0)) }) // .t1.Y + bad(func() { clear(v.Field(1).Field(1)) }) // .t1.t0 + bad(func() { clear(v.Field(1).Field(1).Field(0)) }) // .t1.t0.W + bad(func() { clear(v.Field(2)) }) // .T2 + bad(func() { clear(v.Field(2).Field(0)) }) // .T2.Z + bad(func() { clear(v.Field(2).Field(1)) }) // .T2.namedT0 + bad(func() { clear(v.Field(2).Field(1).Field(0)) }) // .T2.namedT0.W + bad(func() { clear(v.Field(3)) }) // .NamedT1 + bad(func() { clear(v.Field(3).Field(0)) }) // .NamedT1.Y + bad(func() { clear(v.Field(3).Field(1)) }) // .NamedT1.t0 + bad(func() { clear(v.Field(3).Field(1).Field(0)) }) // .NamedT1.t0.W + bad(func() { clear(v.Field(4)) }) // .NamedT2 + bad(func() { clear(v.Field(4).Field(0)) }) // .NamedT2.Z + bad(func() { 
clear(v.Field(4).Field(1)) }) // .NamedT2.namedT0 + bad(func() { clear(v.Field(4).Field(1).Field(0)) }) // .NamedT2.namedT0.W + bad(func() { clear(v.Field(5)) }) // .namedT1 + bad(func() { clear(v.Field(5).Field(0)) }) // .namedT1.Y + bad(func() { clear(v.Field(5).Field(1)) }) // .namedT1.t0 + bad(func() { clear(v.Field(5).Field(1).Field(0)) }) // .namedT1.t0.W + bad(func() { clear(v.Field(6)) }) // .namedT2 + bad(func() { clear(v.Field(6).Field(0)) }) // .namedT2.Z + bad(func() { clear(v.Field(6).Field(1)) }) // .namedT2.namedT0 + bad(func() { clear(v.Field(6).Field(1).Field(0)) }) // .namedT2.namedT0.W + + // addressable + v = ValueOf(&T{}).Elem() + ok(func() { clear(v.Field(0)) }) // .X + bad(func() { clear(v.Field(1)) }) // .t1 + ok(func() { clear(v.Field(1).Field(0)) }) // .t1.Y + bad(func() { clear(v.Field(1).Field(1)) }) // .t1.t0 + ok(func() { clear(v.Field(1).Field(1).Field(0)) }) // .t1.t0.W + ok(func() { clear(v.Field(2)) }) // .T2 + ok(func() { clear(v.Field(2).Field(0)) }) // .T2.Z + bad(func() { clear(v.Field(2).Field(1)) }) // .T2.namedT0 + bad(func() { clear(v.Field(2).Field(1).Field(0)) }) // .T2.namedT0.W + ok(func() { clear(v.Field(3)) }) // .NamedT1 + ok(func() { clear(v.Field(3).Field(0)) }) // .NamedT1.Y + bad(func() { clear(v.Field(3).Field(1)) }) // .NamedT1.t0 + ok(func() { clear(v.Field(3).Field(1).Field(0)) }) // .NamedT1.t0.W + ok(func() { clear(v.Field(4)) }) // .NamedT2 + ok(func() { clear(v.Field(4).Field(0)) }) // .NamedT2.Z + bad(func() { clear(v.Field(4).Field(1)) }) // .NamedT2.namedT0 + bad(func() { clear(v.Field(4).Field(1).Field(0)) }) // .NamedT2.namedT0.W + bad(func() { clear(v.Field(5)) }) // .namedT1 + bad(func() { clear(v.Field(5).Field(0)) }) // .namedT1.Y + bad(func() { clear(v.Field(5).Field(1)) }) // .namedT1.t0 + bad(func() { clear(v.Field(5).Field(1).Field(0)) }) // .namedT1.t0.W + bad(func() { clear(v.Field(6)) }) // .namedT2 + bad(func() { clear(v.Field(6).Field(0)) }) // .namedT2.Z + bad(func() { clear(v.Field(6).Field(1)) }) // .namedT2.namedT0 + bad(func() { clear(v.Field(6).Field(1).Field(0)) }) // .namedT2.namedT0.W +} + +type timp int + +func (t timp) W() {} +func (t timp) Y() {} +func (t timp) w() {} +func (t timp) y() {} + +func TestCallPanic(t *testing.T) { + type t0 interface { + W() + w() + } + type T1 interface { + Y() + y() + } + type T2 struct { + T1 + t0 + } + type T struct { + t0 // 0 + T1 // 1 + + NamedT0 t0 // 2 + NamedT1 T1 // 3 + NamedT2 T2 // 4 + + namedT0 t0 // 5 + namedT1 T1 // 6 + namedT2 T2 // 7 + } + ok := func(f func()) { f() } + badCall := func(f func()) { shouldPanic("Call", f) } + badMethod := func(f func()) { shouldPanic("Method", f) } + call := func(v Value) { v.Call(nil) } + + i := timp(0) + v := ValueOf(T{i, i, i, i, T2{i, i}, i, i, T2{i, i}}) + badCall(func() { call(v.Field(0).Method(0)) }) // .t0.W + badCall(func() { call(v.Field(0).Elem().Method(0)) }) // .t0.W + badCall(func() { call(v.Field(0).Method(1)) }) // .t0.w + badMethod(func() { call(v.Field(0).Elem().Method(2)) }) // .t0.w + ok(func() { call(v.Field(1).Method(0)) }) // .T1.Y + ok(func() { call(v.Field(1).Elem().Method(0)) }) // .T1.Y + badCall(func() { call(v.Field(1).Method(1)) }) // .T1.y + badMethod(func() { call(v.Field(1).Elem().Method(2)) }) // .T1.y + + ok(func() { call(v.Field(2).Method(0)) }) // .NamedT0.W + ok(func() { call(v.Field(2).Elem().Method(0)) }) // .NamedT0.W + badCall(func() { call(v.Field(2).Method(1)) }) // .NamedT0.w + badMethod(func() { call(v.Field(2).Elem().Method(2)) }) // .NamedT0.w + + ok(func() { 
call(v.Field(3).Method(0)) }) // .NamedT1.Y + ok(func() { call(v.Field(3).Elem().Method(0)) }) // .NamedT1.Y + badCall(func() { call(v.Field(3).Method(1)) }) // .NamedT1.y + badMethod(func() { call(v.Field(3).Elem().Method(3)) }) // .NamedT1.y + + ok(func() { call(v.Field(4).Field(0).Method(0)) }) // .NamedT2.T1.Y + ok(func() { call(v.Field(4).Field(0).Elem().Method(0)) }) // .NamedT2.T1.W + badCall(func() { call(v.Field(4).Field(1).Method(0)) }) // .NamedT2.t0.W + badCall(func() { call(v.Field(4).Field(1).Elem().Method(0)) }) // .NamedT2.t0.W + + badCall(func() { call(v.Field(5).Method(0)) }) // .namedT0.W + badCall(func() { call(v.Field(5).Elem().Method(0)) }) // .namedT0.W + badCall(func() { call(v.Field(5).Method(1)) }) // .namedT0.w + badMethod(func() { call(v.Field(5).Elem().Method(2)) }) // .namedT0.w + + badCall(func() { call(v.Field(6).Method(0)) }) // .namedT1.Y + badCall(func() { call(v.Field(6).Elem().Method(0)) }) // .namedT1.Y + badCall(func() { call(v.Field(6).Method(0)) }) // .namedT1.y + badCall(func() { call(v.Field(6).Elem().Method(0)) }) // .namedT1.y + + badCall(func() { call(v.Field(7).Field(0).Method(0)) }) // .namedT2.T1.Y + badCall(func() { call(v.Field(7).Field(0).Elem().Method(0)) }) // .namedT2.T1.W + badCall(func() { call(v.Field(7).Field(1).Method(0)) }) // .namedT2.t0.W + badCall(func() { call(v.Field(7).Field(1).Elem().Method(0)) }) // .namedT2.t0.W +} + +func TestValuePanic(t *testing.T) { + vo := ValueOf + shouldPanic("reflect.Value.Addr of unaddressable value", func() { vo(0).Addr() }) + shouldPanic("call of reflect.Value.Bool on float64 Value", func() { vo(0.0).Bool() }) + shouldPanic("call of reflect.Value.Bytes on string Value", func() { vo("").Bytes() }) + shouldPanic("call of reflect.Value.Call on bool Value", func() { vo(true).Call(nil) }) + shouldPanic("call of reflect.Value.CallSlice on int Value", func() { vo(0).CallSlice(nil) }) + shouldPanic("call of reflect.Value.Close on string Value", func() { vo("").Close() }) + shouldPanic("call of reflect.Value.Complex on float64 Value", func() { vo(0.0).Complex() }) + shouldPanic("call of reflect.Value.Elem on bool Value", func() { vo(false).Elem() }) + shouldPanic("call of reflect.Value.Field on int Value", func() { vo(0).Field(0) }) + shouldPanic("call of reflect.Value.Float on string Value", func() { vo("").Float() }) + shouldPanic("call of reflect.Value.Index on float64 Value", func() { vo(0.0).Index(0) }) + shouldPanic("call of reflect.Value.Int on bool Value", func() { vo(false).Int() }) + shouldPanic("call of reflect.Value.IsNil on int Value", func() { vo(0).IsNil() }) + shouldPanic("call of reflect.Value.Len on bool Value", func() { vo(false).Len() }) + shouldPanic("call of reflect.Value.MapIndex on float64 Value", func() { vo(0.0).MapIndex(vo(0.0)) }) + shouldPanic("call of reflect.Value.MapKeys on string Value", func() { vo("").MapKeys() }) + shouldPanic("call of reflect.Value.MapRange on int Value", func() { vo(0).MapRange() }) + shouldPanic("call of reflect.Value.Method on zero Value", func() { vo(nil).Method(0) }) + shouldPanic("call of reflect.Value.NumField on string Value", func() { vo("").NumField() }) + shouldPanic("call of reflect.Value.NumMethod on zero Value", func() { vo(nil).NumMethod() }) + shouldPanic("call of reflect.Value.OverflowComplex on float64 Value", func() { vo(float64(0)).OverflowComplex(0) }) + shouldPanic("call of reflect.Value.OverflowFloat on int64 Value", func() { vo(int64(0)).OverflowFloat(0) }) + shouldPanic("call of reflect.Value.OverflowInt on uint64 Value", 
func() { vo(uint64(0)).OverflowInt(0) }) + shouldPanic("call of reflect.Value.OverflowUint on complex64 Value", func() { vo(complex64(0)).OverflowUint(0) }) + shouldPanic("call of reflect.Value.Recv on string Value", func() { vo("").Recv() }) + shouldPanic("call of reflect.Value.Send on bool Value", func() { vo(true).Send(vo(true)) }) + shouldPanic("value of type string is not assignable to type bool", func() { vo(new(bool)).Elem().Set(vo("")) }) + shouldPanic("call of reflect.Value.SetBool on string Value", func() { vo(new(string)).Elem().SetBool(false) }) + shouldPanic("reflect.Value.SetBytes using unaddressable value", func() { vo("").SetBytes(nil) }) + shouldPanic("call of reflect.Value.SetCap on string Value", func() { vo(new(string)).Elem().SetCap(0) }) + shouldPanic("call of reflect.Value.SetComplex on string Value", func() { vo(new(string)).Elem().SetComplex(0) }) + shouldPanic("call of reflect.Value.SetFloat on string Value", func() { vo(new(string)).Elem().SetFloat(0) }) + shouldPanic("call of reflect.Value.SetInt on string Value", func() { vo(new(string)).Elem().SetInt(0) }) + shouldPanic("call of reflect.Value.SetLen on string Value", func() { vo(new(string)).Elem().SetLen(0) }) + shouldPanic("call of reflect.Value.SetString on int Value", func() { vo(new(int)).Elem().SetString("") }) + shouldPanic("reflect.Value.SetUint using unaddressable value", func() { vo(0.0).SetUint(0) }) + shouldPanic("call of reflect.Value.Slice on bool Value", func() { vo(true).Slice(1, 2) }) + shouldPanic("call of reflect.Value.Slice3 on int Value", func() { vo(0).Slice3(1, 2, 3) }) + shouldPanic("call of reflect.Value.TryRecv on bool Value", func() { vo(true).TryRecv() }) + shouldPanic("call of reflect.Value.TrySend on string Value", func() { vo("").TrySend(vo("")) }) + shouldPanic("call of reflect.Value.Uint on float64 Value", func() { vo(0.0).Uint() }) +} + +func shouldPanic(expect string, f func()) { + defer func() { + r := recover() + if r == nil { + panic("did not panic") + } + if expect != "" { + var s string + switch r := r.(type) { + case string: + s = r + case *ValueError: + s = r.Error() + default: + panic(fmt.Sprintf("panicked with unexpected type %T", r)) + } + if !strings.HasPrefix(s, "reflect") { + panic(`panic string does not start with "reflect": ` + s) + } + if !strings.Contains(s, expect) { + panic(`panic string does not contain "` + expect + `": ` + s) + } + } + }() + f() +} + +func isNonNil(x any) { + if x == nil { + panic("nil interface") + } +} + +func isValid(v Value) { + if !v.IsValid() { + panic("zero Value") + } +} + +func TestAlias(t *testing.T) { + x := string("hello") + v := ValueOf(&x).Elem() + oldvalue := v.Interface() + v.SetString("world") + newvalue := v.Interface() + + if oldvalue != "hello" || newvalue != "world" { + t.Errorf("aliasing: old=%q new=%q, want hello, world", oldvalue, newvalue) + } +} + +var V = ValueOf + +func EmptyInterfaceV(x any) Value { + return ValueOf(&x).Elem() +} + +func ReaderV(x io.Reader) Value { + return ValueOf(&x).Elem() +} + +func ReadWriterV(x io.ReadWriter) Value { + return ValueOf(&x).Elem() +} + +type Empty struct{} +type MyStruct struct { + x int `some:"tag"` +} +type MyStruct1 struct { + x struct { + int `some:"bar"` + } +} +type MyStruct2 struct { + x struct { + int `some:"foo"` + } +} +type MyString string +type MyBytes []byte +type MyBytesArrayPtr0 *[0]byte +type MyBytesArrayPtr *[4]byte +type MyBytesArray0 [0]byte +type MyBytesArray [4]byte +type MyRunes []int32 +type MyFunc func() +type MyByte byte + +type IntChan chan int 
+type IntChanRecv <-chan int +type IntChanSend chan<- int +type BytesChan chan []byte +type BytesChanRecv <-chan []byte +type BytesChanSend chan<- []byte + +var convertTests = []struct { + in Value + out Value +}{ + // numbers + /* + Edit .+1,/\*\//-1>cat >/tmp/x.go && go run /tmp/x.go + + package main + + import "fmt" + + var numbers = []string{ + "int8", "uint8", "int16", "uint16", + "int32", "uint32", "int64", "uint64", + "int", "uint", "uintptr", + "float32", "float64", + } + + func main() { + // all pairs but in an unusual order, + // to emit all the int8, uint8 cases + // before n grows too big. + n := 1 + for i, f := range numbers { + for _, g := range numbers[i:] { + fmt.Printf("\t{V(%s(%d)), V(%s(%d))},\n", f, n, g, n) + n++ + if f != g { + fmt.Printf("\t{V(%s(%d)), V(%s(%d))},\n", g, n, f, n) + n++ + } + } + } + } + */ + {V(int8(1)), V(int8(1))}, + {V(int8(2)), V(uint8(2))}, + {V(uint8(3)), V(int8(3))}, + {V(int8(4)), V(int16(4))}, + {V(int16(5)), V(int8(5))}, + {V(int8(6)), V(uint16(6))}, + {V(uint16(7)), V(int8(7))}, + {V(int8(8)), V(int32(8))}, + {V(int32(9)), V(int8(9))}, + {V(int8(10)), V(uint32(10))}, + {V(uint32(11)), V(int8(11))}, + {V(int8(12)), V(int64(12))}, + {V(int64(13)), V(int8(13))}, + {V(int8(14)), V(uint64(14))}, + {V(uint64(15)), V(int8(15))}, + {V(int8(16)), V(int(16))}, + {V(int(17)), V(int8(17))}, + {V(int8(18)), V(uint(18))}, + {V(uint(19)), V(int8(19))}, + {V(int8(20)), V(uintptr(20))}, + {V(uintptr(21)), V(int8(21))}, + {V(int8(22)), V(float32(22))}, + {V(float32(23)), V(int8(23))}, + {V(int8(24)), V(float64(24))}, + {V(float64(25)), V(int8(25))}, + {V(uint8(26)), V(uint8(26))}, + {V(uint8(27)), V(int16(27))}, + {V(int16(28)), V(uint8(28))}, + {V(uint8(29)), V(uint16(29))}, + {V(uint16(30)), V(uint8(30))}, + {V(uint8(31)), V(int32(31))}, + {V(int32(32)), V(uint8(32))}, + {V(uint8(33)), V(uint32(33))}, + {V(uint32(34)), V(uint8(34))}, + {V(uint8(35)), V(int64(35))}, + {V(int64(36)), V(uint8(36))}, + {V(uint8(37)), V(uint64(37))}, + {V(uint64(38)), V(uint8(38))}, + {V(uint8(39)), V(int(39))}, + {V(int(40)), V(uint8(40))}, + {V(uint8(41)), V(uint(41))}, + {V(uint(42)), V(uint8(42))}, + {V(uint8(43)), V(uintptr(43))}, + {V(uintptr(44)), V(uint8(44))}, + {V(uint8(45)), V(float32(45))}, + {V(float32(46)), V(uint8(46))}, + {V(uint8(47)), V(float64(47))}, + {V(float64(48)), V(uint8(48))}, + {V(int16(49)), V(int16(49))}, + {V(int16(50)), V(uint16(50))}, + {V(uint16(51)), V(int16(51))}, + {V(int16(52)), V(int32(52))}, + {V(int32(53)), V(int16(53))}, + {V(int16(54)), V(uint32(54))}, + {V(uint32(55)), V(int16(55))}, + {V(int16(56)), V(int64(56))}, + {V(int64(57)), V(int16(57))}, + {V(int16(58)), V(uint64(58))}, + {V(uint64(59)), V(int16(59))}, + {V(int16(60)), V(int(60))}, + {V(int(61)), V(int16(61))}, + {V(int16(62)), V(uint(62))}, + {V(uint(63)), V(int16(63))}, + {V(int16(64)), V(uintptr(64))}, + {V(uintptr(65)), V(int16(65))}, + {V(int16(66)), V(float32(66))}, + {V(float32(67)), V(int16(67))}, + {V(int16(68)), V(float64(68))}, + {V(float64(69)), V(int16(69))}, + {V(uint16(70)), V(uint16(70))}, + {V(uint16(71)), V(int32(71))}, + {V(int32(72)), V(uint16(72))}, + {V(uint16(73)), V(uint32(73))}, + {V(uint32(74)), V(uint16(74))}, + {V(uint16(75)), V(int64(75))}, + {V(int64(76)), V(uint16(76))}, + {V(uint16(77)), V(uint64(77))}, + {V(uint64(78)), V(uint16(78))}, + {V(uint16(79)), V(int(79))}, + {V(int(80)), V(uint16(80))}, + {V(uint16(81)), V(uint(81))}, + {V(uint(82)), V(uint16(82))}, + {V(uint16(83)), V(uintptr(83))}, + {V(uintptr(84)), V(uint16(84))}, + 
{V(uint16(85)), V(float32(85))}, + {V(float32(86)), V(uint16(86))}, + {V(uint16(87)), V(float64(87))}, + {V(float64(88)), V(uint16(88))}, + {V(int32(89)), V(int32(89))}, + {V(int32(90)), V(uint32(90))}, + {V(uint32(91)), V(int32(91))}, + {V(int32(92)), V(int64(92))}, + {V(int64(93)), V(int32(93))}, + {V(int32(94)), V(uint64(94))}, + {V(uint64(95)), V(int32(95))}, + {V(int32(96)), V(int(96))}, + {V(int(97)), V(int32(97))}, + {V(int32(98)), V(uint(98))}, + {V(uint(99)), V(int32(99))}, + {V(int32(100)), V(uintptr(100))}, + {V(uintptr(101)), V(int32(101))}, + {V(int32(102)), V(float32(102))}, + {V(float32(103)), V(int32(103))}, + {V(int32(104)), V(float64(104))}, + {V(float64(105)), V(int32(105))}, + {V(uint32(106)), V(uint32(106))}, + {V(uint32(107)), V(int64(107))}, + {V(int64(108)), V(uint32(108))}, + {V(uint32(109)), V(uint64(109))}, + {V(uint64(110)), V(uint32(110))}, + {V(uint32(111)), V(int(111))}, + {V(int(112)), V(uint32(112))}, + {V(uint32(113)), V(uint(113))}, + {V(uint(114)), V(uint32(114))}, + {V(uint32(115)), V(uintptr(115))}, + {V(uintptr(116)), V(uint32(116))}, + {V(uint32(117)), V(float32(117))}, + {V(float32(118)), V(uint32(118))}, + {V(uint32(119)), V(float64(119))}, + {V(float64(120)), V(uint32(120))}, + {V(int64(121)), V(int64(121))}, + {V(int64(122)), V(uint64(122))}, + {V(uint64(123)), V(int64(123))}, + {V(int64(124)), V(int(124))}, + {V(int(125)), V(int64(125))}, + {V(int64(126)), V(uint(126))}, + {V(uint(127)), V(int64(127))}, + {V(int64(128)), V(uintptr(128))}, + {V(uintptr(129)), V(int64(129))}, + {V(int64(130)), V(float32(130))}, + {V(float32(131)), V(int64(131))}, + {V(int64(132)), V(float64(132))}, + {V(float64(133)), V(int64(133))}, + {V(uint64(134)), V(uint64(134))}, + {V(uint64(135)), V(int(135))}, + {V(int(136)), V(uint64(136))}, + {V(uint64(137)), V(uint(137))}, + {V(uint(138)), V(uint64(138))}, + {V(uint64(139)), V(uintptr(139))}, + {V(uintptr(140)), V(uint64(140))}, + {V(uint64(141)), V(float32(141))}, + {V(float32(142)), V(uint64(142))}, + {V(uint64(143)), V(float64(143))}, + {V(float64(144)), V(uint64(144))}, + {V(int(145)), V(int(145))}, + {V(int(146)), V(uint(146))}, + {V(uint(147)), V(int(147))}, + {V(int(148)), V(uintptr(148))}, + {V(uintptr(149)), V(int(149))}, + {V(int(150)), V(float32(150))}, + {V(float32(151)), V(int(151))}, + {V(int(152)), V(float64(152))}, + {V(float64(153)), V(int(153))}, + {V(uint(154)), V(uint(154))}, + {V(uint(155)), V(uintptr(155))}, + {V(uintptr(156)), V(uint(156))}, + {V(uint(157)), V(float32(157))}, + {V(float32(158)), V(uint(158))}, + {V(uint(159)), V(float64(159))}, + {V(float64(160)), V(uint(160))}, + {V(uintptr(161)), V(uintptr(161))}, + {V(uintptr(162)), V(float32(162))}, + {V(float32(163)), V(uintptr(163))}, + {V(uintptr(164)), V(float64(164))}, + {V(float64(165)), V(uintptr(165))}, + {V(float32(166)), V(float32(166))}, + {V(float32(167)), V(float64(167))}, + {V(float64(168)), V(float32(168))}, + {V(float64(169)), V(float64(169))}, + + // truncation + {V(float64(1.5)), V(int(1))}, + + // complex + {V(complex64(1i)), V(complex64(1i))}, + {V(complex64(2i)), V(complex128(2i))}, + {V(complex128(3i)), V(complex64(3i))}, + {V(complex128(4i)), V(complex128(4i))}, + + // string + {V(string("hello")), V(string("hello"))}, + {V(string("bytes1")), V([]byte("bytes1"))}, + {V([]byte("bytes2")), V(string("bytes2"))}, + {V([]byte("bytes3")), V([]byte("bytes3"))}, + {V(string("runes♝")), V([]rune("runes♝"))}, + {V([]rune("runes♕")), V(string("runes♕"))}, + {V([]rune("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))}, + {V(int('a')), 
V(string("a"))}, + {V(int8('a')), V(string("a"))}, + {V(int16('a')), V(string("a"))}, + {V(int32('a')), V(string("a"))}, + {V(int64('a')), V(string("a"))}, + {V(uint('a')), V(string("a"))}, + {V(uint8('a')), V(string("a"))}, + {V(uint16('a')), V(string("a"))}, + {V(uint32('a')), V(string("a"))}, + {V(uint64('a')), V(string("a"))}, + {V(uintptr('a')), V(string("a"))}, + {V(int(-1)), V(string("\uFFFD"))}, + {V(int8(-2)), V(string("\uFFFD"))}, + {V(int16(-3)), V(string("\uFFFD"))}, + {V(int32(-4)), V(string("\uFFFD"))}, + {V(int64(-5)), V(string("\uFFFD"))}, + {V(int64(-1 << 32)), V(string("\uFFFD"))}, + {V(int64(1 << 32)), V(string("\uFFFD"))}, + {V(uint(0x110001)), V(string("\uFFFD"))}, + {V(uint32(0x110002)), V(string("\uFFFD"))}, + {V(uint64(0x110003)), V(string("\uFFFD"))}, + {V(uint64(1 << 32)), V(string("\uFFFD"))}, + {V(uintptr(0x110004)), V(string("\uFFFD"))}, + + // named string + {V(MyString("hello")), V(string("hello"))}, + {V(string("hello")), V(MyString("hello"))}, + {V(string("hello")), V(string("hello"))}, + {V(MyString("hello")), V(MyString("hello"))}, + {V(MyString("bytes1")), V([]byte("bytes1"))}, + {V([]byte("bytes2")), V(MyString("bytes2"))}, + {V([]byte("bytes3")), V([]byte("bytes3"))}, + {V(MyString("runes♝")), V([]rune("runes♝"))}, + {V([]rune("runes♕")), V(MyString("runes♕"))}, + {V([]rune("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))}, + {V([]rune("runes🙈🙉🙊")), V(MyRunes("runes🙈🙉🙊"))}, + {V(MyRunes("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))}, + {V(int('a')), V(MyString("a"))}, + {V(int8('a')), V(MyString("a"))}, + {V(int16('a')), V(MyString("a"))}, + {V(int32('a')), V(MyString("a"))}, + {V(int64('a')), V(MyString("a"))}, + {V(uint('a')), V(MyString("a"))}, + {V(uint8('a')), V(MyString("a"))}, + {V(uint16('a')), V(MyString("a"))}, + {V(uint32('a')), V(MyString("a"))}, + {V(uint64('a')), V(MyString("a"))}, + {V(uintptr('a')), V(MyString("a"))}, + {V(int(-1)), V(MyString("\uFFFD"))}, + {V(int8(-2)), V(MyString("\uFFFD"))}, + {V(int16(-3)), V(MyString("\uFFFD"))}, + {V(int32(-4)), V(MyString("\uFFFD"))}, + {V(int64(-5)), V(MyString("\uFFFD"))}, + {V(uint(0x110001)), V(MyString("\uFFFD"))}, + {V(uint32(0x110002)), V(MyString("\uFFFD"))}, + {V(uint64(0x110003)), V(MyString("\uFFFD"))}, + {V(uintptr(0x110004)), V(MyString("\uFFFD"))}, + + // named []byte + {V(string("bytes1")), V(MyBytes("bytes1"))}, + {V(MyBytes("bytes2")), V(string("bytes2"))}, + {V(MyBytes("bytes3")), V(MyBytes("bytes3"))}, + {V(MyString("bytes1")), V(MyBytes("bytes1"))}, + {V(MyBytes("bytes2")), V(MyString("bytes2"))}, + + // named []rune + {V(string("runes♝")), V(MyRunes("runes♝"))}, + {V(MyRunes("runes♕")), V(string("runes♕"))}, + {V(MyRunes("runes🙈🙉🙊")), V(MyRunes("runes🙈🙉🙊"))}, + {V(MyString("runes♝")), V(MyRunes("runes♝"))}, + {V(MyRunes("runes♕")), V(MyString("runes♕"))}, + + // slice to array + {V([]byte(nil)), V([0]byte{})}, + {V([]byte{}), V([0]byte{})}, + {V([]byte{1}), V([1]byte{1})}, + {V([]byte{1, 2}), V([2]byte{1, 2})}, + {V([]byte{1, 2, 3}), V([3]byte{1, 2, 3})}, + {V(MyBytes([]byte(nil))), V([0]byte{})}, + {V(MyBytes{}), V([0]byte{})}, + {V(MyBytes{1}), V([1]byte{1})}, + {V(MyBytes{1, 2}), V([2]byte{1, 2})}, + {V(MyBytes{1, 2, 3}), V([3]byte{1, 2, 3})}, + {V([]byte(nil)), V(MyBytesArray0{})}, + {V([]byte{}), V(MyBytesArray0([0]byte{}))}, + {V([]byte{1, 2, 3, 4}), V(MyBytesArray([4]byte{1, 2, 3, 4}))}, + {V(MyBytes{}), V(MyBytesArray0([0]byte{}))}, + {V(MyBytes{5, 6, 7, 8}), V(MyBytesArray([4]byte{5, 6, 7, 8}))}, + {V([]MyByte{}), V([0]MyByte{})}, + {V([]MyByte{1, 2}), V([2]MyByte{1, 2})}, + + // slice to 
array pointer + {V([]byte(nil)), V((*[0]byte)(nil))}, + {V([]byte{}), V(new([0]byte))}, + {V([]byte{7}), V(&[1]byte{7})}, + {V(MyBytes([]byte(nil))), V((*[0]byte)(nil))}, + {V(MyBytes([]byte{})), V(new([0]byte))}, + {V(MyBytes([]byte{9})), V(&[1]byte{9})}, + {V([]byte(nil)), V(MyBytesArrayPtr0(nil))}, + {V([]byte{}), V(MyBytesArrayPtr0(new([0]byte)))}, + {V([]byte{1, 2, 3, 4}), V(MyBytesArrayPtr(&[4]byte{1, 2, 3, 4}))}, + {V(MyBytes([]byte{})), V(MyBytesArrayPtr0(new([0]byte)))}, + {V(MyBytes([]byte{5, 6, 7, 8})), V(MyBytesArrayPtr(&[4]byte{5, 6, 7, 8}))}, + + {V([]byte(nil)), V((*MyBytesArray0)(nil))}, + {V([]byte{}), V((*MyBytesArray0)(new([0]byte)))}, + {V([]byte{1, 2, 3, 4}), V(&MyBytesArray{1, 2, 3, 4})}, + {V(MyBytes([]byte(nil))), V((*MyBytesArray0)(nil))}, + {V(MyBytes([]byte{})), V((*MyBytesArray0)(new([0]byte)))}, + {V(MyBytes([]byte{5, 6, 7, 8})), V(&MyBytesArray{5, 6, 7, 8})}, + {V(new([0]byte)), V(new(MyBytesArray0))}, + {V(new(MyBytesArray0)), V(new([0]byte))}, + {V(MyBytesArrayPtr0(nil)), V((*[0]byte)(nil))}, + {V((*[0]byte)(nil)), V(MyBytesArrayPtr0(nil))}, + + // named types and equal underlying types + {V(new(int)), V(new(integer))}, + {V(new(integer)), V(new(int))}, + {V(Empty{}), V(struct{}{})}, + {V(new(Empty)), V(new(struct{}))}, + {V(struct{}{}), V(Empty{})}, + {V(new(struct{})), V(new(Empty))}, + {V(Empty{}), V(Empty{})}, + {V(MyBytes{}), V([]byte{})}, + {V([]byte{}), V(MyBytes{})}, + {V((func())(nil)), V(MyFunc(nil))}, + {V((MyFunc)(nil)), V((func())(nil))}, + + // structs with different tags + {V(struct { + x int `some:"foo"` + }{}), V(struct { + x int `some:"bar"` + }{})}, + + {V(struct { + x int `some:"bar"` + }{}), V(struct { + x int `some:"foo"` + }{})}, + + {V(MyStruct{}), V(struct { + x int `some:"foo"` + }{})}, + + {V(struct { + x int `some:"foo"` + }{}), V(MyStruct{})}, + + {V(MyStruct{}), V(struct { + x int `some:"bar"` + }{})}, + + {V(struct { + x int `some:"bar"` + }{}), V(MyStruct{})}, + + {V(MyStruct1{}), V(MyStruct2{})}, + {V(MyStruct2{}), V(MyStruct1{})}, + + // can convert *byte and *MyByte + {V((*byte)(nil)), V((*MyByte)(nil))}, + {V((*MyByte)(nil)), V((*byte)(nil))}, + + // cannot convert mismatched array sizes + {V([2]byte{}), V([2]byte{})}, + {V([3]byte{}), V([3]byte{})}, + {V(MyBytesArray0{}), V([0]byte{})}, + {V([0]byte{}), V(MyBytesArray0{})}, + + // cannot convert other instances + {V((**byte)(nil)), V((**byte)(nil))}, + {V((**MyByte)(nil)), V((**MyByte)(nil))}, + {V((chan byte)(nil)), V((chan byte)(nil))}, + {V((chan MyByte)(nil)), V((chan MyByte)(nil))}, + {V(([]byte)(nil)), V(([]byte)(nil))}, + {V(([]MyByte)(nil)), V(([]MyByte)(nil))}, + {V((map[int]byte)(nil)), V((map[int]byte)(nil))}, + {V((map[int]MyByte)(nil)), V((map[int]MyByte)(nil))}, + {V((map[byte]int)(nil)), V((map[byte]int)(nil))}, + {V((map[MyByte]int)(nil)), V((map[MyByte]int)(nil))}, + {V([2]byte{}), V([2]byte{})}, + {V([2]MyByte{}), V([2]MyByte{})}, + + // other + {V((***int)(nil)), V((***int)(nil))}, + {V((***byte)(nil)), V((***byte)(nil))}, + {V((***int32)(nil)), V((***int32)(nil))}, + {V((***int64)(nil)), V((***int64)(nil))}, + {V((chan byte)(nil)), V((chan byte)(nil))}, + {V((chan MyByte)(nil)), V((chan MyByte)(nil))}, + {V((map[int]bool)(nil)), V((map[int]bool)(nil))}, + {V((map[int]byte)(nil)), V((map[int]byte)(nil))}, + {V((map[uint]bool)(nil)), V((map[uint]bool)(nil))}, + {V([]uint(nil)), V([]uint(nil))}, + {V([]int(nil)), V([]int(nil))}, + {V(new(any)), V(new(any))}, + {V(new(io.Reader)), V(new(io.Reader))}, + {V(new(io.Writer)), V(new(io.Writer))}, + + // 
channels + {V(IntChan(nil)), V((chan<- int)(nil))}, + {V(IntChan(nil)), V((<-chan int)(nil))}, + {V((chan int)(nil)), V(IntChanRecv(nil))}, + {V((chan int)(nil)), V(IntChanSend(nil))}, + {V(IntChanRecv(nil)), V((<-chan int)(nil))}, + {V((<-chan int)(nil)), V(IntChanRecv(nil))}, + {V(IntChanSend(nil)), V((chan<- int)(nil))}, + {V((chan<- int)(nil)), V(IntChanSend(nil))}, + {V(IntChan(nil)), V((chan int)(nil))}, + {V((chan int)(nil)), V(IntChan(nil))}, + {V((chan int)(nil)), V((<-chan int)(nil))}, + {V((chan int)(nil)), V((chan<- int)(nil))}, + {V(BytesChan(nil)), V((chan<- []byte)(nil))}, + {V(BytesChan(nil)), V((<-chan []byte)(nil))}, + {V((chan []byte)(nil)), V(BytesChanRecv(nil))}, + {V((chan []byte)(nil)), V(BytesChanSend(nil))}, + {V(BytesChanRecv(nil)), V((<-chan []byte)(nil))}, + {V((<-chan []byte)(nil)), V(BytesChanRecv(nil))}, + {V(BytesChanSend(nil)), V((chan<- []byte)(nil))}, + {V((chan<- []byte)(nil)), V(BytesChanSend(nil))}, + {V(BytesChan(nil)), V((chan []byte)(nil))}, + {V((chan []byte)(nil)), V(BytesChan(nil))}, + {V((chan []byte)(nil)), V((<-chan []byte)(nil))}, + {V((chan []byte)(nil)), V((chan<- []byte)(nil))}, + + // cannot convert other instances (channels) + {V(IntChan(nil)), V(IntChan(nil))}, + {V(IntChanRecv(nil)), V(IntChanRecv(nil))}, + {V(IntChanSend(nil)), V(IntChanSend(nil))}, + {V(BytesChan(nil)), V(BytesChan(nil))}, + {V(BytesChanRecv(nil)), V(BytesChanRecv(nil))}, + {V(BytesChanSend(nil)), V(BytesChanSend(nil))}, + + // interfaces + {V(int(1)), EmptyInterfaceV(int(1))}, + {V(string("hello")), EmptyInterfaceV(string("hello"))}, + {V(new(bytes.Buffer)), ReaderV(new(bytes.Buffer))}, + {ReadWriterV(new(bytes.Buffer)), ReaderV(new(bytes.Buffer))}, + {V(new(bytes.Buffer)), ReadWriterV(new(bytes.Buffer))}, +} + +func TestConvert(t *testing.T) { + canConvert := map[[2]Type]bool{} + all := map[Type]bool{} + + for _, tt := range convertTests { + t1 := tt.in.Type() + if !t1.ConvertibleTo(t1) { + t.Errorf("(%s).ConvertibleTo(%s) = false, want true", t1, t1) + continue + } + + t2 := tt.out.Type() + if !t1.ConvertibleTo(t2) { + t.Errorf("(%s).ConvertibleTo(%s) = false, want true", t1, t2) + continue + } + + all[t1] = true + all[t2] = true + canConvert[[2]Type{t1, t2}] = true + + // vout1 represents the in value converted to the in type. + v1 := tt.in + if !v1.CanConvert(t1) { + t.Errorf("ValueOf(%T(%[1]v)).CanConvert(%s) = false, want true", tt.in.Interface(), t1) + } + vout1 := v1.Convert(t1) + out1 := vout1.Interface() + if vout1.Type() != tt.in.Type() || !DeepEqual(out1, tt.in.Interface()) { + t.Errorf("ValueOf(%T(%[1]v)).Convert(%s) = %T(%[3]v), want %T(%[4]v)", tt.in.Interface(), t1, out1, tt.in.Interface()) + } + + // vout2 represents the in value converted to the out type. + if !v1.CanConvert(t2) { + t.Errorf("ValueOf(%T(%[1]v)).CanConvert(%s) = false, want true", tt.in.Interface(), t2) + } + vout2 := v1.Convert(t2) + out2 := vout2.Interface() + if vout2.Type() != tt.out.Type() || !DeepEqual(out2, tt.out.Interface()) { + t.Errorf("ValueOf(%T(%[1]v)).Convert(%s) = %T(%[3]v), want %T(%[4]v)", tt.in.Interface(), t2, out2, tt.out.Interface()) + } + if got, want := vout2.Kind(), vout2.Type().Kind(); got != want { + t.Errorf("ValueOf(%T(%[1]v)).Convert(%s) has internal kind %v want %v", tt.in.Interface(), t1, got, want) + } + + // vout3 represents a new value of the out type, set to vout2. This makes + // sure the converted value vout2 is really usable as a regular value. 
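+ // In ordinary (non-reflect) Go this step corresponds roughly to
+ //
+ //	var dst T2 = T2(in)
+ //
+ // where T2 stands for the entry's destination type.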
+ vout3 := New(t2).Elem() + vout3.Set(vout2) + out3 := vout3.Interface() + if vout3.Type() != tt.out.Type() || !DeepEqual(out3, tt.out.Interface()) { + t.Errorf("Set(ValueOf(%T(%[1]v)).Convert(%s)) = %T(%[3]v), want %T(%[4]v)", tt.in.Interface(), t2, out3, tt.out.Interface()) + } + + if IsRO(v1) { + t.Errorf("table entry %v is RO, should not be", v1) + } + if IsRO(vout1) { + t.Errorf("self-conversion output %v is RO, should not be", vout1) + } + if IsRO(vout2) { + t.Errorf("conversion output %v is RO, should not be", vout2) + } + if IsRO(vout3) { + t.Errorf("set(conversion output) %v is RO, should not be", vout3) + } + if !IsRO(MakeRO(v1).Convert(t1)) { + t.Errorf("RO self-conversion output %v is not RO, should be", v1) + } + if !IsRO(MakeRO(v1).Convert(t2)) { + t.Errorf("RO conversion output %v is not RO, should be", v1) + } + } + + // Assume that of all the types we saw during the tests, + // if there wasn't an explicit entry for a conversion between + // a pair of types, then it's not to be allowed. This checks for + // things like 'int64' converting to '*int'. + for t1 := range all { + for t2 := range all { + expectOK := t1 == t2 || canConvert[[2]Type{t1, t2}] || t2.Kind() == Interface && t2.NumMethod() == 0 + if ok := t1.ConvertibleTo(t2); ok != expectOK { + t.Errorf("(%s).ConvertibleTo(%s) = %v, want %v", t1, t2, ok, expectOK) + } + } + } +} + +func TestConvertPanic(t *testing.T) { + s := make([]byte, 4) + p := new([8]byte) + v := ValueOf(s) + pt := TypeOf(p) + if !v.Type().ConvertibleTo(pt) { + t.Errorf("[]byte should be convertible to *[8]byte") + } + if v.CanConvert(pt) { + t.Errorf("slice with length 4 should not be convertible to *[8]byte") + } + shouldPanic("reflect: cannot convert slice with length 4 to pointer to array with length 8", func() { + _ = v.Convert(pt) + }) + + if v.CanConvert(pt.Elem()) { + t.Errorf("slice with length 4 should not be convertible to [8]byte") + } + shouldPanic("reflect: cannot convert slice with length 4 to array with length 8", func() { + _ = v.Convert(pt.Elem()) + }) +} + +func TestConvertSlice2Array(t *testing.T) { + s := make([]int, 4) + p := [4]int{} + pt := TypeOf(p) + ov := ValueOf(s) + v := ov.Convert(pt) + // Converting a slice to non-empty array needs to return + // a non-addressable copy of the original memory. + if v.CanAddr() { + t.Fatalf("convert slice to non-empty array returns an addressable copy array") + } + for i := range s { + ov.Index(i).Set(ValueOf(i + 1)) + } + for i := range s { + if v.Index(i).Int() != 0 { + t.Fatalf("slice (%v) mutation visible in converted result (%v)", ov, v) + } + } +} + +var gFloat32 float32 + +const snan uint32 = 0x7f800001 + +func TestConvertNaNs(t *testing.T) { + // Test to see if a store followed by a load of a signaling NaN + // maintains the signaling bit. (This used to fail on the 387 port.) + gFloat32 = math.Float32frombits(snan) + runtime.Gosched() // make sure we don't optimize the store/load away + if got := math.Float32bits(gFloat32); got != snan { + t.Errorf("store/load of sNaN not faithful, got %x want %x", got, snan) + } + // Test reflect's conversion between float32s. See issue 36400. 
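+ // The conversion must not round-trip through float64: on common
+ // hardware, widening a signaling NaN quiets it, so a float32 ->
+ // float64 -> float32 detour would lose the signaling bit checked below.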
+ type myFloat32 float32 + x := V(myFloat32(math.Float32frombits(snan))) + y := x.Convert(TypeOf(float32(0))) + z := y.Interface().(float32) + if got := math.Float32bits(z); got != snan { + t.Errorf("signaling nan conversion got %x, want %x", got, snan) + } +} + +type ComparableStruct struct { + X int +} + +type NonComparableStruct struct { + X int + Y map[string]int +} + +var comparableTests = []struct { + typ Type + ok bool +}{ + {TypeOf(1), true}, + {TypeOf("hello"), true}, + {TypeOf(new(byte)), true}, + {TypeOf((func())(nil)), false}, + {TypeOf([]byte{}), false}, + {TypeOf(map[string]int{}), false}, + {TypeOf(make(chan int)), true}, + {TypeOf(1.5), true}, + {TypeOf(false), true}, + {TypeOf(1i), true}, + {TypeOf(ComparableStruct{}), true}, + {TypeOf(NonComparableStruct{}), false}, + {TypeOf([10]map[string]int{}), false}, + {TypeOf([10]string{}), true}, + {TypeOf(new(any)).Elem(), true}, +} + +func TestComparable(t *testing.T) { + for _, tt := range comparableTests { + if ok := tt.typ.Comparable(); ok != tt.ok { + t.Errorf("TypeOf(%v).Comparable() = %v, want %v", tt.typ, ok, tt.ok) + } + } +} + +func TestOverflow(t *testing.T) { + if ovf := V(float64(0)).OverflowFloat(1e300); ovf { + t.Errorf("%v wrongly overflows float64", 1e300) + } + + maxFloat32 := float64((1<<24 - 1) << (127 - 23)) + if ovf := V(float32(0)).OverflowFloat(maxFloat32); ovf { + t.Errorf("%v wrongly overflows float32", maxFloat32) + } + ovfFloat32 := float64((1<<24-1)<<(127-23) + 1<<(127-52)) + if ovf := V(float32(0)).OverflowFloat(ovfFloat32); !ovf { + t.Errorf("%v should overflow float32", ovfFloat32) + } + if ovf := V(float32(0)).OverflowFloat(-ovfFloat32); !ovf { + t.Errorf("%v should overflow float32", -ovfFloat32) + } + + maxInt32 := int64(0x7fffffff) + if ovf := V(int32(0)).OverflowInt(maxInt32); ovf { + t.Errorf("%v wrongly overflows int32", maxInt32) + } + if ovf := V(int32(0)).OverflowInt(-1 << 31); ovf { + t.Errorf("%v wrongly overflows int32", -int64(1)<<31) + } + ovfInt32 := int64(1 << 31) + if ovf := V(int32(0)).OverflowInt(ovfInt32); !ovf { + t.Errorf("%v should overflow int32", ovfInt32) + } + + maxUint32 := uint64(0xffffffff) + if ovf := V(uint32(0)).OverflowUint(maxUint32); ovf { + t.Errorf("%v wrongly overflows uint32", maxUint32) + } + ovfUint32 := uint64(1 << 32) + if ovf := V(uint32(0)).OverflowUint(ovfUint32); !ovf { + t.Errorf("%v should overflow uint32", ovfUint32) + } +} + +func checkSameType(t *testing.T, x Type, y any) { + if x != TypeOf(y) || TypeOf(Zero(x).Interface()) != TypeOf(y) { + t.Errorf("did not find preexisting type for %s (vs %s)", TypeOf(x), TypeOf(y)) + } +} + +func TestArrayOf(t *testing.T) { + // check construction and use of type not in binary + tests := []struct { + n int + value func(i int) any + comparable bool + want string + }{ + { + n: 0, + value: func(i int) any { type Tint int; return Tint(i) }, + comparable: true, + want: "[]", + }, + { + n: 10, + value: func(i int) any { type Tint int; return Tint(i) }, + comparable: true, + want: "[0 1 2 3 4 5 6 7 8 9]", + }, + { + n: 10, + value: func(i int) any { type Tfloat float64; return Tfloat(i) }, + comparable: true, + want: "[0 1 2 3 4 5 6 7 8 9]", + }, + { + n: 10, + value: func(i int) any { type Tstring string; return Tstring(strconv.Itoa(i)) }, + comparable: true, + want: "[0 1 2 3 4 5 6 7 8 9]", + }, + { + n: 10, + value: func(i int) any { type Tstruct struct{ V int }; return Tstruct{i} }, + comparable: true, + want: "[{0} {1} {2} {3} {4} {5} {6} {7} {8} {9}]", + }, + { + n: 10, + value: func(i int) any { type Tint 
int; return []Tint{Tint(i)} }, + comparable: false, + want: "[[0] [1] [2] [3] [4] [5] [6] [7] [8] [9]]", + }, + { + n: 10, + value: func(i int) any { type Tint int; return [1]Tint{Tint(i)} }, + comparable: true, + want: "[[0] [1] [2] [3] [4] [5] [6] [7] [8] [9]]", + }, + { + n: 10, + value: func(i int) any { type Tstruct struct{ V [1]int }; return Tstruct{[1]int{i}} }, + comparable: true, + want: "[{[0]} {[1]} {[2]} {[3]} {[4]} {[5]} {[6]} {[7]} {[8]} {[9]}]", + }, + { + n: 10, + value: func(i int) any { type Tstruct struct{ V []int }; return Tstruct{[]int{i}} }, + comparable: false, + want: "[{[0]} {[1]} {[2]} {[3]} {[4]} {[5]} {[6]} {[7]} {[8]} {[9]}]", + }, + { + n: 10, + value: func(i int) any { type TstructUV struct{ U, V int }; return TstructUV{i, i} }, + comparable: true, + want: "[{0 0} {1 1} {2 2} {3 3} {4 4} {5 5} {6 6} {7 7} {8 8} {9 9}]", + }, + { + n: 10, + value: func(i int) any { + type TstructUV struct { + U int + V float64 + } + return TstructUV{i, float64(i)} + }, + comparable: true, + want: "[{0 0} {1 1} {2 2} {3 3} {4 4} {5 5} {6 6} {7 7} {8 8} {9 9}]", + }, + } + + for _, table := range tests { + at := ArrayOf(table.n, TypeOf(table.value(0))) + v := New(at).Elem() + vok := New(at).Elem() + vnot := New(at).Elem() + for i := 0; i < v.Len(); i++ { + v.Index(i).Set(ValueOf(table.value(i))) + vok.Index(i).Set(ValueOf(table.value(i))) + j := i + if i+1 == v.Len() { + j = i + 1 + } + vnot.Index(i).Set(ValueOf(table.value(j))) // make it differ only by last element + } + s := fmt.Sprint(v.Interface()) + if s != table.want { + t.Errorf("constructed array = %s, want %s", s, table.want) + } + + if table.comparable != at.Comparable() { + t.Errorf("constructed array (%#v) is comparable=%v, want=%v", v.Interface(), at.Comparable(), table.comparable) + } + if table.comparable { + if table.n > 0 { + if DeepEqual(vnot.Interface(), v.Interface()) { + t.Errorf( + "arrays (%#v) compare ok (but should not)", + v.Interface(), + ) + } + } + if !DeepEqual(vok.Interface(), v.Interface()) { + t.Errorf( + "arrays (%#v) compare NOT-ok (but should)", + v.Interface(), + ) + } + } + } + + // check that type already in binary is found + type T int + checkSameType(t, ArrayOf(5, TypeOf(T(1))), [5]T{}) +} + +func TestArrayOfGC(t *testing.T) { + type T *uintptr + tt := TypeOf(T(nil)) + const n = 100 + var x []any + for i := 0; i < n; i++ { + v := New(ArrayOf(n, tt)).Elem() + for j := 0; j < v.Len(); j++ { + p := new(uintptr) + *p = uintptr(i*n + j) + v.Index(j).Set(ValueOf(p).Convert(tt)) + } + x = append(x, v.Interface()) + } + runtime.GC() + + for i, xi := range x { + v := ValueOf(xi) + for j := 0; j < v.Len(); j++ { + k := v.Index(j).Elem().Interface() + if k != uintptr(i*n+j) { + t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j) + } + } + } +} + +func TestArrayOfAlg(t *testing.T) { + at := ArrayOf(6, TypeOf(byte(0))) + v1 := New(at).Elem() + v2 := New(at).Elem() + if v1.Interface() != v1.Interface() { + t.Errorf("constructed array %v not equal to itself", v1.Interface()) + } + v1.Index(5).Set(ValueOf(byte(1))) + if i1, i2 := v1.Interface(), v2.Interface(); i1 == i2 { + t.Errorf("constructed arrays %v and %v should not be equal", i1, i2) + } + + at = ArrayOf(6, TypeOf([]int(nil))) + v1 = New(at).Elem() + shouldPanic("", func() { _ = v1.Interface() == v1.Interface() }) +} + +func TestArrayOfGenericAlg(t *testing.T) { + at1 := ArrayOf(5, TypeOf(string(""))) + at := ArrayOf(6, at1) + v1 := New(at).Elem() + v2 := New(at).Elem() + if v1.Interface() != v1.Interface() { + t.Errorf("constructed array 
%v not equal to itself", v1.Interface()) + } + + v1.Index(0).Index(0).Set(ValueOf("abc")) + v2.Index(0).Index(0).Set(ValueOf("efg")) + if i1, i2 := v1.Interface(), v2.Interface(); i1 == i2 { + t.Errorf("constructed arrays %v and %v should not be equal", i1, i2) + } + + v1.Index(0).Index(0).Set(ValueOf("abc")) + v2.Index(0).Index(0).Set(ValueOf((v1.Index(0).Index(0).String() + " ")[:3])) + if i1, i2 := v1.Interface(), v2.Interface(); i1 != i2 { + t.Errorf("constructed arrays %v and %v should be equal", i1, i2) + } + + // Test hash + m := MakeMap(MapOf(at, TypeOf(int(0)))) + m.SetMapIndex(v1, ValueOf(1)) + if i1, i2 := v1.Interface(), v2.Interface(); !m.MapIndex(v2).IsValid() { + t.Errorf("constructed arrays %v and %v have different hashes", i1, i2) + } +} + +func TestArrayOfDirectIface(t *testing.T) { + { + type T [1]*byte + i1 := Zero(TypeOf(T{})).Interface() + v1 := ValueOf(&i1).Elem() + p1 := v1.InterfaceData()[1] + + i2 := Zero(ArrayOf(1, PointerTo(TypeOf(int8(0))))).Interface() + v2 := ValueOf(&i2).Elem() + p2 := v2.InterfaceData()[1] + + if p1 != 0 { + t.Errorf("got p1=%v. want=%v", p1, nil) + } + + if p2 != 0 { + t.Errorf("got p2=%v. want=%v", p2, nil) + } + } + { + type T [0]*byte + i1 := Zero(TypeOf(T{})).Interface() + v1 := ValueOf(&i1).Elem() + p1 := v1.InterfaceData()[1] + + i2 := Zero(ArrayOf(0, PointerTo(TypeOf(int8(0))))).Interface() + v2 := ValueOf(&i2).Elem() + p2 := v2.InterfaceData()[1] + + if p1 == 0 { + t.Errorf("got p1=%v. want=not-%v", p1, nil) + } + + if p2 == 0 { + t.Errorf("got p2=%v. want=not-%v", p2, nil) + } + } +} + +// Ensure passing in negative lengths panics. +// See https://golang.org/issue/43603 +func TestArrayOfPanicOnNegativeLength(t *testing.T) { + shouldPanic("reflect: negative length passed to ArrayOf", func() { + ArrayOf(-1, TypeOf(byte(0))) + }) +} + +func TestSliceOf(t *testing.T) { + // check construction and use of type not in binary + type T int + st := SliceOf(TypeOf(T(1))) + if got, want := st.String(), "[]reflect_test.T"; got != want { + t.Errorf("SliceOf(T(1)).String()=%q, want %q", got, want) + } + v := MakeSlice(st, 10, 10) + runtime.GC() + for i := 0; i < v.Len(); i++ { + v.Index(i).Set(ValueOf(T(i))) + runtime.GC() + } + s := fmt.Sprint(v.Interface()) + want := "[0 1 2 3 4 5 6 7 8 9]" + if s != want { + t.Errorf("constructed slice = %s, want %s", s, want) + } + + // check that type already in binary is found + type T1 int + checkSameType(t, SliceOf(TypeOf(T1(1))), []T1{}) +} + +func TestSliceOverflow(t *testing.T) { + // check that MakeSlice panics when size of slice overflows uint + const S = 1e6 + s := uint(S) + l := (1<<(unsafe.Sizeof((*byte)(nil))*8)-1)/s + 1 + if l*s >= s { + t.Fatal("slice size does not overflow") + } + var x [S]byte + st := SliceOf(TypeOf(x)) + defer func() { + err := recover() + if err == nil { + t.Fatal("slice overflow does not panic") + } + }() + MakeSlice(st, int(l), int(l)) +} + +func TestSliceOfGC(t *testing.T) { + type T *uintptr + tt := TypeOf(T(nil)) + st := SliceOf(tt) + const n = 100 + var x []any + for i := 0; i < n; i++ { + v := MakeSlice(st, n, n) + for j := 0; j < v.Len(); j++ { + p := new(uintptr) + *p = uintptr(i*n + j) + v.Index(j).Set(ValueOf(p).Convert(tt)) + } + x = append(x, v.Interface()) + } + runtime.GC() + + for i, xi := range x { + v := ValueOf(xi) + for j := 0; j < v.Len(); j++ { + k := v.Index(j).Elem().Interface() + if k != uintptr(i*n+j) { + t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j) + } + } + } +} + +func TestStructOfFieldName(t *testing.T) { + // invalid field name 
"1nvalid" + shouldPanic("has invalid name", func() { + StructOf([]StructField{ + {Name: "Valid", Type: TypeOf("")}, + {Name: "1nvalid", Type: TypeOf("")}, + }) + }) + + // invalid field name "+" + shouldPanic("has invalid name", func() { + StructOf([]StructField{ + {Name: "Val1d", Type: TypeOf("")}, + {Name: "+", Type: TypeOf("")}, + }) + }) + + // no field name + shouldPanic("has no name", func() { + StructOf([]StructField{ + {Name: "", Type: TypeOf("")}, + }) + }) + + // verify creation of a struct with valid struct fields + validFields := []StructField{ + { + Name: "φ", + Type: TypeOf(""), + }, + { + Name: "ValidName", + Type: TypeOf(""), + }, + { + Name: "Val1dNam5", + Type: TypeOf(""), + }, + } + + validStruct := StructOf(validFields) + + const structStr = `struct { φ string; ValidName string; Val1dNam5 string }` + if got, want := validStruct.String(), structStr; got != want { + t.Errorf("StructOf(validFields).String()=%q, want %q", got, want) + } +} + +func TestStructOf(t *testing.T) { + // check construction and use of type not in binary + fields := []StructField{ + { + Name: "S", + Tag: "s", + Type: TypeOf(""), + }, + { + Name: "X", + Tag: "x", + Type: TypeOf(byte(0)), + }, + { + Name: "Y", + Type: TypeOf(uint64(0)), + }, + { + Name: "Z", + Type: TypeOf([3]uint16{}), + }, + } + + st := StructOf(fields) + v := New(st).Elem() + runtime.GC() + v.FieldByName("X").Set(ValueOf(byte(2))) + v.FieldByIndex([]int{1}).Set(ValueOf(byte(1))) + runtime.GC() + + s := fmt.Sprint(v.Interface()) + want := `{ 1 0 [0 0 0]}` + if s != want { + t.Errorf("constructed struct = %s, want %s", s, want) + } + const stStr = `struct { S string "s"; X uint8 "x"; Y uint64; Z [3]uint16 }` + if got, want := st.String(), stStr; got != want { + t.Errorf("StructOf(fields).String()=%q, want %q", got, want) + } + + // check the size, alignment and field offsets + stt := TypeOf(struct { + String string + X byte + Y uint64 + Z [3]uint16 + }{}) + if st.Size() != stt.Size() { + t.Errorf("constructed struct size = %v, want %v", st.Size(), stt.Size()) + } + if st.Align() != stt.Align() { + t.Errorf("constructed struct align = %v, want %v", st.Align(), stt.Align()) + } + if st.FieldAlign() != stt.FieldAlign() { + t.Errorf("constructed struct field align = %v, want %v", st.FieldAlign(), stt.FieldAlign()) + } + for i := 0; i < st.NumField(); i++ { + o1 := st.Field(i).Offset + o2 := stt.Field(i).Offset + if o1 != o2 { + t.Errorf("constructed struct field %v offset = %v, want %v", i, o1, o2) + } + } + + // Check size and alignment with a trailing zero-sized field. 
+ st = StructOf([]StructField{ + { + Name: "F1", + Type: TypeOf(byte(0)), + }, + { + Name: "F2", + Type: TypeOf([0]*byte{}), + }, + }) + stt = TypeOf(struct { + G1 byte + G2 [0]*byte + }{}) + if st.Size() != stt.Size() { + t.Errorf("constructed zero-padded struct size = %v, want %v", st.Size(), stt.Size()) + } + if st.Align() != stt.Align() { + t.Errorf("constructed zero-padded struct align = %v, want %v", st.Align(), stt.Align()) + } + if st.FieldAlign() != stt.FieldAlign() { + t.Errorf("constructed zero-padded struct field align = %v, want %v", st.FieldAlign(), stt.FieldAlign()) + } + for i := 0; i < st.NumField(); i++ { + o1 := st.Field(i).Offset + o2 := stt.Field(i).Offset + if o1 != o2 { + t.Errorf("constructed zero-padded struct field %v offset = %v, want %v", i, o1, o2) + } + } + + // check duplicate names + shouldPanic("duplicate field", func() { + StructOf([]StructField{ + {Name: "string", PkgPath: "p", Type: TypeOf("")}, + {Name: "string", PkgPath: "p", Type: TypeOf("")}, + }) + }) + shouldPanic("has no name", func() { + StructOf([]StructField{ + {Type: TypeOf("")}, + {Name: "string", PkgPath: "p", Type: TypeOf("")}, + }) + }) + shouldPanic("has no name", func() { + StructOf([]StructField{ + {Type: TypeOf("")}, + {Type: TypeOf("")}, + }) + }) + // check that type already in binary is found + checkSameType(t, StructOf(fields[2:3]), struct{ Y uint64 }{}) + + // gccgo used to fail this test. + type structFieldType any + checkSameType(t, + StructOf([]StructField{ + { + Name: "F", + Type: TypeOf((*structFieldType)(nil)).Elem(), + }, + }), + struct{ F structFieldType }{}) +} + +func TestStructOfExportRules(t *testing.T) { + type S1 struct{} + type s2 struct{} + type ΦType struct{} + type φType struct{} + + testPanic := func(i int, mustPanic bool, f func()) { + defer func() { + err := recover() + if err == nil && mustPanic { + t.Errorf("test-%d did not panic", i) + } + if err != nil && !mustPanic { + t.Errorf("test-%d panicked: %v\n", i, err) + } + }() + f() + } + + tests := []struct { + field StructField + mustPanic bool + exported bool + }{ + { + field: StructField{Name: "S1", Anonymous: true, Type: TypeOf(S1{})}, + exported: true, + }, + { + field: StructField{Name: "S1", Anonymous: true, Type: TypeOf((*S1)(nil))}, + exported: true, + }, + { + field: StructField{Name: "s2", Anonymous: true, Type: TypeOf(s2{})}, + mustPanic: true, + }, + { + field: StructField{Name: "s2", Anonymous: true, Type: TypeOf((*s2)(nil))}, + mustPanic: true, + }, + { + field: StructField{Name: "Name", Type: nil, PkgPath: ""}, + mustPanic: true, + }, + { + field: StructField{Name: "", Type: TypeOf(S1{}), PkgPath: ""}, + mustPanic: true, + }, + { + field: StructField{Name: "S1", Anonymous: true, Type: TypeOf(S1{}), PkgPath: "other/pkg"}, + mustPanic: true, + }, + { + field: StructField{Name: "S1", Anonymous: true, Type: TypeOf((*S1)(nil)), PkgPath: "other/pkg"}, + mustPanic: true, + }, + { + field: StructField{Name: "s2", Anonymous: true, Type: TypeOf(s2{}), PkgPath: "other/pkg"}, + mustPanic: true, + }, + { + field: StructField{Name: "s2", Anonymous: true, Type: TypeOf((*s2)(nil)), PkgPath: "other/pkg"}, + mustPanic: true, + }, + { + field: StructField{Name: "s2", Type: TypeOf(int(0)), PkgPath: "other/pkg"}, + }, + { + field: StructField{Name: "s2", Type: TypeOf(int(0)), PkgPath: "other/pkg"}, + }, + { + field: StructField{Name: "S", Type: TypeOf(S1{})}, + exported: true, + }, + { + field: StructField{Name: "S", Type: TypeOf((*S1)(nil))}, + exported: true, + }, + { + field: StructField{Name: "S", Type: 
TypeOf(s2{})}, + exported: true, + }, + { + field: StructField{Name: "S", Type: TypeOf((*s2)(nil))}, + exported: true, + }, + { + field: StructField{Name: "s", Type: TypeOf(S1{})}, + mustPanic: true, + }, + { + field: StructField{Name: "s", Type: TypeOf((*S1)(nil))}, + mustPanic: true, + }, + { + field: StructField{Name: "s", Type: TypeOf(s2{})}, + mustPanic: true, + }, + { + field: StructField{Name: "s", Type: TypeOf((*s2)(nil))}, + mustPanic: true, + }, + { + field: StructField{Name: "s", Type: TypeOf(S1{}), PkgPath: "other/pkg"}, + }, + { + field: StructField{Name: "s", Type: TypeOf((*S1)(nil)), PkgPath: "other/pkg"}, + }, + { + field: StructField{Name: "s", Type: TypeOf(s2{}), PkgPath: "other/pkg"}, + }, + { + field: StructField{Name: "s", Type: TypeOf((*s2)(nil)), PkgPath: "other/pkg"}, + }, + { + field: StructField{Name: "", Type: TypeOf(ΦType{})}, + mustPanic: true, + }, + { + field: StructField{Name: "", Type: TypeOf(φType{})}, + mustPanic: true, + }, + { + field: StructField{Name: "Φ", Type: TypeOf(0)}, + exported: true, + }, + { + field: StructField{Name: "φ", Type: TypeOf(0)}, + exported: false, + }, + } + + for i, test := range tests { + testPanic(i, test.mustPanic, func() { + typ := StructOf([]StructField{test.field}) + if typ == nil { + t.Errorf("test-%d: error creating struct type", i) + return + } + field := typ.Field(0) + n := field.Name + if n == "" { + panic("field.Name must not be empty") + } + exported := token.IsExported(n) + if exported != test.exported { + t.Errorf("test-%d: got exported=%v want exported=%v", i, exported, test.exported) + } + if field.PkgPath != test.field.PkgPath { + t.Errorf("test-%d: got PkgPath=%q want pkgPath=%q", i, field.PkgPath, test.field.PkgPath) + } + }) + } +} + +func TestStructOfGC(t *testing.T) { + type T *uintptr + tt := TypeOf(T(nil)) + fields := []StructField{ + {Name: "X", Type: tt}, + {Name: "Y", Type: tt}, + } + st := StructOf(fields) + + const n = 10000 + var x []any + for i := 0; i < n; i++ { + v := New(st).Elem() + for j := 0; j < v.NumField(); j++ { + p := new(uintptr) + *p = uintptr(i*n + j) + v.Field(j).Set(ValueOf(p).Convert(tt)) + } + x = append(x, v.Interface()) + } + runtime.GC() + + for i, xi := range x { + v := ValueOf(xi) + for j := 0; j < v.NumField(); j++ { + k := v.Field(j).Elem().Interface() + if k != uintptr(i*n+j) { + t.Errorf("lost x[%d].%c = %d, want %d", i, "XY"[j], k, i*n+j) + } + } + } +} + +func TestStructOfAlg(t *testing.T) { + st := StructOf([]StructField{{Name: "X", Tag: "x", Type: TypeOf(int(0))}}) + v1 := New(st).Elem() + v2 := New(st).Elem() + if !DeepEqual(v1.Interface(), v1.Interface()) { + t.Errorf("constructed struct %v not equal to itself", v1.Interface()) + } + v1.FieldByName("X").Set(ValueOf(int(1))) + if i1, i2 := v1.Interface(), v2.Interface(); DeepEqual(i1, i2) { + t.Errorf("constructed structs %v and %v should not be equal", i1, i2) + } + + st = StructOf([]StructField{{Name: "X", Tag: "x", Type: TypeOf([]int(nil))}}) + v1 = New(st).Elem() + shouldPanic("", func() { _ = v1.Interface() == v1.Interface() }) +} + +func TestStructOfGenericAlg(t *testing.T) { + st1 := StructOf([]StructField{ + {Name: "X", Tag: "x", Type: TypeOf(int64(0))}, + {Name: "Y", Type: TypeOf(string(""))}, + }) + st := StructOf([]StructField{ + {Name: "S0", Type: st1}, + {Name: "S1", Type: st1}, + }) + + tests := []struct { + rt Type + idx []int + }{ + { + rt: st, + idx: []int{0, 1}, + }, + { + rt: st1, + idx: []int{1}, + }, + { + rt: StructOf( + []StructField{ + {Name: "XX", Type: TypeOf([0]int{})}, + {Name: "YY", Type: 
TypeOf("")}, + }, + ), + idx: []int{1}, + }, + { + rt: StructOf( + []StructField{ + {Name: "XX", Type: TypeOf([0]int{})}, + {Name: "YY", Type: TypeOf("")}, + {Name: "ZZ", Type: TypeOf([2]int{})}, + }, + ), + idx: []int{1}, + }, + { + rt: StructOf( + []StructField{ + {Name: "XX", Type: TypeOf([1]int{})}, + {Name: "YY", Type: TypeOf("")}, + }, + ), + idx: []int{1}, + }, + { + rt: StructOf( + []StructField{ + {Name: "XX", Type: TypeOf([1]int{})}, + {Name: "YY", Type: TypeOf("")}, + {Name: "ZZ", Type: TypeOf([1]int{})}, + }, + ), + idx: []int{1}, + }, + { + rt: StructOf( + []StructField{ + {Name: "XX", Type: TypeOf([2]int{})}, + {Name: "YY", Type: TypeOf("")}, + {Name: "ZZ", Type: TypeOf([2]int{})}, + }, + ), + idx: []int{1}, + }, + { + rt: StructOf( + []StructField{ + {Name: "XX", Type: TypeOf(int64(0))}, + {Name: "YY", Type: TypeOf(byte(0))}, + {Name: "ZZ", Type: TypeOf("")}, + }, + ), + idx: []int{2}, + }, + { + rt: StructOf( + []StructField{ + {Name: "XX", Type: TypeOf(int64(0))}, + {Name: "YY", Type: TypeOf(int64(0))}, + {Name: "ZZ", Type: TypeOf("")}, + {Name: "AA", Type: TypeOf([1]int64{})}, + }, + ), + idx: []int{2}, + }, + } + + for _, table := range tests { + v1 := New(table.rt).Elem() + v2 := New(table.rt).Elem() + + if !DeepEqual(v1.Interface(), v1.Interface()) { + t.Errorf("constructed struct %v not equal to itself", v1.Interface()) + } + + v1.FieldByIndex(table.idx).Set(ValueOf("abc")) + v2.FieldByIndex(table.idx).Set(ValueOf("def")) + if i1, i2 := v1.Interface(), v2.Interface(); DeepEqual(i1, i2) { + t.Errorf("constructed structs %v and %v should not be equal", i1, i2) + } + + abc := "abc" + v1.FieldByIndex(table.idx).Set(ValueOf(abc)) + val := "+" + abc + "-" + v2.FieldByIndex(table.idx).Set(ValueOf(val[1:4])) + if i1, i2 := v1.Interface(), v2.Interface(); !DeepEqual(i1, i2) { + t.Errorf("constructed structs %v and %v should be equal", i1, i2) + } + + // Test hash + m := MakeMap(MapOf(table.rt, TypeOf(int(0)))) + m.SetMapIndex(v1, ValueOf(1)) + if i1, i2 := v1.Interface(), v2.Interface(); !m.MapIndex(v2).IsValid() { + t.Errorf("constructed structs %#v and %#v have different hashes", i1, i2) + } + + v2.FieldByIndex(table.idx).Set(ValueOf("abc")) + if i1, i2 := v1.Interface(), v2.Interface(); !DeepEqual(i1, i2) { + t.Errorf("constructed structs %v and %v should be equal", i1, i2) + } + + if i1, i2 := v1.Interface(), v2.Interface(); !m.MapIndex(v2).IsValid() { + t.Errorf("constructed structs %v and %v have different hashes", i1, i2) + } + } +} + +func TestStructOfDirectIface(t *testing.T) { + { + type T struct{ X [1]*byte } + i1 := Zero(TypeOf(T{})).Interface() + v1 := ValueOf(&i1).Elem() + p1 := v1.InterfaceData()[1] + + i2 := Zero(StructOf([]StructField{ + { + Name: "X", + Type: ArrayOf(1, TypeOf((*int8)(nil))), + }, + })).Interface() + v2 := ValueOf(&i2).Elem() + p2 := v2.InterfaceData()[1] + + if p1 != 0 { + t.Errorf("got p1=%v. want=%v", p1, nil) + } + + if p2 != 0 { + t.Errorf("got p2=%v. want=%v", p2, nil) + } + } + { + type T struct{ X [0]*byte } + i1 := Zero(TypeOf(T{})).Interface() + v1 := ValueOf(&i1).Elem() + p1 := v1.InterfaceData()[1] + + i2 := Zero(StructOf([]StructField{ + { + Name: "X", + Type: ArrayOf(0, TypeOf((*int8)(nil))), + }, + })).Interface() + v2 := ValueOf(&i2).Elem() + p2 := v2.InterfaceData()[1] + + if p1 == 0 { + t.Errorf("got p1=%v. want=not-%v", p1, nil) + } + + if p2 == 0 { + t.Errorf("got p2=%v. 
want=not-%v", p2, nil) + } + } +} + +type StructI int + +func (i StructI) Get() int { return int(i) } + +type StructIPtr int + +func (i *StructIPtr) Get() int { return int(*i) } +func (i *StructIPtr) Set(v int) { *(*int)(i) = v } + +type SettableStruct struct { + SettableField int +} + +func (p *SettableStruct) Set(v int) { p.SettableField = v } + +type SettablePointer struct { + SettableField *int +} + +func (p *SettablePointer) Set(v int) { *p.SettableField = v } + +func TestStructOfWithInterface(t *testing.T) { + const want = 42 + type Iface interface { + Get() int + } + type IfaceSet interface { + Set(int) + } + tests := []struct { + name string + typ Type + val Value + impl bool + }{ + { + name: "StructI", + typ: TypeOf(StructI(want)), + val: ValueOf(StructI(want)), + impl: true, + }, + { + name: "StructI", + typ: PointerTo(TypeOf(StructI(want))), + val: ValueOf(func() any { + v := StructI(want) + return &v + }()), + impl: true, + }, + { + name: "StructIPtr", + typ: PointerTo(TypeOf(StructIPtr(want))), + val: ValueOf(func() any { + v := StructIPtr(want) + return &v + }()), + impl: true, + }, + { + name: "StructIPtr", + typ: TypeOf(StructIPtr(want)), + val: ValueOf(StructIPtr(want)), + impl: false, + }, + // { + // typ: TypeOf((*Iface)(nil)).Elem(), // FIXME(sbinet): fix method.ifn/tfn + // val: ValueOf(StructI(want)), + // impl: true, + // }, + } + + for i, table := range tests { + for j := 0; j < 2; j++ { + var fields []StructField + if j == 1 { + fields = append(fields, StructField{ + Name: "Dummy", + PkgPath: "", + Type: TypeOf(int(0)), + }) + } + fields = append(fields, StructField{ + Name: table.name, + Anonymous: true, + PkgPath: "", + Type: table.typ, + }) + + // We currently do not correctly implement methods + // for embedded fields other than the first. + // Therefore, for now, we expect those methods + // to not exist. See issues 15924 and 20824. + // When those issues are fixed, this test of panic + // should be removed. + if j == 1 && table.impl { + func() { + defer func() { + if err := recover(); err == nil { + t.Errorf("test-%d-%d did not panic", i, j) + } + }() + _ = StructOf(fields) + }() + continue + } + + rt := StructOf(fields) + rv := New(rt).Elem() + rv.Field(j).Set(table.val) + + if _, ok := rv.Interface().(Iface); ok != table.impl { + if table.impl { + t.Errorf("test-%d-%d: type=%v fails to implement Iface.\n", i, j, table.typ) + } else { + t.Errorf("test-%d-%d: type=%v should NOT implement Iface\n", i, j, table.typ) + } + continue + } + + if !table.impl { + continue + } + + v := rv.Interface().(Iface).Get() + if v != want { + t.Errorf("test-%d-%d: x.Get()=%v. want=%v\n", i, j, v, want) + } + + fct := rv.MethodByName("Get") + out := fct.Call(nil) + if !DeepEqual(out[0].Interface(), want) { + t.Errorf("test-%d-%d: x.Get()=%v. want=%v\n", i, j, out[0].Interface(), want) + } + } + } + + // Test an embedded nil pointer with pointer methods. + fields := []StructField{{ + Name: "StructIPtr", + Anonymous: true, + Type: PointerTo(TypeOf(StructIPtr(want))), + }} + rt := StructOf(fields) + rv := New(rt).Elem() + // This should panic since the pointer is nil. + shouldPanic("", func() { + rv.Interface().(IfaceSet).Set(want) + }) + + // Test an embedded nil pointer to a struct with pointer methods. + + fields = []StructField{{ + Name: "SettableStruct", + Anonymous: true, + Type: PointerTo(TypeOf(SettableStruct{})), + }} + rt = StructOf(fields) + rv = New(rt).Elem() + // This should panic since the pointer is nil. 
+ shouldPanic("", func() { + rv.Interface().(IfaceSet).Set(want) + }) + + // The behavior is different if there is a second field, + // since now an interface value holds a pointer to the struct + // rather than just holding a copy of the struct. + fields = []StructField{ + { + Name: "SettableStruct", + Anonymous: true, + Type: PointerTo(TypeOf(SettableStruct{})), + }, + { + Name: "EmptyStruct", + Anonymous: true, + Type: StructOf(nil), + }, + } + // With the current implementation this is expected to panic. + // Ideally it should work and we should be able to see a panic + // if we call the Set method. + shouldPanic("", func() { + StructOf(fields) + }) + + // Embed a field that can be stored directly in an interface, + // with a second field. + fields = []StructField{ + { + Name: "SettablePointer", + Anonymous: true, + Type: TypeOf(SettablePointer{}), + }, + { + Name: "EmptyStruct", + Anonymous: true, + Type: StructOf(nil), + }, + } + // With the current implementation this is expected to panic. + // Ideally it should work and we should be able to call the + // Set and Get methods. + shouldPanic("", func() { + StructOf(fields) + }) +} + +func TestStructOfTooManyFields(t *testing.T) { + // Bug Fix: #25402 - this should not panic + tt := StructOf([]StructField{ + {Name: "Time", Type: TypeOf(time.Time{}), Anonymous: true}, + }) + + if _, present := tt.MethodByName("After"); !present { + t.Errorf("Expected method `After` to be found") + } +} + +func TestStructOfDifferentPkgPath(t *testing.T) { + fields := []StructField{ + { + Name: "f1", + PkgPath: "p1", + Type: TypeOf(int(0)), + }, + { + Name: "f2", + PkgPath: "p2", + Type: TypeOf(int(0)), + }, + } + shouldPanic("different PkgPath", func() { + StructOf(fields) + }) +} + +func TestStructOfTooLarge(t *testing.T) { + t1 := TypeOf(byte(0)) + t2 := TypeOf(int16(0)) + t4 := TypeOf(int32(0)) + t0 := ArrayOf(0, t1) + + // 2^64-3 sized type (or 2^32-3 on 32-bit archs) + bigType := StructOf([]StructField{ + {Name: "F1", Type: ArrayOf(int(^uintptr(0)>>1), t1)}, + {Name: "F2", Type: ArrayOf(int(^uintptr(0)>>1-1), t1)}, + }) + + type test struct { + shouldPanic bool + fields []StructField + } + + tests := [...]test{ + { + shouldPanic: false, // 2^64-1, ok + fields: []StructField{ + {Name: "F1", Type: bigType}, + {Name: "F2", Type: ArrayOf(2, t1)}, + }, + }, + { + shouldPanic: true, // overflow in total size + fields: []StructField{ + {Name: "F1", Type: bigType}, + {Name: "F2", Type: ArrayOf(3, t1)}, + }, + }, + { + shouldPanic: true, // overflow while aligning F2 + fields: []StructField{ + {Name: "F1", Type: bigType}, + {Name: "F2", Type: t4}, + }, + }, + { + shouldPanic: true, // overflow while adding trailing byte for zero-sized fields + fields: []StructField{ + {Name: "F1", Type: bigType}, + {Name: "F2", Type: ArrayOf(2, t1)}, + {Name: "F3", Type: t0}, + }, + }, + { + shouldPanic: true, // overflow while aligning total size + fields: []StructField{ + {Name: "F1", Type: t2}, + {Name: "F2", Type: bigType}, + }, + }, + } + + for i, tt := range tests { + func() { + defer func() { + err := recover() + if !tt.shouldPanic { + if err != nil { + t.Errorf("test %d should not panic, got %s", i, err) + } + return + } + if err == nil { + t.Errorf("test %d expected to panic", i) + return + } + s := fmt.Sprintf("%s", err) + if s != "reflect.StructOf: struct size would exceed virtual address space" { + t.Errorf("test %d wrong panic message: %s", i, s) + return + } + }() + _ = StructOf(tt.fields) + }() + } +} + +func TestChanOf(t *testing.T) { + // check construction 
and use of type not in binary + type T string + ct := ChanOf(BothDir, TypeOf(T(""))) + v := MakeChan(ct, 2) + runtime.GC() + v.Send(ValueOf(T("hello"))) + runtime.GC() + v.Send(ValueOf(T("world"))) + runtime.GC() + + sv1, _ := v.Recv() + sv2, _ := v.Recv() + s1 := sv1.String() + s2 := sv2.String() + if s1 != "hello" || s2 != "world" { + t.Errorf("constructed chan: have %q, %q, want %q, %q", s1, s2, "hello", "world") + } + + // check that type already in binary is found + type T1 int + checkSameType(t, ChanOf(BothDir, TypeOf(T1(1))), (chan T1)(nil)) + + // Check arrow token association in undefined chan types. + var left chan<- chan T + var right chan (<-chan T) + tLeft := ChanOf(SendDir, ChanOf(BothDir, TypeOf(T("")))) + tRight := ChanOf(BothDir, ChanOf(RecvDir, TypeOf(T("")))) + if tLeft != TypeOf(left) { + t.Errorf("chan<-chan: have %s, want %T", tLeft, left) + } + if tRight != TypeOf(right) { + t.Errorf("chan<-chan: have %s, want %T", tRight, right) + } +} + +func TestChanOfDir(t *testing.T) { + // check construction and use of type not in binary + type T string + crt := ChanOf(RecvDir, TypeOf(T(""))) + cst := ChanOf(SendDir, TypeOf(T(""))) + + // check that type already in binary is found + type T1 int + checkSameType(t, ChanOf(RecvDir, TypeOf(T1(1))), (<-chan T1)(nil)) + checkSameType(t, ChanOf(SendDir, TypeOf(T1(1))), (chan<- T1)(nil)) + + // check String form of ChanDir + if crt.ChanDir().String() != "<-chan" { + t.Errorf("chan dir: have %q, want %q", crt.ChanDir().String(), "<-chan") + } + if cst.ChanDir().String() != "chan<-" { + t.Errorf("chan dir: have %q, want %q", cst.ChanDir().String(), "chan<-") + } +} + +func TestChanOfGC(t *testing.T) { + done := make(chan bool, 1) + go func() { + select { + case <-done: + case <-time.After(5 * time.Second): + panic("deadlock in TestChanOfGC") + } + }() + + defer func() { + done <- true + }() + + type T *uintptr + tt := TypeOf(T(nil)) + ct := ChanOf(BothDir, tt) + + // NOTE: The garbage collector handles allocated channels specially, + // so we have to save pointers to channels in x; the pointer code will + // use the gc info in the newly constructed chan type. 
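+ // The loop below fills n channels with n *uintptr values each, forces a
+ // collection, then drains every channel, checking that no element was
+ // lost or corrupted by the GC.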
+ const n = 100 + var x []any + for i := 0; i < n; i++ { + v := MakeChan(ct, n) + for j := 0; j < n; j++ { + p := new(uintptr) + *p = uintptr(i*n + j) + v.Send(ValueOf(p).Convert(tt)) + } + pv := New(ct) + pv.Elem().Set(v) + x = append(x, pv.Interface()) + } + runtime.GC() + + for i, xi := range x { + v := ValueOf(xi).Elem() + for j := 0; j < n; j++ { + pv, _ := v.Recv() + k := pv.Elem().Interface() + if k != uintptr(i*n+j) { + t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j) + } + } + } +} + +func TestMapOf(t *testing.T) { + // check construction and use of type not in binary + type K string + type V float64 + + v := MakeMap(MapOf(TypeOf(K("")), TypeOf(V(0)))) + runtime.GC() + v.SetMapIndex(ValueOf(K("a")), ValueOf(V(1))) + runtime.GC() + + s := fmt.Sprint(v.Interface()) + want := "map[a:1]" + if s != want { + t.Errorf("constructed map = %s, want %s", s, want) + } + + // check that type already in binary is found + checkSameType(t, MapOf(TypeOf(V(0)), TypeOf(K(""))), map[V]K(nil)) + + // check that invalid key type panics + shouldPanic("invalid key type", func() { MapOf(TypeOf((func())(nil)), TypeOf(false)) }) +} + +func TestMapOfGCKeys(t *testing.T) { + type T *uintptr + tt := TypeOf(T(nil)) + mt := MapOf(tt, TypeOf(false)) + + // NOTE: The garbage collector handles allocated maps specially, + // so we have to save pointers to maps in x; the pointer code will + // use the gc info in the newly constructed map type. + const n = 100 + var x []any + for i := 0; i < n; i++ { + v := MakeMap(mt) + for j := 0; j < n; j++ { + p := new(uintptr) + *p = uintptr(i*n + j) + v.SetMapIndex(ValueOf(p).Convert(tt), ValueOf(true)) + } + pv := New(mt) + pv.Elem().Set(v) + x = append(x, pv.Interface()) + } + runtime.GC() + + for i, xi := range x { + v := ValueOf(xi).Elem() + var out []int + for _, kv := range v.MapKeys() { + out = append(out, int(kv.Elem().Interface().(uintptr))) + } + sort.Ints(out) + for j, k := range out { + if k != i*n+j { + t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j) + } + } + } +} + +func TestMapOfGCValues(t *testing.T) { + type T *uintptr + tt := TypeOf(T(nil)) + mt := MapOf(TypeOf(1), tt) + + // NOTE: The garbage collector handles allocated maps specially, + // so we have to save pointers to maps in x; the pointer code will + // use the gc info in the newly constructed map type. 
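+ // Same scheme as TestMapOfGCKeys above, but with pointer-typed values:
+ // fill n maps, run the GC, then read every value back to make sure the
+ // pointers survived.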
+ const n = 100 + var x []any + for i := 0; i < n; i++ { + v := MakeMap(mt) + for j := 0; j < n; j++ { + p := new(uintptr) + *p = uintptr(i*n + j) + v.SetMapIndex(ValueOf(j), ValueOf(p).Convert(tt)) + } + pv := New(mt) + pv.Elem().Set(v) + x = append(x, pv.Interface()) + } + runtime.GC() + + for i, xi := range x { + v := ValueOf(xi).Elem() + for j := 0; j < n; j++ { + k := v.MapIndex(ValueOf(j)).Elem().Interface().(uintptr) + if k != uintptr(i*n+j) { + t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j) + } + } + } +} + +func TestTypelinksSorted(t *testing.T) { + var last string + for i, n := range TypeLinks() { + if n < last { + t.Errorf("typelinks not sorted: %q [%d] > %q [%d]", last, i-1, n, i) + } + last = n + } +} + +func TestFuncOf(t *testing.T) { + // check construction and use of type not in binary + type K string + type V float64 + + fn := func(args []Value) []Value { + if len(args) != 1 { + t.Errorf("args == %v, want exactly one arg", args) + } else if args[0].Type() != TypeOf(K("")) { + t.Errorf("args[0] is type %v, want %v", args[0].Type(), TypeOf(K(""))) + } else if args[0].String() != "gopher" { + t.Errorf("args[0] = %q, want %q", args[0].String(), "gopher") + } + return []Value{ValueOf(V(3.14))} + } + v := MakeFunc(FuncOf([]Type{TypeOf(K(""))}, []Type{TypeOf(V(0))}, false), fn) + + outs := v.Call([]Value{ValueOf(K("gopher"))}) + if len(outs) != 1 { + t.Fatalf("v.Call returned %v, want exactly one result", outs) + } else if outs[0].Type() != TypeOf(V(0)) { + t.Fatalf("c.Call[0] is type %v, want %v", outs[0].Type(), TypeOf(V(0))) + } + f := outs[0].Float() + if f != 3.14 { + t.Errorf("constructed func returned %f, want %f", f, 3.14) + } + + // check that types already in binary are found + type T1 int + testCases := []struct { + in, out []Type + variadic bool + want any + }{ + {in: []Type{TypeOf(T1(0))}, want: (func(T1))(nil)}, + {in: []Type{TypeOf(int(0))}, want: (func(int))(nil)}, + {in: []Type{SliceOf(TypeOf(int(0)))}, variadic: true, want: (func(...int))(nil)}, + {in: []Type{TypeOf(int(0))}, out: []Type{TypeOf(false)}, want: (func(int) bool)(nil)}, + {in: []Type{TypeOf(int(0))}, out: []Type{TypeOf(false), TypeOf("")}, want: (func(int) (bool, string))(nil)}, + } + for _, tt := range testCases { + checkSameType(t, FuncOf(tt.in, tt.out, tt.variadic), tt.want) + } + + // check that variadic requires last element be a slice. 
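+ // At the type level a variadic parameter is a slice (func(...int) takes
+ // a []int), so the last in type must have slice kind: the first call
+ // below is fine, while the two shouldPanic cases end with a bool and
+ // with an empty in list.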
+ FuncOf([]Type{TypeOf(1), TypeOf(""), SliceOf(TypeOf(false))}, nil, true) + shouldPanic("must be slice", func() { FuncOf([]Type{TypeOf(0), TypeOf(""), TypeOf(false)}, nil, true) }) + shouldPanic("must be slice", func() { FuncOf(nil, nil, true) }) + + //testcase for #54669 + var in []Type + for i := 0; i < 51; i++ { + in = append(in, TypeOf(1)) + } + FuncOf(in, nil, false) +} + +type R0 struct { + *R1 + *R2 + *R3 + *R4 +} + +type R1 struct { + *R5 + *R6 + *R7 + *R8 +} + +type R2 R1 +type R3 R1 +type R4 R1 + +type R5 struct { + *R9 + *R10 + *R11 + *R12 +} + +type R6 R5 +type R7 R5 +type R8 R5 + +type R9 struct { + *R13 + *R14 + *R15 + *R16 +} + +type R10 R9 +type R11 R9 +type R12 R9 + +type R13 struct { + *R17 + *R18 + *R19 + *R20 +} + +type R14 R13 +type R15 R13 +type R16 R13 + +type R17 struct { + *R21 + *R22 + *R23 + *R24 +} + +type R18 R17 +type R19 R17 +type R20 R17 + +type R21 struct { + X int +} + +type R22 R21 +type R23 R21 +type R24 R21 + +func TestEmbed(t *testing.T) { + typ := TypeOf(R0{}) + f, ok := typ.FieldByName("X") + if ok { + t.Fatalf(`FieldByName("X") should fail, returned %v`, f.Index) + } +} + +func TestAllocsInterfaceBig(t *testing.T) { + if testing.Short() { + t.Skip("skipping malloc count in short mode") + } + v := ValueOf(S{}) + if allocs := testing.AllocsPerRun(100, func() { v.Interface() }); allocs > 0 { + t.Error("allocs:", allocs) + } +} + +func TestAllocsInterfaceSmall(t *testing.T) { + if testing.Short() { + t.Skip("skipping malloc count in short mode") + } + v := ValueOf(int64(0)) + if allocs := testing.AllocsPerRun(100, func() { v.Interface() }); allocs > 0 { + t.Error("allocs:", allocs) + } +} + +// An exhaustive is a mechanism for writing exhaustive or stochastic tests. +// The basic usage is: +// +// for x.Next() { +// ... code using x.Maybe() or x.Choice(n) to create test cases ... +// } +// +// Each iteration of the loop returns a different set of results, until all +// possible result sets have been explored. It is okay for different code paths +// to make different method call sequences on x, but there must be no +// other source of non-determinism in the call sequences. +// +// When faced with a new decision, x chooses randomly. Future explorations +// of that path will choose successive values for the result. Thus, stopping +// the loop after a fixed number of iterations gives somewhat stochastic +// testing. +// +// Example: +// +// for x.Next() { +// v := make([]bool, x.Choose(4)) +// for i := range v { +// v[i] = x.Maybe() +// } +// fmt.Println(v) +// } +// +// prints (in some order): +// +// [] +// [false] +// [true] +// [false false] +// [false true] +// ... +// [true true] +// [false false false] +// ... +// [true true true] +// [false false false false] +// ... 
+// [true true true true] +type exhaustive struct { + r *rand.Rand + pos int + last []choice +} + +type choice struct { + off int + n int + max int +} + +func (x *exhaustive) Next() bool { + if x.r == nil { + x.r = rand.New(rand.NewSource(time.Now().UnixNano())) + } + x.pos = 0 + if x.last == nil { + x.last = []choice{} + return true + } + for i := len(x.last) - 1; i >= 0; i-- { + c := &x.last[i] + if c.n+1 < c.max { + c.n++ + x.last = x.last[:i+1] + return true + } + } + return false +} + +func (x *exhaustive) Choose(max int) int { + if x.pos >= len(x.last) { + x.last = append(x.last, choice{x.r.Intn(max), 0, max}) + } + c := &x.last[x.pos] + x.pos++ + if c.max != max { + panic("inconsistent use of exhaustive tester") + } + return (c.n + c.off) % max +} + +func (x *exhaustive) Maybe() bool { + return x.Choose(2) == 1 +} + +func GCFunc(args []Value) []Value { + runtime.GC() + return []Value{} +} + +func TestReflectFuncTraceback(t *testing.T) { + f := MakeFunc(TypeOf(func() {}), GCFunc) + f.Call([]Value{}) +} + +func TestReflectMethodTraceback(t *testing.T) { + p := Point{3, 4} + m := ValueOf(p).MethodByName("GCMethod") + i := ValueOf(m.Interface()).Call([]Value{ValueOf(5)})[0].Int() + if i != 8 { + t.Errorf("Call returned %d; want 8", i) + } +} + +func TestSmallZero(t *testing.T) { + type T [10]byte + typ := TypeOf(T{}) + if allocs := testing.AllocsPerRun(100, func() { Zero(typ) }); allocs > 0 { + t.Errorf("Creating small zero values caused %f allocs, want 0", allocs) + } +} + +func TestBigZero(t *testing.T) { + const size = 1 << 10 + var v [size]byte + z := Zero(ValueOf(v).Type()).Interface().([size]byte) + for i := 0; i < size; i++ { + if z[i] != 0 { + t.Fatalf("Zero object not all zero, index %d", i) + } + } +} + +func TestZeroSet(t *testing.T) { + type T [16]byte + type S struct { + a uint64 + T T + b uint64 + } + v := S{ + a: 0xaaaaaaaaaaaaaaaa, + T: T{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}, + b: 0xbbbbbbbbbbbbbbbb, + } + ValueOf(&v).Elem().Field(1).Set(Zero(TypeOf(T{}))) + if v != (S{ + a: 0xaaaaaaaaaaaaaaaa, + b: 0xbbbbbbbbbbbbbbbb, + }) { + t.Fatalf("Setting a field to a Zero value didn't work") + } +} + +func TestFieldByIndexNil(t *testing.T) { + type P struct { + F int + } + type T struct { + *P + } + v := ValueOf(T{}) + + v.FieldByName("P") // should be fine + + defer func() { + if err := recover(); err == nil { + t.Fatalf("no error") + } else if !strings.Contains(fmt.Sprint(err), "nil pointer to embedded struct") { + t.Fatalf(`err=%q, wanted error containing "nil pointer to embedded struct"`, err) + } + }() + v.FieldByName("F") // should panic + + t.Fatalf("did not panic") +} + +// Given +// type Outer struct { +// *Inner +// ... +// } +// the compiler generates the implementation of (*Outer).M dispatching to the embedded Inner. +// The implementation is logically: +// func (p *Outer) M() { +// (p.Inner).M() +// } +// but since the only change here is the replacement of one pointer receiver with another, +// the actual generated code overwrites the original receiver with the p.Inner pointer and +// then jumps to the M method expecting the *Inner receiver. +// +// During reflect.Value.Call, we create an argument frame and the associated data structures +// to describe it to the garbage collector, populate the frame, call reflect.call to +// run a function call using that frame, and then copy the results back out of the frame. 
+// The reflect.call function does a memmove of the frame structure onto the
+// stack (to set up the inputs), runs the call, and then memmoves the stack back to
+// the frame structure (to preserve the outputs).
+//
+// Originally reflect.call did not distinguish inputs from outputs: both memmoves
+// were for the full stack frame. However, in the case where the called function was
+// one of these wrappers, the rewritten receiver is almost certainly a different type
+// than the original receiver. This is not a problem on the stack, where we use the
+// program counter to determine the type information and understand that
+// during (*Outer).M the receiver is an *Outer while during (*Inner).M the receiver in the same
+// memory word is now an *Inner. But in the statically typed argument frame created
+// by reflect, the receiver is always an *Outer. Copying the modified receiver pointer
+// off the stack into the frame will store an *Inner there, and then if a garbage collection
+// happens to scan that argument frame before it is discarded, it will scan the *Inner
+// memory as if it were an *Outer. If the two have different memory layouts, the
+// collection will interpret the memory incorrectly.
+//
+// One such possible incorrect interpretation is to treat two arbitrary memory words
+// (Inner.P1 and Inner.P2 below) as an interface (Outer.R below). Because interpreting
+// an interface requires dereferencing the itab word, the misinterpretation will try to
+// dereference Inner.P1, causing a crash during garbage collection.
+//
+// This came up in a real program in issue 7725.
+
+type Outer struct {
+ *Inner
+ R io.Reader
+}
+
+type Inner struct {
+ X *Outer
+ P1 uintptr
+ P2 uintptr
+}
+
+func (pi *Inner) M() {
+ // Clear references to pi so that the only way the
+ // garbage collection will find the pointer is in the
+ // argument frame, typed as a *Outer.
+ pi.X.Inner = nil
+
+ // Set up an interface value that will cause a crash.
+ // P1 = 1 is non-zero, so the interface looks non-nil.
+ // P2 = pi ensures that the data word points into the
+ // allocated heap; if not the collection skips the interface
+ // value as irrelevant, without dereferencing P1.
+ pi.P1 = 1
+ pi.P2 = uintptr(unsafe.Pointer(pi))
+}
+
+func TestCallMethodJump(t *testing.T) {
+ // In reflect.Value.Call, trigger a garbage collection after reflect.call
+ // returns but before the args frame has been discarded.
+ // This is a little clumsy but makes the failure repeatable.
+ *CallGC = true
+
+ p := &Outer{Inner: new(Inner)}
+ p.Inner.X = p
+ ValueOf(p).Method(0).Call(nil)
+
+ // Stop garbage collecting during reflect.call.
+ *CallGC = false
+}
+
+func TestCallArgLive(t *testing.T) {
+ type T struct{ X, Y *string } // pointerful aggregate
+
+ F := func(t T) { *t.X = "ok" }
+
+ // In reflect.Value.Call, trigger a garbage collection in reflect.call
+ // between marshaling the arguments and the actual call.
+ *CallGC = true
+
+ x := new(string)
+ runtime.SetFinalizer(x, func(p *string) {
+ if *p != "ok" {
+ t.Errorf("x dead prematurely")
+ }
+ })
+ v := T{x, nil}
+
+ ValueOf(F).Call([]Value{ValueOf(v)})
+
+ // Stop garbage collecting during reflect.call.
+ *CallGC = false +} + +func TestMakeFuncStackCopy(t *testing.T) { + target := func(in []Value) []Value { + runtime.GC() + useStack(16) + return []Value{ValueOf(9)} + } + + var concrete func(*int, int) int + fn := MakeFunc(ValueOf(concrete).Type(), target) + ValueOf(&concrete).Elem().Set(fn) + x := concrete(nil, 7) + if x != 9 { + t.Errorf("have %#q want 9", x) + } +} + +// use about n KB of stack +func useStack(n int) { + if n == 0 { + return + } + var b [1024]byte // makes frame about 1KB + useStack(n - 1 + int(b[99])) +} + +type Impl struct{} + +func (Impl) F() {} + +func TestValueString(t *testing.T) { + rv := ValueOf(Impl{}) + if rv.String() != "" { + t.Errorf("ValueOf(Impl{}).String() = %q, want %q", rv.String(), "") + } + + method := rv.Method(0) + if method.String() != "" { + t.Errorf("ValueOf(Impl{}).Method(0).String() = %q, want %q", method.String(), "") + } +} + +func TestInvalid(t *testing.T) { + // Used to have inconsistency between IsValid() and Kind() != Invalid. + type T struct{ v any } + + v := ValueOf(T{}).Field(0) + if v.IsValid() != true || v.Kind() != Interface { + t.Errorf("field: IsValid=%v, Kind=%v, want true, Interface", v.IsValid(), v.Kind()) + } + v = v.Elem() + if v.IsValid() != false || v.Kind() != Invalid { + t.Errorf("field elem: IsValid=%v, Kind=%v, want false, Invalid", v.IsValid(), v.Kind()) + } +} + +// Issue 8917. +func TestLargeGCProg(t *testing.T) { + fv := ValueOf(func([256]*byte) {}) + fv.Call([]Value{ValueOf([256]*byte{})}) +} + +func fieldIndexRecover(t Type, i int) (recovered any) { + defer func() { + recovered = recover() + }() + + t.Field(i) + return +} + +// Issue 15046. +func TestTypeFieldOutOfRangePanic(t *testing.T) { + typ := TypeOf(struct{ X int }{10}) + testIndices := [...]struct { + i int + mustPanic bool + }{ + 0: {-2, true}, + 1: {0, false}, + 2: {1, true}, + 3: {1 << 10, true}, + } + for i, tt := range testIndices { + recoveredErr := fieldIndexRecover(typ, tt.i) + if tt.mustPanic { + if recoveredErr == nil { + t.Errorf("#%d: fieldIndex %d expected to panic", i, tt.i) + } + } else { + if recoveredErr != nil { + t.Errorf("#%d: got err=%v, expected no panic", i, recoveredErr) + } + } + } +} + +// Issue 9179. +func TestCallGC(t *testing.T) { + f := func(a, b, c, d, e string) { + } + g := func(in []Value) []Value { + runtime.GC() + return nil + } + typ := ValueOf(f).Type() + f2 := MakeFunc(typ, g).Interface().(func(string, string, string, string, string)) + f2("four", "five5", "six666", "seven77", "eight888") +} + +// Issue 18635 (function version). +func TestKeepFuncLive(t *testing.T) { + // Test that we keep makeFuncImpl live as long as it is + // referenced on the stack. + typ := TypeOf(func(i int) {}) + var f, g func(in []Value) []Value + f = func(in []Value) []Value { + clobber() + i := int(in[0].Int()) + if i > 0 { + // We can't use Value.Call here because + // runtime.call* will keep the makeFuncImpl + // alive. However, by converting it to an + // interface value and calling that, + // reflect.callReflect is the only thing that + // can keep the makeFuncImpl live. + // + // Alternate between f and g so that if we do + // reuse the memory prematurely it's more + // likely to get obviously corrupted. 
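+ // (f and g call each other through freshly constructed funcs,
+ // leapfrogging down until i reaches 0.)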
+ MakeFunc(typ, g).Interface().(func(i int))(i - 1) + } + return nil + } + g = func(in []Value) []Value { + clobber() + i := int(in[0].Int()) + MakeFunc(typ, f).Interface().(func(i int))(i) + return nil + } + MakeFunc(typ, f).Call([]Value{ValueOf(10)}) +} + +type UnExportedFirst int + +func (i UnExportedFirst) ΦExported() {} +func (i UnExportedFirst) unexported() {} + +// Issue 21177 +func TestMethodByNameUnExportedFirst(t *testing.T) { + defer func() { + if recover() != nil { + t.Errorf("should not panic") + } + }() + typ := TypeOf(UnExportedFirst(0)) + m, _ := typ.MethodByName("ΦExported") + if m.Name != "ΦExported" { + t.Errorf("got %s, expected ΦExported", m.Name) + } +} + +// Issue 18635 (method version). +type KeepMethodLive struct{} + +func (k KeepMethodLive) Method1(i int) { + clobber() + if i > 0 { + ValueOf(k).MethodByName("Method2").Interface().(func(i int))(i - 1) + } +} + +func (k KeepMethodLive) Method2(i int) { + clobber() + ValueOf(k).MethodByName("Method1").Interface().(func(i int))(i) +} + +func TestKeepMethodLive(t *testing.T) { + // Test that we keep methodValue live as long as it is + // referenced on the stack. + KeepMethodLive{}.Method1(10) +} + +// clobber tries to clobber unreachable memory. +func clobber() { + runtime.GC() + for i := 1; i < 32; i++ { + for j := 0; j < 10; j++ { + obj := make([]*byte, i) + sink = obj + } + } + runtime.GC() +} + +func TestFuncLayout(t *testing.T) { + align := func(x uintptr) uintptr { + return (x + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1) + } + var r []byte + if goarch.PtrSize == 4 { + r = []byte{0, 0, 0, 1} + } else { + r = []byte{0, 0, 1} + } + + type S struct { + a, b uintptr + c, d *byte + } + + type test struct { + rcvr, typ Type + size, argsize, retOffset uintptr + stack, gc, inRegs, outRegs []byte // pointer bitmap: 1 is pointer, 0 is scalar + intRegs, floatRegs int + floatRegSize uintptr + } + tests := []test{ + { + typ: ValueOf(func(a, b string) string { return "" }).Type(), + size: 6 * goarch.PtrSize, + argsize: 4 * goarch.PtrSize, + retOffset: 4 * goarch.PtrSize, + stack: []byte{1, 0, 1, 0, 1}, + gc: []byte{1, 0, 1, 0, 1}, + }, + { + typ: ValueOf(func(a, b, c uint32, p *byte, d uint16) {}).Type(), + size: align(align(3*4) + goarch.PtrSize + 2), + argsize: align(3*4) + goarch.PtrSize + 2, + retOffset: align(align(3*4) + goarch.PtrSize + 2), + stack: r, + gc: r, + }, + { + typ: ValueOf(func(a map[int]int, b uintptr, c any) {}).Type(), + size: 4 * goarch.PtrSize, + argsize: 4 * goarch.PtrSize, + retOffset: 4 * goarch.PtrSize, + stack: []byte{1, 0, 1, 1}, + gc: []byte{1, 0, 1, 1}, + }, + { + typ: ValueOf(func(a S) {}).Type(), + size: 4 * goarch.PtrSize, + argsize: 4 * goarch.PtrSize, + retOffset: 4 * goarch.PtrSize, + stack: []byte{0, 0, 1, 1}, + gc: []byte{0, 0, 1, 1}, + }, + { + rcvr: ValueOf((*byte)(nil)).Type(), + typ: ValueOf(func(a uintptr, b *int) {}).Type(), + size: 3 * goarch.PtrSize, + argsize: 3 * goarch.PtrSize, + retOffset: 3 * goarch.PtrSize, + stack: []byte{1, 0, 1}, + gc: []byte{1, 0, 1}, + }, + { + typ: ValueOf(func(a uintptr) {}).Type(), + size: goarch.PtrSize, + argsize: goarch.PtrSize, + retOffset: goarch.PtrSize, + stack: []byte{}, + gc: []byte{}, + }, + { + typ: ValueOf(func() uintptr { return 0 }).Type(), + size: goarch.PtrSize, + argsize: 0, + retOffset: 0, + stack: []byte{}, + gc: []byte{}, + }, + { + rcvr: ValueOf(uintptr(0)).Type(), + typ: ValueOf(func(a uintptr) {}).Type(), + size: 2 * goarch.PtrSize, + argsize: 2 * goarch.PtrSize, + retOffset: 2 * goarch.PtrSize, + stack: []byte{1}, + gc: 
[]byte{1}, + // Note: this one is tricky, as the receiver is not a pointer. But we + // pass the receiver by reference to the autogenerated pointer-receiver + // version of the function. + }, + // TODO(mknyszek): Add tests for non-zero register count. + } + for _, lt := range tests { + name := lt.typ.String() + if lt.rcvr != nil { + name = lt.rcvr.String() + "." + name + } + t.Run(name, func(t *testing.T) { + defer SetArgRegs(SetArgRegs(lt.intRegs, lt.floatRegs, lt.floatRegSize)) + + typ, argsize, retOffset, stack, gc, inRegs, outRegs, ptrs := FuncLayout(lt.typ, lt.rcvr) + if typ.Size() != lt.size { + t.Errorf("funcLayout(%v, %v).size=%d, want %d", lt.typ, lt.rcvr, typ.Size(), lt.size) + } + if argsize != lt.argsize { + t.Errorf("funcLayout(%v, %v).argsize=%d, want %d", lt.typ, lt.rcvr, argsize, lt.argsize) + } + if retOffset != lt.retOffset { + t.Errorf("funcLayout(%v, %v).retOffset=%d, want %d", lt.typ, lt.rcvr, retOffset, lt.retOffset) + } + if !bytes.Equal(stack, lt.stack) { + t.Errorf("funcLayout(%v, %v).stack=%v, want %v", lt.typ, lt.rcvr, stack, lt.stack) + } + if !bytes.Equal(gc, lt.gc) { + t.Errorf("funcLayout(%v, %v).gc=%v, want %v", lt.typ, lt.rcvr, gc, lt.gc) + } + if !bytes.Equal(inRegs, lt.inRegs) { + t.Errorf("funcLayout(%v, %v).inRegs=%v, want %v", lt.typ, lt.rcvr, inRegs, lt.inRegs) + } + if !bytes.Equal(outRegs, lt.outRegs) { + t.Errorf("funcLayout(%v, %v).outRegs=%v, want %v", lt.typ, lt.rcvr, outRegs, lt.outRegs) + } + if ptrs && len(stack) == 0 || !ptrs && len(stack) > 0 { + t.Errorf("funcLayout(%v, %v) pointers flag=%v, want %v", lt.typ, lt.rcvr, ptrs, !ptrs) + } + }) + } +} + +// trimBitmap removes trailing 0 elements from b and returns the result. +func trimBitmap(b []byte) []byte { + for len(b) > 0 && b[len(b)-1] == 0 { + b = b[:len(b)-1] + } + return b +} + +func verifyGCBits(t *testing.T, typ Type, bits []byte) { + heapBits := GCBits(New(typ).Interface()) + + // Trim scalars at the end, as bits might end in zero, + // e.g. with rep(2, lit(1, 0)). + bits = trimBitmap(bits) + + if bytes.HasPrefix(heapBits, bits) { + // Just the prefix matching is OK. + // + // The Go runtime's pointer/scalar iterator generates pointers beyond + // the size of the type, up to the size of the size class. This space + // is safe for the GC to scan since it's zero, and GCBits checks to + // make sure that's true. But we need to handle the fact that the bitmap + // may be larger than we expect. + return + } + _, _, line, _ := runtime.Caller(1) + t.Errorf("line %d: heapBits incorrect for %v\nhave %v\nwant %v", line, typ, heapBits, bits) +} + +func verifyGCBitsSlice(t *testing.T, typ Type, cap int, bits []byte) { + // Creating a slice causes the runtime to repeat a bitmap, + // which exercises a different path from making the compiler + // repeat a bitmap for a small array or executing a repeat in + // a GC program. + val := MakeSlice(typ, 0, cap) + data := NewAt(typ.Elem(), val.UnsafePointer()) + heapBits := GCBits(data.Interface()) + // Repeat the bitmap for the slice size, trimming scalars in + // the last element. + bits = trimBitmap(rep(cap, bits)) + if bytes.Equal(heapBits, bits) { + return + } + if len(heapBits) > len(bits) && bytes.Equal(heapBits[:len(bits)], bits) { + // Just the prefix matching is OK. 
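+ // (See the longer comment in verifyGCBits above: the runtime may report
+ // extra zero bits up to the size of the allocation's size class.)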
+ return + } + _, _, line, _ := runtime.Caller(1) + t.Errorf("line %d: heapBits incorrect for make(%v, 0, %v)\nhave %v\nwant %v", line, typ, cap, heapBits, bits) +} + +func TestGCBits(t *testing.T) { + verifyGCBits(t, TypeOf((*byte)(nil)), []byte{1}) + + // Building blocks for types seen by the compiler (like [2]Xscalar). + // The compiler will create the type structures for the derived types, + // including their GC metadata. + type Xscalar struct{ x uintptr } + type Xptr struct{ x *byte } + type Xptrscalar struct { + *byte + uintptr + } + type Xscalarptr struct { + uintptr + *byte + } + type Xbigptrscalar struct { + _ [100]*byte + _ [100]uintptr + } + + var Tscalar, Tint64, Tptr, Tscalarptr, Tptrscalar, Tbigptrscalar Type + { + // Building blocks for types constructed by reflect. + // This code is in a separate block so that code below + // cannot accidentally refer to these. + // The compiler must NOT see types derived from these + // (for example, [2]Scalar must NOT appear in the program), + // or else reflect will use it instead of having to construct one. + // The goal is to test the construction. + type Scalar struct{ x uintptr } + type Ptr struct{ x *byte } + type Ptrscalar struct { + *byte + uintptr + } + type Scalarptr struct { + uintptr + *byte + } + type Bigptrscalar struct { + _ [100]*byte + _ [100]uintptr + } + type Int64 int64 + Tscalar = TypeOf(Scalar{}) + Tint64 = TypeOf(Int64(0)) + Tptr = TypeOf(Ptr{}) + Tscalarptr = TypeOf(Scalarptr{}) + Tptrscalar = TypeOf(Ptrscalar{}) + Tbigptrscalar = TypeOf(Bigptrscalar{}) + } + + empty := []byte{} + + verifyGCBits(t, TypeOf(Xscalar{}), empty) + verifyGCBits(t, Tscalar, empty) + verifyGCBits(t, TypeOf(Xptr{}), lit(1)) + verifyGCBits(t, Tptr, lit(1)) + verifyGCBits(t, TypeOf(Xscalarptr{}), lit(0, 1)) + verifyGCBits(t, Tscalarptr, lit(0, 1)) + verifyGCBits(t, TypeOf(Xptrscalar{}), lit(1)) + verifyGCBits(t, Tptrscalar, lit(1)) + + verifyGCBits(t, TypeOf([0]Xptr{}), empty) + verifyGCBits(t, ArrayOf(0, Tptr), empty) + verifyGCBits(t, TypeOf([1]Xptrscalar{}), lit(1)) + verifyGCBits(t, ArrayOf(1, Tptrscalar), lit(1)) + verifyGCBits(t, TypeOf([2]Xscalar{}), empty) + verifyGCBits(t, ArrayOf(2, Tscalar), empty) + verifyGCBits(t, TypeOf([10000]Xscalar{}), empty) + verifyGCBits(t, ArrayOf(10000, Tscalar), empty) + verifyGCBits(t, TypeOf([2]Xptr{}), lit(1, 1)) + verifyGCBits(t, ArrayOf(2, Tptr), lit(1, 1)) + verifyGCBits(t, TypeOf([10000]Xptr{}), rep(10000, lit(1))) + verifyGCBits(t, ArrayOf(10000, Tptr), rep(10000, lit(1))) + verifyGCBits(t, TypeOf([2]Xscalarptr{}), lit(0, 1, 0, 1)) + verifyGCBits(t, ArrayOf(2, Tscalarptr), lit(0, 1, 0, 1)) + verifyGCBits(t, TypeOf([10000]Xscalarptr{}), rep(10000, lit(0, 1))) + verifyGCBits(t, ArrayOf(10000, Tscalarptr), rep(10000, lit(0, 1))) + verifyGCBits(t, TypeOf([2]Xptrscalar{}), lit(1, 0, 1)) + verifyGCBits(t, ArrayOf(2, Tptrscalar), lit(1, 0, 1)) + verifyGCBits(t, TypeOf([10000]Xptrscalar{}), rep(10000, lit(1, 0))) + verifyGCBits(t, ArrayOf(10000, Tptrscalar), rep(10000, lit(1, 0))) + verifyGCBits(t, TypeOf([1][10000]Xptrscalar{}), rep(10000, lit(1, 0))) + verifyGCBits(t, ArrayOf(1, ArrayOf(10000, Tptrscalar)), rep(10000, lit(1, 0))) + verifyGCBits(t, TypeOf([2][10000]Xptrscalar{}), rep(2*10000, lit(1, 0))) + verifyGCBits(t, ArrayOf(2, ArrayOf(10000, Tptrscalar)), rep(2*10000, lit(1, 0))) + verifyGCBits(t, TypeOf([4]Xbigptrscalar{}), join(rep(3, join(rep(100, lit(1)), rep(100, lit(0)))), rep(100, lit(1)))) + verifyGCBits(t, ArrayOf(4, Tbigptrscalar), join(rep(3, join(rep(100, lit(1)), rep(100, lit(0)))), 
rep(100, lit(1)))) + + verifyGCBitsSlice(t, TypeOf([]Xptr{}), 0, empty) + verifyGCBitsSlice(t, SliceOf(Tptr), 0, empty) + verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 1, lit(1)) + verifyGCBitsSlice(t, SliceOf(Tptrscalar), 1, lit(1)) + verifyGCBitsSlice(t, TypeOf([]Xscalar{}), 2, lit(0)) + verifyGCBitsSlice(t, SliceOf(Tscalar), 2, lit(0)) + verifyGCBitsSlice(t, TypeOf([]Xscalar{}), 10000, lit(0)) + verifyGCBitsSlice(t, SliceOf(Tscalar), 10000, lit(0)) + verifyGCBitsSlice(t, TypeOf([]Xptr{}), 2, lit(1)) + verifyGCBitsSlice(t, SliceOf(Tptr), 2, lit(1)) + verifyGCBitsSlice(t, TypeOf([]Xptr{}), 10000, lit(1)) + verifyGCBitsSlice(t, SliceOf(Tptr), 10000, lit(1)) + verifyGCBitsSlice(t, TypeOf([]Xscalarptr{}), 2, lit(0, 1)) + verifyGCBitsSlice(t, SliceOf(Tscalarptr), 2, lit(0, 1)) + verifyGCBitsSlice(t, TypeOf([]Xscalarptr{}), 10000, lit(0, 1)) + verifyGCBitsSlice(t, SliceOf(Tscalarptr), 10000, lit(0, 1)) + verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 2, lit(1, 0)) + verifyGCBitsSlice(t, SliceOf(Tptrscalar), 2, lit(1, 0)) + verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 10000, lit(1, 0)) + verifyGCBitsSlice(t, SliceOf(Tptrscalar), 10000, lit(1, 0)) + verifyGCBitsSlice(t, TypeOf([][10000]Xptrscalar{}), 1, rep(10000, lit(1, 0))) + verifyGCBitsSlice(t, SliceOf(ArrayOf(10000, Tptrscalar)), 1, rep(10000, lit(1, 0))) + verifyGCBitsSlice(t, TypeOf([][10000]Xptrscalar{}), 2, rep(10000, lit(1, 0))) + verifyGCBitsSlice(t, SliceOf(ArrayOf(10000, Tptrscalar)), 2, rep(10000, lit(1, 0))) + verifyGCBitsSlice(t, TypeOf([]Xbigptrscalar{}), 4, join(rep(100, lit(1)), rep(100, lit(0)))) + verifyGCBitsSlice(t, SliceOf(Tbigptrscalar), 4, join(rep(100, lit(1)), rep(100, lit(0)))) + + verifyGCBits(t, TypeOf((chan [100]Xscalar)(nil)), lit(1)) + verifyGCBits(t, ChanOf(BothDir, ArrayOf(100, Tscalar)), lit(1)) + + verifyGCBits(t, TypeOf((func([10000]Xscalarptr))(nil)), lit(1)) + verifyGCBits(t, FuncOf([]Type{ArrayOf(10000, Tscalarptr)}, nil, false), lit(1)) + + verifyGCBits(t, TypeOf((map[[10000]Xscalarptr]Xscalar)(nil)), lit(1)) + verifyGCBits(t, MapOf(ArrayOf(10000, Tscalarptr), Tscalar), lit(1)) + + verifyGCBits(t, TypeOf((*[10000]Xscalar)(nil)), lit(1)) + verifyGCBits(t, PointerTo(ArrayOf(10000, Tscalar)), lit(1)) + + verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1)) + verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1)) + + hdr := make([]byte, bucketCount/goarch.PtrSize) + + verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) { + verifyGCBits(t, MapBucketOf(k, e), want) + verifyGCBits(t, CachedBucketOf(TypeOf(m)), want) + } + verifyMapBucket(t, + Tscalar, Tptr, + map[Xscalar]Xptr(nil), + join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1))) + verifyMapBucket(t, + Tscalarptr, Tptr, + map[Xscalarptr]Xptr(nil), + join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1))) + verifyMapBucket(t, Tint64, Tptr, + map[int64]Xptr(nil), + join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1))) + verifyMapBucket(t, + Tscalar, Tscalar, + map[Xscalar]Xscalar(nil), + empty) + verifyMapBucket(t, + ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar), + map[[2]Xscalarptr][3]Xptrscalar(nil), + join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1))) + verifyMapBucket(t, + ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar), + map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil), + join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 
0)), lit(1))) + verifyMapBucket(t, + ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar), + map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil), + join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1))) + verifyMapBucket(t, + ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar), + map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil), + join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1))) + verifyMapBucket(t, + ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar), + map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil), + join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1))) +} + +func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) } +func join(b ...[]byte) []byte { return bytes.Join(b, nil) } +func lit(x ...byte) []byte { return x } + +func TestTypeOfTypeOf(t *testing.T) { + // Check that all the type constructors return concrete *rtype implementations. + // It's difficult to test directly because the reflect package is only at arm's length. + // The easiest thing to do is just call a function that crashes if it doesn't get an *rtype. + check := func(name string, typ Type) { + if underlying := TypeOf(typ).String(); underlying != "*reflect.rtype" { + t.Errorf("%v returned %v, not *reflect.rtype", name, underlying) + } + } + + type T struct{ int } + check("TypeOf", TypeOf(T{})) + + check("ArrayOf", ArrayOf(10, TypeOf(T{}))) + check("ChanOf", ChanOf(BothDir, TypeOf(T{}))) + check("FuncOf", FuncOf([]Type{TypeOf(T{})}, nil, false)) + check("MapOf", MapOf(TypeOf(T{}), TypeOf(T{}))) + check("PtrTo", PointerTo(TypeOf(T{}))) + check("SliceOf", SliceOf(TypeOf(T{}))) +} + +type XM struct{ _ bool } + +func (*XM) String() string { return "" } + +func TestPtrToMethods(t *testing.T) { + var y struct{ XM } + yp := New(TypeOf(y)).Interface() + _, ok := yp.(fmt.Stringer) + if !ok { + t.Fatal("does not implement Stringer, but should") + } +} + +func TestMapAlloc(t *testing.T) { + m := ValueOf(make(map[int]int, 10)) + k := ValueOf(5) + v := ValueOf(7) + allocs := testing.AllocsPerRun(100, func() { + m.SetMapIndex(k, v) + }) + if allocs > 0.5 { + t.Errorf("allocs per map assignment: want 0 got %f", allocs) + } + + const size = 1000 + tmp := 0 + val := ValueOf(&tmp).Elem() + allocs = testing.AllocsPerRun(100, func() { + mv := MakeMapWithSize(TypeOf(map[int]int{}), size) + // Only adding half of the capacity to not trigger re-allocations due too many overloaded buckets. + for i := 0; i < size/2; i++ { + val.SetInt(int64(i)) + mv.SetMapIndex(val, val) + } + }) + if allocs > 10 { + t.Errorf("allocs per map assignment: want at most 10 got %f", allocs) + } + // Empirical testing shows that with capacity hint single run will trigger 3 allocations and without 91. I set + // the threshold to 10, to not make it overly brittle if something changes in the initial allocation of the + // map, but to still catch a regression where we keep re-allocating in the hashmap as new entries are added. +} + +func TestChanAlloc(t *testing.T) { + // Note: for a chan int, the return Value must be allocated, so we + // use a chan *int instead. 
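+ // (Receiving an int would force Recv to allocate fresh storage for each
+ // returned Value, while a *int is pointer-shaped and can be stored in
+ // the Value directly.)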
+ c := ValueOf(make(chan *int, 1)) + v := ValueOf(new(int)) + allocs := testing.AllocsPerRun(100, func() { + c.Send(v) + _, _ = c.Recv() + }) + if allocs < 0.5 || allocs > 1.5 { + t.Errorf("allocs per chan send/recv: want 1 got %f", allocs) + } + // Note: there is one allocation in reflect.recv which seems to be + // a limitation of escape analysis. If that is ever fixed the + // allocs < 0.5 condition will trigger and this test should be fixed. +} + +type TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678 int + +type nameTest struct { + v any + want string +} + +var nameTests = []nameTest{ + {(*int32)(nil), "int32"}, + {(*D1)(nil), "D1"}, + {(*[]D1)(nil), ""}, + {(*chan D1)(nil), ""}, + {(*func() D1)(nil), ""}, + {(*<-chan D1)(nil), ""}, + {(*chan<- D1)(nil), ""}, + {(*any)(nil), ""}, + {(*interface { + F() + })(nil), ""}, + {(*TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678)(nil), "TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678"}, +} + +func TestNames(t *testing.T) { + for _, test := range nameTests { + typ := TypeOf(test.v).Elem() + if got := typ.Name(); got != test.want { + t.Errorf("%v Name()=%q, want %q", typ, got, test.want) + } + } +} + +func TestExported(t *testing.T) { + type ΦExported struct{} + type φUnexported struct{} + type BigP *big + type P int + type p *P + type P2 p + type p3 p + + type exportTest struct { + v any + want bool + } + exportTests := []exportTest{ + {D1{}, true}, + {(*D1)(nil), true}, + {big{}, false}, + {(*big)(nil), false}, + {(BigP)(nil), true}, + {(*BigP)(nil), true}, + {ΦExported{}, true}, + {φUnexported{}, false}, + {P(0), true}, + {(p)(nil), false}, + {(P2)(nil), true}, + {(p3)(nil), false}, + } + + for i, test := range exportTests { + typ := TypeOf(test.v) + if got := IsExported(typ); got != test.want { + t.Errorf("%d: %s exported=%v, want %v", i, typ.Name(), got, test.want) + } + } +} + +func TestTypeStrings(t *testing.T) { + type stringTest struct { + typ Type + want string + } + stringTests := []stringTest{ + {TypeOf(func(int) {}), "func(int)"}, + {FuncOf([]Type{TypeOf(int(0))}, nil, false), "func(int)"}, + {TypeOf(XM{}), "reflect_test.XM"}, + {TypeOf(new(XM)), "*reflect_test.XM"}, + {TypeOf(new(XM).String), "func() string"}, + {TypeOf(new(XM)).Method(0).Type, "func(*reflect_test.XM) string"}, + {ChanOf(3, TypeOf(XM{})), "chan reflect_test.XM"}, + {MapOf(TypeOf(int(0)), TypeOf(XM{})), "map[int]reflect_test.XM"}, + {ArrayOf(3, TypeOf(XM{})), "[3]reflect_test.XM"}, + {ArrayOf(3, TypeOf(struct{}{})), "[3]struct {}"}, + } + + for i, test := range stringTests { + if got, want := test.typ.String(), test.want; got != want { + t.Errorf("type %d String()=%q, want %q", i, got, want) + } + } +} + +func TestOffsetLock(t *testing.T) { + var wg sync.WaitGroup + for i := 0; i < 4; i++ { + i := i + wg.Add(1) + go func() { + for j := 0; j < 50; j++ { + ResolveReflectName(fmt.Sprintf("OffsetLockName:%d:%d", i, j)) + } + wg.Done() + }() + } + wg.Wait() +} + +func TestSwapper(t *testing.T) { + 
type I int + var a, b, c I + type pair struct { + x, y int + } + type pairPtr struct { + x, y int + p *I + } + type S string + + tests := []struct { + in any + i, j int + want any + }{ + { + in: []int{1, 20, 300}, + i: 0, + j: 2, + want: []int{300, 20, 1}, + }, + { + in: []uintptr{1, 20, 300}, + i: 0, + j: 2, + want: []uintptr{300, 20, 1}, + }, + { + in: []int16{1, 20, 300}, + i: 0, + j: 2, + want: []int16{300, 20, 1}, + }, + { + in: []int8{1, 20, 100}, + i: 0, + j: 2, + want: []int8{100, 20, 1}, + }, + { + in: []*I{&a, &b, &c}, + i: 0, + j: 2, + want: []*I{&c, &b, &a}, + }, + { + in: []string{"eric", "sergey", "larry"}, + i: 0, + j: 2, + want: []string{"larry", "sergey", "eric"}, + }, + { + in: []S{"eric", "sergey", "larry"}, + i: 0, + j: 2, + want: []S{"larry", "sergey", "eric"}, + }, + { + in: []pair{{1, 2}, {3, 4}, {5, 6}}, + i: 0, + j: 2, + want: []pair{{5, 6}, {3, 4}, {1, 2}}, + }, + { + in: []pairPtr{{1, 2, &a}, {3, 4, &b}, {5, 6, &c}}, + i: 0, + j: 2, + want: []pairPtr{{5, 6, &c}, {3, 4, &b}, {1, 2, &a}}, + }, + } + + for i, tt := range tests { + inStr := fmt.Sprint(tt.in) + Swapper(tt.in)(tt.i, tt.j) + if !DeepEqual(tt.in, tt.want) { + t.Errorf("%d. swapping %v and %v of %v = %v; want %v", i, tt.i, tt.j, inStr, tt.in, tt.want) + } + } +} + +// TestUnaddressableField tests that the reflect package will not allow +// a type from another package to be used as a named type with an +// unexported field. +// +// This ensures that unexported fields cannot be modified by other packages. +func TestUnaddressableField(t *testing.T) { + var b Buffer // type defined in reflect, a different package + var localBuffer struct { + buf []byte + } + lv := ValueOf(&localBuffer).Elem() + rv := ValueOf(b) + shouldPanic("Set", func() { + lv.Set(rv) + }) +} + +type Tint int + +type Tint2 = Tint + +type Talias1 struct { + byte + uint8 + int + int32 + rune +} + +type Talias2 struct { + Tint + Tint2 +} + +func TestAliasNames(t *testing.T) { + t1 := Talias1{byte: 1, uint8: 2, int: 3, int32: 4, rune: 5} + out := fmt.Sprintf("%#v", t1) + want := "reflect_test.Talias1{byte:0x1, uint8:0x2, int:3, int32:4, rune:5}" + if out != want { + t.Errorf("Talias1 print:\nhave: %s\nwant: %s", out, want) + } + + t2 := Talias2{Tint: 1, Tint2: 2} + out = fmt.Sprintf("%#v", t2) + want = "reflect_test.Talias2{Tint:1, Tint2:2}" + if out != want { + t.Errorf("Talias2 print:\nhave: %s\nwant: %s", out, want) + } +} + +func TestIssue22031(t *testing.T) { + type s []struct{ C int } + + type t1 struct{ s } + type t2 struct{ f s } + + tests := []Value{ + ValueOf(t1{s{{}}}).Field(0).Index(0).Field(0), + ValueOf(t2{s{{}}}).Field(0).Index(0).Field(0), + } + + for i, test := range tests { + if test.CanSet() { + t.Errorf("%d: CanSet: got true, want false", i) + } + } +} + +type NonExportedFirst int + +func (i NonExportedFirst) ΦExported() {} +func (i NonExportedFirst) nonexported() int { panic("wrong") } + +func TestIssue22073(t *testing.T) { + m := ValueOf(NonExportedFirst(0)).Method(0) + + if got := m.Type().NumOut(); got != 0 { + t.Errorf("NumOut: got %v, want 0", got) + } + + // Shouldn't panic. 
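+ // (m is the method Value for ΦExported, the type's only exported method;
+ // the bug in issue 22073 made this call fail.)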
+ m.Call(nil)
+}
+
+func TestMapIterNonEmptyMap(t *testing.T) {
+ m := map[string]int{"one": 1, "two": 2, "three": 3}
+ iter := ValueOf(m).MapRange()
+ if got, want := iterateToString(iter), `[one: 1, three: 3, two: 2]`; got != want {
+ t.Errorf("iterator returned %s (after sorting), want %s", got, want)
+ }
+}
+
+func TestMapIterNilMap(t *testing.T) {
+ var m map[string]int
+ iter := ValueOf(m).MapRange()
+ if got, want := iterateToString(iter), `[]`; got != want {
+ t.Errorf("non-empty result iterating over nil map: %s", got)
+ }
+}
+
+func TestMapIterReset(t *testing.T) {
+ iter := new(MapIter)
+
+ // Use of a zero iterator should panic.
+ func() {
+ defer func() { recover() }()
+ iter.Next()
+ t.Error("Next did not panic")
+ }()
+
+ // Reset to a new map should work.
+ m := map[string]int{"one": 1, "two": 2, "three": 3}
+ iter.Reset(ValueOf(m))
+ if got, want := iterateToString(iter), `[one: 1, three: 3, two: 2]`; got != want {
+ t.Errorf("iterator returned %s (after sorting), want %s", got, want)
+ }
+
+ // Reset to the zero Value should work, but iterating over it should panic.
+ iter.Reset(Value{})
+ func() {
+ defer func() { recover() }()
+ iter.Next()
+ t.Error("Next did not panic")
+ }()
+
+ // Reset to a different map with different types should work.
+ m2 := map[int]string{1: "one", 2: "two", 3: "three"}
+ iter.Reset(ValueOf(m2))
+ if got, want := iterateToString(iter), `[1: one, 2: two, 3: three]`; got != want {
+ t.Errorf("iterator returned %s (after sorting), want %s", got, want)
+ }
+
+ // Check that Reset, Next, and SetIterKey/SetIterValue play nicely together.
+ m3 := map[uint64]uint64{
+ 1 << 0: 1 << 1,
+ 1 << 1: 1 << 2,
+ 1 << 2: 1 << 3,
+ }
+ kv := New(TypeOf(uint64(0))).Elem()
+ for i := 0; i < 5; i++ {
+ var seenk, seenv uint64
+ iter.Reset(ValueOf(m3))
+ for iter.Next() {
+ kv.SetIterKey(iter)
+ seenk ^= kv.Uint()
+ kv.SetIterValue(iter)
+ seenv ^= kv.Uint()
+ }
+ if seenk != 0b111 {
+ t.Errorf("iteration yielded keys %b, want %b", seenk, 0b111)
+ }
+ if seenv != 0b1110 {
+ t.Errorf("iteration yielded values %b, want %b", seenv, 0b1110)
+ }
+ }
+
+ // Reset should not allocate.
+ n := int(testing.AllocsPerRun(10, func() {
+ iter.Reset(ValueOf(m2))
+ iter.Reset(Value{})
+ }))
+ if n > 0 {
+ t.Errorf("MapIter.Reset allocated %d times", n)
+ }
+}
+
+func TestMapIterSafety(t *testing.T) {
+ // Using a zero MapIter causes a panic, but not a crash.
+ func() {
+ defer func() { recover() }()
+ new(MapIter).Key()
+ t.Fatal("Key did not panic")
+ }()
+ func() {
+ defer func() { recover() }()
+ new(MapIter).Value()
+ t.Fatal("Value did not panic")
+ }()
+ func() {
+ defer func() { recover() }()
+ new(MapIter).Next()
+ t.Fatal("Next did not panic")
+ }()
+
+ // Calling Key/Value on a MapIter before Next
+ // causes a panic, but not a crash.
+ var m map[string]int
+ iter := ValueOf(m).MapRange()
+
+ func() {
+ defer func() { recover() }()
+ iter.Key()
+ t.Fatal("Key did not panic")
+ }()
+ func() {
+ defer func() { recover() }()
+ iter.Value()
+ t.Fatal("Value did not panic")
+ }()
+
+ // Calling Next, Key, or Value on an exhausted iterator
+ // causes a panic, but not a crash.
+ iter.Next() // -> false + func() { + defer func() { recover() }() + iter.Key() + t.Fatal("Key did not panic") + }() + func() { + defer func() { recover() }() + iter.Value() + t.Fatal("Value did not panic") + }() + func() { + defer func() { recover() }() + iter.Next() + t.Fatal("Next did not panic") + }() +} + +func TestMapIterNext(t *testing.T) { + // The first call to Next should reflect any + // insertions to the map since the iterator was created. + m := map[string]int{} + iter := ValueOf(m).MapRange() + m["one"] = 1 + if got, want := iterateToString(iter), `[one: 1]`; got != want { + t.Errorf("iterator returned deleted elements: got %s, want %s", got, want) + } +} + +func TestMapIterDelete0(t *testing.T) { + // Delete all elements before first iteration. + m := map[string]int{"one": 1, "two": 2, "three": 3} + iter := ValueOf(m).MapRange() + delete(m, "one") + delete(m, "two") + delete(m, "three") + if got, want := iterateToString(iter), `[]`; got != want { + t.Errorf("iterator returned deleted elements: got %s, want %s", got, want) + } +} + +func TestMapIterDelete1(t *testing.T) { + // Delete all elements after first iteration. + m := map[string]int{"one": 1, "two": 2, "three": 3} + iter := ValueOf(m).MapRange() + var got []string + for iter.Next() { + got = append(got, fmt.Sprint(iter.Key(), iter.Value())) + delete(m, "one") + delete(m, "two") + delete(m, "three") + } + if len(got) != 1 { + t.Errorf("iterator returned wrong number of elements: got %d, want 1", len(got)) + } +} + +// iterateToString returns the set of elements +// returned by an iterator in readable form. +func iterateToString(it *MapIter) string { + var got []string + for it.Next() { + line := fmt.Sprintf("%v: %v", it.Key(), it.Value()) + got = append(got, line) + } + sort.Strings(got) + return "[" + strings.Join(got, ", ") + "]" +} + +func TestConvertibleTo(t *testing.T) { + t1 := ValueOf(example1.MyStruct{}).Type() + t2 := ValueOf(example2.MyStruct{}).Type() + + // Shouldn't raise stack overflow + if t1.ConvertibleTo(t2) { + t.Fatalf("(%s).ConvertibleTo(%s) = true, want false", t1, t2) + } + + t3 := ValueOf([]example1.MyStruct{}).Type() + t4 := ValueOf([]example2.MyStruct{}).Type() + + if t3.ConvertibleTo(t4) { + t.Fatalf("(%s).ConvertibleTo(%s) = true, want false", t3, t4) + } +} + +func TestSetIter(t *testing.T) { + data := map[string]int{ + "foo": 1, + "bar": 2, + "baz": 3, + } + + m := ValueOf(data) + i := m.MapRange() + k := New(TypeOf("")).Elem() + v := New(TypeOf(0)).Elem() + shouldPanic("Value.SetIterKey called before Next", func() { + k.SetIterKey(i) + }) + shouldPanic("Value.SetIterValue called before Next", func() { + v.SetIterValue(i) + }) + data2 := map[string]int{} + for i.Next() { + k.SetIterKey(i) + v.SetIterValue(i) + data2[k.Interface().(string)] = v.Interface().(int) + } + if !DeepEqual(data, data2) { + t.Errorf("maps not equal, got %v want %v", data2, data) + } + shouldPanic("Value.SetIterKey called on exhausted iterator", func() { + k.SetIterKey(i) + }) + shouldPanic("Value.SetIterValue called on exhausted iterator", func() { + v.SetIterValue(i) + }) + + i.Reset(m) + i.Next() + shouldPanic("Value.SetIterKey using unaddressable value", func() { + ValueOf("").SetIterKey(i) + }) + shouldPanic("Value.SetIterValue using unaddressable value", func() { + ValueOf(0).SetIterValue(i) + }) + shouldPanic("value of type string is not assignable to type int", func() { + New(TypeOf(0)).Elem().SetIterKey(i) + }) + shouldPanic("value of type int is not assignable to type string", func() { + 
New(TypeOf("")).Elem().SetIterValue(i) + }) + + // Make sure assignment conversion works. + var x any + y := ValueOf(&x).Elem() + y.SetIterKey(i) + if _, ok := data[x.(string)]; !ok { + t.Errorf("got key %s which is not in map", x) + } + y.SetIterValue(i) + if x.(int) < 1 || x.(int) > 3 { + t.Errorf("got value %d which is not in map", x) + } + + // Try some key/value types which are direct interfaces. + a := 88 + b := 99 + pp := map[*int]*int{ + &a: &b, + } + i = ValueOf(pp).MapRange() + i.Next() + y.SetIterKey(i) + if got := *y.Interface().(*int); got != a { + t.Errorf("pointer incorrect: got %d want %d", got, a) + } + y.SetIterValue(i) + if got := *y.Interface().(*int); got != b { + t.Errorf("pointer incorrect: got %d want %d", got, b) + } + + // Make sure we panic assigning from an unexported field. + m = ValueOf(struct{ m map[string]int }{data}).Field(0) + for iter := m.MapRange(); iter.Next(); { + shouldPanic("using value obtained using unexported field", func() { + k.SetIterKey(iter) + }) + shouldPanic("using value obtained using unexported field", func() { + v.SetIterValue(iter) + }) + } +} + +func TestMethodCallValueCodePtr(t *testing.T) { + m := ValueOf(Point{}).Method(1) + want := MethodValueCallCodePtr() + if got := uintptr(m.UnsafePointer()); got != want { + t.Errorf("methodValueCall code pointer mismatched, want: %v, got: %v", want, got) + } + if got := m.Pointer(); got != want { + t.Errorf("methodValueCall code pointer mismatched, want: %v, got: %v", want, got) + } +} + +type A struct{} +type B[T any] struct{} + +func TestIssue50208(t *testing.T) { + want1 := "B[reflect_test.A]" + if got := TypeOf(new(B[A])).Elem().Name(); got != want1 { + t.Errorf("name of type parameter mismatched, want:%s, got:%s", want1, got) + } + want2 := "B[reflect_test.B[reflect_test.A]]" + if got := TypeOf(new(B[B[A]])).Elem().Name(); got != want2 { + t.Errorf("name of type parameter mismatched, want:%s, got:%s", want2, got) + } +} + +func TestNegativeKindString(t *testing.T) { + x := -1 + s := Kind(x).String() + want := "kind-1" + if s != want { + t.Fatalf("Kind(-1).String() = %q, want %q", s, want) + } +} + +type ( + namedBool bool + namedBytes []byte +) + +func TestValue_Cap(t *testing.T) { + a := &[3]int{1, 2, 3} + v := ValueOf(a) + if v.Cap() != cap(a) { + t.Errorf("Cap = %d want %d", v.Cap(), cap(a)) + } + + a = nil + v = ValueOf(a) + if v.Cap() != cap(a) { + t.Errorf("Cap = %d want %d", v.Cap(), cap(a)) + } + + getError := func(f func()) (errorStr string) { + defer func() { + e := recover() + if str, ok := e.(string); ok { + errorStr = str + } + }() + f() + return + } + e := getError(func() { + var ptr *int + ValueOf(ptr).Cap() + }) + wantStr := "reflect: call of reflect.Value.Cap on ptr to non-array Value" + if e != wantStr { + t.Errorf("error is %q, want %q", e, wantStr) + } +} + +func TestValue_Len(t *testing.T) { + a := &[3]int{1, 2, 3} + v := ValueOf(a) + if v.Len() != len(a) { + t.Errorf("Len = %d want %d", v.Len(), len(a)) + } + + a = nil + v = ValueOf(a) + if v.Len() != len(a) { + t.Errorf("Len = %d want %d", v.Len(), len(a)) + } + + getError := func(f func()) (errorStr string) { + defer func() { + e := recover() + if str, ok := e.(string); ok { + errorStr = str + } + }() + f() + return + } + e := getError(func() { + var ptr *int + ValueOf(ptr).Len() + }) + wantStr := "reflect: call of reflect.Value.Len on ptr to non-array Value" + if e != wantStr { + t.Errorf("error is %q, want %q", e, wantStr) + } +} + +func TestValue_Comparable(t *testing.T) { + var a int + var s []int + var i 
interface{} = a + var iSlice interface{} = s + var iArrayFalse interface{} = [2]interface{}{1, map[int]int{}} + var iArrayTrue interface{} = [2]interface{}{1, struct{ I interface{} }{1}} + var testcases = []struct { + value Value + comparable bool + deref bool + }{ + { + ValueOf(32), + true, + false, + }, + { + ValueOf(int8(1)), + true, + false, + }, + { + ValueOf(int16(1)), + true, + false, + }, + { + ValueOf(int32(1)), + true, + false, + }, + { + ValueOf(int64(1)), + true, + false, + }, + { + ValueOf(uint8(1)), + true, + false, + }, + { + ValueOf(uint16(1)), + true, + false, + }, + { + ValueOf(uint32(1)), + true, + false, + }, + { + ValueOf(uint64(1)), + true, + false, + }, + { + ValueOf(float32(1)), + true, + false, + }, + { + ValueOf(float64(1)), + true, + false, + }, + { + ValueOf(complex(float32(1), float32(1))), + true, + false, + }, + { + ValueOf(complex(float64(1), float64(1))), + true, + false, + }, + { + ValueOf("abc"), + true, + false, + }, + { + ValueOf(true), + true, + false, + }, + { + ValueOf(map[int]int{}), + false, + false, + }, + { + ValueOf([]int{}), + false, + false, + }, + { + Value{}, + false, + false, + }, + { + ValueOf(&a), + true, + false, + }, + { + ValueOf(&s), + true, + false, + }, + { + ValueOf(&i), + true, + true, + }, + { + ValueOf(&iSlice), + false, + true, + }, + { + ValueOf([2]int{}), + true, + false, + }, + { + ValueOf([2]map[int]int{}), + false, + false, + }, + { + ValueOf([0]func(){}), + false, + false, + }, + { + ValueOf([2]struct{ I interface{} }{{1}, {1}}), + true, + false, + }, + { + ValueOf([2]struct{ I interface{} }{{[]int{}}, {1}}), + false, + false, + }, + { + ValueOf([2]interface{}{1, struct{ I int }{1}}), + true, + false, + }, + { + ValueOf([2]interface{}{[1]interface{}{map[int]int{}}, struct{ I int }{1}}), + false, + false, + }, + { + ValueOf(&iArrayFalse), + false, + true, + }, + { + ValueOf(&iArrayTrue), + true, + true, + }, + } + + for _, cas := range testcases { + v := cas.value + if cas.deref { + v = v.Elem() + } + got := v.Comparable() + if got != cas.comparable { + t.Errorf("%T.Comparable = %t, want %t", v, got, cas.comparable) + } + } +} + +type ValueEqualTest struct { + v, u any + eq bool + vDeref, uDeref bool +} + +var equalI interface{} = 1 +var equalSlice interface{} = []int{1} +var nilInterface interface{} +var mapInterface interface{} = map[int]int{} + +var valueEqualTests = []ValueEqualTest{ + { + Value{}, Value{}, + true, + false, false, + }, + { + true, true, + true, + false, false, + }, + { + 1, 1, + true, + false, false, + }, + { + int8(1), int8(1), + true, + false, false, + }, + { + int16(1), int16(1), + true, + false, false, + }, + { + int32(1), int32(1), + true, + false, false, + }, + { + int64(1), int64(1), + true, + false, false, + }, + { + uint(1), uint(1), + true, + false, false, + }, + { + uint8(1), uint8(1), + true, + false, false, + }, + { + uint16(1), uint16(1), + true, + false, false, + }, + { + uint32(1), uint32(1), + true, + false, false, + }, + { + uint64(1), uint64(1), + true, + false, false, + }, + { + float32(1), float32(1), + true, + false, false, + }, + { + float64(1), float64(1), + true, + false, false, + }, + { + complex(1, 1), complex(1, 1), + true, + false, false, + }, + { + complex128(1 + 1i), complex128(1 + 1i), + true, + false, false, + }, + { + func() {}, nil, + false, + false, false, + }, + { + &equalI, 1, + true, + true, false, + }, + { + (chan int)(nil), nil, + false, + false, false, + }, + { + (chan int)(nil), (chan int)(nil), + true, + false, false, + }, + { + &equalI, &equalI, + true, + 
false, false,
+	},
+	{
+		struct{ i int }{1}, struct{ i int }{1},
+		true,
+		false, false,
+	},
+	{
+		struct{ i int }{1}, struct{ i int }{2},
+		false,
+		false, false,
+	},
+	{
+		&nilInterface, &nilInterface,
+		true,
+		true, true,
+	},
+	{
+		1, ValueOf(struct{ i int }{1}).Field(0),
+		true,
+		false, false,
+	},
+}
+
+func TestValue_Equal(t *testing.T) {
+	for _, test := range valueEqualTests {
+		var v, u Value
+		if vv, ok := test.v.(Value); ok {
+			v = vv
+		} else {
+			v = ValueOf(test.v)
+		}
+
+		if uu, ok := test.u.(Value); ok {
+			u = uu
+		} else {
+			u = ValueOf(test.u)
+		}
+		if test.vDeref {
+			v = v.Elem()
+		}
+
+		if test.uDeref {
+			u = u.Elem()
+		}
+
+		if r := v.Equal(u); r != test.eq {
+			t.Errorf("%s == %s got %t, want %t", v.Type(), u.Type(), r, test.eq)
+		}
+	}
+}
+
+func TestValue_EqualNonComparable(t *testing.T) {
+	var invalid = Value{} // ValueOf(nil)
+	var values = []Value{
+		// Value of slice is non-comparable.
+		ValueOf([]int(nil)),
+		ValueOf(([]int{})),
+
+		// Value of map is non-comparable.
+		ValueOf(map[int]int(nil)),
+		ValueOf((map[int]int{})),
+
+		// Value of func is non-comparable.
+		ValueOf(((func())(nil))),
+		ValueOf(func() {}),
+
+		// Value of struct is non-comparable because of non-comparable elements.
+		ValueOf((NonComparableStruct{})),
+
+		// Value of array is non-comparable because of non-comparable elements.
+		ValueOf([0]map[int]int{}),
+		ValueOf([0]func(){}),
+		ValueOf(([1]struct{ I interface{} }{{[]int{}}})),
+		ValueOf(([1]interface{}{[1]interface{}{map[int]int{}}})),
+	}
+	for _, value := range values {
+		// Value.Equal must panic when comparing two valid but non-comparable values.
+		shouldPanic("are not comparable", func() { value.Equal(value) })
+
+		// If one operand is non-comparable and the other is invalid, the expected result is always false.
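+		// (Equal only panics when both operands are valid; comparing a valid
+		// non-comparable value against the zero Value simply reports false.)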
+		if r := value.Equal(invalid); r != false {
+			t.Errorf("%s == invalid got %t, want false", value.Type(), r)
+		}
+	}
+}
+
+// TestInitFuncTypes calls Type.Method from many goroutines at once to make
+// sure the lazy initialization it performs is safe for concurrent use.
+func TestInitFuncTypes(t *testing.T) {
+	n := 100
+	var wg sync.WaitGroup
+
+	wg.Add(n)
+	for i := 0; i < n; i++ {
+		go func() {
+			defer wg.Done()
+			ipT := TypeOf(net.IP{})
+			for i := 0; i < ipT.NumMethod(); i++ {
+				_ = ipT.Method(i)
+			}
+		}()
+	}
+	wg.Wait()
+}
+
+func TestClear(t *testing.T) {
+	m := make(map[string]any, len(valueTests))
+	for _, tt := range valueTests {
+		m[tt.s] = tt.i
+	}
+	mapTestFn := func(v Value) bool { v.Clear(); return v.Len() == 0 }
+
+	s := make([]*pair, len(valueTests))
+	for i := range s {
+		s[i] = &valueTests[i]
+	}
+	sliceTestFn := func(v Value) bool {
+		v.Clear()
+		for i := 0; i < v.Len(); i++ {
+			if !v.Index(i).IsZero() {
+				return false
+			}
+		}
+		return true
+	}
+
+	panicTestFn := func(v Value) bool { shouldPanic("reflect.Value.Clear", func() { v.Clear() }); return true }
+
+	tests := []struct {
+		name     string
+		value    Value
+		testFunc func(v Value) bool
+	}{
+		{"map", ValueOf(m), mapTestFn},
+		{"slice no pointer", ValueOf([]int{1, 2, 3, 4, 5}), sliceTestFn},
+		{"slice has pointer", ValueOf(s), sliceTestFn},
+		{"non-map/slice", ValueOf(1), panicTestFn},
+	}
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+			if !tc.testFunc(tc.value) {
+				t.Errorf("unexpected result for value.Clear(): %v", tc.value)
+			}
+		})
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/reflect/arena.go b/platform/dbops/binaries/go/go/src/reflect/arena.go
new file mode 100644
index 0000000000000000000000000000000000000000..cac1a1da5eaaeff243eea1d2f2894740dbc48410
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/reflect/arena.go
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.arenas
+
+package reflect
+
+import "arena"
+
+// ArenaNew returns a Value representing a pointer to a new zero value for the
+// specified type, allocating storage for it in the provided arena. That is,
+// the returned Value's Type is PointerTo(typ).
+func ArenaNew(a *arena.Arena, typ Type) Value {
+	return ValueOf(arena_New(a, PointerTo(typ)))
+}
+
+func arena_New(a *arena.Arena, typ any) any
diff --git a/platform/dbops/binaries/go/go/src/reflect/asm_386.s b/platform/dbops/binaries/go/go/src/reflect/asm_386.s
new file mode 100644
index 0000000000000000000000000000000000000000..5bedea5807e6efcc8acf0b5429606b0067284988
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/reflect/asm_386.s
@@ -0,0 +1,38 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No argsize here, gc generates argsize info at call site.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$20
+	NO_LOCAL_POINTERS
+	MOVL	DX, 0(SP)
+	LEAL	argframe+0(FP), CX
+	MOVL	CX, 4(SP)
+	MOVB	$0, 16(SP)
+	LEAL	16(SP), AX
+	MOVL	AX, 8(SP)
+	MOVL	$0, 12(SP)
+	CALL	·callReflect(SB)
+	RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No argsize here, gc generates argsize info at call site. +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$20 + NO_LOCAL_POINTERS + MOVL DX, 0(SP) + LEAL argframe+0(FP), CX + MOVL CX, 4(SP) + MOVB $0, 16(SP) + LEAL 16(SP), AX + MOVL AX, 8(SP) + MOVL $0, 12(SP) + CALL ·callMethod(SB) + RET diff --git a/platform/dbops/binaries/go/go/src/reflect/asm_amd64.s b/platform/dbops/binaries/go/go/src/reflect/asm_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..d21d498063b55e382a1e473fcb5e6a501be88d36 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/asm_amd64.s @@ -0,0 +1,79 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +// The frames of each of the two functions below contain two locals, at offsets +// that are known to the runtime. +// +// The first local is a bool called retValid with a whole pointer-word reserved +// for it on the stack. The purpose of this word is so that the runtime knows +// whether the stack-allocated return space contains valid values for stack +// scanning. +// +// The second local is an abi.RegArgs value whose offset is also known to the +// runtime, so that a stack map for it can be constructed, since it contains +// pointers visible to the GC. +#define LOCAL_RETVALID 32 +#define LOCAL_REGARGS 40 + +// makeFuncStub is the code half of the function returned by MakeFunc. +// See the comment on the declaration of makeFuncStub in makefunc.go +// for more details. +// No arg size here; runtime pulls arg map out of the func value. +// This frame contains two locals. See the comment above LOCAL_RETVALID. +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$312 + NO_LOCAL_POINTERS + // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this + // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID. + LEAQ LOCAL_REGARGS(SP), R12 + CALL runtime·spillArgs(SB) + MOVQ DX, 24(SP) // outside of moveMakeFuncArgPtrs's arg area + MOVQ DX, 0(SP) + MOVQ R12, 8(SP) + CALL ·moveMakeFuncArgPtrs(SB) + MOVQ 24(SP), DX + MOVQ DX, 0(SP) + LEAQ argframe+0(FP), CX + MOVQ CX, 8(SP) + MOVB $0, LOCAL_RETVALID(SP) + LEAQ LOCAL_RETVALID(SP), AX + MOVQ AX, 16(SP) + LEAQ LOCAL_REGARGS(SP), AX + MOVQ AX, 24(SP) + CALL ·callReflect(SB) + LEAQ LOCAL_REGARGS(SP), R12 + CALL runtime·unspillArgs(SB) + RET + +// methodValueCall is the code half of the function returned by makeMethodValue. +// See the comment on the declaration of methodValueCall in makefunc.go +// for more details. +// No arg size here; runtime pulls arg map out of the func value. +// This frame contains two locals. See the comment above LOCAL_RETVALID. +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$312 + NO_LOCAL_POINTERS + // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this + // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID. 
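+	// The body mirrors makeFuncStub above: spill any register arguments into
+	// the abi.RegArgs area, let moveMakeFuncArgPtrs fix up the pointers there,
+	// then dispatch to callMethod with the same four stack arguments.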
+ LEAQ LOCAL_REGARGS(SP), R12 + CALL runtime·spillArgs(SB) + MOVQ DX, 24(SP) // outside of moveMakeFuncArgPtrs's arg area + MOVQ DX, 0(SP) + MOVQ R12, 8(SP) + CALL ·moveMakeFuncArgPtrs(SB) + MOVQ 24(SP), DX + MOVQ DX, 0(SP) + LEAQ argframe+0(FP), CX + MOVQ CX, 8(SP) + MOVB $0, LOCAL_RETVALID(SP) + LEAQ LOCAL_RETVALID(SP), AX + MOVQ AX, 16(SP) + LEAQ LOCAL_REGARGS(SP), AX + MOVQ AX, 24(SP) + CALL ·callMethod(SB) + LEAQ LOCAL_REGARGS(SP), R12 + CALL runtime·unspillArgs(SB) + RET diff --git a/platform/dbops/binaries/go/go/src/reflect/asm_arm.s b/platform/dbops/binaries/go/go/src/reflect/asm_arm.s new file mode 100644 index 0000000000000000000000000000000000000000..057c941f5915715fb042cf376b49589c41ba100c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/asm_arm.s @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "funcdata.h" + +// makeFuncStub is jumped to by the code generated by MakeFunc. +// See the comment on the declaration of makeFuncStub in makefunc.go +// for more details. +// No argsize here, gc generates argsize info at call site. +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$20 + NO_LOCAL_POINTERS + MOVW R7, 4(R13) + MOVW $argframe+0(FP), R1 + MOVW R1, 8(R13) + MOVW $0, R1 + MOVB R1, 20(R13) + ADD $20, R13, R1 + MOVW R1, 12(R13) + MOVW $0, R1 + MOVW R1, 16(R13) + BL ·callReflect(SB) + RET + +// methodValueCall is the code half of the function returned by makeMethodValue. +// See the comment on the declaration of methodValueCall in makefunc.go +// for more details. +// No argsize here, gc generates argsize info at call site. +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$20 + NO_LOCAL_POINTERS + MOVW R7, 4(R13) + MOVW $argframe+0(FP), R1 + MOVW R1, 8(R13) + MOVW $0, R1 + MOVB R1, 20(R13) + ADD $20, R13, R1 + MOVW R1, 12(R13) + MOVW $0, R1 + MOVW R1, 16(R13) + BL ·callMethod(SB) + RET diff --git a/platform/dbops/binaries/go/go/src/reflect/asm_arm64.s b/platform/dbops/binaries/go/go/src/reflect/asm_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..5e91e62aa145251afc8058150ef64b9f1508f265 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/asm_arm64.s @@ -0,0 +1,79 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "funcdata.h" + +// The frames of each of the two functions below contain two locals, at offsets +// that are known to the runtime. +// +// The first local is a bool called retValid with a whole pointer-word reserved +// for it on the stack. The purpose of this word is so that the runtime knows +// whether the stack-allocated return space contains valid values for stack +// scanning. +// +// The second local is an abi.RegArgs value whose offset is also known to the +// runtime, so that a stack map for it can be constructed, since it contains +// pointers visible to the GC. +#define LOCAL_RETVALID 40 +#define LOCAL_REGARGS 48 + +// The frame size of the functions below is +// 32 (args of callReflect) + 8 (bool + padding) + 392 (abi.RegArgs) = 432. + +// makeFuncStub is the code half of the function returned by MakeFunc. +// See the comment on the declaration of makeFuncStub in makefunc.go +// for more details. +// No arg size here, runtime pulls arg map out of the func value. 
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$432 + NO_LOCAL_POINTERS + // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this + // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID. + ADD $LOCAL_REGARGS, RSP, R20 + CALL runtime·spillArgs(SB) + MOVD R26, 32(RSP) // outside of moveMakeFuncArgPtrs's arg area + MOVD R26, R0 + MOVD R20, R1 + CALL ·moveMakeFuncArgPtrs(SB) + MOVD 32(RSP), R26 + MOVD R26, 8(RSP) + MOVD $argframe+0(FP), R3 + MOVD R3, 16(RSP) + MOVB $0, LOCAL_RETVALID(RSP) + ADD $LOCAL_RETVALID, RSP, R3 + MOVD R3, 24(RSP) + ADD $LOCAL_REGARGS, RSP, R3 + MOVD R3, 32(RSP) + CALL ·callReflect(SB) + ADD $LOCAL_REGARGS, RSP, R20 + CALL runtime·unspillArgs(SB) + RET + +// methodValueCall is the code half of the function returned by makeMethodValue. +// See the comment on the declaration of methodValueCall in makefunc.go +// for more details. +// No arg size here; runtime pulls arg map out of the func value. +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$432 + NO_LOCAL_POINTERS + // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this + // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID. + ADD $LOCAL_REGARGS, RSP, R20 + CALL runtime·spillArgs(SB) + MOVD R26, 32(RSP) // outside of moveMakeFuncArgPtrs's arg area + MOVD R26, R0 + MOVD R20, R1 + CALL ·moveMakeFuncArgPtrs(SB) + MOVD 32(RSP), R26 + MOVD R26, 8(RSP) + MOVD $argframe+0(FP), R3 + MOVD R3, 16(RSP) + MOVB $0, LOCAL_RETVALID(RSP) + ADD $LOCAL_RETVALID, RSP, R3 + MOVD R3, 24(RSP) + ADD $LOCAL_REGARGS, RSP, R3 + MOVD R3, 32(RSP) + CALL ·callMethod(SB) + ADD $LOCAL_REGARGS, RSP, R20 + CALL runtime·unspillArgs(SB) + RET diff --git a/platform/dbops/binaries/go/go/src/reflect/asm_loong64.s b/platform/dbops/binaries/go/go/src/reflect/asm_loong64.s new file mode 100644 index 0000000000000000000000000000000000000000..520f0afdd51625f2b375188ac6a30c4c00a63e37 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/asm_loong64.s @@ -0,0 +1,89 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "funcdata.h" + +#define REGCTXT R29 + +// The frames of each of the two functions below contain two locals, at offsets +// that are known to the runtime. +// +// The first local is a bool called retValid with a whole pointer-word reserved +// for it on the stack. The purpose of this word is so that the runtime knows +// whether the stack-allocated return space contains valid values for stack +// scanning. +// +// The second local is an abi.RegArgs value whose offset is also known to the +// runtime, so that a stack map for it can be constructed, since it contains +// pointers visible to the GC. +#define LOCAL_RETVALID 40 +#define LOCAL_REGARGS 48 + +// The frame size of the functions below is +// 32 (args of callReflect) + 8 (bool + padding) + 392 (abi.RegArgs) = 432. + +// makeFuncStub is the code half of the function returned by MakeFunc. +// See the comment on the declaration of makeFuncStub in makefunc.go +// for more details. +// No arg size here, runtime pulls arg map out of the func value. 
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$432
+	NO_LOCAL_POINTERS
+	ADDV	$LOCAL_REGARGS, R3, R25 // spillArgs using R25
+	JAL	runtime·spillArgs(SB)
+	MOVV	REGCTXT, 32(R3) // save REGCTXT > args of moveMakeFuncArgPtrs < LOCAL_REGARGS
+
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	REGCTXT, R4
+	MOVV	R25, R5
+#else
+	MOVV	REGCTXT, 8(R3)
+	MOVV	R25, 16(R3)
+#endif
+	JAL	·moveMakeFuncArgPtrs(SB)
+	MOVV	32(R3), REGCTXT // restore REGCTXT
+
+	MOVV	REGCTXT, 8(R3)
+	MOVV	$argframe+0(FP), R20
+	MOVV	R20, 16(R3)
+	MOVV	R0, LOCAL_RETVALID(R3)
+	ADDV	$LOCAL_RETVALID, R3, R20
+	MOVV	R20, 24(R3)
+	ADDV	$LOCAL_REGARGS, R3, R20
+	MOVV	R20, 32(R3)
+	JAL	·callReflect(SB)
+	ADDV	$LOCAL_REGARGS, R3, R25 // unspillArgs using R25
+	JAL	runtime·unspillArgs(SB)
+	RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$432
+	NO_LOCAL_POINTERS
+	ADDV	$LOCAL_REGARGS, R3, R25 // spillArgs using R25
+	JAL	runtime·spillArgs(SB)
+	MOVV	REGCTXT, 32(R3) // save REGCTXT > args of moveMakeFuncArgPtrs < LOCAL_REGARGS
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	REGCTXT, R4
+	MOVV	R25, R5
+#else
+	MOVV	REGCTXT, 8(R3)
+	MOVV	R25, 16(R3)
+#endif
+	JAL	·moveMakeFuncArgPtrs(SB)
+	MOVV	32(R3), REGCTXT // restore REGCTXT
+	MOVV	REGCTXT, 8(R3)
+	MOVV	$argframe+0(FP), R20
+	MOVV	R20, 16(R3)
+	MOVB	R0, LOCAL_RETVALID(R3)
+	ADDV	$LOCAL_RETVALID, R3, R20
+	MOVV	R20, 24(R3)
+	ADDV	$LOCAL_REGARGS, R3, R20
+	MOVV	R20, 32(R3) // frame size to 32+SP as callreflect args
+	JAL	·callMethod(SB)
+	ADDV	$LOCAL_REGARGS, R3, R25 // unspillArgs using R25
+	JAL	runtime·unspillArgs(SB)
+	RET
diff --git a/platform/dbops/binaries/go/go/src/reflect/asm_mips64x.s b/platform/dbops/binaries/go/go/src/reflect/asm_mips64x.s
new file mode 100644
index 0000000000000000000000000000000000000000..f21e34df1bc248080e94c77013f6d3edeb3b77a0
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/reflect/asm_mips64x.s
@@ -0,0 +1,42 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build mips64 || mips64le
+
+#include "textflag.h"
+#include "funcdata.h"
+
+#define REGCTXT R22
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here, runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40
+	NO_LOCAL_POINTERS
+	MOVV	REGCTXT, 8(R29)
+	MOVV	$argframe+0(FP), R1
+	MOVV	R1, 16(R29)
+	MOVB	R0, 40(R29)
+	ADDV	$40, R29, R1
+	MOVV	R1, 24(R29)
+	MOVV	R0, 32(R29)
+	JAL	·callReflect(SB)
+	RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40 + NO_LOCAL_POINTERS + MOVV REGCTXT, 8(R29) + MOVV $argframe+0(FP), R1 + MOVV R1, 16(R29) + MOVB R0, 40(R29) + ADDV $40, R29, R1 + MOVV R1, 24(R29) + MOVV R0, 32(R29) + JAL ·callMethod(SB) + RET diff --git a/platform/dbops/binaries/go/go/src/reflect/asm_mipsx.s b/platform/dbops/binaries/go/go/src/reflect/asm_mipsx.s new file mode 100644 index 0000000000000000000000000000000000000000..636c8a5c71c3d701940e6d00d7acd7ccea90592e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/asm_mipsx.s @@ -0,0 +1,42 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips || mipsle + +#include "textflag.h" +#include "funcdata.h" + +#define REGCTXT R22 + +// makeFuncStub is the code half of the function returned by MakeFunc. +// See the comment on the declaration of makeFuncStub in makefunc.go +// for more details. +// No arg size here, runtime pulls arg map out of the func value. +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$20 + NO_LOCAL_POINTERS + MOVW REGCTXT, 4(R29) + MOVW $argframe+0(FP), R1 + MOVW R1, 8(R29) + MOVB R0, 20(R29) + ADD $20, R29, R1 + MOVW R1, 12(R29) + MOVW R0, 16(R29) + JAL ·callReflect(SB) + RET + +// methodValueCall is the code half of the function returned by makeMethodValue. +// See the comment on the declaration of methodValueCall in makefunc.go +// for more details. +// No arg size here; runtime pulls arg map out of the func value. +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$20 + NO_LOCAL_POINTERS + MOVW REGCTXT, 4(R29) + MOVW $argframe+0(FP), R1 + MOVW R1, 8(R29) + MOVB R0, 20(R29) + ADD $20, R29, R1 + MOVW R1, 12(R29) + MOVW R0, 16(R29) + JAL ·callMethod(SB) + RET diff --git a/platform/dbops/binaries/go/go/src/reflect/asm_ppc64x.s b/platform/dbops/binaries/go/go/src/reflect/asm_ppc64x.s new file mode 100644 index 0000000000000000000000000000000000000000..3b529be6853529a960bca62df456b07d77c1fb20 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/asm_ppc64x.s @@ -0,0 +1,83 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +#include "textflag.h" +#include "funcdata.h" +#include "asm_ppc64x.h" + +// The frames of each of the two functions below contain two locals, at offsets +// that are known to the runtime. +// +// The first local is a bool called retValid with a whole pointer-word reserved +// for it on the stack. The purpose of this word is so that the runtime knows +// whether the stack-allocated return space contains valid values for stack +// scanning. +// +// The second local is an abi.RegArgs value whose offset is also known to the +// runtime, so that a stack map for it can be constructed, since it contains +// pointers visible to the GC. + +#define LOCAL_RETVALID 32+FIXED_FRAME +#define LOCAL_REGARGS 40+FIXED_FRAME + +// The frame size of the functions below is +// 32 (args of callReflect) + 8 (bool + padding) + 296 (abi.RegArgs) = 336. + +// makeFuncStub is the code half of the function returned by MakeFunc. +// See the comment on the declaration of makeFuncStub in makefunc.go +// for more details. +// No arg size here, runtime pulls arg map out of the func value. +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$336 + NO_LOCAL_POINTERS + // NO_LOCAL_POINTERS is a lie. 
The stack map for the two locals in this + // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID. + ADD $LOCAL_REGARGS, R1, R20 + CALL runtime·spillArgs(SB) + MOVD R11, FIXED_FRAME+32(R1) // save R11 + MOVD R11, FIXED_FRAME+0(R1) // arg for moveMakeFuncArgPtrs + MOVD R20, FIXED_FRAME+8(R1) // arg for local args + CALL ·moveMakeFuncArgPtrs(SB) + MOVD FIXED_FRAME+32(R1), R11 // restore R11 ctxt + MOVD R11, FIXED_FRAME+0(R1) // ctxt (arg0) + MOVD $argframe+0(FP), R3 // save arg to callArg + MOVD R3, FIXED_FRAME+8(R1) // frame (arg1) + ADD $LOCAL_RETVALID, R1, R3 // addr of return flag + MOVB R0, (R3) // clear flag + MOVD R3, FIXED_FRAME+16(R1) // addr retvalid (arg2) + ADD $LOCAL_REGARGS, R1, R3 + MOVD R3, FIXED_FRAME+24(R1) // abiregargs (arg3) + BL ·callReflect(SB) + ADD $LOCAL_REGARGS, R1, R20 // set address of spill area + CALL runtime·unspillArgs(SB) + RET + +// methodValueCall is the code half of the function returned by makeMethodValue. +// See the comment on the declaration of methodValueCall in makefunc.go +// for more details. +// No arg size here; runtime pulls arg map out of the func value. +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$336 + NO_LOCAL_POINTERS + // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this + // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID. + ADD $LOCAL_REGARGS, R1, R20 + CALL runtime·spillArgs(SB) + MOVD R11, FIXED_FRAME+0(R1) // arg0 ctxt + MOVD R11, FIXED_FRAME+32(R1) // save for later + MOVD R20, FIXED_FRAME+8(R1) // arg1 abiregargs + CALL ·moveMakeFuncArgPtrs(SB) + MOVD FIXED_FRAME+32(R1), R11 // restore ctxt + MOVD R11, FIXED_FRAME+0(R1) // set as arg0 + MOVD $argframe+0(FP), R3 // frame pointer + MOVD R3, FIXED_FRAME+8(R1) // set as arg1 + ADD $LOCAL_RETVALID, R1, R3 + MOVB $0, (R3) // clear ret flag + MOVD R3, FIXED_FRAME+16(R1) // addr of return flag + ADD $LOCAL_REGARGS, R1, R3 // addr of abiregargs + MOVD R3, FIXED_FRAME+24(R1) // set as arg3 + BL ·callMethod(SB) + ADD $LOCAL_REGARGS, R1, R20 + CALL runtime·unspillArgs(SB) + RET diff --git a/platform/dbops/binaries/go/go/src/reflect/asm_riscv64.s b/platform/dbops/binaries/go/go/src/reflect/asm_riscv64.s new file mode 100644 index 0000000000000000000000000000000000000000..1200b4d08eac27f53c6458834f86e6783aa1790b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/asm_riscv64.s @@ -0,0 +1,76 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "funcdata.h" + +// The frames of each of the two functions below contain two locals, at offsets +// that are known to the runtime. +// +// The first local is a bool called retValid with a whole pointer-word reserved +// for it on the stack. The purpose of this word is so that the runtime knows +// whether the stack-allocated return space contains valid values for stack +// scanning. +// +// The second local is an abi.RegArgs value whose offset is also known to the +// runtime, so that a stack map for it can be constructed, since it contains +// pointers visible to the GC. +#define LOCAL_RETVALID 40 +#define LOCAL_REGARGS 48 + +// The frame size of the functions below is +// 32 (args of callReflect/callMethod) + (8 bool with padding) + 392 (abi.RegArgs) = 432. + +// makeFuncStub is the code half of the function returned by MakeFunc. +// See the comment on the declaration of makeFuncStub in makefunc.go +// for more details. 
+// No arg size here, runtime pulls arg map out of the func value. +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$432 + NO_LOCAL_POINTERS + ADD $LOCAL_REGARGS, SP, X25 // spillArgs using X25 + CALL runtime·spillArgs(SB) + MOV CTXT, 32(SP) // save CTXT > args of moveMakeFuncArgPtrs < LOCAL_REGARGS + MOV CTXT, 8(SP) + MOV X25, 16(SP) + CALL ·moveMakeFuncArgPtrs(SB) + MOV 32(SP), CTXT // restore CTXT + + MOV CTXT, 8(SP) + MOV $argframe+0(FP), T0 + MOV T0, 16(SP) + MOV ZERO, LOCAL_RETVALID(SP) + ADD $LOCAL_RETVALID, SP, T1 + MOV T1, 24(SP) + ADD $LOCAL_REGARGS, SP, T1 + MOV T1, 32(SP) + CALL ·callReflect(SB) + ADD $LOCAL_REGARGS, SP, X25 // unspillArgs using X25 + CALL runtime·unspillArgs(SB) + RET + +// methodValueCall is the code half of the function returned by makeMethodValue. +// See the comment on the declaration of methodValueCall in makefunc.go +// for more details. +// No arg size here; runtime pulls arg map out of the func value. +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$432 + NO_LOCAL_POINTERS + ADD $LOCAL_REGARGS, SP, X25 // spillArgs using X25 + CALL runtime·spillArgs(SB) + MOV CTXT, 32(SP) // save CTXT + MOV CTXT, 8(SP) + MOV X25, 16(SP) + CALL ·moveMakeFuncArgPtrs(SB) + MOV 32(SP), CTXT // restore CTXT + MOV CTXT, 8(SP) + MOV $argframe+0(FP), T0 + MOV T0, 16(SP) + MOV ZERO, LOCAL_RETVALID(SP) + ADD $LOCAL_RETVALID, SP, T1 + MOV T1, 24(SP) + ADD $LOCAL_REGARGS, SP, T1 + MOV T1, 32(SP) // frame size to 32+SP as callreflect args + CALL ·callMethod(SB) + ADD $LOCAL_REGARGS, SP, X25 // unspillArgs using X25 + CALL runtime·unspillArgs(SB) + RET diff --git a/platform/dbops/binaries/go/go/src/reflect/asm_s390x.s b/platform/dbops/binaries/go/go/src/reflect/asm_s390x.s new file mode 100644 index 0000000000000000000000000000000000000000..4bd6613004f656e99430eacacbf5fec276d0eed5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/asm_s390x.s @@ -0,0 +1,38 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "funcdata.h" + +// makeFuncStub is the code half of the function returned by MakeFunc. +// See the comment on the declaration of makeFuncStub in makefunc.go +// for more details. +// No arg size here, runtime pulls arg map out of the func value. +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40 + NO_LOCAL_POINTERS + MOVD R12, 8(R15) + MOVD $argframe+0(FP), R3 + MOVD R3, 16(R15) + MOVB $0, 40(R15) + ADD $40, R15, R3 + MOVD R3, 24(R15) + MOVD $0, 32(R15) + BL ·callReflect(SB) + RET + +// methodValueCall is the code half of the function returned by makeMethodValue. +// See the comment on the declaration of methodValueCall in makefunc.go +// for more details. +// No arg size here; runtime pulls arg map out of the func value. +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40 + NO_LOCAL_POINTERS + MOVD R12, 8(R15) + MOVD $argframe+0(FP), R3 + MOVD R3, 16(R15) + MOVB $0, 40(R15) + ADD $40, R15, R3 + MOVD R3, 24(R15) + MOVD $0, 32(R15) + BL ·callMethod(SB) + RET diff --git a/platform/dbops/binaries/go/go/src/reflect/asm_wasm.s b/platform/dbops/binaries/go/go/src/reflect/asm_wasm.s new file mode 100644 index 0000000000000000000000000000000000000000..71abe6700e4998f541eb2d4ef7d19aa2b9d889af --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/asm_wasm.s @@ -0,0 +1,52 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
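+
+// Wasm is a stack machine, so the Get/I64* sequences below compute the
+// address of argframe (SP plus its frame offset) on the operand stack and
+// store it at 8(SP); the MOVD/MOVB forms are the usual Go asm pseudo-ops.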
+ +#include "textflag.h" +#include "funcdata.h" + +// makeFuncStub is the code half of the function returned by MakeFunc. +// See the comment on the declaration of makeFuncStub in makefunc.go +// for more details. +// No arg size here; runtime pulls arg map out of the func value. +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40 + NO_LOCAL_POINTERS + + MOVD CTXT, 0(SP) + + Get SP + Get SP + I64ExtendI32U + I64Const $argframe+0(FP) + I64Add + I64Store $8 + + MOVB $0, 32(SP) + MOVD $32(SP), 16(SP) + MOVD $0, 24(SP) + + CALL ·callReflect(SB) + RET + +// methodValueCall is the code half of the function returned by makeMethodValue. +// See the comment on the declaration of methodValueCall in makefunc.go +// for more details. +// No arg size here; runtime pulls arg map out of the func value. +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40 + NO_LOCAL_POINTERS + + MOVD CTXT, 0(SP) + + Get SP + Get SP + I64ExtendI32U + I64Const $argframe+0(FP) + I64Add + I64Store $8 + + MOVB $0, 32(SP) + MOVD $32(SP), 16(SP) + MOVD $0, 24(SP) + + CALL ·callMethod(SB) + RET diff --git a/platform/dbops/binaries/go/go/src/reflect/benchmark_test.go b/platform/dbops/binaries/go/go/src/reflect/benchmark_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2e701b062edf6ff5bc48730959feacae3884e347 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/benchmark_test.go @@ -0,0 +1,428 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflect_test + +import ( + "fmt" + . "reflect" + "strconv" + "testing" +) + +var sourceAll = struct { + Bool Value + String Value + Bytes Value + NamedBytes Value + BytesArray Value + SliceAny Value + MapStringAny Value +}{ + Bool: ValueOf(new(bool)).Elem(), + String: ValueOf(new(string)).Elem(), + Bytes: ValueOf(new([]byte)).Elem(), + NamedBytes: ValueOf(new(namedBytes)).Elem(), + BytesArray: ValueOf(new([32]byte)).Elem(), + SliceAny: ValueOf(new([]any)).Elem(), + MapStringAny: ValueOf(new(map[string]any)).Elem(), +} + +var sinkAll struct { + RawBool bool + RawString string + RawBytes []byte + RawInt int +} + +func BenchmarkBool(b *testing.B) { + for i := 0; i < b.N; i++ { + sinkAll.RawBool = sourceAll.Bool.Bool() + } +} + +func BenchmarkString(b *testing.B) { + for i := 0; i < b.N; i++ { + sinkAll.RawString = sourceAll.String.String() + } +} + +func BenchmarkBytes(b *testing.B) { + for i := 0; i < b.N; i++ { + sinkAll.RawBytes = sourceAll.Bytes.Bytes() + } +} + +func BenchmarkNamedBytes(b *testing.B) { + for i := 0; i < b.N; i++ { + sinkAll.RawBytes = sourceAll.NamedBytes.Bytes() + } +} + +func BenchmarkBytesArray(b *testing.B) { + for i := 0; i < b.N; i++ { + sinkAll.RawBytes = sourceAll.BytesArray.Bytes() + } +} + +func BenchmarkSliceLen(b *testing.B) { + for i := 0; i < b.N; i++ { + sinkAll.RawInt = sourceAll.SliceAny.Len() + } +} + +func BenchmarkMapLen(b *testing.B) { + for i := 0; i < b.N; i++ { + sinkAll.RawInt = sourceAll.MapStringAny.Len() + } +} + +func BenchmarkStringLen(b *testing.B) { + for i := 0; i < b.N; i++ { + sinkAll.RawInt = sourceAll.String.Len() + } +} + +func BenchmarkArrayLen(b *testing.B) { + for i := 0; i < b.N; i++ { + sinkAll.RawInt = sourceAll.BytesArray.Len() + } +} + +func BenchmarkSliceCap(b *testing.B) { + for i := 0; i < b.N; i++ { + sinkAll.RawInt = sourceAll.SliceAny.Cap() + } +} + +func BenchmarkDeepEqual(b *testing.B) { + for _, bb := range deepEqualPerfTests { + b.Run(ValueOf(bb.x).Type().String(), 
func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + sink = DeepEqual(bb.x, bb.y) + } + }) + } +} + +func BenchmarkMapsDeepEqual(b *testing.B) { + m1 := map[int]int{ + 1: 1, 2: 2, + } + m2 := map[int]int{ + 1: 1, 2: 2, + } + for i := 0; i < b.N; i++ { + DeepEqual(m1, m2) + } +} + +func BenchmarkIsZero(b *testing.B) { + type Int4 struct { + a, b, c, d int + } + type Int1024 struct { + a [1024]int + } + type Int512 struct { + a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16 [16]S + } + s := struct { + ArrayComparable [4]T + ArrayIncomparable [4]_Complex + StructComparable T + StructIncomparable _Complex + ArrayInt_4 [4]int + ArrayInt_1024 [1024]int + ArrayInt_1024_NoZero [1024]int + Struct4Int Int4 + ArrayStruct4Int_1024 [256]Int4 + ArrayChanInt_1024 [1024]chan int + StructInt_512 Int512 + }{} + s.ArrayInt_1024_NoZero[512] = 1 + source := ValueOf(s) + + for i := 0; i < source.NumField(); i++ { + name := source.Type().Field(i).Name + value := source.Field(i) + b.Run(name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + sink = value.IsZero() + } + }) + } +} + +func BenchmarkSetZero(b *testing.B) { + source := ValueOf(new(struct { + Bool bool + Int int64 + Uint uint64 + Float float64 + Complex complex128 + Array [4]Value + Chan chan Value + Func func() Value + Interface interface{ String() string } + Map map[string]Value + Pointer *Value + Slice []Value + String string + Struct Value + })).Elem() + + for i := 0; i < source.NumField(); i++ { + name := source.Type().Field(i).Name + value := source.Field(i) + zero := Zero(value.Type()) + b.Run(name+"/Direct", func(b *testing.B) { + for i := 0; i < b.N; i++ { + value.SetZero() + } + }) + b.Run(name+"/CachedZero", func(b *testing.B) { + for i := 0; i < b.N; i++ { + value.Set(zero) + } + }) + b.Run(name+"/NewZero", func(b *testing.B) { + for i := 0; i < b.N; i++ { + value.Set(Zero(value.Type())) + } + }) + } +} + +func BenchmarkSelect(b *testing.B) { + channel := make(chan int) + close(channel) + var cases []SelectCase + for i := 0; i < 8; i++ { + cases = append(cases, SelectCase{ + Dir: SelectRecv, + Chan: ValueOf(channel), + }) + } + for _, numCases := range []int{1, 4, 8} { + b.Run(strconv.Itoa(numCases), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, _, _ = Select(cases[:numCases]) + } + }) + } +} + +func BenchmarkCall(b *testing.B) { + fv := ValueOf(func(a, b string) {}) + b.ReportAllocs() + b.RunParallel(func(pb *testing.PB) { + args := []Value{ValueOf("a"), ValueOf("b")} + for pb.Next() { + fv.Call(args) + } + }) +} + +type myint int64 + +func (i *myint) inc() { + *i = *i + 1 +} + +func BenchmarkCallMethod(b *testing.B) { + b.ReportAllocs() + z := new(myint) + + v := ValueOf(z.inc) + for i := 0; i < b.N; i++ { + v.Call(nil) + } +} + +func BenchmarkCallArgCopy(b *testing.B) { + byteArray := func(n int) Value { + return Zero(ArrayOf(n, TypeOf(byte(0)))) + } + sizes := [...]struct { + fv Value + arg Value + }{ + {ValueOf(func(a [128]byte) {}), byteArray(128)}, + {ValueOf(func(a [256]byte) {}), byteArray(256)}, + {ValueOf(func(a [1024]byte) {}), byteArray(1024)}, + {ValueOf(func(a [4096]byte) {}), byteArray(4096)}, + {ValueOf(func(a [65536]byte) {}), byteArray(65536)}, + } + for _, size := range sizes { + bench := func(b *testing.B) { + args := []Value{size.arg} + b.SetBytes(int64(size.arg.Len())) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + size.fv.Call(args) + } + }) + } + name := fmt.Sprintf("size=%v", size.arg.Len()) + b.Run(name, bench) + } +} 
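+
+// Any benchmark in this file can be run on its own with the standard go
+// tooling, for example:
+//
+//	go test -run='^$' -bench='BenchmarkCallArgCopy' reflect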
+ +func BenchmarkPtrTo(b *testing.B) { + // Construct a type with a zero ptrToThis. + type T struct{ int } + t := SliceOf(TypeOf(T{})) + ptrToThis := ValueOf(t).Elem().FieldByName("PtrToThis") + if !ptrToThis.IsValid() { + b.Skipf("%v has no ptrToThis field; was it removed from rtype?", t) // TODO fix this at top of refactoring + // b.Fatalf("%v has no ptrToThis field; was it removed from rtype?", t) + } + if ptrToThis.Int() != 0 { + b.Fatalf("%v.ptrToThis unexpectedly nonzero", t) + } + b.ResetTimer() + + // Now benchmark calling PointerTo on it: we'll have to hit the ptrMap cache on + // every call. + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + PointerTo(t) + } + }) +} + +type B1 struct { + X int + Y int + Z int +} + +func BenchmarkFieldByName1(b *testing.B) { + t := TypeOf(B1{}) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + t.FieldByName("Z") + } + }) +} + +func BenchmarkFieldByName2(b *testing.B) { + t := TypeOf(S3{}) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + t.FieldByName("B") + } + }) +} + +func BenchmarkFieldByName3(b *testing.B) { + t := TypeOf(R0{}) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + t.FieldByName("X") + } + }) +} + +type S struct { + i1 int64 + i2 int64 +} + +func BenchmarkInterfaceBig(b *testing.B) { + v := ValueOf(S{}) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + v.Interface() + } + }) + b.StopTimer() +} + +func BenchmarkInterfaceSmall(b *testing.B) { + v := ValueOf(int64(0)) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + v.Interface() + } + }) +} + +func BenchmarkNew(b *testing.B) { + v := TypeOf(XM{}) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + New(v) + } + }) +} + +func BenchmarkMap(b *testing.B) { + type V *int + type S string + value := ValueOf((V)(nil)) + stringKeys := []string{} + mapOfStrings := map[string]V{} + uint64Keys := []uint64{} + mapOfUint64s := map[uint64]V{} + userStringKeys := []S{} + mapOfUserStrings := map[S]V{} + for i := 0; i < 100; i++ { + stringKey := fmt.Sprintf("key%d", i) + stringKeys = append(stringKeys, stringKey) + mapOfStrings[stringKey] = nil + + uint64Key := uint64(i) + uint64Keys = append(uint64Keys, uint64Key) + mapOfUint64s[uint64Key] = nil + + userStringKey := S(fmt.Sprintf("key%d", i)) + userStringKeys = append(userStringKeys, userStringKey) + mapOfUserStrings[userStringKey] = nil + } + + tests := []struct { + label string + m, keys, value Value + }{ + {"StringKeys", ValueOf(mapOfStrings), ValueOf(stringKeys), value}, + {"Uint64Keys", ValueOf(mapOfUint64s), ValueOf(uint64Keys), value}, + {"UserStringKeys", ValueOf(mapOfUserStrings), ValueOf(userStringKeys), value}, + } + + for _, tt := range tests { + b.Run(tt.label, func(b *testing.B) { + b.Run("MapIndex", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + for j := tt.keys.Len() - 1; j >= 0; j-- { + tt.m.MapIndex(tt.keys.Index(j)) + } + } + }) + b.Run("SetMapIndex", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + for j := tt.keys.Len() - 1; j >= 0; j-- { + tt.m.SetMapIndex(tt.keys.Index(j), tt.value) + } + } + }) + }) + } +} + +func BenchmarkMapIterNext(b *testing.B) { + m := ValueOf(map[string]int{"a": 0, "b": 1, "c": 2, "d": 3}) + it := m.MapRange() + for i := 0; i < b.N; i++ { + for it.Next() { + } + it.Reset(m) + } +} diff --git a/platform/dbops/binaries/go/go/src/reflect/deepequal.go b/platform/dbops/binaries/go/go/src/reflect/deepequal.go new file mode 100644 index 
0000000000000000000000000000000000000000..961e17011839bd58b72ec29f6a8a70d1a5b33307 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/deepequal.go @@ -0,0 +1,239 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Deep equality test via reflection + +package reflect + +import ( + "internal/bytealg" + "unsafe" +) + +// During deepValueEqual, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited comparisons are stored in a map indexed by visit. +type visit struct { + a1 unsafe.Pointer + a2 unsafe.Pointer + typ Type +} + +// Tests for deep equality using reflected types. The map argument tracks +// comparisons that have already been seen, which allows short circuiting on +// recursive types. +func deepValueEqual(v1, v2 Value, visited map[visit]bool) bool { + if !v1.IsValid() || !v2.IsValid() { + return v1.IsValid() == v2.IsValid() + } + if v1.Type() != v2.Type() { + return false + } + + // We want to avoid putting more in the visited map than we need to. + // For any possible reference cycle that might be encountered, + // hard(v1, v2) needs to return true for at least one of the types in the cycle, + // and it's safe and valid to get Value's internal pointer. + hard := func(v1, v2 Value) bool { + switch v1.Kind() { + case Pointer: + if v1.typ().PtrBytes == 0 { + // not-in-heap pointers can't be cyclic. + // At least, all of our current uses of runtime/internal/sys.NotInHeap + // have that property. The runtime ones aren't cyclic (and we don't use + // DeepEqual on them anyway), and the cgo-generated ones are + // all empty structs. + return false + } + fallthrough + case Map, Slice, Interface: + // Nil pointers cannot be cyclic. Avoid putting them in the visited map. + return !v1.IsNil() && !v2.IsNil() + } + return false + } + + if hard(v1, v2) { + // For a Pointer or Map value, we need to check flagIndir, + // which we do by calling the pointer method. + // For Slice or Interface, flagIndir is always set, + // and using v.ptr suffices. + ptrval := func(v Value) unsafe.Pointer { + switch v.Kind() { + case Pointer, Map: + return v.pointer() + default: + return v.ptr + } + } + addr1 := ptrval(v1) + addr2 := ptrval(v2) + if uintptr(addr1) > uintptr(addr2) { + // Canonicalize order to reduce number of entries in visited. + // Assumes non-moving garbage collector. + addr1, addr2 = addr2, addr1 + } + + // Short circuit if references are already seen. + typ := v1.Type() + v := visit{addr1, addr2, typ} + if visited[v] { + return true + } + + // Remember for later. + visited[v] = true + } + + switch v1.Kind() { + case Array: + for i := 0; i < v1.Len(); i++ { + if !deepValueEqual(v1.Index(i), v2.Index(i), visited) { + return false + } + } + return true + case Slice: + if v1.IsNil() != v2.IsNil() { + return false + } + if v1.Len() != v2.Len() { + return false + } + if v1.UnsafePointer() == v2.UnsafePointer() { + return true + } + // Special case for []byte, which is common. 
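+		// (bytealg.Equal compares the two byte slices with a single
+		// memequal-style call instead of recursing element by element.)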
+ if v1.Type().Elem().Kind() == Uint8 { + return bytealg.Equal(v1.Bytes(), v2.Bytes()) + } + for i := 0; i < v1.Len(); i++ { + if !deepValueEqual(v1.Index(i), v2.Index(i), visited) { + return false + } + } + return true + case Interface: + if v1.IsNil() || v2.IsNil() { + return v1.IsNil() == v2.IsNil() + } + return deepValueEqual(v1.Elem(), v2.Elem(), visited) + case Pointer: + if v1.UnsafePointer() == v2.UnsafePointer() { + return true + } + return deepValueEqual(v1.Elem(), v2.Elem(), visited) + case Struct: + for i, n := 0, v1.NumField(); i < n; i++ { + if !deepValueEqual(v1.Field(i), v2.Field(i), visited) { + return false + } + } + return true + case Map: + if v1.IsNil() != v2.IsNil() { + return false + } + if v1.Len() != v2.Len() { + return false + } + if v1.UnsafePointer() == v2.UnsafePointer() { + return true + } + iter := v1.MapRange() + for iter.Next() { + val1 := iter.Value() + val2 := v2.MapIndex(iter.Key()) + if !val1.IsValid() || !val2.IsValid() || !deepValueEqual(val1, val2, visited) { + return false + } + } + return true + case Func: + if v1.IsNil() && v2.IsNil() { + return true + } + // Can't do better than this: + return false + case Int, Int8, Int16, Int32, Int64: + return v1.Int() == v2.Int() + case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr: + return v1.Uint() == v2.Uint() + case String: + return v1.String() == v2.String() + case Bool: + return v1.Bool() == v2.Bool() + case Float32, Float64: + return v1.Float() == v2.Float() + case Complex64, Complex128: + return v1.Complex() == v2.Complex() + default: + // Normal equality suffices + return valueInterface(v1, false) == valueInterface(v2, false) + } +} + +// DeepEqual reports whether x and y are “deeply equal,” defined as follows. +// Two values of identical type are deeply equal if one of the following cases applies. +// Values of distinct types are never deeply equal. +// +// Array values are deeply equal when their corresponding elements are deeply equal. +// +// Struct values are deeply equal if their corresponding fields, +// both exported and unexported, are deeply equal. +// +// Func values are deeply equal if both are nil; otherwise they are not deeply equal. +// +// Interface values are deeply equal if they hold deeply equal concrete values. +// +// Map values are deeply equal when all of the following are true: +// they are both nil or both non-nil, they have the same length, +// and either they are the same map object or their corresponding keys +// (matched using Go equality) map to deeply equal values. +// +// Pointer values are deeply equal if they are equal using Go's == operator +// or if they point to deeply equal values. +// +// Slice values are deeply equal when all of the following are true: +// they are both nil or both non-nil, they have the same length, +// and either they point to the same initial entry of the same underlying array +// (that is, &x[0] == &y[0]) or their corresponding elements (up to length) are deeply equal. +// Note that a non-nil empty slice and a nil slice (for example, []byte{} and []byte(nil)) +// are not deeply equal. +// +// Other values - numbers, bools, strings, and channels - are deeply equal +// if they are equal using Go's == operator. +// +// In general DeepEqual is a recursive relaxation of Go's == operator. +// However, this idea is impossible to implement without some inconsistency. 
+// Specifically, it is possible for a value to be unequal to itself, +// either because it is of func type (uncomparable in general) +// or because it is a floating-point NaN value (not equal to itself in floating-point comparison), +// or because it is an array, struct, or interface containing +// such a value. +// On the other hand, pointer values are always equal to themselves, +// even if they point at or contain such problematic values, +// because they compare equal using Go's == operator, and that +// is a sufficient condition to be deeply equal, regardless of content. +// DeepEqual has been defined so that the same short-cut applies +// to slices and maps: if x and y are the same slice or the same map, +// they are deeply equal regardless of content. +// +// As DeepEqual traverses the data values it may find a cycle. The +// second and subsequent times that DeepEqual compares two pointer +// values that have been compared before, it treats the values as +// equal rather than examining the values to which they point. +// This ensures that DeepEqual terminates. +func DeepEqual(x, y any) bool { + if x == nil || y == nil { + return x == y + } + v1 := ValueOf(x) + v2 := ValueOf(y) + if v1.Type() != v2.Type() { + return false + } + return deepValueEqual(v1, v2, make(map[visit]bool)) +} diff --git a/platform/dbops/binaries/go/go/src/reflect/example_test.go b/platform/dbops/binaries/go/go/src/reflect/example_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b4f3b2932f78c368492d5d46d86319776676ce77 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/example_test.go @@ -0,0 +1,209 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflect_test + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "reflect" +) + +func ExampleKind() { + for _, v := range []any{"hi", 42, func() {}} { + switch v := reflect.ValueOf(v); v.Kind() { + case reflect.String: + fmt.Println(v.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fmt.Println(v.Int()) + default: + fmt.Printf("unhandled kind %s", v.Kind()) + } + } + + // Output: + // hi + // 42 + // unhandled kind func +} + +func ExampleMakeFunc() { + // swap is the implementation passed to MakeFunc. + // It must work in terms of reflect.Values so that it is possible + // to write code without knowing beforehand what the types + // will be. + swap := func(in []reflect.Value) []reflect.Value { + return []reflect.Value{in[1], in[0]} + } + + // makeSwap expects fptr to be a pointer to a nil function. + // It sets that pointer to a new function created with MakeFunc. + // When the function is invoked, reflect turns the arguments + // into Values, calls swap, and then turns swap's result slice + // into the values returned by the new function. + makeSwap := func(fptr any) { + // fptr is a pointer to a function. + // Obtain the function value itself (likely nil) as a reflect.Value + // so that we can query its type and then set the value. + fn := reflect.ValueOf(fptr).Elem() + + // Make a function of the right type. + v := reflect.MakeFunc(fn.Type(), swap) + + // Assign it to the value fn represents. + fn.Set(v) + } + + // Make and call a swap function for ints. + var intSwap func(int, int) (int, int) + makeSwap(&intSwap) + fmt.Println(intSwap(0, 1)) + + // Make and call a swap function for float64s. 
+ var floatSwap func(float64, float64) (float64, float64) + makeSwap(&floatSwap) + fmt.Println(floatSwap(2.72, 3.14)) + + // Output: + // 1 0 + // 3.14 2.72 +} + +func ExampleStructTag() { + type S struct { + F string `species:"gopher" color:"blue"` + } + + s := S{} + st := reflect.TypeOf(s) + field := st.Field(0) + fmt.Println(field.Tag.Get("color"), field.Tag.Get("species")) + + // Output: + // blue gopher +} + +func ExampleStructTag_Lookup() { + type S struct { + F0 string `alias:"field_0"` + F1 string `alias:""` + F2 string + } + + s := S{} + st := reflect.TypeOf(s) + for i := 0; i < st.NumField(); i++ { + field := st.Field(i) + if alias, ok := field.Tag.Lookup("alias"); ok { + if alias == "" { + fmt.Println("(blank)") + } else { + fmt.Println(alias) + } + } else { + fmt.Println("(not specified)") + } + } + + // Output: + // field_0 + // (blank) + // (not specified) +} + +func ExampleTypeOf() { + // As interface types are only used for static typing, a + // common idiom to find the reflection Type for an interface + // type Foo is to use a *Foo value. + writerType := reflect.TypeOf((*io.Writer)(nil)).Elem() + + fileType := reflect.TypeOf((*os.File)(nil)) + fmt.Println(fileType.Implements(writerType)) + + // Output: + // true +} + +func ExampleStructOf() { + typ := reflect.StructOf([]reflect.StructField{ + { + Name: "Height", + Type: reflect.TypeOf(float64(0)), + Tag: `json:"height"`, + }, + { + Name: "Age", + Type: reflect.TypeOf(int(0)), + Tag: `json:"age"`, + }, + }) + + v := reflect.New(typ).Elem() + v.Field(0).SetFloat(0.4) + v.Field(1).SetInt(2) + s := v.Addr().Interface() + + w := new(bytes.Buffer) + if err := json.NewEncoder(w).Encode(s); err != nil { + panic(err) + } + + fmt.Printf("value: %+v\n", s) + fmt.Printf("json: %s", w.Bytes()) + + r := bytes.NewReader([]byte(`{"height":1.5,"age":10}`)) + if err := json.NewDecoder(r).Decode(s); err != nil { + panic(err) + } + fmt.Printf("value: %+v\n", s) + + // Output: + // value: &{Height:0.4 Age:2} + // json: {"height":0.4,"age":2} + // value: &{Height:1.5 Age:10} +} + +func ExampleValue_FieldByIndex() { + // This example shows a case in which the name of a promoted field + // is hidden by another field: FieldByName will not work, so + // FieldByIndex must be used instead. + type user struct { + firstName string + lastName string + } + + type data struct { + user + firstName string + lastName string + } + + u := data{ + user: user{"Embedded John", "Embedded Doe"}, + firstName: "John", + lastName: "Doe", + } + + s := reflect.ValueOf(u).FieldByIndex([]int{0, 1}) + fmt.Println("embedded last name:", s) + + // Output: + // embedded last name: Embedded Doe +} + +func ExampleValue_FieldByName() { + type user struct { + firstName string + lastName string + } + u := user{firstName: "John", lastName: "Doe"} + s := reflect.ValueOf(u) + + fmt.Println("Name:", s.FieldByName("firstName")) + // Output: + // Name: John +} diff --git a/platform/dbops/binaries/go/go/src/reflect/export_test.go b/platform/dbops/binaries/go/go/src/reflect/export_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1648eb362490ce090d0f694063fc9ab64ec8adad --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/export_test.go @@ -0,0 +1,168 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
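+
+// This file uses the export_test.go convention to expose internals of the
+// reflect package (MakeRO, FuncLayout, SetArgRegs, and friends) to the
+// external tests in package reflect_test.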
+ +package reflect + +import ( + "internal/abi" + "internal/goarch" + "sync" + "unsafe" +) + +// MakeRO returns a copy of v with the read-only flag set. +func MakeRO(v Value) Value { + v.flag |= flagStickyRO + return v +} + +// IsRO reports whether v's read-only flag is set. +func IsRO(v Value) bool { + return v.flag&flagStickyRO != 0 +} + +var CallGC = &callGC + +// FuncLayout calls funcLayout and returns a subset of the results for testing. +// +// Bitmaps like stack, gc, inReg, and outReg are expanded such that each bit +// takes up one byte, so that writing out test cases is a little clearer. +// If ptrs is false, gc will be nil. +func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr, stack, gc, inReg, outReg []byte, ptrs bool) { + var ft *abi.Type + var abid abiDesc + if rcvr != nil { + ft, _, abid = funcLayout((*funcType)(unsafe.Pointer(t.common())), rcvr.common()) + } else { + ft, _, abid = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), nil) + } + // Extract size information. + argSize = abid.stackCallArgsSize + retOffset = abid.retOffset + frametype = toType(ft) + + // Expand stack pointer bitmap into byte-map. + for i := uint32(0); i < abid.stackPtrs.n; i++ { + stack = append(stack, abid.stackPtrs.data[i/8]>>(i%8)&1) + } + + // Expand register pointer bitmaps into byte-maps. + bool2byte := func(b bool) byte { + if b { + return 1 + } + return 0 + } + for i := 0; i < intArgRegs; i++ { + inReg = append(inReg, bool2byte(abid.inRegPtrs.Get(i))) + outReg = append(outReg, bool2byte(abid.outRegPtrs.Get(i))) + } + if ft.Kind_&kindGCProg != 0 { + panic("can't handle gc programs") + } + + // Expand frame type's GC bitmap into byte-map. + ptrs = ft.PtrBytes != 0 + if ptrs { + nptrs := ft.PtrBytes / goarch.PtrSize + gcdata := ft.GcSlice(0, (nptrs+7)/8) + for i := uintptr(0); i < nptrs; i++ { + gc = append(gc, gcdata[i/8]>>(i%8)&1) + } + } + return +} + +func TypeLinks() []string { + var r []string + sections, offset := typelinks() + for i, offs := range offset { + rodata := sections[i] + for _, off := range offs { + typ := (*rtype)(resolveTypeOff(rodata, off)) + r = append(r, typ.String()) + } + } + return r +} + +var GCBits = gcbits + +func gcbits(any) []byte // provided by runtime + +func MapBucketOf(x, y Type) Type { + return toType(bucketOf(x.common(), y.common())) +} + +func CachedBucketOf(m Type) Type { + t := m.(*rtype) + if Kind(t.t.Kind_&kindMask) != Map { + panic("not map") + } + tt := (*mapType)(unsafe.Pointer(t)) + return toType(tt.Bucket) +} + +type EmbedWithUnexpMeth struct{} + +func (EmbedWithUnexpMeth) f() {} + +type pinUnexpMeth interface { + f() +} + +var pinUnexpMethI = pinUnexpMeth(EmbedWithUnexpMeth{}) + +func FirstMethodNameBytes(t Type) *byte { + _ = pinUnexpMethI + + ut := t.uncommon() + if ut == nil { + panic("type has no methods") + } + m := ut.Methods()[0] + mname := t.(*rtype).nameOff(m.Name) + if *mname.DataChecked(0, "name flag field")&(1<<2) == 0 { + panic("method name does not have pkgPath *string") + } + return mname.Bytes +} + +type OtherPkgFields struct { + OtherExported int + otherUnexported int +} + +func IsExported(t Type) bool { + typ := t.(*rtype) + n := typ.nameOff(typ.t.Str) + return n.IsExported() +} + +func ResolveReflectName(s string) { + resolveReflectName(newName(s, "", false, false)) +} + +type Buffer struct { + buf []byte +} + +func clearLayoutCache() { + layoutCache = sync.Map{} +} + +func SetArgRegs(ints, floats int, floatSize uintptr) (oldInts, oldFloats int, oldFloatSize uintptr) { + oldInts = intArgRegs + 
oldFloats = floatArgRegs
+	oldFloatSize = floatRegSize
+	intArgRegs = ints
+	floatArgRegs = floats
+	floatRegSize = floatSize
+	clearLayoutCache()
+	return
+}
+
+var MethodValueCallCodePtr = methodValueCallCodePtr
+
+var InternalIsZero = isZero
diff --git a/platform/dbops/binaries/go/go/src/reflect/float32reg_generic.go b/platform/dbops/binaries/go/go/src/reflect/float32reg_generic.go
new file mode 100644
index 0000000000000000000000000000000000000000..23ad4bf285b24873342e5e565228d2c66a08203f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/reflect/float32reg_generic.go
@@ -0,0 +1,23 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !ppc64 && !ppc64le && !riscv64
+
+package reflect
+
+import "unsafe"
+
+// This file implements a straightforward conversion of a float32
+// value into its representation in a register. This conversion
+// applies for amd64 and arm64. It is also chosen for the case of
+// zero argument registers, but is not used.
+
+func archFloat32FromReg(reg uint64) float32 {
+	i := uint32(reg)
+	return *(*float32)(unsafe.Pointer(&i))
+}
+
+func archFloat32ToReg(val float32) uint64 {
+	return uint64(*(*uint32)(unsafe.Pointer(&val)))
+}
diff --git a/platform/dbops/binaries/go/go/src/reflect/float32reg_ppc64x.s b/platform/dbops/binaries/go/go/src/reflect/float32reg_ppc64x.s
new file mode 100644
index 0000000000000000000000000000000000000000..a4deb18427af0f81441a2e247fe7304facc471b7
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/reflect/float32reg_ppc64x.s
@@ -0,0 +1,30 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+#include "textflag.h"
+
+// On PPC64, the float32 becomes a float64
+// when loaded in a register, different from
+// other platforms. These functions are
+// needed to ensure correct conversions on PPC64.
+
+// Convert float32->uint64
+TEXT ·archFloat32ToReg(SB),NOSPLIT,$0-16
+	FMOVS	val+0(FP), F1
+	FMOVD	F1, ret+8(FP)
+	RET
+
+// Convert uint64->float32
+TEXT ·archFloat32FromReg(SB),NOSPLIT,$0-12
+	FMOVD	reg+0(FP), F1
+	// Normally a float64->float32 conversion
+	// would need rounding, but rounding is not
+	// needed here since the uint64 was originally
+	// converted from a float32; skipping it also
+	// preserves SNaN values.
+	FMOVS	F1, ret+8(FP)
+	RET
+
diff --git a/platform/dbops/binaries/go/go/src/reflect/float32reg_riscv64.s b/platform/dbops/binaries/go/go/src/reflect/float32reg_riscv64.s
new file mode 100644
index 0000000000000000000000000000000000000000..8fcf3ba3e92f455f9b15761ceaad0727ab99d0de
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/reflect/float32reg_riscv64.s
@@ -0,0 +1,27 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// riscv64 allows 32-bit floats to live in the bottom
+// part of the register, but it expects them to be NaN-boxed.
+// These functions are needed to ensure correct conversions
+// on riscv64.
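The NaN-boxing described in the comment above can be modeled in ordinary Go. A minimal sketch (nanBox and unbox are hypothetical helper names; this is a software illustration of the register image, not the hardware operation itself):

	package main

	import (
		"fmt"
		"math"
	)

	// nanBox models a float32 held in a 64-bit riscv64 FP register: the low
	// 32 bits carry the float32 bit pattern, the high 32 bits are all ones.
	func nanBox(val float32) uint64 {
		return 0xffffffff00000000 | uint64(math.Float32bits(val))
	}

	// unbox recovers the float32 from the low 32 bits of the register image.
	func unbox(reg uint64) float32 {
		return math.Float32frombits(uint32(reg))
	}

	func main() {
		r := nanBox(3.5)
		fmt.Printf("%#x %v\n", r, unbox(r)) // 0xffffffff40600000 3.5
	}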
+
+// Convert float32->uint64
+TEXT ·archFloat32ToReg(SB),NOSPLIT,$0-16
+	MOVF	val+0(FP), F1
+	MOVD	F1, ret+8(FP)
+	RET
+
+// Convert uint64->float32
+TEXT ·archFloat32FromReg(SB),NOSPLIT,$0-12
+	// Normally a float64->float32 conversion
+	// would need rounding, but riscv64 stores a valid
+	// float32 in the lower 32 bits, so we only need to
+	// undo the NaN boxing by storing it as a float32.
+	MOVD	reg+0(FP), F1
+	MOVF	F1, ret+8(FP)
+	RET
+
diff --git a/platform/dbops/binaries/go/go/src/reflect/makefunc.go b/platform/dbops/binaries/go/go/src/reflect/makefunc.go
new file mode 100644
index 0000000000000000000000000000000000000000..2ed7f3890588f4e10e80f2f2b9bd8c35e4c139e5
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/reflect/makefunc.go
@@ -0,0 +1,176 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// MakeFunc implementation.
+
+package reflect
+
+import (
+	"internal/abi"
+	"unsafe"
+)
+
+// makeFuncImpl is the closure value implementing the function
+// returned by MakeFunc.
+// The first three words of this type must be kept in sync with
+// methodValue and runtime.reflectMethodValue.
+// Any changes should be reflected in all three.
+type makeFuncImpl struct {
+	makeFuncCtxt
+	ftyp *funcType
+	fn   func([]Value) []Value
+}
+
+// MakeFunc returns a new function of the given Type
+// that wraps the function fn. When called, that new function
+// does the following:
+//
+// - converts its arguments to a slice of Values.
+// - runs results := fn(args).
+// - returns the results as a slice of Values, one per formal result.
+//
+// The implementation fn can assume that the argument Value slice
+// has the number and type of arguments given by typ.
+// If typ describes a variadic function, the final Value is itself
+// a slice representing the variadic arguments, as in the
+// body of a variadic function. The result Value slice returned by fn
+// must have the number and type of results given by typ.
+//
+// The Value.Call method allows the caller to invoke a typed function
+// in terms of Values; in contrast, MakeFunc allows the caller to implement
+// a typed function in terms of Values.
+//
+// The Examples section of the documentation includes an illustration
+// of how to use MakeFunc to build a swap function for different types.
+func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value {
+	if typ.Kind() != Func {
+		panic("reflect: call of MakeFunc with non-Func type")
+	}
+
+	t := typ.common()
+	ftyp := (*funcType)(unsafe.Pointer(t))
+
+	code := abi.FuncPCABI0(makeFuncStub)
+
+	// makeFuncImpl contains a stack map for use by the runtime
+	_, _, abid := funcLayout(ftyp, nil)
+
+	impl := &makeFuncImpl{
+		makeFuncCtxt: makeFuncCtxt{
+			fn:      code,
+			stack:   abid.stackPtrs,
+			argLen:  abid.stackCallArgsSize,
+			regPtrs: abid.inRegPtrs,
+		},
+		ftyp: ftyp,
+		fn:   fn,
+	}
+
+	return Value{t, unsafe.Pointer(impl), flag(Func)}
+}
+
+// makeFuncStub is an assembly function that is the code half of
+// the function returned from MakeFunc. It expects a *makeFuncImpl
+// as its context register, and its job is to invoke callReflect(ctxt, frame)
+// where ctxt is the context register and frame is a pointer to the first
+// word in the passed-in argument frame.
+func makeFuncStub()
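Beyond the swap example that the MakeFunc doc points to, a second, smaller usage sketch (the name add is illustrative, not part of this file) shows the shape of the fn callback:

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		// Synthesize a func(int, int) int whose body works on reflect.Values.
		var add func(int, int) int
		fn := reflect.ValueOf(&add).Elem()
		fn.Set(reflect.MakeFunc(fn.Type(), func(args []reflect.Value) []reflect.Value {
			sum := args[0].Int() + args[1].Int()
			return []reflect.Value{reflect.ValueOf(int(sum))}
		}))
		fmt.Println(add(2, 3)) // 5
	}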
+
+// The first 3 words of this type must be kept in sync with
+// makeFuncImpl and runtime.reflectMethodValue.
+// Any changes should be reflected in all three.
+type methodValue struct {
+	makeFuncCtxt
+	method int
+	rcvr   Value
+}
+
+// makeMethodValue converts v from the rcvr+method index representation
+// of a method value (which is basically the receiver value with a
+// special bit set) into a true func value - a value holding an actual
+// func. The output is semantically equivalent to the input as far as
+// the user of package reflect can tell, but the true func
+// representation can be handled by code like Convert and Interface
+// and Assign.
+func makeMethodValue(op string, v Value) Value {
+	if v.flag&flagMethod == 0 {
+		panic("reflect: internal error: invalid use of makeMethodValue")
+	}
+
+	// Ignoring the flagMethod bit, v describes the receiver, not the method type.
+	fl := v.flag & (flagRO | flagAddr | flagIndir)
+	fl |= flag(v.typ().Kind())
+	rcvr := Value{v.typ(), v.ptr, fl}
+
+	// v.Type returns the actual type of the method value.
+	ftyp := (*funcType)(unsafe.Pointer(v.Type().(*rtype)))
+
+	code := methodValueCallCodePtr()
+
+	// methodValue contains a stack map for use by the runtime
+	_, _, abid := funcLayout(ftyp, nil)
+	fv := &methodValue{
+		makeFuncCtxt: makeFuncCtxt{
+			fn:      code,
+			stack:   abid.stackPtrs,
+			argLen:  abid.stackCallArgsSize,
+			regPtrs: abid.inRegPtrs,
+		},
+		method: int(v.flag) >> flagMethodShift,
+		rcvr:   rcvr,
+	}
+
+	// Cause panic if method is not appropriate.
+	// The panic would still happen during the call if we omit this,
+	// but we want Interface() and other operations to fail early.
+	methodReceiver(op, fv.rcvr, fv.method)
+
+	return Value{ftyp.Common(), unsafe.Pointer(fv), v.flag&flagRO | flag(Func)}
+}
+
+func methodValueCallCodePtr() uintptr {
+	return abi.FuncPCABI0(methodValueCall)
+}
+
+// methodValueCall is an assembly function that is the code half of
+// the function returned from makeMethodValue. It expects a *methodValue
+// as its context register, and its job is to invoke callMethod(ctxt, frame)
+// where ctxt is the context register and frame is a pointer to the first
+// word in the passed-in argument frame.
+func methodValueCall()
+
+// This structure must be kept in sync with runtime.reflectMethodValue.
+// Any changes should be reflected in both.
+type makeFuncCtxt struct {
+	fn      uintptr
+	stack   *bitVector // ptrmap for both stack args and results
+	argLen  uintptr    // just args
+	regPtrs abi.IntArgRegBitmap
+}
+
+// moveMakeFuncArgPtrs uses ctxt.regPtrs to copy integer pointer arguments
+// in args.Ints to args.Ptrs where the GC can see them.
+//
+// This is similar to what reflectcallmove does in the runtime, except
+// that happens on the return path, whereas this happens on the call path.
+//
+// nosplit because pointers are being held in uintptr slots in args, so
+// having our stack scanned now could lead to accidentally freeing
+// memory.
+//
+//go:nosplit
+func moveMakeFuncArgPtrs(ctxt *makeFuncCtxt, args *abi.RegArgs) {
+	for i, arg := range args.Ints {
+		// Avoid write barriers! Because our write barrier enqueues what
+		// was there before, we might enqueue garbage.
+		if ctxt.regPtrs.Get(i) {
+			*(*uintptr)(unsafe.Pointer(&args.Ptrs[i])) = arg
+		} else {
+			// We *must* zero this space ourselves because it's defined in
+			// assembly code and the GC will scan these pointers. Otherwise,
+			// there will be garbage here.
+ *(*uintptr)(unsafe.Pointer(&args.Ptrs[i])) = 0 + } + } +} diff --git a/platform/dbops/binaries/go/go/src/reflect/nih_test.go b/platform/dbops/binaries/go/go/src/reflect/nih_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f503939299f6a50c7da72b659b00dc6b84cb268c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/nih_test.go @@ -0,0 +1,38 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo + +package reflect_test + +import ( + . "reflect" + "runtime/cgo" + "testing" + "unsafe" +) + +type nih struct { + _ cgo.Incomplete + x int +} + +var global_nih = nih{x: 7} + +func TestNotInHeapDeref(t *testing.T) { + // See issue 48399. + v := ValueOf((*nih)(nil)) + v.Elem() + shouldPanic("reflect: call of reflect.Value.Field on zero Value", func() { v.Elem().Field(0) }) + + v = ValueOf(&global_nih) + if got := v.Elem().Field(1).Int(); got != 7 { + t.Fatalf("got %d, want 7", got) + } + + v = ValueOf((*nih)(unsafe.Pointer(new(int)))) + shouldPanic("reflect: reflect.Value.Elem on an invalid notinheap pointer", func() { v.Elem() }) + shouldPanic("reflect: reflect.Value.Pointer on an invalid notinheap pointer", func() { v.Pointer() }) + shouldPanic("reflect: reflect.Value.UnsafePointer on an invalid notinheap pointer", func() { v.UnsafePointer() }) +} diff --git a/platform/dbops/binaries/go/go/src/reflect/set_test.go b/platform/dbops/binaries/go/go/src/reflect/set_test.go new file mode 100644 index 0000000000000000000000000000000000000000..028c051cfbd5fbb03c925fa8a2a68433a1356e52 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/set_test.go @@ -0,0 +1,227 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflect_test + +import ( + "bytes" + "go/ast" + "go/token" + "io" + . "reflect" + "strings" + "testing" + "unsafe" +) + +func TestImplicitMapConversion(t *testing.T) { + // Test implicit conversions in MapIndex and SetMapIndex. 
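The cases below walk the conversions one by one; as a condensed standalone sketch of the interface-key case (illustrative only, separate from the test's cases):

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		m := make(map[any]any)
		mv := reflect.ValueOf(m)
		// The int key and string value are converted implicitly to the
		// map's interface{} key and element types.
		mv.SetMapIndex(reflect.ValueOf(1), reflect.ValueOf("one"))
		fmt.Println(m[1]) // one
	}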
+ { + // direct + m := make(map[int]int) + mv := ValueOf(m) + mv.SetMapIndex(ValueOf(1), ValueOf(2)) + x, ok := m[1] + if x != 2 { + t.Errorf("#1 after SetMapIndex(1,2): %d, %t (map=%v)", x, ok, m) + } + if n := mv.MapIndex(ValueOf(1)).Interface().(int); n != 2 { + t.Errorf("#1 MapIndex(1) = %d", n) + } + } + { + // convert interface key + m := make(map[any]int) + mv := ValueOf(m) + mv.SetMapIndex(ValueOf(1), ValueOf(2)) + x, ok := m[1] + if x != 2 { + t.Errorf("#2 after SetMapIndex(1,2): %d, %t (map=%v)", x, ok, m) + } + if n := mv.MapIndex(ValueOf(1)).Interface().(int); n != 2 { + t.Errorf("#2 MapIndex(1) = %d", n) + } + } + { + // convert interface value + m := make(map[int]any) + mv := ValueOf(m) + mv.SetMapIndex(ValueOf(1), ValueOf(2)) + x, ok := m[1] + if x != 2 { + t.Errorf("#3 after SetMapIndex(1,2): %d, %t (map=%v)", x, ok, m) + } + if n := mv.MapIndex(ValueOf(1)).Interface().(int); n != 2 { + t.Errorf("#3 MapIndex(1) = %d", n) + } + } + { + // convert both interface key and interface value + m := make(map[any]any) + mv := ValueOf(m) + mv.SetMapIndex(ValueOf(1), ValueOf(2)) + x, ok := m[1] + if x != 2 { + t.Errorf("#4 after SetMapIndex(1,2): %d, %t (map=%v)", x, ok, m) + } + if n := mv.MapIndex(ValueOf(1)).Interface().(int); n != 2 { + t.Errorf("#4 MapIndex(1) = %d", n) + } + } + { + // convert both, with non-empty interfaces + m := make(map[io.Reader]io.Writer) + mv := ValueOf(m) + b1 := new(bytes.Buffer) + b2 := new(bytes.Buffer) + mv.SetMapIndex(ValueOf(b1), ValueOf(b2)) + x, ok := m[b1] + if x != b2 { + t.Errorf("#5 after SetMapIndex(b1, b2): %p (!= %p), %t (map=%v)", x, b2, ok, m) + } + if p := mv.MapIndex(ValueOf(b1)).Elem().UnsafePointer(); p != unsafe.Pointer(b2) { + t.Errorf("#5 MapIndex(b1) = %#x want %p", p, b2) + } + } + { + // convert channel direction + m := make(map[<-chan int]chan int) + mv := ValueOf(m) + c1 := make(chan int) + c2 := make(chan int) + mv.SetMapIndex(ValueOf(c1), ValueOf(c2)) + x, ok := m[c1] + if x != c2 { + t.Errorf("#6 after SetMapIndex(c1, c2): %p (!= %p), %t (map=%v)", x, c2, ok, m) + } + if p := mv.MapIndex(ValueOf(c1)).UnsafePointer(); p != ValueOf(c2).UnsafePointer() { + t.Errorf("#6 MapIndex(c1) = %#x want %p", p, c2) + } + } + { + // convert identical underlying types + type MyBuffer bytes.Buffer + m := make(map[*MyBuffer]*bytes.Buffer) + mv := ValueOf(m) + b1 := new(MyBuffer) + b2 := new(bytes.Buffer) + mv.SetMapIndex(ValueOf(b1), ValueOf(b2)) + x, ok := m[b1] + if x != b2 { + t.Errorf("#7 after SetMapIndex(b1, b2): %p (!= %p), %t (map=%v)", x, b2, ok, m) + } + if p := mv.MapIndex(ValueOf(b1)).UnsafePointer(); p != unsafe.Pointer(b2) { + t.Errorf("#7 MapIndex(b1) = %#x want %p", p, b2) + } + } + +} + +func TestImplicitSetConversion(t *testing.T) { + // Assume TestImplicitMapConversion covered the basics. + // Just make sure conversions are being applied at all. + var r io.Reader + b := new(bytes.Buffer) + rv := ValueOf(&r).Elem() + rv.Set(ValueOf(b)) + if r != b { + t.Errorf("after Set: r=%T(%v)", r, r) + } +} + +func TestImplicitSendConversion(t *testing.T) { + c := make(chan io.Reader, 10) + b := new(bytes.Buffer) + ValueOf(c).Send(ValueOf(b)) + if bb := <-c; bb != b { + t.Errorf("Received %p != %p", bb, b) + } +} + +func TestImplicitCallConversion(t *testing.T) { + // Arguments must be assignable to parameter types. 
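The flip side is that Call panics when an argument is not assignable to the parameter type. A minimal sketch of that failure mode (illustrative, not part of the test file):

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		f := reflect.ValueOf(func(s string) {})
		defer func() { fmt.Println("panicked:", recover() != nil) }() // panicked: true
		f.Call([]reflect.Value{reflect.ValueOf(42)}) // int is not assignable to string
	}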
+ fv := ValueOf(io.WriteString) + b := new(strings.Builder) + fv.Call([]Value{ValueOf(b), ValueOf("hello world")}) + if b.String() != "hello world" { + t.Errorf("After call: string=%q want %q", b.String(), "hello world") + } +} + +func TestImplicitAppendConversion(t *testing.T) { + // Arguments must be assignable to the slice's element type. + s := []io.Reader{} + sv := ValueOf(&s).Elem() + b := new(bytes.Buffer) + sv.Set(Append(sv, ValueOf(b))) + if len(s) != 1 || s[0] != b { + t.Errorf("after append: s=%v want [%p]", s, b) + } +} + +var implementsTests = []struct { + x any + t any + b bool +}{ + {new(*bytes.Buffer), new(io.Reader), true}, + {new(bytes.Buffer), new(io.Reader), false}, + {new(*bytes.Buffer), new(io.ReaderAt), false}, + {new(*ast.Ident), new(ast.Expr), true}, + {new(*notAnExpr), new(ast.Expr), false}, + {new(*ast.Ident), new(notASTExpr), false}, + {new(notASTExpr), new(ast.Expr), false}, + {new(ast.Expr), new(notASTExpr), false}, + {new(*notAnExpr), new(notASTExpr), true}, +} + +type notAnExpr struct{} + +func (notAnExpr) Pos() token.Pos { return token.NoPos } +func (notAnExpr) End() token.Pos { return token.NoPos } +func (notAnExpr) exprNode() {} + +type notASTExpr interface { + Pos() token.Pos + End() token.Pos + exprNode() +} + +func TestImplements(t *testing.T) { + for _, tt := range implementsTests { + xv := TypeOf(tt.x).Elem() + xt := TypeOf(tt.t).Elem() + if b := xv.Implements(xt); b != tt.b { + t.Errorf("(%s).Implements(%s) = %v, want %v", xv.String(), xt.String(), b, tt.b) + } + } +} + +var assignableTests = []struct { + x any + t any + b bool +}{ + {new(chan int), new(<-chan int), true}, + {new(<-chan int), new(chan int), false}, + {new(*int), new(IntPtr), true}, + {new(IntPtr), new(*int), true}, + {new(IntPtr), new(IntPtr1), false}, + {new(Ch), new(<-chan any), true}, + // test runs implementsTests too +} + +type IntPtr *int +type IntPtr1 *int +type Ch <-chan any + +func TestAssignableTo(t *testing.T) { + for _, tt := range append(assignableTests, implementsTests...) { + xv := TypeOf(tt.x).Elem() + xt := TypeOf(tt.t).Elem() + if b := xv.AssignableTo(xt); b != tt.b { + t.Errorf("(%s).AssignableTo(%s) = %v, want %v", xv.String(), xt.String(), b, tt.b) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/reflect/stubs_ppc64x.go b/platform/dbops/binaries/go/go/src/reflect/stubs_ppc64x.go new file mode 100644 index 0000000000000000000000000000000000000000..06c8bf5483bbe5baeee34a05c557e8c47613e1a6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/stubs_ppc64x.go @@ -0,0 +1,10 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64le || ppc64 + +package reflect + +func archFloat32FromReg(reg uint64) float32 +func archFloat32ToReg(val float32) uint64 diff --git a/platform/dbops/binaries/go/go/src/reflect/stubs_riscv64.go b/platform/dbops/binaries/go/go/src/reflect/stubs_riscv64.go new file mode 100644 index 0000000000000000000000000000000000000000..a72ebab9706f02fad021dd300333c74d9d1b7229 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/stubs_riscv64.go @@ -0,0 +1,8 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
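The IntPtr/IntPtr1 rows in assignableTests above follow directly from Go's assignability rules: two distinct defined types are never assignable to each other, even with identical underlying types, while either is assignable to the undefined type *int. A standalone sketch (illustrative):

	package main

	import (
		"fmt"
		"reflect"
	)

	type IntPtr *int
	type IntPtr1 *int

	func main() {
		a := reflect.TypeOf(IntPtr(nil))
		fmt.Println(a.AssignableTo(reflect.TypeOf(IntPtr1(nil)))) // false: both are defined types
		fmt.Println(a.AssignableTo(reflect.TypeOf((*int)(nil))))  // true: *int is not a defined type
	}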
+ +package reflect + +func archFloat32FromReg(reg uint64) float32 +func archFloat32ToReg(val float32) uint64 diff --git a/platform/dbops/binaries/go/go/src/reflect/swapper.go b/platform/dbops/binaries/go/go/src/reflect/swapper.go new file mode 100644 index 0000000000000000000000000000000000000000..1e8f4ed16364892a5eb342feeb11dac7de664f5d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/swapper.go @@ -0,0 +1,79 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflect + +import ( + "internal/abi" + "internal/goarch" + "internal/unsafeheader" + "unsafe" +) + +// Swapper returns a function that swaps the elements in the provided +// slice. +// +// Swapper panics if the provided interface is not a slice. +func Swapper(slice any) func(i, j int) { + v := ValueOf(slice) + if v.Kind() != Slice { + panic(&ValueError{Method: "Swapper", Kind: v.Kind()}) + } + // Fast path for slices of size 0 and 1. Nothing to swap. + switch v.Len() { + case 0: + return func(i, j int) { panic("reflect: slice index out of range") } + case 1: + return func(i, j int) { + if i != 0 || j != 0 { + panic("reflect: slice index out of range") + } + } + } + + typ := v.Type().Elem().common() + size := typ.Size() + hasPtr := typ.PtrBytes != 0 + + // Some common & small cases, without using memmove: + if hasPtr { + if size == goarch.PtrSize { + ps := *(*[]unsafe.Pointer)(v.ptr) + return func(i, j int) { ps[i], ps[j] = ps[j], ps[i] } + } + if typ.Kind() == abi.String { + ss := *(*[]string)(v.ptr) + return func(i, j int) { ss[i], ss[j] = ss[j], ss[i] } + } + } else { + switch size { + case 8: + is := *(*[]int64)(v.ptr) + return func(i, j int) { is[i], is[j] = is[j], is[i] } + case 4: + is := *(*[]int32)(v.ptr) + return func(i, j int) { is[i], is[j] = is[j], is[i] } + case 2: + is := *(*[]int16)(v.ptr) + return func(i, j int) { is[i], is[j] = is[j], is[i] } + case 1: + is := *(*[]int8)(v.ptr) + return func(i, j int) { is[i], is[j] = is[j], is[i] } + } + } + + s := (*unsafeheader.Slice)(v.ptr) + tmp := unsafe_New(typ) // swap scratch space + + return func(i, j int) { + if uint(i) >= uint(s.Len) || uint(j) >= uint(s.Len) { + panic("reflect: slice index out of range") + } + val1 := arrayAt(s.Data, i, size, "i < s.Len") + val2 := arrayAt(s.Data, j, size, "j < s.Len") + typedmemmove(typ, tmp, val1) + typedmemmove(typ, val1, val2) + typedmemmove(typ, val2, tmp) + } +} diff --git a/platform/dbops/binaries/go/go/src/reflect/tostring_test.go b/platform/dbops/binaries/go/go/src/reflect/tostring_test.go new file mode 100644 index 0000000000000000000000000000000000000000..193484a01bc4eb9548d8510db8a91bd92d367243 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/tostring_test.go @@ -0,0 +1,95 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Formatting of reflection types and values for debugging. +// Not defined as methods so they do not need to be linked into most binaries; +// the functions are not used by the library itself, only in tests. + +package reflect_test + +import ( + . "reflect" + "strconv" +) + +// valueToString returns a textual representation of the reflection value val. +// For debugging only. 
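Swapper, defined above, is the primitive behind sort-style swapping over an arbitrary slice. A minimal usage sketch:

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		s := []string{"a", "b", "c"}
		swap := reflect.Swapper(s)
		swap(0, 2)     // indexes are bounds-checked against the slice's length
		fmt.Println(s) // [c b a]
	}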
+func valueToString(val Value) string { + var str string + if !val.IsValid() { + return "" + } + typ := val.Type() + switch val.Kind() { + case Int, Int8, Int16, Int32, Int64: + return strconv.FormatInt(val.Int(), 10) + case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr: + return strconv.FormatUint(val.Uint(), 10) + case Float32, Float64: + return strconv.FormatFloat(val.Float(), 'g', -1, 64) + case Complex64, Complex128: + c := val.Complex() + return strconv.FormatFloat(real(c), 'g', -1, 64) + "+" + strconv.FormatFloat(imag(c), 'g', -1, 64) + "i" + case String: + return val.String() + case Bool: + if val.Bool() { + return "true" + } else { + return "false" + } + case Pointer: + v := val + str = typ.String() + "(" + if v.IsNil() { + str += "0" + } else { + str += "&" + valueToString(v.Elem()) + } + str += ")" + return str + case Array, Slice: + v := val + str += typ.String() + str += "{" + for i := 0; i < v.Len(); i++ { + if i > 0 { + str += ", " + } + str += valueToString(v.Index(i)) + } + str += "}" + return str + case Map: + t := typ + str = t.String() + str += "{" + str += "" + str += "}" + return str + case Chan: + str = typ.String() + return str + case Struct: + t := typ + v := val + str += t.String() + str += "{" + for i, n := 0, v.NumField(); i < n; i++ { + if i > 0 { + str += ", " + } + str += valueToString(v.Field(i)) + } + str += "}" + return str + case Interface: + return typ.String() + "(" + valueToString(val.Elem()) + ")" + case Func: + v := val + return typ.String() + "(" + strconv.FormatUint(uint64(v.Pointer()), 10) + ")" + default: + panic("valueToString: can't print type " + typ.String()) + } +} diff --git a/platform/dbops/binaries/go/go/src/reflect/type.go b/platform/dbops/binaries/go/go/src/reflect/type.go new file mode 100644 index 0000000000000000000000000000000000000000..89c50155302346e42b29f2f50aa23dcd3821c960 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/type.go @@ -0,0 +1,2885 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package reflect implements run-time reflection, allowing a program to +// manipulate objects with arbitrary types. The typical use is to take a value +// with static type interface{} and extract its dynamic type information by +// calling TypeOf, which returns a Type. +// +// A call to ValueOf returns a Value representing the run-time data. +// Zero takes a Type and returns a Value representing a zero value +// for that type. +// +// See "The Laws of Reflection" for an introduction to reflection in Go: +// https://golang.org/doc/articles/laws_of_reflection.html +package reflect + +import ( + "internal/abi" + "internal/goarch" + "strconv" + "sync" + "unicode" + "unicode/utf8" + "unsafe" +) + +// Type is the representation of a Go type. +// +// Not all methods apply to all kinds of types. Restrictions, +// if any, are noted in the documentation for each method. +// Use the Kind method to find out the kind of type before +// calling kind-specific methods. Calling a method +// inappropriate to the kind of type causes a run-time panic. +// +// Type values are comparable, such as with the == operator, +// so they can be used as map keys. +// Two Type values are equal if they represent identical types. +type Type interface { + // Methods applicable to all types. + + // Align returns the alignment in bytes of a value of + // this type when allocated in memory. 
+ Align() int + + // FieldAlign returns the alignment in bytes of a value of + // this type when used as a field in a struct. + FieldAlign() int + + // Method returns the i'th method in the type's method set. + // It panics if i is not in the range [0, NumMethod()). + // + // For a non-interface type T or *T, the returned Method's Type and Func + // fields describe a function whose first argument is the receiver, + // and only exported methods are accessible. + // + // For an interface type, the returned Method's Type field gives the + // method signature, without a receiver, and the Func field is nil. + // + // Methods are sorted in lexicographic order. + Method(int) Method + + // MethodByName returns the method with that name in the type's + // method set and a boolean indicating if the method was found. + // + // For a non-interface type T or *T, the returned Method's Type and Func + // fields describe a function whose first argument is the receiver. + // + // For an interface type, the returned Method's Type field gives the + // method signature, without a receiver, and the Func field is nil. + MethodByName(string) (Method, bool) + + // NumMethod returns the number of methods accessible using Method. + // + // For a non-interface type, it returns the number of exported methods. + // + // For an interface type, it returns the number of exported and unexported methods. + NumMethod() int + + // Name returns the type's name within its package for a defined type. + // For other (non-defined) types it returns the empty string. + Name() string + + // PkgPath returns a defined type's package path, that is, the import path + // that uniquely identifies the package, such as "encoding/base64". + // If the type was predeclared (string, error) or not defined (*T, struct{}, + // []int, or A where A is an alias for a non-defined type), the package path + // will be the empty string. + PkgPath() string + + // Size returns the number of bytes needed to store + // a value of the given type; it is analogous to unsafe.Sizeof. + Size() uintptr + + // String returns a string representation of the type. + // The string representation may use shortened package names + // (e.g., base64 instead of "encoding/base64") and is not + // guaranteed to be unique among types. To test for type identity, + // compare the Types directly. + String() string + + // Kind returns the specific kind of this type. + Kind() Kind + + // Implements reports whether the type implements the interface type u. + Implements(u Type) bool + + // AssignableTo reports whether a value of the type is assignable to type u. + AssignableTo(u Type) bool + + // ConvertibleTo reports whether a value of the type is convertible to type u. + // Even if ConvertibleTo returns true, the conversion may still panic. + // For example, a slice of type []T is convertible to *[N]T, + // but the conversion will panic if its length is less than N. + ConvertibleTo(u Type) bool + + // Comparable reports whether values of this type are comparable. + // Even if Comparable returns true, the comparison may still panic. + // For example, values of interface type are comparable, + // but the comparison will panic if their dynamic type is not comparable. + Comparable() bool + + // Methods applicable only to some types, depending on Kind. + // The methods allowed for each kind are: + // + // Int*, Uint*, Float*, Complex*: Bits + // Array: Elem, Len + // Chan: ChanDir, Elem + // Func: In, NumIn, Out, NumOut, IsVariadic. 
+ // Map: Key, Elem + // Pointer: Elem + // Slice: Elem + // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField + + // Bits returns the size of the type in bits. + // It panics if the type's Kind is not one of the + // sized or unsized Int, Uint, Float, or Complex kinds. + Bits() int + + // ChanDir returns a channel type's direction. + // It panics if the type's Kind is not Chan. + ChanDir() ChanDir + + // IsVariadic reports whether a function type's final input parameter + // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's + // implicit actual type []T. + // + // For concreteness, if t represents func(x int, y ... float64), then + // + // t.NumIn() == 2 + // t.In(0) is the reflect.Type for "int" + // t.In(1) is the reflect.Type for "[]float64" + // t.IsVariadic() == true + // + // IsVariadic panics if the type's Kind is not Func. + IsVariadic() bool + + // Elem returns a type's element type. + // It panics if the type's Kind is not Array, Chan, Map, Pointer, or Slice. + Elem() Type + + // Field returns a struct type's i'th field. + // It panics if the type's Kind is not Struct. + // It panics if i is not in the range [0, NumField()). + Field(i int) StructField + + // FieldByIndex returns the nested field corresponding + // to the index sequence. It is equivalent to calling Field + // successively for each index i. + // It panics if the type's Kind is not Struct. + FieldByIndex(index []int) StructField + + // FieldByName returns the struct field with the given name + // and a boolean indicating if the field was found. + // If the returned field is promoted from an embedded struct, + // then Offset in the returned StructField is the offset in + // the embedded struct. + FieldByName(name string) (StructField, bool) + + // FieldByNameFunc returns the struct field with a name + // that satisfies the match function and a boolean indicating if + // the field was found. + // + // FieldByNameFunc considers the fields in the struct itself + // and then the fields in any embedded structs, in breadth first order, + // stopping at the shallowest nesting depth containing one or more + // fields satisfying the match function. If multiple fields at that depth + // satisfy the match function, they cancel each other + // and FieldByNameFunc returns no match. + // This behavior mirrors Go's handling of name lookup in + // structs containing embedded fields. + // + // If the returned field is promoted from an embedded struct, + // then Offset in the returned StructField is the offset in + // the embedded struct. + FieldByNameFunc(match func(string) bool) (StructField, bool) + + // In returns the type of a function type's i'th input parameter. + // It panics if the type's Kind is not Func. + // It panics if i is not in the range [0, NumIn()). + In(i int) Type + + // Key returns a map type's key type. + // It panics if the type's Kind is not Map. + Key() Type + + // Len returns an array type's length. + // It panics if the type's Kind is not Array. + Len() int + + // NumField returns a struct type's field count. + // It panics if the type's Kind is not Struct. + NumField() int + + // NumIn returns a function type's input parameter count. + // It panics if the type's Kind is not Func. + NumIn() int + + // NumOut returns a function type's output parameter count. + // It panics if the type's Kind is not Func. + NumOut() int + + // Out returns the type of a function type's i'th output parameter. + // It panics if the type's Kind is not Func. 
+ // It panics if i is not in the range [0, NumOut()). + Out(i int) Type + + common() *abi.Type + uncommon() *uncommonType +} + +// BUG(rsc): FieldByName and related functions consider struct field names to be equal +// if the names are equal, even if they are unexported names originating +// in different packages. The practical effect of this is that the result of +// t.FieldByName("x") is not well defined if the struct type t contains +// multiple fields named x (embedded from different packages). +// FieldByName may return one of the fields named x or may report that there are none. +// See https://golang.org/issue/4876 for more details. + +/* + * These data structures are known to the compiler (../cmd/compile/internal/reflectdata/reflect.go). + * A few are known to ../runtime/type.go to convey to debuggers. + * They are also known to ../runtime/type.go. + */ + +// A Kind represents the specific kind of type that a [Type] represents. +// The zero Kind is not a valid kind. +type Kind uint + +const ( + Invalid Kind = iota + Bool + Int + Int8 + Int16 + Int32 + Int64 + Uint + Uint8 + Uint16 + Uint32 + Uint64 + Uintptr + Float32 + Float64 + Complex64 + Complex128 + Array + Chan + Func + Interface + Map + Pointer + Slice + String + Struct + UnsafePointer +) + +// Ptr is the old name for the [Pointer] kind. +const Ptr = Pointer + +// uncommonType is present only for defined types or types with methods +// (if T is a defined type, the uncommonTypes for T and *T have methods). +// Using a pointer to this struct reduces the overall size required +// to describe a non-defined type with no methods. +type uncommonType = abi.UncommonType + +// Embed this type to get common/uncommon +type common struct { + abi.Type +} + +// rtype is the common implementation of most values. +// It is embedded in other struct types. +type rtype struct { + t abi.Type +} + +func (t *rtype) common() *abi.Type { + return &t.t +} + +func (t *rtype) uncommon() *abi.UncommonType { + return t.t.Uncommon() +} + +type aNameOff = abi.NameOff +type aTypeOff = abi.TypeOff +type aTextOff = abi.TextOff + +// ChanDir represents a channel type's direction. +type ChanDir int + +const ( + RecvDir ChanDir = 1 << iota // <-chan + SendDir // chan<- + BothDir = RecvDir | SendDir // chan +) + +// arrayType represents a fixed array type. +type arrayType = abi.ArrayType + +// chanType represents a channel type. +type chanType = abi.ChanType + +// funcType represents a function type. +// +// A *rtype for each in and out parameter is stored in an array that +// directly follows the funcType (and possibly its uncommonType). So +// a function type with one method, one input, and one output is: +// +// struct { +// funcType +// uncommonType +// [2]*rtype // [0] is in, [1] is out +// } +type funcType = abi.FuncType + +// interfaceType represents an interface type. +type interfaceType struct { + abi.InterfaceType // can embed directly because not a public type. +} + +func (t *interfaceType) nameOff(off aNameOff) abi.Name { + return toRType(&t.Type).nameOff(off) +} + +func nameOffFor(t *abi.Type, off aNameOff) abi.Name { + return toRType(t).nameOff(off) +} + +func typeOffFor(t *abi.Type, off aTypeOff) *abi.Type { + return toRType(t).typeOff(off) +} + +func (t *interfaceType) typeOff(off aTypeOff) *abi.Type { + return toRType(&t.Type).typeOff(off) +} + +func (t *interfaceType) common() *abi.Type { + return &t.Type +} + +func (t *interfaceType) uncommon() *abi.UncommonType { + return t.Uncommon() +} + +// mapType represents a map type. 
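The kind-specific accessors listed in the Type interface above compose naturally once Kind has been checked. A short sketch walking a map type (illustrative):

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		t := reflect.TypeOf(map[string][]int(nil))
		// Key and Elem are valid here because t.Kind() == reflect.Map.
		fmt.Println(t.Kind(), t.Key(), t.Elem(), t.Elem().Elem()) // map string []int int
	}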
+type mapType struct { + abi.MapType +} + +// ptrType represents a pointer type. +type ptrType struct { + abi.PtrType +} + +// sliceType represents a slice type. +type sliceType struct { + abi.SliceType +} + +// Struct field +type structField = abi.StructField + +// structType represents a struct type. +type structType struct { + abi.StructType +} + +func pkgPath(n abi.Name) string { + if n.Bytes == nil || *n.DataChecked(0, "name flag field")&(1<<2) == 0 { + return "" + } + i, l := n.ReadVarint(1) + off := 1 + i + l + if n.HasTag() { + i2, l2 := n.ReadVarint(off) + off += i2 + l2 + } + var nameOff int32 + // Note that this field may not be aligned in memory, + // so we cannot use a direct int32 assignment here. + copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.DataChecked(off, "name offset field")))[:]) + pkgPathName := abi.Name{Bytes: (*byte)(resolveTypeOff(unsafe.Pointer(n.Bytes), nameOff))} + return pkgPathName.Name() +} + +func newName(n, tag string, exported, embedded bool) abi.Name { + return abi.NewName(n, tag, exported, embedded) +} + +/* + * The compiler knows the exact layout of all the data structures above. + * The compiler does not know about the data structures and methods below. + */ + +// Method represents a single method. +type Method struct { + // Name is the method name. + Name string + + // PkgPath is the package path that qualifies a lower case (unexported) + // method name. It is empty for upper case (exported) method names. + // The combination of PkgPath and Name uniquely identifies a method + // in a method set. + // See https://golang.org/ref/spec#Uniqueness_of_identifiers + PkgPath string + + Type Type // method type + Func Value // func with receiver as first argument + Index int // index for Type.Method +} + +// IsExported reports whether the method is exported. +func (m Method) IsExported() bool { + return m.PkgPath == "" +} + +const ( + kindDirectIface = 1 << 5 + kindGCProg = 1 << 6 // Type.gc points to GC program + kindMask = (1 << 5) - 1 +) + +// String returns the name of k. +func (k Kind) String() string { + if uint(k) < uint(len(kindNames)) { + return kindNames[uint(k)] + } + return "kind" + strconv.Itoa(int(k)) +} + +var kindNames = []string{ + Invalid: "invalid", + Bool: "bool", + Int: "int", + Int8: "int8", + Int16: "int16", + Int32: "int32", + Int64: "int64", + Uint: "uint", + Uint8: "uint8", + Uint16: "uint16", + Uint32: "uint32", + Uint64: "uint64", + Uintptr: "uintptr", + Float32: "float32", + Float64: "float64", + Complex64: "complex64", + Complex128: "complex128", + Array: "array", + Chan: "chan", + Func: "func", + Interface: "interface", + Map: "map", + Pointer: "ptr", + Slice: "slice", + String: "string", + Struct: "struct", + UnsafePointer: "unsafe.Pointer", +} + +// resolveNameOff resolves a name offset from a base pointer. +// The (*rtype).nameOff method is a convenience wrapper for this function. +// Implemented in the runtime package. +// +//go:noescape +func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer + +// resolveTypeOff resolves an *rtype offset from a base type. +// The (*rtype).typeOff method is a convenience wrapper for this function. +// Implemented in the runtime package. +// +//go:noescape +func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer + +// resolveTextOff resolves a function pointer offset from a base type. +// The (*rtype).textOff method is a convenience wrapper for this function. +// Implemented in the runtime package. 
+// +//go:noescape +func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer + +// addReflectOff adds a pointer to the reflection lookup map in the runtime. +// It returns a new ID that can be used as a typeOff or textOff, and will +// be resolved correctly. Implemented in the runtime package. +// +//go:noescape +func addReflectOff(ptr unsafe.Pointer) int32 + +// resolveReflectName adds a name to the reflection lookup map in the runtime. +// It returns a new nameOff that can be used to refer to the pointer. +func resolveReflectName(n abi.Name) aNameOff { + return aNameOff(addReflectOff(unsafe.Pointer(n.Bytes))) +} + +// resolveReflectType adds a *rtype to the reflection lookup map in the runtime. +// It returns a new typeOff that can be used to refer to the pointer. +func resolveReflectType(t *abi.Type) aTypeOff { + return aTypeOff(addReflectOff(unsafe.Pointer(t))) +} + +// resolveReflectText adds a function pointer to the reflection lookup map in +// the runtime. It returns a new textOff that can be used to refer to the +// pointer. +func resolveReflectText(ptr unsafe.Pointer) aTextOff { + return aTextOff(addReflectOff(ptr)) +} + +func (t *rtype) nameOff(off aNameOff) abi.Name { + return abi.Name{Bytes: (*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))} +} + +func (t *rtype) typeOff(off aTypeOff) *abi.Type { + return (*abi.Type)(resolveTypeOff(unsafe.Pointer(t), int32(off))) +} + +func (t *rtype) textOff(off aTextOff) unsafe.Pointer { + return resolveTextOff(unsafe.Pointer(t), int32(off)) +} + +func textOffFor(t *abi.Type, off aTextOff) unsafe.Pointer { + return toRType(t).textOff(off) +} + +func (t *rtype) String() string { + s := t.nameOff(t.t.Str).Name() + if t.t.TFlag&abi.TFlagExtraStar != 0 { + return s[1:] + } + return s +} + +func (t *rtype) Size() uintptr { return t.t.Size() } + +func (t *rtype) Bits() int { + if t == nil { + panic("reflect: Bits of nil Type") + } + k := t.Kind() + if k < Int || k > Complex128 { + panic("reflect: Bits of non-arithmetic Type " + t.String()) + } + return int(t.t.Size_) * 8 +} + +func (t *rtype) Align() int { return t.t.Align() } + +func (t *rtype) FieldAlign() int { return t.t.FieldAlign() } + +func (t *rtype) Kind() Kind { return Kind(t.t.Kind()) } + +func (t *rtype) exportedMethods() []abi.Method { + ut := t.uncommon() + if ut == nil { + return nil + } + return ut.ExportedMethods() +} + +func (t *rtype) NumMethod() int { + if t.Kind() == Interface { + tt := (*interfaceType)(unsafe.Pointer(t)) + return tt.NumMethod() + } + return len(t.exportedMethods()) +} + +func (t *rtype) Method(i int) (m Method) { + if t.Kind() == Interface { + tt := (*interfaceType)(unsafe.Pointer(t)) + return tt.Method(i) + } + methods := t.exportedMethods() + if i < 0 || i >= len(methods) { + panic("reflect: Method index out of range") + } + p := methods[i] + pname := t.nameOff(p.Name) + m.Name = pname.Name() + fl := flag(Func) + mtyp := t.typeOff(p.Mtyp) + ft := (*funcType)(unsafe.Pointer(mtyp)) + in := make([]Type, 0, 1+ft.NumIn()) + in = append(in, t) + for _, arg := range ft.InSlice() { + in = append(in, toRType(arg)) + } + out := make([]Type, 0, ft.NumOut()) + for _, ret := range ft.OutSlice() { + out = append(out, toRType(ret)) + } + mt := FuncOf(in, out, ft.IsVariadic()) + m.Type = mt + tfn := t.textOff(p.Tfn) + fn := unsafe.Pointer(&tfn) + m.Func = Value{&mt.(*rtype).t, fn, fl} + + m.Index = i + return m +} + +func (t *rtype) MethodByName(name string) (m Method, ok bool) { + if t.Kind() == Interface { + tt := (*interfaceType)(unsafe.Pointer(t)) + 
return tt.MethodByName(name) + } + ut := t.uncommon() + if ut == nil { + return Method{}, false + } + + methods := ut.ExportedMethods() + + // We are looking for the first index i where the string becomes >= s. + // This is a copy of sort.Search, with f(h) replaced by (t.nameOff(methods[h].name).name() >= name). + i, j := 0, len(methods) + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if !(t.nameOff(methods[h].Name).Name() >= name) { + i = h + 1 // preserves f(i-1) == false + } else { + j = h // preserves f(j) == true + } + } + // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. + if i < len(methods) && name == t.nameOff(methods[i].Name).Name() { + return t.Method(i), true + } + + return Method{}, false +} + +func (t *rtype) PkgPath() string { + if t.t.TFlag&abi.TFlagNamed == 0 { + return "" + } + ut := t.uncommon() + if ut == nil { + return "" + } + return t.nameOff(ut.PkgPath).Name() +} + +func pkgPathFor(t *abi.Type) string { + return toRType(t).PkgPath() +} + +func (t *rtype) Name() string { + if !t.t.HasName() { + return "" + } + s := t.String() + i := len(s) - 1 + sqBrackets := 0 + for i >= 0 && (s[i] != '.' || sqBrackets != 0) { + switch s[i] { + case ']': + sqBrackets++ + case '[': + sqBrackets-- + } + i-- + } + return s[i+1:] +} + +func nameFor(t *abi.Type) string { + return toRType(t).Name() +} + +func (t *rtype) ChanDir() ChanDir { + if t.Kind() != Chan { + panic("reflect: ChanDir of non-chan type " + t.String()) + } + tt := (*abi.ChanType)(unsafe.Pointer(t)) + return ChanDir(tt.Dir) +} + +func toRType(t *abi.Type) *rtype { + return (*rtype)(unsafe.Pointer(t)) +} + +func elem(t *abi.Type) *abi.Type { + et := t.Elem() + if et != nil { + return et + } + panic("reflect: Elem of invalid type " + stringFor(t)) +} + +func (t *rtype) Elem() Type { + return toType(elem(t.common())) +} + +func (t *rtype) Field(i int) StructField { + if t.Kind() != Struct { + panic("reflect: Field of non-struct type " + t.String()) + } + tt := (*structType)(unsafe.Pointer(t)) + return tt.Field(i) +} + +func (t *rtype) FieldByIndex(index []int) StructField { + if t.Kind() != Struct { + panic("reflect: FieldByIndex of non-struct type " + t.String()) + } + tt := (*structType)(unsafe.Pointer(t)) + return tt.FieldByIndex(index) +} + +func (t *rtype) FieldByName(name string) (StructField, bool) { + if t.Kind() != Struct { + panic("reflect: FieldByName of non-struct type " + t.String()) + } + tt := (*structType)(unsafe.Pointer(t)) + return tt.FieldByName(name) +} + +func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) { + if t.Kind() != Struct { + panic("reflect: FieldByNameFunc of non-struct type " + t.String()) + } + tt := (*structType)(unsafe.Pointer(t)) + return tt.FieldByNameFunc(match) +} + +func (t *rtype) Key() Type { + if t.Kind() != Map { + panic("reflect: Key of non-map type " + t.String()) + } + tt := (*mapType)(unsafe.Pointer(t)) + return toType(tt.Key) +} + +func (t *rtype) Len() int { + if t.Kind() != Array { + panic("reflect: Len of non-array type " + t.String()) + } + tt := (*arrayType)(unsafe.Pointer(t)) + return int(tt.Len) +} + +func (t *rtype) NumField() int { + if t.Kind() != Struct { + panic("reflect: NumField of non-struct type " + t.String()) + } + tt := (*structType)(unsafe.Pointer(t)) + return len(tt.Fields) +} + +func (t *rtype) In(i int) Type { + if t.Kind() != Func { + panic("reflect: In of non-func type " + t.String()) + } + tt := (*abi.FuncType)(unsafe.Pointer(t)) + return 
toType(tt.InSlice()[i])
+}
+
+func (t *rtype) NumIn() int {
+	if t.Kind() != Func {
+		panic("reflect: NumIn of non-func type " + t.String())
+	}
+	tt := (*abi.FuncType)(unsafe.Pointer(t))
+	return tt.NumIn()
+}
+
+func (t *rtype) NumOut() int {
+	if t.Kind() != Func {
+		panic("reflect: NumOut of non-func type " + t.String())
+	}
+	tt := (*abi.FuncType)(unsafe.Pointer(t))
+	return tt.NumOut()
+}
+
+func (t *rtype) Out(i int) Type {
+	if t.Kind() != Func {
+		panic("reflect: Out of non-func type " + t.String())
+	}
+	tt := (*abi.FuncType)(unsafe.Pointer(t))
+	return toType(tt.OutSlice()[i])
+}
+
+func (t *rtype) IsVariadic() bool {
+	if t.Kind() != Func {
+		panic("reflect: IsVariadic of non-func type " + t.String())
+	}
+	tt := (*abi.FuncType)(unsafe.Pointer(t))
+	return tt.IsVariadic()
+}
+
+// add returns p+x.
+//
+// The whySafe string is ignored, so that the function still inlines
+// as efficiently as p+x, but all call sites should use the string to
+// record why the addition is safe, which is to say why the addition
+// does not cause x to advance to the very end of p's allocation
+// and therefore point incorrectly at the next block in memory.
+func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
+	return unsafe.Pointer(uintptr(p) + x)
+}
+
+func (d ChanDir) String() string {
+	switch d {
+	case SendDir:
+		return "chan<-"
+	case RecvDir:
+		return "<-chan"
+	case BothDir:
+		return "chan"
+	}
+	return "ChanDir" + strconv.Itoa(int(d))
+}
+
+// Method returns the i'th method in the type's method set.
+func (t *interfaceType) Method(i int) (m Method) {
+	if i < 0 || i >= len(t.Methods) {
+		return
+	}
+	p := &t.Methods[i]
+	pname := t.nameOff(p.Name)
+	m.Name = pname.Name()
+	if !pname.IsExported() {
+		m.PkgPath = pkgPath(pname)
+		if m.PkgPath == "" {
+			m.PkgPath = t.PkgPath.Name()
+		}
+	}
+	m.Type = toType(t.typeOff(p.Typ))
+	m.Index = i
+	return
+}
+
+// NumMethod returns the number of interface methods in the type's method set.
+func (t *interfaceType) NumMethod() int { return len(t.Methods) }
+
+// MethodByName returns the method with the given name in the type's method set.
+func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
+	if t == nil {
+		return
+	}
+	var p *abi.Imethod
+	for i := range t.Methods {
+		p = &t.Methods[i]
+		if t.nameOff(p.Name).Name() == name {
+			return t.Method(i), true
+		}
+	}
+	return
+}
+
+// A StructField describes a single field in a struct.
+type StructField struct {
+	// Name is the field name.
+	Name string
+
+	// PkgPath is the package path that qualifies a lower case (unexported)
+	// field name. It is empty for upper case (exported) field names.
+	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
+	PkgPath string
+
+	Type      Type      // field type
+	Tag       StructTag // field tag string
+	Offset    uintptr   // offset within struct, in bytes
+	Index     []int     // index sequence for Type.FieldByIndex
+	Anonymous bool      // is an embedded field
+}
+
+// IsExported reports whether the field is exported.
+func (f StructField) IsExported() bool {
+	return f.PkgPath == ""
+}
+
+// A StructTag is the tag string in a struct field.
+//
+// By convention, tag strings are a concatenation of
+// optionally space-separated key:"value" pairs.
+// Each key is a non-empty string consisting of non-control
+// characters other than space (U+0020 ' '), quote (U+0022 '"'),
+// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
+// characters and Go string literal syntax.
+type StructTag string + +// Get returns the value associated with key in the tag string. +// If there is no such key in the tag, Get returns the empty string. +// If the tag does not have the conventional format, the value +// returned by Get is unspecified. To determine whether a tag is +// explicitly set to the empty string, use Lookup. +func (tag StructTag) Get(key string) string { + v, _ := tag.Lookup(key) + return v +} + +// Lookup returns the value associated with key in the tag string. +// If the key is present in the tag the value (which may be empty) +// is returned. Otherwise the returned value will be the empty string. +// The ok return value reports whether the value was explicitly set in +// the tag string. If the tag does not have the conventional format, +// the value returned by Lookup is unspecified. +func (tag StructTag) Lookup(key string) (value string, ok bool) { + // When modifying this code, also update the validateStructTag code + // in cmd/vet/structtag.go. + + for tag != "" { + // Skip leading space. + i := 0 + for i < len(tag) && tag[i] == ' ' { + i++ + } + tag = tag[i:] + if tag == "" { + break + } + + // Scan to colon. A space, a quote or a control character is a syntax error. + // Strictly speaking, control chars include the range [0x7f, 0x9f], not just + // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters + // as it is simpler to inspect the tag's bytes than the tag's runes. + i = 0 + for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { + i++ + } + if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { + break + } + name := string(tag[:i]) + tag = tag[i+1:] + + // Scan quoted string to find value. + i = 1 + for i < len(tag) && tag[i] != '"' { + if tag[i] == '\\' { + i++ + } + i++ + } + if i >= len(tag) { + break + } + qvalue := string(tag[:i+1]) + tag = tag[i+1:] + + if key == name { + value, err := strconv.Unquote(qvalue) + if err != nil { + break + } + return value, true + } + } + return "", false +} + +// Field returns the i'th struct field. +func (t *structType) Field(i int) (f StructField) { + if i < 0 || i >= len(t.Fields) { + panic("reflect: Field index out of bounds") + } + p := &t.Fields[i] + f.Type = toType(p.Typ) + f.Name = p.Name.Name() + f.Anonymous = p.Embedded() + if !p.Name.IsExported() { + f.PkgPath = t.PkgPath.Name() + } + if tag := p.Name.Tag(); tag != "" { + f.Tag = StructTag(tag) + } + f.Offset = p.Offset + + // NOTE(rsc): This is the only allocation in the interface + // presented by a reflect.Type. It would be nice to avoid, + // at least in the common cases, but we need to make sure + // that misbehaving clients of reflect cannot affect other + // uses of reflect. One possibility is CL 5371098, but we + // postponed that ugliness until there is a demonstrated + // need for the performance. This is issue 2320. + f.Index = []int{i} + return +} + +// TODO(gri): Should there be an error/bool indicator if the index +// is wrong for FieldByIndex? + +// FieldByIndex returns the nested field corresponding to index. +func (t *structType) FieldByIndex(index []int) (f StructField) { + f.Type = toType(&t.Type) + for i, x := range index { + if i > 0 { + ft := f.Type + if ft.Kind() == Pointer && ft.Elem().Kind() == Struct { + ft = ft.Elem() + } + f.Type = ft + } + f = f.Type.Field(x) + } + return +} + +// A fieldScan represents an item on the fieldByNameFunc scan work list. 
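Because tag values are Go-quoted strings, Lookup above decodes escapes with strconv.Unquote before returning them. A small sketch (the tag content is made up for illustration):

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		tag := reflect.StructTag(`json:"name,omitempty" path:"a\\b"`)
		v, ok := tag.Lookup("path")
		fmt.Println(v, ok) // a\b true
	}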
+type fieldScan struct { + typ *structType + index []int +} + +// FieldByNameFunc returns the struct field with a name that satisfies the +// match function and a boolean to indicate if the field was found. +func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) { + // This uses the same condition that the Go language does: there must be a unique instance + // of the match at a given depth level. If there are multiple instances of a match at the + // same depth, they annihilate each other and inhibit any possible match at a lower level. + // The algorithm is breadth first search, one depth level at a time. + + // The current and next slices are work queues: + // current lists the fields to visit on this depth level, + // and next lists the fields on the next lower level. + current := []fieldScan{} + next := []fieldScan{{typ: t}} + + // nextCount records the number of times an embedded type has been + // encountered and considered for queueing in the 'next' slice. + // We only queue the first one, but we increment the count on each. + // If a struct type T can be reached more than once at a given depth level, + // then it annihilates itself and need not be considered at all when we + // process that next depth level. + var nextCount map[*structType]int + + // visited records the structs that have been considered already. + // Embedded pointer fields can create cycles in the graph of + // reachable embedded types; visited avoids following those cycles. + // It also avoids duplicated effort: if we didn't find the field in an + // embedded type T at level 2, we won't find it in one at level 4 either. + visited := map[*structType]bool{} + + for len(next) > 0 { + current, next = next, current[:0] + count := nextCount + nextCount = nil + + // Process all the fields at this depth, now listed in 'current'. + // The loop queues embedded fields found in 'next', for processing during the next + // iteration. The multiplicity of the 'current' field counts is recorded + // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'. + for _, scan := range current { + t := scan.typ + if visited[t] { + // We've looked through this type before, at a higher level. + // That higher level would shadow the lower level we're now at, + // so this one can't be useful to us. Ignore it. + continue + } + visited[t] = true + for i := range t.Fields { + f := &t.Fields[i] + // Find name and (for embedded field) type for field f. + fname := f.Name.Name() + var ntyp *abi.Type + if f.Embedded() { + // Embedded field of type T or *T. + ntyp = f.Typ + if ntyp.Kind() == abi.Pointer { + ntyp = ntyp.Elem() + } + } + + // Does it match? + if match(fname) { + // Potential match + if count[t] > 1 || ok { + // Name appeared multiple times at this level: annihilate. + return StructField{}, false + } + result = t.Field(i) + result.Index = nil + result.Index = append(result.Index, scan.index...) + result.Index = append(result.Index, i) + ok = true + continue + } + + // Queue embedded struct fields for processing with next level, + // but only if we haven't seen a match yet at this level and only + // if the embedded types haven't already been queued. 
+ if ok || ntyp == nil || ntyp.Kind() != abi.Struct { + continue + } + styp := (*structType)(unsafe.Pointer(ntyp)) + if nextCount[styp] > 0 { + nextCount[styp] = 2 // exact multiple doesn't matter + continue + } + if nextCount == nil { + nextCount = map[*structType]int{} + } + nextCount[styp] = 1 + if count[t] > 1 { + nextCount[styp] = 2 // exact multiple doesn't matter + } + var index []int + index = append(index, scan.index...) + index = append(index, i) + next = append(next, fieldScan{styp, index}) + } + } + if ok { + break + } + } + return +} + +// FieldByName returns the struct field with the given name +// and a boolean to indicate if the field was found. +func (t *structType) FieldByName(name string) (f StructField, present bool) { + // Quick check for top-level name, or struct without embedded fields. + hasEmbeds := false + if name != "" { + for i := range t.Fields { + tf := &t.Fields[i] + if tf.Name.Name() == name { + return t.Field(i), true + } + if tf.Embedded() { + hasEmbeds = true + } + } + } + if !hasEmbeds { + return + } + return t.FieldByNameFunc(func(s string) bool { return s == name }) +} + +// TypeOf returns the reflection [Type] that represents the dynamic type of i. +// If i is a nil interface value, TypeOf returns nil. +func TypeOf(i any) Type { + eface := *(*emptyInterface)(unsafe.Pointer(&i)) + // Noescape so this doesn't make i to escape. See the comment + // at Value.typ for why this is safe. + return toType((*abi.Type)(noescape(unsafe.Pointer(eface.typ)))) +} + +// rtypeOf directly extracts the *rtype of the provided value. +func rtypeOf(i any) *abi.Type { + eface := *(*emptyInterface)(unsafe.Pointer(&i)) + return eface.typ +} + +// ptrMap is the cache for PointerTo. +var ptrMap sync.Map // map[*rtype]*ptrType + +// PtrTo returns the pointer type with element t. +// For example, if t represents type Foo, PtrTo(t) represents *Foo. +// +// PtrTo is the old spelling of [PointerTo]. +// The two functions behave identically. +// +// Deprecated: Superseded by [PointerTo]. +func PtrTo(t Type) Type { return PointerTo(t) } + +// PointerTo returns the pointer type with element t. +// For example, if t represents type Foo, PointerTo(t) represents *Foo. +func PointerTo(t Type) Type { + return toRType(t.(*rtype).ptrTo()) +} + +func (t *rtype) ptrTo() *abi.Type { + at := &t.t + if at.PtrToThis != 0 { + return t.typeOff(at.PtrToThis) + } + + // Check the cache. + if pi, ok := ptrMap.Load(t); ok { + return &pi.(*ptrType).Type + } + + // Look in known types. + s := "*" + t.String() + for _, tt := range typesByString(s) { + p := (*ptrType)(unsafe.Pointer(tt)) + if p.Elem != &t.t { + continue + } + pi, _ := ptrMap.LoadOrStore(t, p) + return &pi.(*ptrType).Type + } + + // Create a new ptrType starting with the description + // of an *unsafe.Pointer. + var iptr any = (*unsafe.Pointer)(nil) + prototype := *(**ptrType)(unsafe.Pointer(&iptr)) + pp := *prototype + + pp.Str = resolveReflectName(newName(s, "", false, false)) + pp.PtrToThis = 0 + + // For the type structures linked into the binary, the + // compiler provides a good hash of the string. + // Create a good hash for the new string by using + // the FNV-1 hash's mixing function to combine the + // old hash and the new "*". + pp.Hash = fnv1(t.t.Hash, '*') + + pp.Elem = at + + pi, _ := ptrMap.LoadOrStore(t, &pp) + return &pi.(*ptrType).Type +} + +func ptrTo(t *abi.Type) *abi.Type { + return toRType(t).ptrTo() +} + +// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function. 
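+// Each step multiplies x by the 32-bit FNV prime 16777619 and then XORs in
+// the next byte; for example, fnv1(0, 'a') is 0*16777619 ^ 0x61 == 0x61.
+// Callers seed it with an existing hash (or zero) rather than the standard
+// FNV offset basis, since it is used here for mixing, not as a full FNV-1.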
+func fnv1(x uint32, list ...byte) uint32 { + for _, b := range list { + x = x*16777619 ^ uint32(b) + } + return x +} + +func (t *rtype) Implements(u Type) bool { + if u == nil { + panic("reflect: nil type passed to Type.Implements") + } + if u.Kind() != Interface { + panic("reflect: non-interface type passed to Type.Implements") + } + return implements(u.common(), t.common()) +} + +func (t *rtype) AssignableTo(u Type) bool { + if u == nil { + panic("reflect: nil type passed to Type.AssignableTo") + } + uu := u.common() + return directlyAssignable(uu, t.common()) || implements(uu, t.common()) +} + +func (t *rtype) ConvertibleTo(u Type) bool { + if u == nil { + panic("reflect: nil type passed to Type.ConvertibleTo") + } + return convertOp(u.common(), t.common()) != nil +} + +func (t *rtype) Comparable() bool { + return t.t.Equal != nil +} + +// implements reports whether the type V implements the interface type T. +func implements(T, V *abi.Type) bool { + if T.Kind() != abi.Interface { + return false + } + t := (*interfaceType)(unsafe.Pointer(T)) + if len(t.Methods) == 0 { + return true + } + + // The same algorithm applies in both cases, but the + // method tables for an interface type and a concrete type + // are different, so the code is duplicated. + // In both cases the algorithm is a linear scan over the two + // lists - T's methods and V's methods - simultaneously. + // Since method tables are stored in a unique sorted order + // (alphabetical, with no duplicate method names), the scan + // through V's methods must hit a match for each of T's + // methods along the way, or else V does not implement T. + // This lets us run the scan in overall linear time instead of + // the quadratic time a naive search would require. + // See also ../runtime/iface.go. + if V.Kind() == abi.Interface { + v := (*interfaceType)(unsafe.Pointer(V)) + i := 0 + for j := 0; j < len(v.Methods); j++ { + tm := &t.Methods[i] + tmName := t.nameOff(tm.Name) + vm := &v.Methods[j] + vmName := nameOffFor(V, vm.Name) + if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Typ) == t.typeOff(tm.Typ) { + if !tmName.IsExported() { + tmPkgPath := pkgPath(tmName) + if tmPkgPath == "" { + tmPkgPath = t.PkgPath.Name() + } + vmPkgPath := pkgPath(vmName) + if vmPkgPath == "" { + vmPkgPath = v.PkgPath.Name() + } + if tmPkgPath != vmPkgPath { + continue + } + } + if i++; i >= len(t.Methods) { + return true + } + } + } + return false + } + + v := V.Uncommon() + if v == nil { + return false + } + i := 0 + vmethods := v.Methods() + for j := 0; j < int(v.Mcount); j++ { + tm := &t.Methods[i] + tmName := t.nameOff(tm.Name) + vm := vmethods[j] + vmName := nameOffFor(V, vm.Name) + if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Mtyp) == t.typeOff(tm.Typ) { + if !tmName.IsExported() { + tmPkgPath := pkgPath(tmName) + if tmPkgPath == "" { + tmPkgPath = t.PkgPath.Name() + } + vmPkgPath := pkgPath(vmName) + if vmPkgPath == "" { + vmPkgPath = nameOffFor(V, v.PkgPath).Name() + } + if tmPkgPath != vmPkgPath { + continue + } + } + if i++; i >= len(t.Methods) { + return true + } + } + } + return false +} + +// specialChannelAssignability reports whether a value x of channel type V +// can be directly assigned (using memmove) to another channel type T. +// https://golang.org/doc/go_spec.html#Assignability +// T and V must be both of Chan kind. 
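+// For example (illustrative), given
+//
+//	type recvOnly <-chan int
+//
+// a value of the unnamed type chan int is directly assignable to recvOnly:
+// the source is bidirectional, the element types are identical, and one of
+// the two sides is not a defined type.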
+func specialChannelAssignability(T, V *abi.Type) bool { + // Special case: + // x is a bidirectional channel value, T is a channel type, + // x's type V and T have identical element types, + // and at least one of V or T is not a defined type. + return V.ChanDir() == abi.BothDir && (nameFor(T) == "" || nameFor(V) == "") && haveIdenticalType(T.Elem(), V.Elem(), true) +} + +// directlyAssignable reports whether a value x of type V can be directly +// assigned (using memmove) to a value of type T. +// https://golang.org/doc/go_spec.html#Assignability +// Ignoring the interface rules (implemented elsewhere) +// and the ideal constant rules (no ideal constants at run time). +func directlyAssignable(T, V *abi.Type) bool { + // x's type V is identical to T? + if T == V { + return true + } + + // Otherwise at least one of T and V must not be defined + // and they must have the same kind. + if T.HasName() && V.HasName() || T.Kind() != V.Kind() { + return false + } + + if T.Kind() == abi.Chan && specialChannelAssignability(T, V) { + return true + } + + // x's type T and V must have identical underlying types. + return haveIdenticalUnderlyingType(T, V, true) +} + +func haveIdenticalType(T, V *abi.Type, cmpTags bool) bool { + if cmpTags { + return T == V + } + + if nameFor(T) != nameFor(V) || T.Kind() != V.Kind() || pkgPathFor(T) != pkgPathFor(V) { + return false + } + + return haveIdenticalUnderlyingType(T, V, false) +} + +func haveIdenticalUnderlyingType(T, V *abi.Type, cmpTags bool) bool { + if T == V { + return true + } + + kind := Kind(T.Kind()) + if kind != Kind(V.Kind()) { + return false + } + + // Non-composite types of equal kind have same underlying type + // (the predefined instance of the type). + if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer { + return true + } + + // Composite types. + switch kind { + case Array: + return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) + + case Chan: + return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) + + case Func: + t := (*funcType)(unsafe.Pointer(T)) + v := (*funcType)(unsafe.Pointer(V)) + if t.OutCount != v.OutCount || t.InCount != v.InCount { + return false + } + for i := 0; i < t.NumIn(); i++ { + if !haveIdenticalType(t.In(i), v.In(i), cmpTags) { + return false + } + } + for i := 0; i < t.NumOut(); i++ { + if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) { + return false + } + } + return true + + case Interface: + t := (*interfaceType)(unsafe.Pointer(T)) + v := (*interfaceType)(unsafe.Pointer(V)) + if len(t.Methods) == 0 && len(v.Methods) == 0 { + return true + } + // Might have the same methods but still + // need a run time conversion. 
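+		// (That is: unless T == V was caught at the top of this function,
+		// two non-empty interface types are never treated as having the
+		// same underlying type here, even if their method sets match,
+		// because converting between them still goes through the run time
+		// interface-conversion machinery.)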
+ return false + + case Map: + return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) + + case Pointer, Slice: + return haveIdenticalType(T.Elem(), V.Elem(), cmpTags) + + case Struct: + t := (*structType)(unsafe.Pointer(T)) + v := (*structType)(unsafe.Pointer(V)) + if len(t.Fields) != len(v.Fields) { + return false + } + if t.PkgPath.Name() != v.PkgPath.Name() { + return false + } + for i := range t.Fields { + tf := &t.Fields[i] + vf := &v.Fields[i] + if tf.Name.Name() != vf.Name.Name() { + return false + } + if !haveIdenticalType(tf.Typ, vf.Typ, cmpTags) { + return false + } + if cmpTags && tf.Name.Tag() != vf.Name.Tag() { + return false + } + if tf.Offset != vf.Offset { + return false + } + if tf.Embedded() != vf.Embedded() { + return false + } + } + return true + } + + return false +} + +// typelinks is implemented in package runtime. +// It returns a slice of the sections in each module, +// and a slice of *rtype offsets in each module. +// +// The types in each module are sorted by string. That is, the first +// two linked types of the first module are: +// +// d0 := sections[0] +// t1 := (*rtype)(add(d0, offset[0][0])) +// t2 := (*rtype)(add(d0, offset[0][1])) +// +// and +// +// t1.String() < t2.String() +// +// Note that strings are not unique identifiers for types: +// there can be more than one with a given string. +// Only types we might want to look up are included: +// pointers, channels, maps, slices, and arrays. +func typelinks() (sections []unsafe.Pointer, offset [][]int32) + +func rtypeOff(section unsafe.Pointer, off int32) *abi.Type { + return (*abi.Type)(add(section, uintptr(off), "sizeof(rtype) > 0")) +} + +// typesByString returns the subslice of typelinks() whose elements have +// the given string representation. +// It may be empty (no known types with that string) or may have +// multiple elements (multiple types with that string). +func typesByString(s string) []*abi.Type { + sections, offset := typelinks() + var ret []*abi.Type + + for offsI, offs := range offset { + section := sections[offsI] + + // We are looking for the first index i where the string becomes >= s. + // This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s). + i, j := 0, len(offs) + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if !(stringFor(rtypeOff(section, offs[h])) >= s) { + i = h + 1 // preserves f(i-1) == false + } else { + j = h // preserves f(j) == true + } + } + // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. + + // Having found the first, linear scan forward to find the last. + // We could do a second binary search, but the caller is going + // to do a linear scan anyway. + for j := i; j < len(offs); j++ { + typ := rtypeOff(section, offs[j]) + if stringFor(typ) != s { + break + } + ret = append(ret, typ) + } + } + return ret +} + +// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups. +var lookupCache sync.Map // map[cacheKey]*rtype + +// A cacheKey is the key for use in the lookupCache. +// Four values describe any of the types we are looking for: +// type kind, one or two subtypes, and an extra integer. +type cacheKey struct { + kind Kind + t1 *abi.Type + t2 *abi.Type + extra uintptr +} + +// The funcLookupCache caches FuncOf lookups. +// FuncOf does not share the common lookupCache since cacheKey is not +// sufficient to represent functions unambiguously. 
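+// (A cacheKey records one kind, two subtypes, and one integer, while a
+// function type may mention any number of parameter and result types, so
+// FuncOf keys this cache by the hash it computes instead.)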
+var funcLookupCache struct { + sync.Mutex // Guards stores (but not loads) on m. + + // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf. + // Elements of m are append-only and thus safe for concurrent reading. + m sync.Map +} + +// ChanOf returns the channel type with the given direction and element type. +// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int. +// +// The gc runtime imposes a limit of 64 kB on channel element types. +// If t's size is equal to or exceeds this limit, ChanOf panics. +func ChanOf(dir ChanDir, t Type) Type { + typ := t.common() + + // Look in cache. + ckey := cacheKey{Chan, typ, nil, uintptr(dir)} + if ch, ok := lookupCache.Load(ckey); ok { + return ch.(*rtype) + } + + // This restriction is imposed by the gc compiler and the runtime. + if typ.Size_ >= 1<<16 { + panic("reflect.ChanOf: element size too large") + } + + // Look in known types. + var s string + switch dir { + default: + panic("reflect.ChanOf: invalid dir") + case SendDir: + s = "chan<- " + stringFor(typ) + case RecvDir: + s = "<-chan " + stringFor(typ) + case BothDir: + typeStr := stringFor(typ) + if typeStr[0] == '<' { + // typ is recv chan, need parentheses as "<-" associates with leftmost + // chan possible, see: + // * https://golang.org/ref/spec#Channel_types + // * https://github.com/golang/go/issues/39897 + s = "chan (" + typeStr + ")" + } else { + s = "chan " + typeStr + } + } + for _, tt := range typesByString(s) { + ch := (*chanType)(unsafe.Pointer(tt)) + if ch.Elem == typ && ch.Dir == abi.ChanDir(dir) { + ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt)) + return ti.(Type) + } + } + + // Make a channel type. + var ichan any = (chan unsafe.Pointer)(nil) + prototype := *(**chanType)(unsafe.Pointer(&ichan)) + ch := *prototype + ch.TFlag = abi.TFlagRegularMemory + ch.Dir = abi.ChanDir(dir) + ch.Str = resolveReflectName(newName(s, "", false, false)) + ch.Hash = fnv1(typ.Hash, 'c', byte(dir)) + ch.Elem = typ + + ti, _ := lookupCache.LoadOrStore(ckey, toRType(&ch.Type)) + return ti.(Type) +} + +// MapOf returns the map type with the given key and element types. +// For example, if k represents int and e represents string, +// MapOf(k, e) represents map[int]string. +// +// If the key type is not a valid map key type (that is, if it does +// not implement Go's == operator), MapOf panics. +func MapOf(key, elem Type) Type { + ktyp := key.common() + etyp := elem.common() + + if ktyp.Equal == nil { + panic("reflect.MapOf: invalid key type " + stringFor(ktyp)) + } + + // Look in cache. + ckey := cacheKey{Map, ktyp, etyp, 0} + if mt, ok := lookupCache.Load(ckey); ok { + return mt.(Type) + } + + // Look in known types. + s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp) + for _, tt := range typesByString(s) { + mt := (*mapType)(unsafe.Pointer(tt)) + if mt.Key == ktyp && mt.Elem == etyp { + ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt)) + return ti.(Type) + } + } + + // Make a map type. + // Note: flag values must match those used in the TMAP case + // in ../cmd/compile/internal/reflectdata/reflect.go:writeType. 
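+	// (The next two lines use the prototype trick seen throughout this
+	// file: a nil map value is boxed into an any, and the *mapType that
+	// describes map[unsafe.Pointer]unsafe.Pointer is copied out of the
+	// interface header and then edited in place.)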
+ var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil) + mt := **(**mapType)(unsafe.Pointer(&imap)) + mt.Str = resolveReflectName(newName(s, "", false, false)) + mt.TFlag = 0 + mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash)) + mt.Key = ktyp + mt.Elem = etyp + mt.Bucket = bucketOf(ktyp, etyp) + mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr { + return typehash(ktyp, p, seed) + } + mt.Flags = 0 + if ktyp.Size_ > maxKeySize { + mt.KeySize = uint8(goarch.PtrSize) + mt.Flags |= 1 // indirect key + } else { + mt.KeySize = uint8(ktyp.Size_) + } + if etyp.Size_ > maxValSize { + mt.ValueSize = uint8(goarch.PtrSize) + mt.Flags |= 2 // indirect value + } else { + mt.MapType.ValueSize = uint8(etyp.Size_) + } + mt.MapType.BucketSize = uint16(mt.Bucket.Size_) + if isReflexive(ktyp) { + mt.Flags |= 4 + } + if needKeyUpdate(ktyp) { + mt.Flags |= 8 + } + if hashMightPanic(ktyp) { + mt.Flags |= 16 + } + mt.PtrToThis = 0 + + ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type)) + return ti.(Type) +} + +var funcTypes []Type +var funcTypesMutex sync.Mutex + +func initFuncTypes(n int) Type { + funcTypesMutex.Lock() + defer funcTypesMutex.Unlock() + if n >= len(funcTypes) { + newFuncTypes := make([]Type, n+1) + copy(newFuncTypes, funcTypes) + funcTypes = newFuncTypes + } + if funcTypes[n] != nil { + return funcTypes[n] + } + + funcTypes[n] = StructOf([]StructField{ + { + Name: "FuncType", + Type: TypeOf(funcType{}), + }, + { + Name: "Args", + Type: ArrayOf(n, TypeOf(&rtype{})), + }, + }) + return funcTypes[n] +} + +// FuncOf returns the function type with the given argument and result types. +// For example if k represents int and e represents string, +// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string. +// +// The variadic argument controls whether the function is variadic. FuncOf +// panics if the in[len(in)-1] does not represent a slice and variadic is +// true. +func FuncOf(in, out []Type, variadic bool) Type { + if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) { + panic("reflect.FuncOf: last arg of variadic func must be slice") + } + + // Make a func type. + var ifunc any = (func())(nil) + prototype := *(**funcType)(unsafe.Pointer(&ifunc)) + n := len(in) + len(out) + + if n > 128 { + panic("reflect.FuncOf: too many arguments") + } + + o := New(initFuncTypes(n)).Elem() + ft := (*funcType)(unsafe.Pointer(o.Field(0).Addr().Pointer())) + args := unsafe.Slice((**rtype)(unsafe.Pointer(o.Field(1).Addr().Pointer())), n)[0:0:n] + *ft = *prototype + + // Build a hash and minimally populate ft. + var hash uint32 + for _, in := range in { + t := in.(*rtype) + args = append(args, t) + hash = fnv1(hash, byte(t.t.Hash>>24), byte(t.t.Hash>>16), byte(t.t.Hash>>8), byte(t.t.Hash)) + } + if variadic { + hash = fnv1(hash, 'v') + } + hash = fnv1(hash, '.') + for _, out := range out { + t := out.(*rtype) + args = append(args, t) + hash = fnv1(hash, byte(t.t.Hash>>24), byte(t.t.Hash>>16), byte(t.t.Hash>>8), byte(t.t.Hash)) + } + + ft.TFlag = 0 + ft.Hash = hash + ft.InCount = uint16(len(in)) + ft.OutCount = uint16(len(out)) + if variadic { + ft.OutCount |= 1 << 15 + } + + // Look in cache. + if ts, ok := funcLookupCache.m.Load(hash); ok { + for _, t := range ts.([]*abi.Type) { + if haveIdenticalUnderlyingType(&ft.Type, t, true) { + return toRType(t) + } + } + } + + // Not in cache, lock and retry. 
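+	// (Standard double-checked pattern: the lock-free Load above can race
+	// with a concurrent store, so the lookup is repeated below while
+	// holding funcLookupCache.Mutex, which guards all stores to m.)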
+ funcLookupCache.Lock() + defer funcLookupCache.Unlock() + if ts, ok := funcLookupCache.m.Load(hash); ok { + for _, t := range ts.([]*abi.Type) { + if haveIdenticalUnderlyingType(&ft.Type, t, true) { + return toRType(t) + } + } + } + + addToCache := func(tt *abi.Type) Type { + var rts []*abi.Type + if rti, ok := funcLookupCache.m.Load(hash); ok { + rts = rti.([]*abi.Type) + } + funcLookupCache.m.Store(hash, append(rts, tt)) + return toType(tt) + } + + // Look in known types for the same string representation. + str := funcStr(ft) + for _, tt := range typesByString(str) { + if haveIdenticalUnderlyingType(&ft.Type, tt, true) { + return addToCache(tt) + } + } + + // Populate the remaining fields of ft and store in cache. + ft.Str = resolveReflectName(newName(str, "", false, false)) + ft.PtrToThis = 0 + return addToCache(&ft.Type) +} +func stringFor(t *abi.Type) string { + return toRType(t).String() +} + +// funcStr builds a string representation of a funcType. +func funcStr(ft *funcType) string { + repr := make([]byte, 0, 64) + repr = append(repr, "func("...) + for i, t := range ft.InSlice() { + if i > 0 { + repr = append(repr, ", "...) + } + if ft.IsVariadic() && i == int(ft.InCount)-1 { + repr = append(repr, "..."...) + repr = append(repr, stringFor((*sliceType)(unsafe.Pointer(t)).Elem)...) + } else { + repr = append(repr, stringFor(t)...) + } + } + repr = append(repr, ')') + out := ft.OutSlice() + if len(out) == 1 { + repr = append(repr, ' ') + } else if len(out) > 1 { + repr = append(repr, " ("...) + } + for i, t := range out { + if i > 0 { + repr = append(repr, ", "...) + } + repr = append(repr, stringFor(t)...) + } + if len(out) > 1 { + repr = append(repr, ')') + } + return string(repr) +} + +// isReflexive reports whether the == operation on the type is reflexive. +// That is, x == x for all values x of type t. +func isReflexive(t *abi.Type) bool { + switch Kind(t.Kind()) { + case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer: + return true + case Float32, Float64, Complex64, Complex128, Interface: + return false + case Array: + tt := (*arrayType)(unsafe.Pointer(t)) + return isReflexive(tt.Elem) + case Struct: + tt := (*structType)(unsafe.Pointer(t)) + for _, f := range tt.Fields { + if !isReflexive(f.Typ) { + return false + } + } + return true + default: + // Func, Map, Slice, Invalid + panic("isReflexive called on non-key type " + stringFor(t)) + } +} + +// needKeyUpdate reports whether map overwrites require the key to be copied. +func needKeyUpdate(t *abi.Type) bool { + switch Kind(t.Kind()) { + case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer: + return false + case Float32, Float64, Complex64, Complex128, Interface, String: + // Float keys can be updated from +0 to -0. + // String keys can be updated to use a smaller backing store. + // Interfaces might have floats or strings in them. + return true + case Array: + tt := (*arrayType)(unsafe.Pointer(t)) + return needKeyUpdate(tt.Elem) + case Struct: + tt := (*structType)(unsafe.Pointer(t)) + for _, f := range tt.Fields { + if needKeyUpdate(f.Typ) { + return true + } + } + return false + default: + // Func, Map, Slice, Invalid + panic("needKeyUpdate called on non-key type " + stringFor(t)) + } +} + +// hashMightPanic reports whether the hash of a map key of type t might panic. 
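+// For example (illustrative), with m of type map[any]int the statement
+//
+//	m[any(func() {})] = 0
+//
+// panics at run time because func values are not comparable, so the
+// Interface case must conservatively report true.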
+func hashMightPanic(t *abi.Type) bool { + switch Kind(t.Kind()) { + case Interface: + return true + case Array: + tt := (*arrayType)(unsafe.Pointer(t)) + return hashMightPanic(tt.Elem) + case Struct: + tt := (*structType)(unsafe.Pointer(t)) + for _, f := range tt.Fields { + if hashMightPanic(f.Typ) { + return true + } + } + return false + default: + return false + } +} + +// Make sure these routines stay in sync with ../runtime/map.go! +// These types exist only for GC, so we only fill out GC relevant info. +// Currently, that's just size and the GC program. We also fill in string +// for possible debugging use. +const ( + bucketSize uintptr = abi.MapBucketCount + maxKeySize uintptr = abi.MapMaxKeyBytes + maxValSize uintptr = abi.MapMaxElemBytes +) + +func bucketOf(ktyp, etyp *abi.Type) *abi.Type { + if ktyp.Size_ > maxKeySize { + ktyp = ptrTo(ktyp) + } + if etyp.Size_ > maxValSize { + etyp = ptrTo(etyp) + } + + // Prepare GC data if any. + // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes, + // or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap. + // Note that since the key and value are known to be <= 128 bytes, + // they're guaranteed to have bitmaps instead of GC programs. + var gcdata *byte + var ptrdata uintptr + + size := bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize + if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 { + panic("reflect: bad size computation in MapOf") + } + + if ktyp.PtrBytes != 0 || etyp.PtrBytes != 0 { + nptr := (bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize + n := (nptr + 7) / 8 + + // Runtime needs pointer masks to be a multiple of uintptr in size. + n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1) + mask := make([]byte, n) + base := bucketSize / goarch.PtrSize + + if ktyp.PtrBytes != 0 { + emitGCMask(mask, base, ktyp, bucketSize) + } + base += bucketSize * ktyp.Size_ / goarch.PtrSize + + if etyp.PtrBytes != 0 { + emitGCMask(mask, base, etyp, bucketSize) + } + base += bucketSize * etyp.Size_ / goarch.PtrSize + + word := base + mask[word/8] |= 1 << (word % 8) + gcdata = &mask[0] + ptrdata = (word + 1) * goarch.PtrSize + + // overflow word must be last + if ptrdata != size { + panic("reflect: bad layout computation in MapOf") + } + } + + b := &abi.Type{ + Align_: goarch.PtrSize, + Size_: size, + Kind_: uint8(Struct), + PtrBytes: ptrdata, + GCData: gcdata, + } + s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")" + b.Str = resolveReflectName(newName(s, "", false, false)) + return b +} + +func (t *rtype) gcSlice(begin, end uintptr) []byte { + return (*[1 << 30]byte)(unsafe.Pointer(t.t.GCData))[begin:end:end] +} + +// emitGCMask writes the GC mask for [n]typ into out, starting at bit +// offset base. +func emitGCMask(out []byte, base uintptr, typ *abi.Type, n uintptr) { + if typ.Kind_&kindGCProg != 0 { + panic("reflect: unexpected GC program") + } + ptrs := typ.PtrBytes / goarch.PtrSize + words := typ.Size_ / goarch.PtrSize + mask := typ.GcSlice(0, (ptrs+7)/8) + for j := uintptr(0); j < ptrs; j++ { + if (mask[j/8]>>(j%8))&1 != 0 { + for i := uintptr(0); i < n; i++ { + k := base + i*words + j + out[k/8] |= 1 << (k % 8) + } + } + } +} + +// appendGCProg appends the GC program for the first ptrdata bytes of +// typ to dst and returns the extended slice. +func appendGCProg(dst []byte, typ *abi.Type) []byte { + if typ.Kind_&kindGCProg != 0 { + // Element has GC program; emit one element. 
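+		// (A GC program starts with a 4-byte length word; GcSlice(4, 4+n-1)
+		// skips that header and drops the trailing end-of-program byte so
+		// the element's program can be spliced into a larger one.)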
+ n := uintptr(*(*uint32)(unsafe.Pointer(typ.GCData))) + prog := typ.GcSlice(4, 4+n-1) + return append(dst, prog...) + } + + // Element is small with pointer mask; use as literal bits. + ptrs := typ.PtrBytes / goarch.PtrSize + mask := typ.GcSlice(0, (ptrs+7)/8) + + // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes). + for ; ptrs > 120; ptrs -= 120 { + dst = append(dst, 120) + dst = append(dst, mask[:15]...) + mask = mask[15:] + } + + dst = append(dst, byte(ptrs)) + dst = append(dst, mask...) + return dst +} + +// SliceOf returns the slice type with element type t. +// For example, if t represents int, SliceOf(t) represents []int. +func SliceOf(t Type) Type { + typ := t.common() + + // Look in cache. + ckey := cacheKey{Slice, typ, nil, 0} + if slice, ok := lookupCache.Load(ckey); ok { + return slice.(Type) + } + + // Look in known types. + s := "[]" + stringFor(typ) + for _, tt := range typesByString(s) { + slice := (*sliceType)(unsafe.Pointer(tt)) + if slice.Elem == typ { + ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt)) + return ti.(Type) + } + } + + // Make a slice type. + var islice any = ([]unsafe.Pointer)(nil) + prototype := *(**sliceType)(unsafe.Pointer(&islice)) + slice := *prototype + slice.TFlag = 0 + slice.Str = resolveReflectName(newName(s, "", false, false)) + slice.Hash = fnv1(typ.Hash, '[') + slice.Elem = typ + slice.PtrToThis = 0 + + ti, _ := lookupCache.LoadOrStore(ckey, toRType(&slice.Type)) + return ti.(Type) +} + +// The structLookupCache caches StructOf lookups. +// StructOf does not share the common lookupCache since we need to pin +// the memory associated with *structTypeFixedN. +var structLookupCache struct { + sync.Mutex // Guards stores (but not loads) on m. + + // m is a map[uint32][]Type keyed by the hash calculated in StructOf. + // Elements in m are append-only and thus safe for concurrent reading. + m sync.Map +} + +type structTypeUncommon struct { + structType + u uncommonType +} + +// isLetter reports whether a given 'rune' is classified as a Letter. +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch) +} + +// isValidFieldName checks if a string is a valid (struct) field name or not. +// +// According to the language spec, a field name should be an identifier. +// +// identifier = letter { letter | unicode_digit } . +// letter = unicode_letter | "_" . +func isValidFieldName(fieldName string) bool { + for i, c := range fieldName { + if i == 0 && !isLetter(c) { + return false + } + + if !(isLetter(c) || unicode.IsDigit(c)) { + return false + } + } + + return len(fieldName) > 0 +} + +// StructOf returns the struct type containing fields. +// The Offset and Index fields are ignored and computed as they would be +// by the compiler. +// +// StructOf currently does not support promoted methods of embedded fields +// and panics if passed unexported StructFields. +func StructOf(fields []StructField) Type { + var ( + hash = fnv1(0, []byte("struct {")...) + size uintptr + typalign uint8 + comparable = true + methods []abi.Method + + fs = make([]structField, len(fields)) + repr = make([]byte, 0, 64) + fset = map[string]struct{}{} // fields' names + + hasGCProg = false // records whether a struct-field type has a GCProg + ) + + lastzero := uintptr(0) + repr = append(repr, "struct {"...) 
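+	// lastzero tracks the offset of a trailing zero-sized field; the
+	// fix-up after this loop uses it to pad such structs (issue 9401).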
+ pkgpath := "" + for i, field := range fields { + if field.Name == "" { + panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name") + } + if !isValidFieldName(field.Name) { + panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name") + } + if field.Type == nil { + panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type") + } + f, fpkgpath := runtimeStructField(field) + ft := f.Typ + if ft.Kind_&kindGCProg != 0 { + hasGCProg = true + } + if fpkgpath != "" { + if pkgpath == "" { + pkgpath = fpkgpath + } else if pkgpath != fpkgpath { + panic("reflect.Struct: fields with different PkgPath " + pkgpath + " and " + fpkgpath) + } + } + + // Update string and hash + name := f.Name.Name() + hash = fnv1(hash, []byte(name)...) + repr = append(repr, (" " + name)...) + if f.Embedded() { + // Embedded field + if f.Typ.Kind() == abi.Pointer { + // Embedded ** and *interface{} are illegal + elem := ft.Elem() + if k := elem.Kind(); k == abi.Pointer || k == abi.Interface { + panic("reflect.StructOf: illegal embedded field type " + stringFor(ft)) + } + } + + switch Kind(f.Typ.Kind()) { + case Interface: + ift := (*interfaceType)(unsafe.Pointer(ft)) + for _, m := range ift.Methods { + if pkgPath(ift.nameOff(m.Name)) != "" { + // TODO(sbinet). Issue 15924. + panic("reflect: embedded interface with unexported method(s) not implemented") + } + + fnStub := resolveReflectText(unsafe.Pointer(abi.FuncPCABIInternal(embeddedIfaceMethStub))) + methods = append(methods, abi.Method{ + Name: resolveReflectName(ift.nameOff(m.Name)), + Mtyp: resolveReflectType(ift.typeOff(m.Typ)), + Ifn: fnStub, + Tfn: fnStub, + }) + } + case Pointer: + ptr := (*ptrType)(unsafe.Pointer(ft)) + if unt := ptr.Uncommon(); unt != nil { + if i > 0 && unt.Mcount > 0 { + // Issue 15924. + panic("reflect: embedded type with methods not implemented if type is not first field") + } + if len(fields) > 1 { + panic("reflect: embedded type with methods not implemented if there is more than one field") + } + for _, m := range unt.Methods() { + mname := nameOffFor(ft, m.Name) + if pkgPath(mname) != "" { + // TODO(sbinet). + // Issue 15924. + panic("reflect: embedded interface with unexported method(s) not implemented") + } + methods = append(methods, abi.Method{ + Name: resolveReflectName(mname), + Mtyp: resolveReflectType(typeOffFor(ft, m.Mtyp)), + Ifn: resolveReflectText(textOffFor(ft, m.Ifn)), + Tfn: resolveReflectText(textOffFor(ft, m.Tfn)), + }) + } + } + if unt := ptr.Elem.Uncommon(); unt != nil { + for _, m := range unt.Methods() { + mname := nameOffFor(ft, m.Name) + if pkgPath(mname) != "" { + // TODO(sbinet) + // Issue 15924. + panic("reflect: embedded interface with unexported method(s) not implemented") + } + methods = append(methods, abi.Method{ + Name: resolveReflectName(mname), + Mtyp: resolveReflectType(typeOffFor(ptr.Elem, m.Mtyp)), + Ifn: resolveReflectText(textOffFor(ptr.Elem, m.Ifn)), + Tfn: resolveReflectText(textOffFor(ptr.Elem, m.Tfn)), + }) + } + } + default: + if unt := ft.Uncommon(); unt != nil { + if i > 0 && unt.Mcount > 0 { + // Issue 15924. + panic("reflect: embedded type with methods not implemented if type is not first field") + } + if len(fields) > 1 && ft.Kind_&kindDirectIface != 0 { + panic("reflect: embedded type with methods not implemented for non-pointer type") + } + for _, m := range unt.Methods() { + mname := nameOffFor(ft, m.Name) + if pkgPath(mname) != "" { + // TODO(sbinet) + // Issue 15924. 
+ panic("reflect: embedded interface with unexported method(s) not implemented") + } + methods = append(methods, abi.Method{ + Name: resolveReflectName(mname), + Mtyp: resolveReflectType(typeOffFor(ft, m.Mtyp)), + Ifn: resolveReflectText(textOffFor(ft, m.Ifn)), + Tfn: resolveReflectText(textOffFor(ft, m.Tfn)), + }) + + } + } + } + } + if _, dup := fset[name]; dup && name != "_" { + panic("reflect.StructOf: duplicate field " + name) + } + fset[name] = struct{}{} + + hash = fnv1(hash, byte(ft.Hash>>24), byte(ft.Hash>>16), byte(ft.Hash>>8), byte(ft.Hash)) + + repr = append(repr, (" " + stringFor(ft))...) + if f.Name.HasTag() { + hash = fnv1(hash, []byte(f.Name.Tag())...) + repr = append(repr, (" " + strconv.Quote(f.Name.Tag()))...) + } + if i < len(fields)-1 { + repr = append(repr, ';') + } + + comparable = comparable && (ft.Equal != nil) + + offset := align(size, uintptr(ft.Align_)) + if offset < size { + panic("reflect.StructOf: struct size would exceed virtual address space") + } + if ft.Align_ > typalign { + typalign = ft.Align_ + } + size = offset + ft.Size_ + if size < offset { + panic("reflect.StructOf: struct size would exceed virtual address space") + } + f.Offset = offset + + if ft.Size_ == 0 { + lastzero = size + } + + fs[i] = f + } + + if size > 0 && lastzero == size { + // This is a non-zero sized struct that ends in a + // zero-sized field. We add an extra byte of padding, + // to ensure that taking the address of the final + // zero-sized field can't manufacture a pointer to the + // next object in the heap. See issue 9401. + size++ + if size == 0 { + panic("reflect.StructOf: struct size would exceed virtual address space") + } + } + + var typ *structType + var ut *uncommonType + + if len(methods) == 0 { + t := new(structTypeUncommon) + typ = &t.structType + ut = &t.u + } else { + // A *rtype representing a struct is followed directly in memory by an + // array of method objects representing the methods attached to the + // struct. To get the same layout for a run time generated type, we + // need an array directly following the uncommonType memory. + // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN. + tt := New(StructOf([]StructField{ + {Name: "S", Type: TypeOf(structType{})}, + {Name: "U", Type: TypeOf(uncommonType{})}, + {Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))}, + })) + + typ = (*structType)(tt.Elem().Field(0).Addr().UnsafePointer()) + ut = (*uncommonType)(tt.Elem().Field(1).Addr().UnsafePointer()) + + copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]abi.Method), methods) + } + // TODO(sbinet): Once we allow embedding multiple types, + // methods will need to be sorted like the compiler does. + // TODO(sbinet): Once we allow non-exported methods, we will + // need to compute xcount as the number of exported methods. + ut.Mcount = uint16(len(methods)) + ut.Xcount = ut.Mcount + ut.Moff = uint32(unsafe.Sizeof(uncommonType{})) + + if len(fs) > 0 { + repr = append(repr, ' ') + } + repr = append(repr, '}') + hash = fnv1(hash, '}') + str := string(repr) + + // Round the size up to be a multiple of the alignment. + s := align(size, uintptr(typalign)) + if s < size { + panic("reflect.StructOf: struct size would exceed virtual address space") + } + size = s + + // Make the struct type. + var istruct any = struct{}{} + prototype := *(**structType)(unsafe.Pointer(&istruct)) + *typ = *prototype + typ.Fields = fs + if pkgpath != "" { + typ.PkgPath = newName(pkgpath, "", false, false) + } + + // Look in cache. 
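+	// (Same optimistic scheme as FuncOf: one lock-free Load here, then
+	// the lookup is repeated under structLookupCache.Mutex before any
+	// store is published.)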
+ if ts, ok := structLookupCache.m.Load(hash); ok { + for _, st := range ts.([]Type) { + t := st.common() + if haveIdenticalUnderlyingType(&typ.Type, t, true) { + return toType(t) + } + } + } + + // Not in cache, lock and retry. + structLookupCache.Lock() + defer structLookupCache.Unlock() + if ts, ok := structLookupCache.m.Load(hash); ok { + for _, st := range ts.([]Type) { + t := st.common() + if haveIdenticalUnderlyingType(&typ.Type, t, true) { + return toType(t) + } + } + } + + addToCache := func(t Type) Type { + var ts []Type + if ti, ok := structLookupCache.m.Load(hash); ok { + ts = ti.([]Type) + } + structLookupCache.m.Store(hash, append(ts, t)) + return t + } + + // Look in known types. + for _, t := range typesByString(str) { + if haveIdenticalUnderlyingType(&typ.Type, t, true) { + // even if 't' wasn't a structType with methods, we should be ok + // as the 'u uncommonType' field won't be accessed except when + // tflag&abi.TFlagUncommon is set. + return addToCache(toType(t)) + } + } + + typ.Str = resolveReflectName(newName(str, "", false, false)) + typ.TFlag = 0 // TODO: set tflagRegularMemory + typ.Hash = hash + typ.Size_ = size + typ.PtrBytes = typeptrdata(&typ.Type) + typ.Align_ = typalign + typ.FieldAlign_ = typalign + typ.PtrToThis = 0 + if len(methods) > 0 { + typ.TFlag |= abi.TFlagUncommon + } + + if hasGCProg { + lastPtrField := 0 + for i, ft := range fs { + if ft.Typ.Pointers() { + lastPtrField = i + } + } + prog := []byte{0, 0, 0, 0} // will be length of prog + var off uintptr + for i, ft := range fs { + if i > lastPtrField { + // gcprog should not include anything for any field after + // the last field that contains pointer data + break + } + if !ft.Typ.Pointers() { + // Ignore pointerless fields. + continue + } + // Pad to start of this field with zeros. + if ft.Offset > off { + n := (ft.Offset - off) / goarch.PtrSize + prog = append(prog, 0x01, 0x00) // emit a 0 bit + if n > 1 { + prog = append(prog, 0x81) // repeat previous bit + prog = appendVarint(prog, n-1) // n-1 times + } + off = ft.Offset + } + + prog = appendGCProg(prog, ft.Typ) + off += ft.Typ.PtrBytes + } + prog = append(prog, 0) + *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) + typ.Kind_ |= kindGCProg + typ.GCData = &prog[0] + } else { + typ.Kind_ &^= kindGCProg + bv := new(bitVector) + addTypeBits(bv, 0, &typ.Type) + if len(bv.data) > 0 { + typ.GCData = &bv.data[0] + } + } + typ.Equal = nil + if comparable { + typ.Equal = func(p, q unsafe.Pointer) bool { + for _, ft := range typ.Fields { + pi := add(p, ft.Offset, "&x.field safe") + qi := add(q, ft.Offset, "&x.field safe") + if !ft.Typ.Equal(pi, qi) { + return false + } + } + return true + } + } + + switch { + case len(fs) == 1 && !ifaceIndir(fs[0].Typ): + // structs of 1 direct iface type can be direct + typ.Kind_ |= kindDirectIface + default: + typ.Kind_ &^= kindDirectIface + } + + return addToCache(toType(&typ.Type)) +} + +func embeddedIfaceMethStub() { + panic("reflect: StructOf does not support methods of embedded interfaces") +} + +// runtimeStructField takes a StructField value passed to StructOf and +// returns both the corresponding internal representation, of type +// structField, and the pkgpath value to use for this field. +func runtimeStructField(field StructField) (structField, string) { + if field.Anonymous && field.PkgPath != "" { + panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set") + } + + if field.IsExported() { + // Best-effort check for misuse. 
+ // Since this field will be treated as exported, not much harm done if Unicode lowercase slips through. + c := field.Name[0] + if 'a' <= c && c <= 'z' || c == '_' { + panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath") + } + } + + resolveReflectType(field.Type.common()) // install in runtime + f := structField{ + Name: newName(field.Name, string(field.Tag), field.IsExported(), field.Anonymous), + Typ: field.Type.common(), + Offset: 0, + } + return f, field.PkgPath +} + +// typeptrdata returns the length in bytes of the prefix of t +// containing pointer data. Anything after this offset is scalar data. +// keep in sync with ../cmd/compile/internal/reflectdata/reflect.go +func typeptrdata(t *abi.Type) uintptr { + switch t.Kind() { + case abi.Struct: + st := (*structType)(unsafe.Pointer(t)) + // find the last field that has pointers. + field := -1 + for i := range st.Fields { + ft := st.Fields[i].Typ + if ft.Pointers() { + field = i + } + } + if field == -1 { + return 0 + } + f := st.Fields[field] + return f.Offset + f.Typ.PtrBytes + + default: + panic("reflect.typeptrdata: unexpected type, " + stringFor(t)) + } +} + +// See cmd/compile/internal/reflectdata/reflect.go for derivation of constant. +const maxPtrmaskBytes = 2048 + +// ArrayOf returns the array type with the given length and element type. +// For example, if t represents int, ArrayOf(5, t) represents [5]int. +// +// If the resulting type would be larger than the available address space, +// ArrayOf panics. +func ArrayOf(length int, elem Type) Type { + if length < 0 { + panic("reflect: negative length passed to ArrayOf") + } + + typ := elem.common() + + // Look in cache. + ckey := cacheKey{Array, typ, nil, uintptr(length)} + if array, ok := lookupCache.Load(ckey); ok { + return array.(Type) + } + + // Look in known types. + s := "[" + strconv.Itoa(length) + "]" + stringFor(typ) + for _, tt := range typesByString(s) { + array := (*arrayType)(unsafe.Pointer(tt)) + if array.Elem == typ { + ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt)) + return ti.(Type) + } + } + + // Make an array type. + var iarray any = [1]unsafe.Pointer{} + prototype := *(**arrayType)(unsafe.Pointer(&iarray)) + array := *prototype + array.TFlag = typ.TFlag & abi.TFlagRegularMemory + array.Str = resolveReflectName(newName(s, "", false, false)) + array.Hash = fnv1(typ.Hash, '[') + for n := uint32(length); n > 0; n >>= 8 { + array.Hash = fnv1(array.Hash, byte(n)) + } + array.Hash = fnv1(array.Hash, ']') + array.Elem = typ + array.PtrToThis = 0 + if typ.Size_ > 0 { + max := ^uintptr(0) / typ.Size_ + if uintptr(length) > max { + panic("reflect.ArrayOf: array size would exceed virtual address space") + } + } + array.Size_ = typ.Size_ * uintptr(length) + if length > 0 && typ.PtrBytes != 0 { + array.PtrBytes = typ.Size_*uintptr(length-1) + typ.PtrBytes + } + array.Align_ = typ.Align_ + array.FieldAlign_ = typ.FieldAlign_ + array.Len = uintptr(length) + array.Slice = &(SliceOf(elem).(*rtype).t) + + switch { + case typ.PtrBytes == 0 || array.Size_ == 0: + // No pointers. + array.GCData = nil + array.PtrBytes = 0 + + case length == 1: + // In memory, 1-element array looks just like the element. + array.Kind_ |= typ.Kind_ & kindGCProg + array.GCData = typ.GCData + array.PtrBytes = typ.PtrBytes + + case typ.Kind_&kindGCProg == 0 && array.Size_ <= maxPtrmaskBytes*8*goarch.PtrSize: + // Element is small with pointer mask; array is still small. 
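+		// (maxPtrmaskBytes is 2048, so this branch covers arrays whose
+		// pointer bitmap fits in at most 2048 bytes, i.e. 2048*8
+		// pointer-sized words.)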
+ // Create direct pointer mask by turning each 1 bit in elem + // into length 1 bits in larger mask. + n := (array.PtrBytes/goarch.PtrSize + 7) / 8 + // Runtime needs pointer masks to be a multiple of uintptr in size. + n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1) + mask := make([]byte, n) + emitGCMask(mask, 0, typ, array.Len) + array.GCData = &mask[0] + + default: + // Create program that emits one element + // and then repeats to make the array. + prog := []byte{0, 0, 0, 0} // will be length of prog + prog = appendGCProg(prog, typ) + // Pad from ptrdata to size. + elemPtrs := typ.PtrBytes / goarch.PtrSize + elemWords := typ.Size_ / goarch.PtrSize + if elemPtrs < elemWords { + // Emit literal 0 bit, then repeat as needed. + prog = append(prog, 0x01, 0x00) + if elemPtrs+1 < elemWords { + prog = append(prog, 0x81) + prog = appendVarint(prog, elemWords-elemPtrs-1) + } + } + // Repeat length-1 times. + if elemWords < 0x80 { + prog = append(prog, byte(elemWords|0x80)) + } else { + prog = append(prog, 0x80) + prog = appendVarint(prog, elemWords) + } + prog = appendVarint(prog, uintptr(length)-1) + prog = append(prog, 0) + *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) + array.Kind_ |= kindGCProg + array.GCData = &prog[0] + array.PtrBytes = array.Size_ // overestimate but ok; must match program + } + + etyp := typ + esize := etyp.Size() + + array.Equal = nil + if eequal := etyp.Equal; eequal != nil { + array.Equal = func(p, q unsafe.Pointer) bool { + for i := 0; i < length; i++ { + pi := arrayAt(p, i, esize, "i < length") + qi := arrayAt(q, i, esize, "i < length") + if !eequal(pi, qi) { + return false + } + + } + return true + } + } + + switch { + case length == 1 && !ifaceIndir(typ): + // array of 1 direct iface type can be direct + array.Kind_ |= kindDirectIface + default: + array.Kind_ &^= kindDirectIface + } + + ti, _ := lookupCache.LoadOrStore(ckey, toRType(&array.Type)) + return ti.(Type) +} + +func appendVarint(x []byte, v uintptr) []byte { + for ; v >= 0x80; v >>= 7 { + x = append(x, byte(v|0x80)) + } + x = append(x, byte(v)) + return x +} + +// toType converts from a *rtype to a Type that can be returned +// to the client of package reflect. In gc, the only concern is that +// a nil *rtype must be replaced by a nil Type, but in gccgo this +// function takes care of ensuring that multiple *rtype for the same +// type are coalesced into a single Type. +func toType(t *abi.Type) Type { + if t == nil { + return nil + } + return toRType(t) +} + +type layoutKey struct { + ftyp *funcType // function signature + rcvr *abi.Type // receiver type, or nil if none +} + +type layoutType struct { + t *abi.Type + framePool *sync.Pool + abid abiDesc +} + +var layoutCache sync.Map // map[layoutKey]layoutType + +// funcLayout computes a struct type representing the layout of the +// stack-assigned function arguments and return values for the function +// type t. +// If rcvr != nil, rcvr specifies the type of the receiver. +// The returned type exists only for GC, so we only fill out GC relevant info. +// Currently, that's just size and the GC program. We also fill in +// the name for possible debugging use. 
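+// Layouts are memoized in layoutCache, keyed by (funcType, receiver type),
+// so repeated reflective calls to the same function reuse one frame type
+// and one frame pool.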
+func funcLayout(t *funcType, rcvr *abi.Type) (frametype *abi.Type, framePool *sync.Pool, abid abiDesc) { + if t.Kind() != abi.Func { + panic("reflect: funcLayout of non-func type " + stringFor(&t.Type)) + } + if rcvr != nil && rcvr.Kind() == abi.Interface { + panic("reflect: funcLayout with interface receiver " + stringFor(rcvr)) + } + k := layoutKey{t, rcvr} + if lti, ok := layoutCache.Load(k); ok { + lt := lti.(layoutType) + return lt.t, lt.framePool, lt.abid + } + + // Compute the ABI layout. + abid = newAbiDesc(t, rcvr) + + // build dummy rtype holding gc program + x := &abi.Type{ + Align_: goarch.PtrSize, + // Don't add spill space here; it's only necessary in + // reflectcall's frame, not in the allocated frame. + // TODO(mknyszek): Remove this comment when register + // spill space in the frame is no longer required. + Size_: align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize), + PtrBytes: uintptr(abid.stackPtrs.n) * goarch.PtrSize, + } + if abid.stackPtrs.n > 0 { + x.GCData = &abid.stackPtrs.data[0] + } + + var s string + if rcvr != nil { + s = "methodargs(" + stringFor(rcvr) + ")(" + stringFor(&t.Type) + ")" + } else { + s = "funcargs(" + stringFor(&t.Type) + ")" + } + x.Str = resolveReflectName(newName(s, "", false, false)) + + // cache result for future callers + framePool = &sync.Pool{New: func() any { + return unsafe_New(x) + }} + lti, _ := layoutCache.LoadOrStore(k, layoutType{ + t: x, + framePool: framePool, + abid: abid, + }) + lt := lti.(layoutType) + return lt.t, lt.framePool, lt.abid +} + +// ifaceIndir reports whether t is stored indirectly in an interface value. +func ifaceIndir(t *abi.Type) bool { + return t.Kind_&kindDirectIface == 0 +} + +// Note: this type must agree with runtime.bitvector. +type bitVector struct { + n uint32 // number of bits + data []byte +} + +// append a bit to the bitmap. +func (bv *bitVector) append(bit uint8) { + if bv.n%(8*goarch.PtrSize) == 0 { + // Runtime needs pointer masks to be a multiple of uintptr in size. + // Since reflect passes bv.data directly to the runtime as a pointer mask, + // we append a full uintptr of zeros at a time. + for i := 0; i < goarch.PtrSize; i++ { + bv.data = append(bv.data, 0) + } + } + bv.data[bv.n/8] |= bit << (bv.n % 8) + bv.n++ +} + +func addTypeBits(bv *bitVector, offset uintptr, t *abi.Type) { + if t.PtrBytes == 0 { + return + } + + switch Kind(t.Kind_ & kindMask) { + case Chan, Func, Map, Pointer, Slice, String, UnsafePointer: + // 1 pointer at start of representation + for bv.n < uint32(offset/uintptr(goarch.PtrSize)) { + bv.append(0) + } + bv.append(1) + + case Interface: + // 2 pointers + for bv.n < uint32(offset/uintptr(goarch.PtrSize)) { + bv.append(0) + } + bv.append(1) + bv.append(1) + + case Array: + // repeat inner type + tt := (*arrayType)(unsafe.Pointer(t)) + for i := 0; i < int(tt.Len); i++ { + addTypeBits(bv, offset+uintptr(i)*tt.Elem.Size_, tt.Elem) + } + + case Struct: + // apply fields + tt := (*structType)(unsafe.Pointer(t)) + for i := range tt.Fields { + f := &tt.Fields[i] + addTypeBits(bv, offset+f.Offset, f.Typ) + } + } +} + +// TypeFor returns the [Type] that represents the type argument T. 
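+// For example (illustrative):
+//
+//	TypeFor[int]()       // equivalent to TypeOf(0)
+//	TypeFor[io.Reader]() // the interface type itself, which TypeOf
+//	                     // cannot produce from a nil io.Reader value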
+func TypeFor[T any]() Type { + return TypeOf((*T)(nil)).Elem() +} diff --git a/platform/dbops/binaries/go/go/src/reflect/type_test.go b/platform/dbops/binaries/go/go/src/reflect/type_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9e124273a2e0b50320e06f27162f941c695b9105 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/type_test.go @@ -0,0 +1,59 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflect_test + +import ( + "reflect" + "testing" +) + +func TestTypeFor(t *testing.T) { + type ( + mystring string + myiface interface{} + ) + + testcases := []struct { + wantFrom any + got reflect.Type + }{ + {new(int), reflect.TypeFor[int]()}, + {new(int64), reflect.TypeFor[int64]()}, + {new(string), reflect.TypeFor[string]()}, + {new(mystring), reflect.TypeFor[mystring]()}, + {new(any), reflect.TypeFor[any]()}, + {new(myiface), reflect.TypeFor[myiface]()}, + } + for _, tc := range testcases { + want := reflect.ValueOf(tc.wantFrom).Elem().Type() + if want != tc.got { + t.Errorf("unexpected reflect.Type: got %v; want %v", tc.got, want) + } + } +} + +func TestStructOfEmbeddedIfaceMethodCall(t *testing.T) { + type Named interface { + Name() string + } + + typ := reflect.StructOf([]reflect.StructField{ + { + Anonymous: true, + Name: "Named", + Type: reflect.TypeFor[Named](), + }, + }) + + v := reflect.New(typ).Elem() + v.Field(0).Set( + reflect.ValueOf(reflect.TypeFor[string]()), + ) + + x := v.Interface().(Named) + shouldPanic("StructOf does not support methods of embedded interfaces", func() { + _ = x.Name() + }) +} diff --git a/platform/dbops/binaries/go/go/src/reflect/value.go b/platform/dbops/binaries/go/go/src/reflect/value.go new file mode 100644 index 0000000000000000000000000000000000000000..06f22f7428139f57d06412662cf9cd3880626111 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/value.go @@ -0,0 +1,4011 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflect + +import ( + "errors" + "internal/abi" + "internal/goarch" + "internal/itoa" + "internal/unsafeheader" + "math" + "runtime" + "unsafe" +) + +// Value is the reflection interface to a Go value. +// +// Not all methods apply to all kinds of values. Restrictions, +// if any, are noted in the documentation for each method. +// Use the Kind method to find out the kind of value before +// calling kind-specific methods. Calling a method +// inappropriate to the kind of type causes a run time panic. +// +// The zero Value represents no value. +// Its IsValid method returns false, its Kind method returns Invalid, +// its String method returns "", and all other methods panic. +// Most functions and methods never return an invalid value. +// If one does, its documentation states the conditions explicitly. +// +// A Value can be used concurrently by multiple goroutines provided that +// the underlying Go value can be used concurrently for the equivalent +// direct operations. +// +// To compare two Values, compare the results of the Interface method. +// Using == on two Values does not compare the underlying values +// they represent. +type Value struct { + // typ_ holds the type of the value represented by a Value. + // Access using the typ method to avoid escape of v. 
+	typ_ *abi.Type
+
+	// Pointer-valued data or, if flagIndir is set, pointer to data.
+	// Valid when either flagIndir is set or typ.pointers() is true.
+	ptr unsafe.Pointer
+
+	// flag holds metadata about the value.
+	//
+	// The lowest five bits give the Kind of the value, mirroring typ.Kind().
+	//
+	// The next set of bits are flag bits:
+	//	- flagStickyRO: obtained via unexported not embedded field, so read-only
+	//	- flagEmbedRO: obtained via unexported embedded field, so read-only
+	//	- flagIndir: val holds a pointer to the data
+	//	- flagAddr: v.CanAddr is true (implies flagIndir and ptr is non-nil)
+	//	- flagMethod: v is a method value.
+	// If ifaceIndir(typ), code can assume that flagIndir is set.
+	//
+	// The remaining 22+ bits give a method number for method values.
+	// If flag.kind() != Func, code can assume that flagMethod is unset.
+	flag
+
+	// A method value represents a curried method invocation
+	// like r.Read for some receiver r. The typ+val+flag bits describe
+	// the receiver r, but the flag's Kind bits say Func (methods are
+	// functions), and the top bits of the flag give the method number
+	// in r's type's method table.
+}
+
+type flag uintptr
+
+const (
+	flagKindWidth        = 5 // there are 27 kinds
+	flagKindMask    flag = 1<<flagKindWidth - 1
+	flagStickyRO    flag = 1 << 5
+	flagEmbedRO     flag = 1 << 6
+	flagIndir       flag = 1 << 7
+	flagAddr        flag = 1 << 8
+	flagMethod      flag = 1 << 9
+	flagMethodShift      = 10
+	flagRO          flag = flagStickyRO | flagEmbedRO
+)
+
+func (f flag) kind() Kind {
+	return Kind(f & flagKindMask)
+}
+
+func (f flag) ro() flag {
+	if f&flagRO != 0 {
+		return flagStickyRO
+	}
+	return 0
+}
+
+// typ returns the *abi.Type stored in the Value.
+func (v Value) typ() *abi.Type {
+	// Types are either static (for compiler-created types) or
+	// heap-allocated but always reachable (for reflection-created
+	// types, held in the central map). So there is no need to
+	// escape types. noescape here help avoid unnecessary escape
+	// of v.
+	return (*abi.Type)(noescape(unsafe.Pointer(v.typ_)))
+}
+
+// pointer returns the underlying pointer represented by v.
+// v.Kind() must be Pointer, Map, Chan, Func, or UnsafePointer
+// if v.Kind() == Pointer, the base type must not be not-in-heap.
+func (v Value) pointer() unsafe.Pointer {
+	if v.typ().Size() != goarch.PtrSize || !v.typ().Pointers() {
+		panic("can't call pointer on a non-pointer Value")
+	}
+	if v.flag&flagIndir != 0 {
+		return *(*unsafe.Pointer)(v.ptr)
+	}
+	return v.ptr
+}
+
+// packEface converts v to the empty interface.
+func packEface(v Value) any {
+	t := v.typ()
+	var i any
+	e := (*emptyInterface)(unsafe.Pointer(&i))
+	// First, fill in the data portion of the interface.
+	switch {
+	case t.IfaceIndir():
+		if v.flag&flagIndir == 0 {
+			panic("bad indir")
+		}
+		// Value is indirect, and so is the interface we're making.
+		ptr := v.ptr
+		if v.flag&flagAddr != 0 {
+			c := unsafe_New(t)
+			typedmemmove(t, c, ptr)
+			ptr = c
+		}
+		e.word = ptr
+	case v.flag&flagIndir != 0:
+		// Value is indirect, but interface is direct. We need
+		// to load the data at v.ptr into the interface data word.
+		e.word = *(*unsafe.Pointer)(v.ptr)
+	default:
+		// Value is direct, and so is the interface.
+		e.word = v.ptr
+	}
+	// Now, fill in the type portion. We're very careful here not
+	// to have any operation between the e.word and e.typ assignments
+	// that would let the garbage collector observe the partially-built
+	// interface value.
+	e.typ = t
+	return i
+}
+
+// unpackEface converts the empty interface i to a Value.
+func unpackEface(i any) Value {
+	e := (*emptyInterface)(unsafe.Pointer(&i))
+	// NOTE: don't read e.word until we know whether it is really a pointer or not.
+	t := e.typ
+	if t == nil {
+		return Value{}
+	}
+	f := flag(t.Kind())
+	if t.IfaceIndir() {
+		f |= flagIndir
+	}
+	return Value{t, e.word, f}
+}
+
+// A ValueError occurs when a Value method is invoked on
+// a Value that does not support it. Such cases are documented
+// in the description of each method.
+type ValueError struct {
+	Method string
+	Kind   Kind
+}
+
+func (e *ValueError) Error() string {
+	if e.Kind == 0 {
+		return "reflect: call of " + e.Method + " on zero Value"
+	}
+	return "reflect: call of " + e.Method + " on " + e.Kind.String() + " Value"
+}
+
+// valueMethodName returns the name of the exported calling method on Value.
+func valueMethodName() string {
+	var pc [5]uintptr
+	n := runtime.Callers(1, pc[:])
+	frames := runtime.CallersFrames(pc[:n])
+	var frame runtime.Frame
+	for more := true; more; {
+		const prefix = "reflect.Value."
+		frame, more = frames.Next()
+		name := frame.Function
+		if len(name) > len(prefix) && name[:len(prefix)] == prefix {
+			methodName := name[len(prefix):]
+			if len(methodName) > 0 && 'A' <= methodName[0] && methodName[0] <= 'Z' {
+				return name
+			}
+		}
+	}
+	return "unknown method"
+}
+
+// emptyInterface is the header for an interface{} value.
+type emptyInterface struct {
+	typ  *abi.Type
+	word unsafe.Pointer
+}
+
+// nonEmptyInterface is the header for an interface value with methods.
+type nonEmptyInterface struct {
+	// see ../runtime/iface.go:/Itab
+	itab *struct {
+		ityp *abi.Type // static interface type
+		typ  *abi.Type // dynamic concrete type
+		hash uint32    // copy of typ.hash
+		_    [4]byte
+		fun  [100000]unsafe.Pointer // method table
+	}
+	word unsafe.Pointer
+}
+
+// mustBe panics if f's kind is not expected.
+// Making this a method on flag instead of on Value
+// (and embedding flag in Value) means that we can write
+// the very clear v.mustBe(Bool) and have it compile into
+// v.flag.mustBe(Bool), which will only bother to copy the
+// single important word for the receiver.
+func (f flag) mustBe(expected Kind) {
+	// TODO(mvdan): use f.kind() again once mid-stack inlining gets better
+	if Kind(f&flagKindMask) != expected {
+		panic(&ValueError{valueMethodName(), f.kind()})
+	}
+}
+
+// mustBeExported panics if f records that the value was obtained using
+// an unexported field.
+func (f flag) mustBeExported() {
+	if f == 0 || f&flagRO != 0 {
+		f.mustBeExportedSlow()
+	}
+}
+
+func (f flag) mustBeExportedSlow() {
+	if f == 0 {
+		panic(&ValueError{valueMethodName(), Invalid})
+	}
+	if f&flagRO != 0 {
+		panic("reflect: " + valueMethodName() + " using value obtained using unexported field")
+	}
+}
+
+// mustBeAssignable panics if f records that the value is not assignable,
+// which is to say that either it was obtained using an unexported field
+// or it is not addressable.
+func (f flag) mustBeAssignable() {
+	if f&flagRO != 0 || f&flagAddr == 0 {
+		f.mustBeAssignableSlow()
+	}
+}
+
+func (f flag) mustBeAssignableSlow() {
+	if f == 0 {
+		panic(&ValueError{valueMethodName(), Invalid})
+	}
+	// Assignable if addressable and not read-only.
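+	// (The read-only check runs first so that a value that is both
+	// read-only and unaddressable reports the unexported-field error.)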
+ if f&flagRO != 0 { + panic("reflect: " + valueMethodName() + " using value obtained using unexported field") + } + if f&flagAddr == 0 { + panic("reflect: " + valueMethodName() + " using unaddressable value") + } +} + +// Addr returns a pointer value representing the address of v. +// It panics if [Value.CanAddr] returns false. +// Addr is typically used to obtain a pointer to a struct field +// or slice element in order to call a method that requires a +// pointer receiver. +func (v Value) Addr() Value { + if v.flag&flagAddr == 0 { + panic("reflect.Value.Addr of unaddressable value") + } + // Preserve flagRO instead of using v.flag.ro() so that + // v.Addr().Elem() is equivalent to v (#32772) + fl := v.flag & flagRO + return Value{ptrTo(v.typ()), v.ptr, fl | flag(Pointer)} +} + +// Bool returns v's underlying value. +// It panics if v's kind is not [Bool]. +func (v Value) Bool() bool { + // panicNotBool is split out to keep Bool inlineable. + if v.kind() != Bool { + v.panicNotBool() + } + return *(*bool)(v.ptr) +} + +func (v Value) panicNotBool() { + v.mustBe(Bool) +} + +var bytesType = rtypeOf(([]byte)(nil)) + +// Bytes returns v's underlying value. +// It panics if v's underlying value is not a slice of bytes or +// an addressable array of bytes. +func (v Value) Bytes() []byte { + // bytesSlow is split out to keep Bytes inlineable for unnamed []byte. + if v.typ_ == bytesType { // ok to use v.typ_ directly as comparison doesn't cause escape + return *(*[]byte)(v.ptr) + } + return v.bytesSlow() +} + +func (v Value) bytesSlow() []byte { + switch v.kind() { + case Slice: + if v.typ().Elem().Kind() != abi.Uint8 { + panic("reflect.Value.Bytes of non-byte slice") + } + // Slice is always bigger than a word; assume flagIndir. + return *(*[]byte)(v.ptr) + case Array: + if v.typ().Elem().Kind() != abi.Uint8 { + panic("reflect.Value.Bytes of non-byte array") + } + if !v.CanAddr() { + panic("reflect.Value.Bytes of unaddressable byte array") + } + p := (*byte)(v.ptr) + n := int((*arrayType)(unsafe.Pointer(v.typ())).Len) + return unsafe.Slice(p, n) + } + panic(&ValueError{"reflect.Value.Bytes", v.kind()}) +} + +// runes returns v's underlying value. +// It panics if v's underlying value is not a slice of runes (int32s). +func (v Value) runes() []rune { + v.mustBe(Slice) + if v.typ().Elem().Kind() != abi.Int32 { + panic("reflect.Value.Bytes of non-rune slice") + } + // Slice is always bigger than a word; assume flagIndir. + return *(*[]rune)(v.ptr) +} + +// CanAddr reports whether the value's address can be obtained with [Value.Addr]. +// Such values are called addressable. A value is addressable if it is +// an element of a slice, an element of an addressable array, +// a field of an addressable struct, or the result of dereferencing a pointer. +// If CanAddr returns false, calling [Value.Addr] will panic. +func (v Value) CanAddr() bool { + return v.flag&flagAddr != 0 +} + +// CanSet reports whether the value of v can be changed. +// A [Value] can be changed only if it is addressable and was not +// obtained by the use of unexported struct fields. +// If CanSet returns false, calling [Value.Set] or any type-specific +// setter (e.g., [Value.SetBool], [Value.SetInt]) will panic. +func (v Value) CanSet() bool { + return v.flag&(flagAddr|flagRO) == flagAddr +} + +// Call calls the function v with the input arguments in. +// For example, if len(in) == 3, v.Call(in) represents the Go call v(in[0], in[1], in[2]). +// Call panics if v's Kind is not [Func]. +// It returns the output results as Values. 
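+// A minimal sketch:
+//
+//	fn := reflect.ValueOf(strings.Repeat)
+//	out := fn.Call([]reflect.Value{reflect.ValueOf("ab"), reflect.ValueOf(2)})
+//	// out[0].String() == "abab"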
+// As in Go, each input argument must be assignable to the +// type of the function's corresponding input parameter. +// If v is a variadic function, Call creates the variadic slice parameter +// itself, copying in the corresponding values. +func (v Value) Call(in []Value) []Value { + v.mustBe(Func) + v.mustBeExported() + return v.call("Call", in) +} + +// CallSlice calls the variadic function v with the input arguments in, +// assigning the slice in[len(in)-1] to v's final variadic argument. +// For example, if len(in) == 3, v.CallSlice(in) represents the Go call v(in[0], in[1], in[2]...). +// CallSlice panics if v's Kind is not [Func] or if v is not variadic. +// It returns the output results as Values. +// As in Go, each input argument must be assignable to the +// type of the function's corresponding input parameter. +func (v Value) CallSlice(in []Value) []Value { + v.mustBe(Func) + v.mustBeExported() + return v.call("CallSlice", in) +} + +var callGC bool // for testing; see TestCallMethodJump and TestCallArgLive + +const debugReflectCall = false + +func (v Value) call(op string, in []Value) []Value { + // Get function pointer, type. + t := (*funcType)(unsafe.Pointer(v.typ())) + var ( + fn unsafe.Pointer + rcvr Value + rcvrtype *abi.Type + ) + if v.flag&flagMethod != 0 { + rcvr = v + rcvrtype, t, fn = methodReceiver(op, v, int(v.flag)>>flagMethodShift) + } else if v.flag&flagIndir != 0 { + fn = *(*unsafe.Pointer)(v.ptr) + } else { + fn = v.ptr + } + + if fn == nil { + panic("reflect.Value.Call: call of nil function") + } + + isSlice := op == "CallSlice" + n := t.NumIn() + isVariadic := t.IsVariadic() + if isSlice { + if !isVariadic { + panic("reflect: CallSlice of non-variadic function") + } + if len(in) < n { + panic("reflect: CallSlice with too few input arguments") + } + if len(in) > n { + panic("reflect: CallSlice with too many input arguments") + } + } else { + if isVariadic { + n-- + } + if len(in) < n { + panic("reflect: Call with too few input arguments") + } + if !isVariadic && len(in) > n { + panic("reflect: Call with too many input arguments") + } + } + for _, x := range in { + if x.Kind() == Invalid { + panic("reflect: " + op + " using zero Value argument") + } + } + for i := 0; i < n; i++ { + if xt, targ := in[i].Type(), t.In(i); !xt.AssignableTo(toRType(targ)) { + panic("reflect: " + op + " using " + xt.String() + " as type " + stringFor(targ)) + } + } + if !isSlice && isVariadic { + // prepare slice for remaining values + m := len(in) - n + slice := MakeSlice(toRType(t.In(n)), m, m) + elem := toRType(t.In(n)).Elem() // FIXME cast to slice type and Elem() + for i := 0; i < m; i++ { + x := in[n+i] + if xt := x.Type(); !xt.AssignableTo(elem) { + panic("reflect: cannot use " + xt.String() + " as type " + elem.String() + " in " + op) + } + slice.Index(i).Set(x) + } + origIn := in + in = make([]Value, n+1) + copy(in[:n], origIn) + in[n] = slice + } + + nin := len(in) + if nin != t.NumIn() { + panic("reflect.Value.Call: wrong argument count") + } + nout := t.NumOut() + + // Register argument space. + var regArgs abi.RegArgs + + // Compute frame type. + frametype, framePool, abid := funcLayout(t, rcvrtype) + + // Allocate a chunk of memory for frame if needed. + var stackArgs unsafe.Pointer + if frametype.Size() != 0 { + if nout == 0 { + stackArgs = framePool.Get().(unsafe.Pointer) + } else { + // Can't use pool if the function has return values. + // We will leak pointer to args in ret, so its lifetime is not scoped. 
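+ // (The Values wrapped around the results below will point into
+ // this allocation, so it must remain reachable as long as they do.)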
+ stackArgs = unsafe_New(frametype)
+ }
+ }
+ frameSize := frametype.Size()
+
+ if debugReflectCall {
+ println("reflect.call", stringFor(&t.Type))
+ abid.dump()
+ }
+
+ // Copy inputs into args.
+
+ // Handle receiver.
+ inStart := 0
+ if rcvrtype != nil {
+ // Guaranteed to only be one word in size,
+ // so it will only take up exactly 1 abiStep (either
+ // in a register or on the stack).
+ switch st := abid.call.steps[0]; st.kind {
+ case abiStepStack:
+ storeRcvr(rcvr, stackArgs)
+ case abiStepPointer:
+ storeRcvr(rcvr, unsafe.Pointer(&regArgs.Ptrs[st.ireg]))
+ fallthrough
+ case abiStepIntReg:
+ storeRcvr(rcvr, unsafe.Pointer(&regArgs.Ints[st.ireg]))
+ case abiStepFloatReg:
+ storeRcvr(rcvr, unsafe.Pointer(&regArgs.Floats[st.freg]))
+ default:
+ panic("unknown ABI parameter kind")
+ }
+ inStart = 1
+ }
+
+ // Handle arguments.
+ for i, v := range in {
+ v.mustBeExported()
+ targ := toRType(t.In(i))
+ // TODO(mknyszek): Figure out if it's possible to get some
+ // scratch space for this assignment check. Previously, it
+ // was possible to use space in the argument frame.
+ v = v.assignTo("reflect.Value.Call", &targ.t, nil)
+ stepsLoop:
+ for _, st := range abid.call.stepsForValue(i + inStart) {
+ switch st.kind {
+ case abiStepStack:
+ // Copy values to the "stack."
+ addr := add(stackArgs, st.stkOff, "precomputed stack arg offset")
+ if v.flag&flagIndir != 0 {
+ typedmemmove(&targ.t, addr, v.ptr)
+ } else {
+ *(*unsafe.Pointer)(addr) = v.ptr
+ }
+ // There's only one step for a stack-allocated value.
+ break stepsLoop
+ case abiStepIntReg, abiStepPointer:
+ // Copy values to "integer registers."
+ if v.flag&flagIndir != 0 {
+ offset := add(v.ptr, st.offset, "precomputed value offset")
+ if st.kind == abiStepPointer {
+ // Duplicate this pointer in the pointer area of the
+ // register space. Otherwise, there's the potential for
+ // this to be the last reference to v.ptr.
+ regArgs.Ptrs[st.ireg] = *(*unsafe.Pointer)(offset)
+ }
+ intToReg(&regArgs, st.ireg, st.size, offset)
+ } else {
+ if st.kind == abiStepPointer {
+ // See the comment in abiStepPointer case above.
+ regArgs.Ptrs[st.ireg] = v.ptr
+ }
+ regArgs.Ints[st.ireg] = uintptr(v.ptr)
+ }
+ case abiStepFloatReg:
+ // Copy values to "float registers."
+ if v.flag&flagIndir == 0 {
+ panic("attempted to copy pointer to FP register")
+ }
+ offset := add(v.ptr, st.offset, "precomputed value offset")
+ floatToReg(&regArgs, st.freg, st.size, offset)
+ default:
+ panic("unknown ABI part kind")
+ }
+ }
+ }
+ // TODO(mknyszek): Remove this when we no longer have
+ // caller reserved spill space.
+ frameSize = align(frameSize, goarch.PtrSize)
+ frameSize += abid.spill
+
+ // Mark pointers in registers for the return path.
+ regArgs.ReturnIsPtr = abid.outRegPtrs
+
+ if debugReflectCall {
+ regArgs.Dump()
+ }
+
+ // For testing; see TestCallArgLive.
+ if callGC {
+ runtime.GC()
+ }
+
+ // Call.
+ call(frametype, fn, stackArgs, uint32(frametype.Size()), uint32(abid.retOffset), uint32(frameSize), &regArgs)
+
+ // For testing; see TestCallMethodJump.
+ if callGC {
+ runtime.GC()
+ }
+
+ var ret []Value
+ if nout == 0 {
+ if stackArgs != nil {
+ typedmemclr(frametype, stackArgs)
+ framePool.Put(stackArgs)
+ }
+ } else {
+ if stackArgs != nil {
+ // Zero the now unused input area of args,
+ // because the Values returned by this function contain pointers to the args object,
+ // and will thus keep the args object alive indefinitely.
+ typedmemclrpartial(frametype, stackArgs, 0, abid.retOffset)
+ }
+
+ // Wrap Values around return values in args.
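+ // Each result comes back in one of three ways, mirroring the
+ // argument path above: addressed in place in the stack frame,
+ // read directly from a pointer register, or copied out of
+ // scattered registers into a fresh allocation.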
+ ret = make([]Value, nout)
+ for i := 0; i < nout; i++ {
+ tv := t.Out(i)
+ if tv.Size() == 0 {
+ // For zero-sized return value, args+off may point to the next object.
+ // In this case, return the zero value instead.
+ ret[i] = Zero(toRType(tv))
+ continue
+ }
+ steps := abid.ret.stepsForValue(i)
+ if st := steps[0]; st.kind == abiStepStack {
+ // This value is on the stack. If part of a value is stack
+ // allocated, the entire value is according to the ABI. So
+ // just make an indirection into the allocated frame.
+ fl := flagIndir | flag(tv.Kind())
+ ret[i] = Value{tv, add(stackArgs, st.stkOff, "tv.Size() != 0"), fl}
+ // Note: this does introduce false sharing between results -
+ // if any result is live, they are all live.
+ // (And the space for the args is live as well, but as we've
+ // cleared that space it isn't as big a deal.)
+ continue
+ }
+
+ // Handle pointers passed in registers.
+ if !ifaceIndir(tv) {
+ // Pointer-valued data gets put directly
+ // into v.ptr.
+ if steps[0].kind != abiStepPointer {
+ print("kind=", steps[0].kind, ", type=", stringFor(tv), "\n")
+ panic("mismatch between ABI description and types")
+ }
+ ret[i] = Value{tv, regArgs.Ptrs[steps[0].ireg], flag(tv.Kind())}
+ continue
+ }
+
+ // All that's left is values passed in registers that we need to
+ // create space for and copy values back into.
+ //
+ // TODO(mknyszek): We make a new allocation for each register-allocated
+ // value, but previously we could always point into the heap-allocated
+ // stack frame. This is a regression that could be fixed by adding
+ // additional space to the allocated stack frame and storing the
+ // register-allocated return values into the allocated stack frame and
+ // referring there in the resulting Value.
+ s := unsafe_New(tv)
+ for _, st := range steps {
+ switch st.kind {
+ case abiStepIntReg:
+ offset := add(s, st.offset, "precomputed value offset")
+ intFromReg(&regArgs, st.ireg, st.size, offset)
+ case abiStepPointer:
+ s := add(s, st.offset, "precomputed value offset")
+ *((*unsafe.Pointer)(s)) = regArgs.Ptrs[st.ireg]
+ case abiStepFloatReg:
+ offset := add(s, st.offset, "precomputed value offset")
+ floatFromReg(&regArgs, st.freg, st.size, offset)
+ case abiStepStack:
+ panic("register-based return value has stack component")
+ default:
+ panic("unknown ABI part kind")
+ }
+ }
+ ret[i] = Value{tv, s, flagIndir | flag(tv.Kind())}
+ }
+ }
+
+ return ret
+}
+
+// callReflect is the call implementation used by a function
+// returned by MakeFunc. In many ways it is the opposite of the
+// method Value.call above. The method above converts a call using Values
+// into a call of a function with a concrete argument frame, while
+// callReflect converts a call of a function with a concrete argument
+// frame into a call using Values.
+// It is in this file so that it can be next to the call method above.
+// The remainder of the MakeFunc implementation is in makefunc.go.
+//
+// NOTE: This function must be marked as a "wrapper" in the generated code,
+// so that the linker can make it work correctly for panic and recover.
+// The gc compilers know to do that for the name "reflect.callReflect".
+//
+// ctxt is the "closure" generated by MakeFunc.
+// frame is a pointer to the arguments to that closure on the stack.
+// retValid points to a boolean which should be set when the results
+// section of frame is set.
+//
+// regs contains the argument values passed in registers and will contain
+// the values returned from ctxt.fn in registers.
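+//
+// As a sketch of how a call reaches this function: a func value built with
+//
+//	swap := reflect.MakeFunc(t, func(in []reflect.Value) []reflect.Value {
+//		return []reflect.Value{in[1], in[0]}
+//	})
+//
+// lands here on every invocation, with ctxt.fn holding the closure that
+// was passed to MakeFunc (t standing in for some two-argument func type).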
+func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs *abi.RegArgs) { + if callGC { + // Call GC upon entry during testing. + // Getting our stack scanned here is the biggest hazard, because + // our caller (makeFuncStub) could have failed to place the last + // pointer to a value in regs' pointer space, in which case it + // won't be visible to the GC. + runtime.GC() + } + ftyp := ctxt.ftyp + f := ctxt.fn + + _, _, abid := funcLayout(ftyp, nil) + + // Copy arguments into Values. + ptr := frame + in := make([]Value, 0, int(ftyp.InCount)) + for i, typ := range ftyp.InSlice() { + if typ.Size() == 0 { + in = append(in, Zero(toRType(typ))) + continue + } + v := Value{typ, nil, flag(typ.Kind())} + steps := abid.call.stepsForValue(i) + if st := steps[0]; st.kind == abiStepStack { + if ifaceIndir(typ) { + // value cannot be inlined in interface data. + // Must make a copy, because f might keep a reference to it, + // and we cannot let f keep a reference to the stack frame + // after this function returns, not even a read-only reference. + v.ptr = unsafe_New(typ) + if typ.Size() > 0 { + typedmemmove(typ, v.ptr, add(ptr, st.stkOff, "typ.size > 0")) + } + v.flag |= flagIndir + } else { + v.ptr = *(*unsafe.Pointer)(add(ptr, st.stkOff, "1-ptr")) + } + } else { + if ifaceIndir(typ) { + // All that's left is values passed in registers that we need to + // create space for the values. + v.flag |= flagIndir + v.ptr = unsafe_New(typ) + for _, st := range steps { + switch st.kind { + case abiStepIntReg: + offset := add(v.ptr, st.offset, "precomputed value offset") + intFromReg(regs, st.ireg, st.size, offset) + case abiStepPointer: + s := add(v.ptr, st.offset, "precomputed value offset") + *((*unsafe.Pointer)(s)) = regs.Ptrs[st.ireg] + case abiStepFloatReg: + offset := add(v.ptr, st.offset, "precomputed value offset") + floatFromReg(regs, st.freg, st.size, offset) + case abiStepStack: + panic("register-based return value has stack component") + default: + panic("unknown ABI part kind") + } + } + } else { + // Pointer-valued data gets put directly + // into v.ptr. + if steps[0].kind != abiStepPointer { + print("kind=", steps[0].kind, ", type=", stringFor(typ), "\n") + panic("mismatch between ABI description and types") + } + v.ptr = regs.Ptrs[steps[0].ireg] + } + } + in = append(in, v) + } + + // Call underlying function. + out := f(in) + numOut := ftyp.NumOut() + if len(out) != numOut { + panic("reflect: wrong return count from function created by MakeFunc") + } + + // Copy results back into argument frame and register space. + if numOut > 0 { + for i, typ := range ftyp.OutSlice() { + v := out[i] + if v.typ() == nil { + panic("reflect: function created by MakeFunc using " + funcName(f) + + " returned zero Value") + } + if v.flag&flagRO != 0 { + panic("reflect: function created by MakeFunc using " + funcName(f) + + " returned value obtained from unexported field") + } + if typ.Size() == 0 { + continue + } + + // Convert v to type typ if v is assignable to a variable + // of type t in the language spec. + // See issue 28761. + // + // + // TODO(mknyszek): In the switch to the register ABI we lost + // the scratch space here for the register cases (and + // temporarily for all the cases). + // + // If/when this happens, take note of the following: + // + // We must clear the destination before calling assignTo, + // in case assignTo writes (with memory barriers) to the + // target location used as scratch space. See issue 39541. 
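+ // assignTo both checks assignability and, when the result type is
+ // an interface, boxes the concrete value into the interface
+ // representation that the caller's frame expects.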
+ v = v.assignTo("reflect.MakeFunc", typ, nil) + stepsLoop: + for _, st := range abid.ret.stepsForValue(i) { + switch st.kind { + case abiStepStack: + // Copy values to the "stack." + addr := add(ptr, st.stkOff, "precomputed stack arg offset") + // Do not use write barriers. The stack space used + // for this call is not adequately zeroed, and we + // are careful to keep the arguments alive until we + // return to makeFuncStub's caller. + if v.flag&flagIndir != 0 { + memmove(addr, v.ptr, st.size) + } else { + // This case must be a pointer type. + *(*uintptr)(addr) = uintptr(v.ptr) + } + // There's only one step for a stack-allocated value. + break stepsLoop + case abiStepIntReg, abiStepPointer: + // Copy values to "integer registers." + if v.flag&flagIndir != 0 { + offset := add(v.ptr, st.offset, "precomputed value offset") + intToReg(regs, st.ireg, st.size, offset) + } else { + // Only populate the Ints space on the return path. + // This is safe because out is kept alive until the + // end of this function, and the return path through + // makeFuncStub has no preemption, so these pointers + // are always visible to the GC. + regs.Ints[st.ireg] = uintptr(v.ptr) + } + case abiStepFloatReg: + // Copy values to "float registers." + if v.flag&flagIndir == 0 { + panic("attempted to copy pointer to FP register") + } + offset := add(v.ptr, st.offset, "precomputed value offset") + floatToReg(regs, st.freg, st.size, offset) + default: + panic("unknown ABI part kind") + } + } + } + } + + // Announce that the return values are valid. + // After this point the runtime can depend on the return values being valid. + *retValid = true + + // We have to make sure that the out slice lives at least until + // the runtime knows the return values are valid. Otherwise, the + // return values might not be scanned by anyone during a GC. + // (out would be dead, and the return slots not yet alive.) + runtime.KeepAlive(out) + + // runtime.getArgInfo expects to be able to find ctxt on the + // stack when it finds our caller, makeFuncStub. Make sure it + // doesn't get garbage collected. + runtime.KeepAlive(ctxt) +} + +// methodReceiver returns information about the receiver +// described by v. The Value v may or may not have the +// flagMethod bit set, so the kind cached in v.flag should +// not be used. +// The return value rcvrtype gives the method's actual receiver type. +// The return value t gives the method type signature (without the receiver). +// The return value fn is a pointer to the method code. 
+func methodReceiver(op string, v Value, methodIndex int) (rcvrtype *abi.Type, t *funcType, fn unsafe.Pointer) {
+ i := methodIndex
+ if v.typ().Kind() == abi.Interface {
+ tt := (*interfaceType)(unsafe.Pointer(v.typ()))
+ if uint(i) >= uint(len(tt.Methods)) {
+ panic("reflect: internal error: invalid method index")
+ }
+ m := &tt.Methods[i]
+ if !tt.nameOff(m.Name).IsExported() {
+ panic("reflect: " + op + " of unexported method")
+ }
+ iface := (*nonEmptyInterface)(v.ptr)
+ if iface.itab == nil {
+ panic("reflect: " + op + " of method on nil interface value")
+ }
+ rcvrtype = iface.itab.typ
+ fn = unsafe.Pointer(&iface.itab.fun[i])
+ t = (*funcType)(unsafe.Pointer(tt.typeOff(m.Typ)))
+ } else {
+ rcvrtype = v.typ()
+ ms := v.typ().ExportedMethods()
+ if uint(i) >= uint(len(ms)) {
+ panic("reflect: internal error: invalid method index")
+ }
+ m := ms[i]
+ if !nameOffFor(v.typ(), m.Name).IsExported() {
+ panic("reflect: " + op + " of unexported method")
+ }
+ ifn := textOffFor(v.typ(), m.Ifn)
+ fn = unsafe.Pointer(&ifn)
+ t = (*funcType)(unsafe.Pointer(typeOffFor(v.typ(), m.Mtyp)))
+ }
+ return
+}
+
+// v is a method receiver. Store at p the word which is used to
+// encode that receiver at the start of the argument list.
+// Reflect uses the "interface" calling convention for
+// methods, which always uses one word to record the receiver.
+func storeRcvr(v Value, p unsafe.Pointer) {
+ t := v.typ()
+ if t.Kind() == abi.Interface {
+ // the interface data word becomes the receiver word
+ iface := (*nonEmptyInterface)(v.ptr)
+ *(*unsafe.Pointer)(p) = iface.word
+ } else if v.flag&flagIndir != 0 && !ifaceIndir(t) {
+ *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(v.ptr)
+ } else {
+ *(*unsafe.Pointer)(p) = v.ptr
+ }
+}
+
+// align returns the result of rounding x up to a multiple of n.
+// n must be a power of two.
+func align(x, n uintptr) uintptr {
+ return (x + n - 1) &^ (n - 1)
+}
+
+// callMethod is the call implementation used by a function returned
+// by makeMethodValue (used by v.Method(i).Interface()).
+// It is a streamlined version of the usual reflect call: the caller has
+// already laid out the argument frame for us, so we don't have
+// to deal with individual Values for each argument.
+// It is in this file so that it can be next to the two similar functions above.
+// The remainder of the makeMethodValue implementation is in makefunc.go.
+//
+// NOTE: This function must be marked as a "wrapper" in the generated code,
+// so that the linker can make it work correctly for panic and recover.
+// The gc compilers know to do that for the name "reflect.callMethod".
+//
+// ctxt is the "closure" generated by makeMethodValue.
+// frame is a pointer to the arguments to that closure on the stack.
+// retValid points to a boolean which should be set when the results
+// section of frame is set.
+//
+// regs contains the argument values passed in registers and will contain
+// the values returned from ctxt.fn in registers.
+func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool, regs *abi.RegArgs) {
+ rcvr := ctxt.rcvr
+ rcvrType, valueFuncType, methodFn := methodReceiver("call", rcvr, ctxt.method)
+
+ // There are two ABIs at play here.
+ //
+ // methodValueCall was invoked with the ABI assuming there was no
+ // receiver ("value ABI") and that's what frame and regs are holding.
+ //
+ // Meanwhile, we need to actually call the method with a receiver, which
+ // has its own ABI ("method ABI"). Everything that follows is a translation
+ // between the two.
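+ //
+ // Concretely: compute layouts under both ABIs, store the receiver
+ // into the method frame, shift every remaining argument over by
+ // one parameter slot, call, then copy the results back out.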
+ _, _, valueABI := funcLayout(valueFuncType, nil) + valueFrame, valueRegs := frame, regs + methodFrameType, methodFramePool, methodABI := funcLayout(valueFuncType, rcvrType) + + // Make a new frame that is one word bigger so we can store the receiver. + // This space is used for both arguments and return values. + methodFrame := methodFramePool.Get().(unsafe.Pointer) + var methodRegs abi.RegArgs + + // Deal with the receiver. It's guaranteed to only be one word in size. + switch st := methodABI.call.steps[0]; st.kind { + case abiStepStack: + // Only copy the receiver to the stack if the ABI says so. + // Otherwise, it'll be in a register already. + storeRcvr(rcvr, methodFrame) + case abiStepPointer: + // Put the receiver in a register. + storeRcvr(rcvr, unsafe.Pointer(&methodRegs.Ptrs[st.ireg])) + fallthrough + case abiStepIntReg: + storeRcvr(rcvr, unsafe.Pointer(&methodRegs.Ints[st.ireg])) + case abiStepFloatReg: + storeRcvr(rcvr, unsafe.Pointer(&methodRegs.Floats[st.freg])) + default: + panic("unknown ABI parameter kind") + } + + // Translate the rest of the arguments. + for i, t := range valueFuncType.InSlice() { + valueSteps := valueABI.call.stepsForValue(i) + methodSteps := methodABI.call.stepsForValue(i + 1) + + // Zero-sized types are trivial: nothing to do. + if len(valueSteps) == 0 { + if len(methodSteps) != 0 { + panic("method ABI and value ABI do not align") + } + continue + } + + // There are four cases to handle in translating each + // argument: + // 1. Stack -> stack translation. + // 2. Stack -> registers translation. + // 3. Registers -> stack translation. + // 4. Registers -> registers translation. + + // If the value ABI passes the value on the stack, + // then the method ABI does too, because it has strictly + // fewer arguments. Simply copy between the two. + if vStep := valueSteps[0]; vStep.kind == abiStepStack { + mStep := methodSteps[0] + // Handle stack -> stack translation. + if mStep.kind == abiStepStack { + if vStep.size != mStep.size { + panic("method ABI and value ABI do not align") + } + typedmemmove(t, + add(methodFrame, mStep.stkOff, "precomputed stack offset"), + add(valueFrame, vStep.stkOff, "precomputed stack offset")) + continue + } + // Handle stack -> register translation. + for _, mStep := range methodSteps { + from := add(valueFrame, vStep.stkOff+mStep.offset, "precomputed stack offset") + switch mStep.kind { + case abiStepPointer: + // Do the pointer copy directly so we get a write barrier. + methodRegs.Ptrs[mStep.ireg] = *(*unsafe.Pointer)(from) + fallthrough // We need to make sure this ends up in Ints, too. + case abiStepIntReg: + intToReg(&methodRegs, mStep.ireg, mStep.size, from) + case abiStepFloatReg: + floatToReg(&methodRegs, mStep.freg, mStep.size, from) + default: + panic("unexpected method step") + } + } + continue + } + // Handle register -> stack translation. + if mStep := methodSteps[0]; mStep.kind == abiStepStack { + for _, vStep := range valueSteps { + to := add(methodFrame, mStep.stkOff+vStep.offset, "precomputed stack offset") + switch vStep.kind { + case abiStepPointer: + // Do the pointer copy directly so we get a write barrier. + *(*unsafe.Pointer)(to) = valueRegs.Ptrs[vStep.ireg] + case abiStepIntReg: + intFromReg(valueRegs, vStep.ireg, vStep.size, to) + case abiStepFloatReg: + floatFromReg(valueRegs, vStep.freg, vStep.size, to) + default: + panic("unexpected value step") + } + } + continue + } + // Handle register -> register translation. 
+ if len(valueSteps) != len(methodSteps) {
+ // Because it's the same type for the value, and it's assigned
+ // to registers both times, it should always take up the same
+ // number of registers for each ABI.
+ panic("method ABI and value ABI don't align")
+ }
+ for i, vStep := range valueSteps {
+ mStep := methodSteps[i]
+ if mStep.kind != vStep.kind {
+ panic("method ABI and value ABI don't align")
+ }
+ switch vStep.kind {
+ case abiStepPointer:
+ // Copy this too, so we get a write barrier.
+ methodRegs.Ptrs[mStep.ireg] = valueRegs.Ptrs[vStep.ireg]
+ fallthrough
+ case abiStepIntReg:
+ methodRegs.Ints[mStep.ireg] = valueRegs.Ints[vStep.ireg]
+ case abiStepFloatReg:
+ methodRegs.Floats[mStep.freg] = valueRegs.Floats[vStep.freg]
+ default:
+ panic("unexpected value step")
+ }
+ }
+ }
+
+ methodFrameSize := methodFrameType.Size()
+ // TODO(mknyszek): Remove this when we no longer have
+ // caller reserved spill space.
+ methodFrameSize = align(methodFrameSize, goarch.PtrSize)
+ methodFrameSize += methodABI.spill
+
+ // Mark pointers in registers for the return path.
+ methodRegs.ReturnIsPtr = methodABI.outRegPtrs
+
+ // Call.
+ // Call copies the arguments from scratch to the stack, calls fn,
+ // and then copies the results back into scratch.
+ call(methodFrameType, methodFn, methodFrame, uint32(methodFrameType.Size()), uint32(methodABI.retOffset), uint32(methodFrameSize), &methodRegs)
+
+ // Copy return values.
+ //
+ // This is somewhat simpler because both ABIs have an identical
+ // return value ABI (the types are identical). As a result, register
+ // results can simply be copied over. Stack-allocated values are laid
+ // out the same, but are at different offsets from the start of the frame
+ // because the arguments may be laid out differently.
+ // Ignore any changes to args.
+ // Avoid constructing out-of-bounds pointers if there are no return values.
+ if valueRegs != nil {
+ *valueRegs = methodRegs
+ }
+ if retSize := methodFrameType.Size() - methodABI.retOffset; retSize > 0 {
+ valueRet := add(valueFrame, valueABI.retOffset, "valueFrame's size > retOffset")
+ methodRet := add(methodFrame, methodABI.retOffset, "methodFrame's size > retOffset")
+ // This copies to the stack. Write barriers are not needed.
+ memmove(valueRet, methodRet, retSize)
+ }
+
+ // Tell the runtime it can now depend on the return values
+ // being properly initialized.
+ *retValid = true
+
+ // Clear the scratch space and put it back in the pool.
+ // This must happen after the statement above, so that the return
+ // values will always be scanned by someone.
+ typedmemclr(methodFrameType, methodFrame)
+ methodFramePool.Put(methodFrame)
+
+ // See the comment in callReflect.
+ runtime.KeepAlive(ctxt)
+
+ // Keep valueRegs alive because it may hold live pointer results.
+ // The caller (methodValueCall) has it as a stack object, which is only
+ // scanned when there is a reference to it.
+ runtime.KeepAlive(valueRegs)
+}
+
+// funcName returns the name of f, for use in error messages.
+func funcName(f func([]Value) []Value) string {
+ pc := *(*uintptr)(unsafe.Pointer(&f))
+ rf := runtime.FuncForPC(pc)
+ if rf != nil {
+ return rf.Name()
+ }
+ return "closure"
+}
+
+// Cap returns v's capacity.
+// It panics if v's Kind is not [Array], [Chan], [Slice] or pointer to [Array].
+func (v Value) Cap() int {
+ // capNonSlice is split out to keep Cap inlineable for slice kinds.
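+ // (Len below uses the same fast-path/slow-path split for the
+ // same reason.)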
+ if v.kind() == Slice { + return (*unsafeheader.Slice)(v.ptr).Cap + } + return v.capNonSlice() +} + +func (v Value) capNonSlice() int { + k := v.kind() + switch k { + case Array: + return v.typ().Len() + case Chan: + return chancap(v.pointer()) + case Ptr: + if v.typ().Elem().Kind() == abi.Array { + return v.typ().Elem().Len() + } + panic("reflect: call of reflect.Value.Cap on ptr to non-array Value") + } + panic(&ValueError{"reflect.Value.Cap", v.kind()}) +} + +// Close closes the channel v. +// It panics if v's Kind is not [Chan] or +// v is a receive-only channel. +func (v Value) Close() { + v.mustBe(Chan) + v.mustBeExported() + tt := (*chanType)(unsafe.Pointer(v.typ())) + if ChanDir(tt.Dir)&SendDir == 0 { + panic("reflect: close of receive-only channel") + } + + chanclose(v.pointer()) +} + +// CanComplex reports whether [Value.Complex] can be used without panicking. +func (v Value) CanComplex() bool { + switch v.kind() { + case Complex64, Complex128: + return true + default: + return false + } +} + +// Complex returns v's underlying value, as a complex128. +// It panics if v's Kind is not [Complex64] or [Complex128] +func (v Value) Complex() complex128 { + k := v.kind() + switch k { + case Complex64: + return complex128(*(*complex64)(v.ptr)) + case Complex128: + return *(*complex128)(v.ptr) + } + panic(&ValueError{"reflect.Value.Complex", v.kind()}) +} + +// Elem returns the value that the interface v contains +// or that the pointer v points to. +// It panics if v's Kind is not [Interface] or [Pointer]. +// It returns the zero Value if v is nil. +func (v Value) Elem() Value { + k := v.kind() + switch k { + case Interface: + var eface any + if v.typ().NumMethod() == 0 { + eface = *(*any)(v.ptr) + } else { + eface = (any)(*(*interface { + M() + })(v.ptr)) + } + x := unpackEface(eface) + if x.flag != 0 { + x.flag |= v.flag.ro() + } + return x + case Pointer: + ptr := v.ptr + if v.flag&flagIndir != 0 { + if ifaceIndir(v.typ()) { + // This is a pointer to a not-in-heap object. ptr points to a uintptr + // in the heap. That uintptr is the address of a not-in-heap object. + // In general, pointers to not-in-heap objects can be total junk. + // But Elem() is asking to dereference it, so the user has asserted + // that at least it is a valid pointer (not just an integer stored in + // a pointer slot). So let's check, to make sure that it isn't a pointer + // that the runtime will crash on if it sees it during GC or write barriers. + // Since it is a not-in-heap pointer, all pointers to the heap are + // forbidden! That makes the test pretty easy. + // See issue 48399. + if !verifyNotInHeapPtr(*(*uintptr)(ptr)) { + panic("reflect: reflect.Value.Elem on an invalid notinheap pointer") + } + } + ptr = *(*unsafe.Pointer)(ptr) + } + // The returned value's address is v's value. + if ptr == nil { + return Value{} + } + tt := (*ptrType)(unsafe.Pointer(v.typ())) + typ := tt.Elem + fl := v.flag&flagRO | flagIndir | flagAddr + fl |= flag(typ.Kind()) + return Value{typ, ptr, fl} + } + panic(&ValueError{"reflect.Value.Elem", v.kind()}) +} + +// Field returns the i'th field of the struct v. +// It panics if v's Kind is not [Struct] or i is out of range. 
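+//
+// For example, a sketch:
+//
+//	v := reflect.ValueOf(struct{ A int }{7})
+//	v.Field(0).Int() // 7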
+func (v Value) Field(i int) Value { + if v.kind() != Struct { + panic(&ValueError{"reflect.Value.Field", v.kind()}) + } + tt := (*structType)(unsafe.Pointer(v.typ())) + if uint(i) >= uint(len(tt.Fields)) { + panic("reflect: Field index out of range") + } + field := &tt.Fields[i] + typ := field.Typ + + // Inherit permission bits from v, but clear flagEmbedRO. + fl := v.flag&(flagStickyRO|flagIndir|flagAddr) | flag(typ.Kind()) + // Using an unexported field forces flagRO. + if !field.Name.IsExported() { + if field.Embedded() { + fl |= flagEmbedRO + } else { + fl |= flagStickyRO + } + } + // Either flagIndir is set and v.ptr points at struct, + // or flagIndir is not set and v.ptr is the actual struct data. + // In the former case, we want v.ptr + offset. + // In the latter case, we must have field.offset = 0, + // so v.ptr + field.offset is still the correct address. + ptr := add(v.ptr, field.Offset, "same as non-reflect &v.field") + return Value{typ, ptr, fl} +} + +// FieldByIndex returns the nested field corresponding to index. +// It panics if evaluation requires stepping through a nil +// pointer or a field that is not a struct. +func (v Value) FieldByIndex(index []int) Value { + if len(index) == 1 { + return v.Field(index[0]) + } + v.mustBe(Struct) + for i, x := range index { + if i > 0 { + if v.Kind() == Pointer && v.typ().Elem().Kind() == abi.Struct { + if v.IsNil() { + panic("reflect: indirection through nil pointer to embedded struct") + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} + +// FieldByIndexErr returns the nested field corresponding to index. +// It returns an error if evaluation requires stepping through a nil +// pointer, but panics if it must step through a field that +// is not a struct. +func (v Value) FieldByIndexErr(index []int) (Value, error) { + if len(index) == 1 { + return v.Field(index[0]), nil + } + v.mustBe(Struct) + for i, x := range index { + if i > 0 { + if v.Kind() == Ptr && v.typ().Elem().Kind() == abi.Struct { + if v.IsNil() { + return Value{}, errors.New("reflect: indirection through nil pointer to embedded struct field " + nameFor(v.typ().Elem())) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v, nil +} + +// FieldByName returns the struct field with the given name. +// It returns the zero Value if no field was found. +// It panics if v's Kind is not [Struct]. +func (v Value) FieldByName(name string) Value { + v.mustBe(Struct) + if f, ok := toRType(v.typ()).FieldByName(name); ok { + return v.FieldByIndex(f.Index) + } + return Value{} +} + +// FieldByNameFunc returns the struct field with a name +// that satisfies the match function. +// It panics if v's Kind is not [Struct]. +// It returns the zero Value if no field was found. +func (v Value) FieldByNameFunc(match func(string) bool) Value { + if f, ok := toRType(v.typ()).FieldByNameFunc(match); ok { + return v.FieldByIndex(f.Index) + } + return Value{} +} + +// CanFloat reports whether [Value.Float] can be used without panicking. +func (v Value) CanFloat() bool { + switch v.kind() { + case Float32, Float64: + return true + default: + return false + } +} + +// Float returns v's underlying value, as a float64. +// It panics if v's Kind is not [Float32] or [Float64] +func (v Value) Float() float64 { + k := v.kind() + switch k { + case Float32: + return float64(*(*float32)(v.ptr)) + case Float64: + return *(*float64)(v.ptr) + } + panic(&ValueError{"reflect.Value.Float", v.kind()}) +} + +var uint8Type = rtypeOf(uint8(0)) + +// Index returns v's i'th element. 
+// It panics if v's Kind is not [Array], [Slice], or [String] or i is out of range. +func (v Value) Index(i int) Value { + switch v.kind() { + case Array: + tt := (*arrayType)(unsafe.Pointer(v.typ())) + if uint(i) >= uint(tt.Len) { + panic("reflect: array index out of range") + } + typ := tt.Elem + offset := uintptr(i) * typ.Size() + + // Either flagIndir is set and v.ptr points at array, + // or flagIndir is not set and v.ptr is the actual array data. + // In the former case, we want v.ptr + offset. + // In the latter case, we must be doing Index(0), so offset = 0, + // so v.ptr + offset is still the correct address. + val := add(v.ptr, offset, "same as &v[i], i < tt.len") + fl := v.flag&(flagIndir|flagAddr) | v.flag.ro() | flag(typ.Kind()) // bits same as overall array + return Value{typ, val, fl} + + case Slice: + // Element flag same as Elem of Pointer. + // Addressable, indirect, possibly read-only. + s := (*unsafeheader.Slice)(v.ptr) + if uint(i) >= uint(s.Len) { + panic("reflect: slice index out of range") + } + tt := (*sliceType)(unsafe.Pointer(v.typ())) + typ := tt.Elem + val := arrayAt(s.Data, i, typ.Size(), "i < s.Len") + fl := flagAddr | flagIndir | v.flag.ro() | flag(typ.Kind()) + return Value{typ, val, fl} + + case String: + s := (*unsafeheader.String)(v.ptr) + if uint(i) >= uint(s.Len) { + panic("reflect: string index out of range") + } + p := arrayAt(s.Data, i, 1, "i < s.Len") + fl := v.flag.ro() | flag(Uint8) | flagIndir + return Value{uint8Type, p, fl} + } + panic(&ValueError{"reflect.Value.Index", v.kind()}) +} + +// CanInt reports whether Int can be used without panicking. +func (v Value) CanInt() bool { + switch v.kind() { + case Int, Int8, Int16, Int32, Int64: + return true + default: + return false + } +} + +// Int returns v's underlying value, as an int64. +// It panics if v's Kind is not [Int], [Int8], [Int16], [Int32], or [Int64]. +func (v Value) Int() int64 { + k := v.kind() + p := v.ptr + switch k { + case Int: + return int64(*(*int)(p)) + case Int8: + return int64(*(*int8)(p)) + case Int16: + return int64(*(*int16)(p)) + case Int32: + return int64(*(*int32)(p)) + case Int64: + return *(*int64)(p) + } + panic(&ValueError{"reflect.Value.Int", v.kind()}) +} + +// CanInterface reports whether [Value.Interface] can be used without panicking. +func (v Value) CanInterface() bool { + if v.flag == 0 { + panic(&ValueError{"reflect.Value.CanInterface", Invalid}) + } + return v.flag&flagRO == 0 +} + +// Interface returns v's current value as an interface{}. +// It is equivalent to: +// +// var i interface{} = (v's underlying value) +// +// It panics if the Value was obtained by accessing +// unexported struct fields. +func (v Value) Interface() (i any) { + return valueInterface(v, true) +} + +func valueInterface(v Value, safe bool) any { + if v.flag == 0 { + panic(&ValueError{"reflect.Value.Interface", Invalid}) + } + if safe && v.flag&flagRO != 0 { + // Do not allow access to unexported values via Interface, + // because they might be pointers that should not be + // writable or methods or function that should not be callable. + panic("reflect.Value.Interface: cannot return value obtained from unexported field or method") + } + if v.flag&flagMethod != 0 { + v = makeMethodValue("Interface", v) + } + + if v.kind() == Interface { + // Special case: return the element inside the interface. + // Empty interface has one layout, all interfaces with + // methods have a second layout. 
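+ // (An empty interface is a type-word/data-word pair, while an
+ // interface with methods carries an itab word instead of the bare
+ // type; see emptyInterface and nonEmptyInterface above.)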
+ if v.NumMethod() == 0 { + return *(*any)(v.ptr) + } + return *(*interface { + M() + })(v.ptr) + } + + // TODO: pass safe to packEface so we don't need to copy if safe==true? + return packEface(v) +} + +// InterfaceData returns a pair of unspecified uintptr values. +// It panics if v's Kind is not Interface. +// +// In earlier versions of Go, this function returned the interface's +// value as a uintptr pair. As of Go 1.4, the implementation of +// interface values precludes any defined use of InterfaceData. +// +// Deprecated: The memory representation of interface values is not +// compatible with InterfaceData. +func (v Value) InterfaceData() [2]uintptr { + v.mustBe(Interface) + // The compiler loses track as it converts to uintptr. Force escape. + escapes(v.ptr) + // We treat this as a read operation, so we allow + // it even for unexported data, because the caller + // has to import "unsafe" to turn it into something + // that can be abused. + // Interface value is always bigger than a word; assume flagIndir. + return *(*[2]uintptr)(v.ptr) +} + +// IsNil reports whether its argument v is nil. The argument must be +// a chan, func, interface, map, pointer, or slice value; if it is +// not, IsNil panics. Note that IsNil is not always equivalent to a +// regular comparison with nil in Go. For example, if v was created +// by calling ValueOf with an uninitialized interface variable i, +// i==nil will be true but v.IsNil will panic as v will be the zero +// Value. +func (v Value) IsNil() bool { + k := v.kind() + switch k { + case Chan, Func, Map, Pointer, UnsafePointer: + if v.flag&flagMethod != 0 { + return false + } + ptr := v.ptr + if v.flag&flagIndir != 0 { + ptr = *(*unsafe.Pointer)(ptr) + } + return ptr == nil + case Interface, Slice: + // Both interface and slice are nil if first word is 0. + // Both are always bigger than a word; assume flagIndir. + return *(*unsafe.Pointer)(v.ptr) == nil + } + panic(&ValueError{"reflect.Value.IsNil", v.kind()}) +} + +// IsValid reports whether v represents a value. +// It returns false if v is the zero Value. +// If IsValid returns false, all other methods except String panic. +// Most functions and methods never return an invalid Value. +// If one does, its documentation states the conditions explicitly. +func (v Value) IsValid() bool { + return v.flag != 0 +} + +// IsZero reports whether v is the zero value for its type. +// It panics if the argument is invalid. +func (v Value) IsZero() bool { + switch v.kind() { + case Bool: + return !v.Bool() + case Int, Int8, Int16, Int32, Int64: + return v.Int() == 0 + case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr: + return v.Uint() == 0 + case Float32, Float64: + return v.Float() == 0 + case Complex64, Complex128: + return v.Complex() == 0 + case Array: + if v.flag&flagIndir == 0 { + return v.ptr == nil + } + typ := (*abi.ArrayType)(unsafe.Pointer(v.typ())) + // If the type is comparable, then compare directly with zero. + if typ.Equal != nil && typ.Size() <= abi.ZeroValSize { + // v.ptr doesn't escape, as Equal functions are compiler generated + // and never escape. The escape analysis doesn't know, as it is a + // function pointer call. + return typ.Equal(noescape(v.ptr), unsafe.Pointer(&zeroVal[0])) + } + if typ.TFlag&abi.TFlagRegularMemory != 0 { + // For some types where the zero value is a value where all bits of this type are 0 + // optimize it. 
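+ // (TFlagRegularMemory marks types whose equality is a plain
+ // memory comparison, so all-bytes-zero is exactly the zero value.)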
+ return isZero(unsafe.Slice(((*byte)(v.ptr)), typ.Size())) + } + n := int(typ.Len) + for i := 0; i < n; i++ { + if !v.Index(i).IsZero() { + return false + } + } + return true + case Chan, Func, Interface, Map, Pointer, Slice, UnsafePointer: + return v.IsNil() + case String: + return v.Len() == 0 + case Struct: + if v.flag&flagIndir == 0 { + return v.ptr == nil + } + typ := (*abi.StructType)(unsafe.Pointer(v.typ())) + // If the type is comparable, then compare directly with zero. + if typ.Equal != nil && typ.Size() <= abi.ZeroValSize { + // See noescape justification above. + return typ.Equal(noescape(v.ptr), unsafe.Pointer(&zeroVal[0])) + } + if typ.TFlag&abi.TFlagRegularMemory != 0 { + // For some types where the zero value is a value where all bits of this type are 0 + // optimize it. + return isZero(unsafe.Slice(((*byte)(v.ptr)), typ.Size())) + } + + n := v.NumField() + for i := 0; i < n; i++ { + if !v.Field(i).IsZero() && v.Type().Field(i).Name != "_" { + return false + } + } + return true + default: + // This should never happen, but will act as a safeguard for later, + // as a default value doesn't makes sense here. + panic(&ValueError{"reflect.Value.IsZero", v.Kind()}) + } +} + +// isZero For all zeros, performance is not as good as +// return bytealg.Count(b, byte(0)) == len(b) +func isZero(b []byte) bool { + if len(b) == 0 { + return true + } + const n = 32 + // Align memory addresses to 8 bytes. + for uintptr(unsafe.Pointer(&b[0]))%8 != 0 { + if b[0] != 0 { + return false + } + b = b[1:] + if len(b) == 0 { + return true + } + } + for len(b)%8 != 0 { + if b[len(b)-1] != 0 { + return false + } + b = b[:len(b)-1] + } + if len(b) == 0 { + return true + } + w := unsafe.Slice((*uint64)(unsafe.Pointer(&b[0])), len(b)/8) + for len(w)%n != 0 { + if w[0] != 0 { + return false + } + w = w[1:] + } + for len(w) >= n { + if w[0] != 0 || w[1] != 0 || w[2] != 0 || w[3] != 0 || + w[4] != 0 || w[5] != 0 || w[6] != 0 || w[7] != 0 || + w[8] != 0 || w[9] != 0 || w[10] != 0 || w[11] != 0 || + w[12] != 0 || w[13] != 0 || w[14] != 0 || w[15] != 0 || + w[16] != 0 || w[17] != 0 || w[18] != 0 || w[19] != 0 || + w[20] != 0 || w[21] != 0 || w[22] != 0 || w[23] != 0 || + w[24] != 0 || w[25] != 0 || w[26] != 0 || w[27] != 0 || + w[28] != 0 || w[29] != 0 || w[30] != 0 || w[31] != 0 { + return false + } + w = w[n:] + } + return true +} + +// SetZero sets v to be the zero value of v's type. +// It panics if [Value.CanSet] returns false. 
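+// For addressable values this is a sketch-equivalent of
+// v.Set(reflect.Zero(v.Type())), without constructing the
+// intermediate zero Value.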
+func (v Value) SetZero() { + v.mustBeAssignable() + switch v.kind() { + case Bool: + *(*bool)(v.ptr) = false + case Int: + *(*int)(v.ptr) = 0 + case Int8: + *(*int8)(v.ptr) = 0 + case Int16: + *(*int16)(v.ptr) = 0 + case Int32: + *(*int32)(v.ptr) = 0 + case Int64: + *(*int64)(v.ptr) = 0 + case Uint: + *(*uint)(v.ptr) = 0 + case Uint8: + *(*uint8)(v.ptr) = 0 + case Uint16: + *(*uint16)(v.ptr) = 0 + case Uint32: + *(*uint32)(v.ptr) = 0 + case Uint64: + *(*uint64)(v.ptr) = 0 + case Uintptr: + *(*uintptr)(v.ptr) = 0 + case Float32: + *(*float32)(v.ptr) = 0 + case Float64: + *(*float64)(v.ptr) = 0 + case Complex64: + *(*complex64)(v.ptr) = 0 + case Complex128: + *(*complex128)(v.ptr) = 0 + case String: + *(*string)(v.ptr) = "" + case Slice: + *(*unsafeheader.Slice)(v.ptr) = unsafeheader.Slice{} + case Interface: + *(*emptyInterface)(v.ptr) = emptyInterface{} + case Chan, Func, Map, Pointer, UnsafePointer: + *(*unsafe.Pointer)(v.ptr) = nil + case Array, Struct: + typedmemclr(v.typ(), v.ptr) + default: + // This should never happen, but will act as a safeguard for later, + // as a default value doesn't makes sense here. + panic(&ValueError{"reflect.Value.SetZero", v.Kind()}) + } +} + +// Kind returns v's Kind. +// If v is the zero Value ([Value.IsValid] returns false), Kind returns Invalid. +func (v Value) Kind() Kind { + return v.kind() +} + +// Len returns v's length. +// It panics if v's Kind is not [Array], [Chan], [Map], [Slice], [String], or pointer to [Array]. +func (v Value) Len() int { + // lenNonSlice is split out to keep Len inlineable for slice kinds. + if v.kind() == Slice { + return (*unsafeheader.Slice)(v.ptr).Len + } + return v.lenNonSlice() +} + +func (v Value) lenNonSlice() int { + switch k := v.kind(); k { + case Array: + tt := (*arrayType)(unsafe.Pointer(v.typ())) + return int(tt.Len) + case Chan: + return chanlen(v.pointer()) + case Map: + return maplen(v.pointer()) + case String: + // String is bigger than a word; assume flagIndir. + return (*unsafeheader.String)(v.ptr).Len + case Ptr: + if v.typ().Elem().Kind() == abi.Array { + return v.typ().Elem().Len() + } + panic("reflect: call of reflect.Value.Len on ptr to non-array Value") + } + panic(&ValueError{"reflect.Value.Len", v.kind()}) +} + +var stringType = rtypeOf("") + +// MapIndex returns the value associated with key in the map v. +// It panics if v's Kind is not [Map]. +// It returns the zero Value if key is not found in the map or if v represents a nil map. +// As in Go, the key's value must be assignable to the map's key type. +func (v Value) MapIndex(key Value) Value { + v.mustBe(Map) + tt := (*mapType)(unsafe.Pointer(v.typ())) + + // Do not require key to be exported, so that DeepEqual + // and other programs can use all the keys returned by + // MapKeys as arguments to MapIndex. If either the map + // or the key is unexported, though, the result will be + // considered unexported. This is consistent with the + // behavior for structs, which allow read but not write + // of unexported fields. 
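+ // (When the key is an ordinary string and the element fits in
+ // maxValSize, the lookup below takes the mapaccess_faststr fast
+ // path; everything else goes through assignTo and plain mapaccess.)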
+ + var e unsafe.Pointer + if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= maxValSize { + k := *(*string)(key.ptr) + e = mapaccess_faststr(v.typ(), v.pointer(), k) + } else { + key = key.assignTo("reflect.Value.MapIndex", tt.Key, nil) + var k unsafe.Pointer + if key.flag&flagIndir != 0 { + k = key.ptr + } else { + k = unsafe.Pointer(&key.ptr) + } + e = mapaccess(v.typ(), v.pointer(), k) + } + if e == nil { + return Value{} + } + typ := tt.Elem + fl := (v.flag | key.flag).ro() + fl |= flag(typ.Kind()) + return copyVal(typ, fl, e) +} + +// MapKeys returns a slice containing all the keys present in the map, +// in unspecified order. +// It panics if v's Kind is not [Map]. +// It returns an empty slice if v represents a nil map. +func (v Value) MapKeys() []Value { + v.mustBe(Map) + tt := (*mapType)(unsafe.Pointer(v.typ())) + keyType := tt.Key + + fl := v.flag.ro() | flag(keyType.Kind()) + + m := v.pointer() + mlen := int(0) + if m != nil { + mlen = maplen(m) + } + var it hiter + mapiterinit(v.typ(), m, &it) + a := make([]Value, mlen) + var i int + for i = 0; i < len(a); i++ { + key := mapiterkey(&it) + if key == nil { + // Someone deleted an entry from the map since we + // called maplen above. It's a data race, but nothing + // we can do about it. + break + } + a[i] = copyVal(keyType, fl, key) + mapiternext(&it) + } + return a[:i] +} + +// hiter's structure matches runtime.hiter's structure. +// Having a clone here allows us to embed a map iterator +// inside type MapIter so that MapIters can be re-used +// without doing any allocations. +type hiter struct { + key unsafe.Pointer + elem unsafe.Pointer + t unsafe.Pointer + h unsafe.Pointer + buckets unsafe.Pointer + bptr unsafe.Pointer + overflow *[]unsafe.Pointer + oldoverflow *[]unsafe.Pointer + startBucket uintptr + offset uint8 + wrapped bool + B uint8 + i uint8 + bucket uintptr + checkBucket uintptr +} + +func (h *hiter) initialized() bool { + return h.t != nil +} + +// A MapIter is an iterator for ranging over a map. +// See [Value.MapRange]. +type MapIter struct { + m Value + hiter hiter +} + +// Key returns the key of iter's current map entry. +func (iter *MapIter) Key() Value { + if !iter.hiter.initialized() { + panic("MapIter.Key called before Next") + } + iterkey := mapiterkey(&iter.hiter) + if iterkey == nil { + panic("MapIter.Key called on exhausted iterator") + } + + t := (*mapType)(unsafe.Pointer(iter.m.typ())) + ktype := t.Key + return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey) +} + +// SetIterKey assigns to v the key of iter's current map entry. +// It is equivalent to v.Set(iter.Key()), but it avoids allocating a new Value. +// As in Go, the key must be assignable to v's type and +// must not be derived from an unexported field. +func (v Value) SetIterKey(iter *MapIter) { + if !iter.hiter.initialized() { + panic("reflect: Value.SetIterKey called before Next") + } + iterkey := mapiterkey(&iter.hiter) + if iterkey == nil { + panic("reflect: Value.SetIterKey called on exhausted iterator") + } + + v.mustBeAssignable() + var target unsafe.Pointer + if v.kind() == Interface { + target = v.ptr + } + + t := (*mapType)(unsafe.Pointer(iter.m.typ())) + ktype := t.Key + + iter.m.mustBeExported() // do not let unexported m leak + key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind()) | flagIndir} + key = key.assignTo("reflect.MapIter.SetKey", v.typ(), target) + typedmemmove(v.typ(), v.ptr, key.ptr) +} + +// Value returns the value of iter's current map entry. 
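+// It panics if [MapIter.Next] has not yet been called, or if it has
+// already returned false.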
+func (iter *MapIter) Value() Value { + if !iter.hiter.initialized() { + panic("MapIter.Value called before Next") + } + iterelem := mapiterelem(&iter.hiter) + if iterelem == nil { + panic("MapIter.Value called on exhausted iterator") + } + + t := (*mapType)(unsafe.Pointer(iter.m.typ())) + vtype := t.Elem + return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem) +} + +// SetIterValue assigns to v the value of iter's current map entry. +// It is equivalent to v.Set(iter.Value()), but it avoids allocating a new Value. +// As in Go, the value must be assignable to v's type and +// must not be derived from an unexported field. +func (v Value) SetIterValue(iter *MapIter) { + if !iter.hiter.initialized() { + panic("reflect: Value.SetIterValue called before Next") + } + iterelem := mapiterelem(&iter.hiter) + if iterelem == nil { + panic("reflect: Value.SetIterValue called on exhausted iterator") + } + + v.mustBeAssignable() + var target unsafe.Pointer + if v.kind() == Interface { + target = v.ptr + } + + t := (*mapType)(unsafe.Pointer(iter.m.typ())) + vtype := t.Elem + + iter.m.mustBeExported() // do not let unexported m leak + elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind()) | flagIndir} + elem = elem.assignTo("reflect.MapIter.SetValue", v.typ(), target) + typedmemmove(v.typ(), v.ptr, elem.ptr) +} + +// Next advances the map iterator and reports whether there is another +// entry. It returns false when iter is exhausted; subsequent +// calls to [MapIter.Key], [MapIter.Value], or [MapIter.Next] will panic. +func (iter *MapIter) Next() bool { + if !iter.m.IsValid() { + panic("MapIter.Next called on an iterator that does not have an associated map Value") + } + if !iter.hiter.initialized() { + mapiterinit(iter.m.typ(), iter.m.pointer(), &iter.hiter) + } else { + if mapiterkey(&iter.hiter) == nil { + panic("MapIter.Next called on exhausted iterator") + } + mapiternext(&iter.hiter) + } + return mapiterkey(&iter.hiter) != nil +} + +// Reset modifies iter to iterate over v. +// It panics if v's Kind is not [Map] and v is not the zero Value. +// Reset(Value{}) causes iter to not to refer to any map, +// which may allow the previously iterated-over map to be garbage collected. +func (iter *MapIter) Reset(v Value) { + if v.IsValid() { + v.mustBe(Map) + } + iter.m = v + iter.hiter = hiter{} +} + +// MapRange returns a range iterator for a map. +// It panics if v's Kind is not [Map]. +// +// Call [MapIter.Next] to advance the iterator, and [MapIter.Key]/[MapIter.Value] to access each entry. +// [MapIter.Next] returns false when the iterator is exhausted. +// MapRange follows the same iteration semantics as a range statement. +// +// Example: +// +// iter := reflect.ValueOf(m).MapRange() +// for iter.Next() { +// k := iter.Key() +// v := iter.Value() +// ... +// } +func (v Value) MapRange() *MapIter { + // This is inlinable to take advantage of "function outlining". + // The allocation of MapIter can be stack allocated if the caller + // does not allow it to escape. + // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/ + if v.kind() != Map { + v.panicNotMap() + } + return &MapIter{m: v} +} + +// Force slow panicking path not inlined, so it won't add to the +// inlining budget of the caller. +// TODO: undo when the inliner is no longer bottom-up only. +// +//go:noinline +func (f flag) panicNotMap() { + f.mustBe(Map) +} + +// copyVal returns a Value containing the map key or value at ptr, +// allocating a new variable as needed. 
+func copyVal(typ *abi.Type, fl flag, ptr unsafe.Pointer) Value {
+ if typ.IfaceIndir() {
+ // Copy result so future changes to the map
+ // won't change the underlying value.
+ c := unsafe_New(typ)
+ typedmemmove(typ, c, ptr)
+ return Value{typ, c, fl | flagIndir}
+ }
+ return Value{typ, *(*unsafe.Pointer)(ptr), fl}
+}
+
+// Method returns a function value corresponding to v's i'th method.
+// The arguments to a Call on the returned function should not include
+// a receiver; the returned function will always use v as the receiver.
+// Method panics if i is out of range or if v is a nil interface value.
+func (v Value) Method(i int) Value {
+ if v.typ() == nil {
+ panic(&ValueError{"reflect.Value.Method", Invalid})
+ }
+ if v.flag&flagMethod != 0 || uint(i) >= uint(toRType(v.typ()).NumMethod()) {
+ panic("reflect: Method index out of range")
+ }
+ if v.typ().Kind() == abi.Interface && v.IsNil() {
+ panic("reflect: Method on nil interface value")
+ }
+ fl := v.flag.ro() | (v.flag & flagIndir)
+ fl |= flag(Func)
+ fl |= flag(i)<<flagMethodShift | flagMethod
+ return Value{v.typ(), v.ptr, fl}
+}
+
+// NumMethod returns the number of methods in the value's method set.
+//
+// For a non-interface type, it returns the number of exported methods.
+//
+// For an interface type, it returns the number of exported and unexported methods.
+func (v Value) NumMethod() int {
+ if v.typ() == nil {
+ panic(&ValueError{"reflect.Value.NumMethod", Invalid})
+ }
+ if v.flag&flagMethod != 0 {
+ return 0
+ }
+ return toRType(v.typ()).NumMethod()
+}
+
+// MethodByName returns a function value corresponding to the method
+// of v with the given name.
+// The arguments to a Call on the returned function should not include
+// a receiver; the returned function will always use v as the receiver.
+// It returns the zero Value if no method was found.
+func (v Value) MethodByName(name string) Value {
+ if v.typ() == nil {
+ panic(&ValueError{"reflect.Value.MethodByName", Invalid})
+ }
+ if v.flag&flagMethod != 0 {
+ return Value{}
+ }
+ m, ok := toRType(v.typ()).MethodByName(name)
+ if !ok {
+ return Value{}
+ }
+ return v.Method(m.Index)
+}
+
+// NumField returns the number of fields in the struct v.
+// It panics if v's Kind is not [Struct].
+func (v Value) NumField() int {
+ v.mustBe(Struct)
+ tt := (*structType)(unsafe.Pointer(v.typ()))
+ return len(tt.Fields)
+}
+
+// OverflowComplex reports whether the complex128 x cannot be represented by v's type.
+// It panics if v's Kind is not [Complex64] or [Complex128].
+func (v Value) OverflowComplex(x complex128) bool {
+ k := v.kind()
+ switch k {
+ case Complex64:
+ return overflowFloat32(real(x)) || overflowFloat32(imag(x))
+ case Complex128:
+ return false
+ }
+ panic(&ValueError{"reflect.Value.OverflowComplex", v.kind()})
+}
+
+// OverflowFloat reports whether the float64 x cannot be represented by v's type.
+// It panics if v's Kind is not [Float32] or [Float64].
+func (v Value) OverflowFloat(x float64) bool {
+ k := v.kind()
+ switch k {
+ case Float32:
+ return overflowFloat32(x)
+ case Float64:
+ return false
+ }
+ panic(&ValueError{"reflect.Value.OverflowFloat", v.kind()})
+}
+
+func overflowFloat32(x float64) bool {
+ if x < 0 {
+ x = -x
+ }
+ return math.MaxFloat32 < x && x <= math.MaxFloat64
+}
+
+// OverflowInt reports whether the int64 x cannot be represented by v's type.
+// It panics if v's Kind is not [Int], [Int8], [Int16], [Int32], or [Int64].
+func (v Value) OverflowInt(x int64) bool {
+ k := v.kind()
+ switch k {
+ case Int, Int8, Int16, Int32, Int64:
+ bitSize := v.typ_.Size() * 8 // ok to use v.typ_ directly as Size doesn't escape
+ trunc := (x << (64 - bitSize)) >> (64 - bitSize)
+ return x != trunc
+ }
+ panic(&ValueError{"reflect.Value.OverflowInt", v.kind()})
+}
+
+// OverflowUint reports whether the uint64 x cannot be represented by v's type.
+// It panics if v's Kind is not [Uint], [Uintptr], [Uint8], [Uint16], [Uint32], or [Uint64].
+func (v Value) OverflowUint(x uint64) bool {
+ k := v.kind()
+ switch k {
+ case Uint, Uintptr, Uint8, Uint16, Uint32, Uint64:
+ bitSize := v.typ_.Size() * 8 // ok to use v.typ_ directly as Size doesn't escape
+ trunc := (x << (64 - bitSize)) >> (64 - bitSize)
+ return x != trunc
+ }
+ panic(&ValueError{"reflect.Value.OverflowUint", v.kind()})
+}
+
+//go:nocheckptr
+// This prevents inlining Value.Pointer when -d=checkptr is enabled,
+// which ensures cmd/compile can recognize unsafe.Pointer(v.Pointer())
+// and make an exception.
+
+// Pointer returns v's value as a uintptr.
+// It panics if v's Kind is not [Chan], [Func], [Map], [Pointer], [Slice], or [UnsafePointer].
+//
+// If v's Kind is [Func], the returned pointer is an underlying
+// code pointer, but not necessarily enough to identify a
+// single function uniquely. The only guarantee is that the
+// result is zero if and only if v is a nil func Value.
+//
+// If v's Kind is [Slice], the returned pointer is to the first
+// element of the slice. If the slice is nil the returned value
+// is 0. If the slice is empty but non-nil the return value is non-zero.
+//
+// It's preferred to use uintptr(Value.UnsafePointer()) to get the equivalent result.
+func (v Value) Pointer() uintptr {
+ // The compiler loses track as it converts to uintptr. Force escape.
+ escapes(v.ptr)
+
+ k := v.kind()
+ switch k {
+ case Pointer:
+ if v.typ().PtrBytes == 0 {
+ val := *(*uintptr)(v.ptr)
+ // Since it is a not-in-heap pointer, all pointers to the heap are
+ // forbidden! See comment in Value.Elem and issue #48399.
+ if !verifyNotInHeapPtr(val) {
+ panic("reflect: reflect.Value.Pointer on an invalid notinheap pointer")
+ }
+ return val
+ }
+ fallthrough
+ case Chan, Map, UnsafePointer:
+ return uintptr(v.pointer())
+ case Func:
+ if v.flag&flagMethod != 0 {
+ // As the doc comment says, the returned pointer is an
+ // underlying code pointer but not necessarily enough to
+ // identify a single function uniquely. All method expressions
+ // created via reflect have the same underlying code pointer,
+ // so their Pointers are equal. The function used here must
+ // match the one used in makeMethodValue.
+ return methodValueCallCodePtr() + } + p := v.pointer() + // Non-nil func value points at data block. + // First word of data block is actual code. + if p != nil { + p = *(*unsafe.Pointer)(p) + } + return uintptr(p) + + case Slice: + return uintptr((*unsafeheader.Slice)(v.ptr).Data) + } + panic(&ValueError{"reflect.Value.Pointer", v.kind()}) +} + +// Recv receives and returns a value from the channel v. +// It panics if v's Kind is not [Chan]. +// The receive blocks until a value is ready. +// The boolean value ok is true if the value x corresponds to a send +// on the channel, false if it is a zero value received because the channel is closed. +func (v Value) Recv() (x Value, ok bool) { + v.mustBe(Chan) + v.mustBeExported() + return v.recv(false) +} + +// internal recv, possibly non-blocking (nb). +// v is known to be a channel. +func (v Value) recv(nb bool) (val Value, ok bool) { + tt := (*chanType)(unsafe.Pointer(v.typ())) + if ChanDir(tt.Dir)&RecvDir == 0 { + panic("reflect: recv on send-only channel") + } + t := tt.Elem + val = Value{t, nil, flag(t.Kind())} + var p unsafe.Pointer + if ifaceIndir(t) { + p = unsafe_New(t) + val.ptr = p + val.flag |= flagIndir + } else { + p = unsafe.Pointer(&val.ptr) + } + selected, ok := chanrecv(v.pointer(), nb, p) + if !selected { + val = Value{} + } + return +} + +// Send sends x on the channel v. +// It panics if v's kind is not [Chan] or if x's type is not the same type as v's element type. +// As in Go, x's value must be assignable to the channel's element type. +func (v Value) Send(x Value) { + v.mustBe(Chan) + v.mustBeExported() + v.send(x, false) +} + +// internal send, possibly non-blocking. +// v is known to be a channel. +func (v Value) send(x Value, nb bool) (selected bool) { + tt := (*chanType)(unsafe.Pointer(v.typ())) + if ChanDir(tt.Dir)&SendDir == 0 { + panic("reflect: send on recv-only channel") + } + x.mustBeExported() + x = x.assignTo("reflect.Value.Send", tt.Elem, nil) + var p unsafe.Pointer + if x.flag&flagIndir != 0 { + p = x.ptr + } else { + p = unsafe.Pointer(&x.ptr) + } + return chansend(v.pointer(), p, nb) +} + +// Set assigns x to the value v. +// It panics if [Value.CanSet] returns false. +// As in Go, x's value must be assignable to v's type and +// must not be derived from an unexported field. +func (v Value) Set(x Value) { + v.mustBeAssignable() + x.mustBeExported() // do not let unexported x leak + var target unsafe.Pointer + if v.kind() == Interface { + target = v.ptr + } + x = x.assignTo("reflect.Set", v.typ(), target) + if x.flag&flagIndir != 0 { + if x.ptr == unsafe.Pointer(&zeroVal[0]) { + typedmemclr(v.typ(), v.ptr) + } else { + typedmemmove(v.typ(), v.ptr, x.ptr) + } + } else { + *(*unsafe.Pointer)(v.ptr) = x.ptr + } +} + +// SetBool sets v's underlying value. +// It panics if v's Kind is not [Bool] or if [Value.CanSet] returns false. +func (v Value) SetBool(x bool) { + v.mustBeAssignable() + v.mustBe(Bool) + *(*bool)(v.ptr) = x +} + +// SetBytes sets v's underlying value. +// It panics if v's underlying value is not a slice of bytes. +func (v Value) SetBytes(x []byte) { + v.mustBeAssignable() + v.mustBe(Slice) + if toRType(v.typ()).Elem().Kind() != Uint8 { // TODO add Elem method, fix mustBe(Slice) to return slice. + panic("reflect.Value.SetBytes of non-byte slice") + } + *(*[]byte)(v.ptr) = x +} + +// setRunes sets v's underlying value. +// It panics if v's underlying value is not a slice of runes (int32s). 
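+// setRunes is reached only via makeRunes during string-to-[]rune
+// conversions (see cvtStringRunes below), so the element-kind check is
+// an internal consistency check rather than a user-facing error path.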
+func (v Value) setRunes(x []rune) { + v.mustBeAssignable() + v.mustBe(Slice) + if v.typ().Elem().Kind() != abi.Int32 { + panic("reflect.Value.setRunes of non-rune slice") + } + *(*[]rune)(v.ptr) = x +} + +// SetComplex sets v's underlying value to x. +// It panics if v's Kind is not [Complex64] or [Complex128], or if [Value.CanSet] returns false. +func (v Value) SetComplex(x complex128) { + v.mustBeAssignable() + switch k := v.kind(); k { + default: + panic(&ValueError{"reflect.Value.SetComplex", v.kind()}) + case Complex64: + *(*complex64)(v.ptr) = complex64(x) + case Complex128: + *(*complex128)(v.ptr) = x + } +} + +// SetFloat sets v's underlying value to x. +// It panics if v's Kind is not [Float32] or [Float64], or if [Value.CanSet] returns false. +func (v Value) SetFloat(x float64) { + v.mustBeAssignable() + switch k := v.kind(); k { + default: + panic(&ValueError{"reflect.Value.SetFloat", v.kind()}) + case Float32: + *(*float32)(v.ptr) = float32(x) + case Float64: + *(*float64)(v.ptr) = x + } +} + +// SetInt sets v's underlying value to x. +// It panics if v's Kind is not [Int], [Int8], [Int16], [Int32], or [Int64], or if [Value.CanSet] returns false. +func (v Value) SetInt(x int64) { + v.mustBeAssignable() + switch k := v.kind(); k { + default: + panic(&ValueError{"reflect.Value.SetInt", v.kind()}) + case Int: + *(*int)(v.ptr) = int(x) + case Int8: + *(*int8)(v.ptr) = int8(x) + case Int16: + *(*int16)(v.ptr) = int16(x) + case Int32: + *(*int32)(v.ptr) = int32(x) + case Int64: + *(*int64)(v.ptr) = x + } +} + +// SetLen sets v's length to n. +// It panics if v's Kind is not [Slice] or if n is negative or +// greater than the capacity of the slice. +func (v Value) SetLen(n int) { + v.mustBeAssignable() + v.mustBe(Slice) + s := (*unsafeheader.Slice)(v.ptr) + if uint(n) > uint(s.Cap) { + panic("reflect: slice length out of range in SetLen") + } + s.Len = n +} + +// SetCap sets v's capacity to n. +// It panics if v's Kind is not [Slice] or if n is smaller than the length or +// greater than the capacity of the slice. +func (v Value) SetCap(n int) { + v.mustBeAssignable() + v.mustBe(Slice) + s := (*unsafeheader.Slice)(v.ptr) + if n < s.Len || n > s.Cap { + panic("reflect: slice capacity out of range in SetCap") + } + s.Cap = n +} + +// SetMapIndex sets the element associated with key in the map v to elem. +// It panics if v's Kind is not [Map]. +// If elem is the zero Value, SetMapIndex deletes the key from the map. +// Otherwise if v holds a nil map, SetMapIndex will panic. +// As in Go, key's elem must be assignable to the map's key type, +// and elem's value must be assignable to the map's elem type. 
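+//
+// For example (illustrative sketch):
+//
+// m := map[string]int{}
+// mv := reflect.ValueOf(m)
+// mv.SetMapIndex(reflect.ValueOf("k"), reflect.ValueOf(1)) // m["k"] = 1
+// mv.SetMapIndex(reflect.ValueOf("k"), reflect.Value{}) // delete(m, "k")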
+func (v Value) SetMapIndex(key, elem Value) { + v.mustBe(Map) + v.mustBeExported() + key.mustBeExported() + tt := (*mapType)(unsafe.Pointer(v.typ())) + + if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= maxValSize { + k := *(*string)(key.ptr) + if elem.typ() == nil { + mapdelete_faststr(v.typ(), v.pointer(), k) + return + } + elem.mustBeExported() + elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil) + var e unsafe.Pointer + if elem.flag&flagIndir != 0 { + e = elem.ptr + } else { + e = unsafe.Pointer(&elem.ptr) + } + mapassign_faststr(v.typ(), v.pointer(), k, e) + return + } + + key = key.assignTo("reflect.Value.SetMapIndex", tt.Key, nil) + var k unsafe.Pointer + if key.flag&flagIndir != 0 { + k = key.ptr + } else { + k = unsafe.Pointer(&key.ptr) + } + if elem.typ() == nil { + mapdelete(v.typ(), v.pointer(), k) + return + } + elem.mustBeExported() + elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil) + var e unsafe.Pointer + if elem.flag&flagIndir != 0 { + e = elem.ptr + } else { + e = unsafe.Pointer(&elem.ptr) + } + mapassign(v.typ(), v.pointer(), k, e) +} + +// SetUint sets v's underlying value to x. +// It panics if v's Kind is not [Uint], [Uintptr], [Uint8], [Uint16], [Uint32], or [Uint64], or if [Value.CanSet] returns false. +func (v Value) SetUint(x uint64) { + v.mustBeAssignable() + switch k := v.kind(); k { + default: + panic(&ValueError{"reflect.Value.SetUint", v.kind()}) + case Uint: + *(*uint)(v.ptr) = uint(x) + case Uint8: + *(*uint8)(v.ptr) = uint8(x) + case Uint16: + *(*uint16)(v.ptr) = uint16(x) + case Uint32: + *(*uint32)(v.ptr) = uint32(x) + case Uint64: + *(*uint64)(v.ptr) = x + case Uintptr: + *(*uintptr)(v.ptr) = uintptr(x) + } +} + +// SetPointer sets the [unsafe.Pointer] value v to x. +// It panics if v's Kind is not UnsafePointer. +func (v Value) SetPointer(x unsafe.Pointer) { + v.mustBeAssignable() + v.mustBe(UnsafePointer) + *(*unsafe.Pointer)(v.ptr) = x +} + +// SetString sets v's underlying value to x. +// It panics if v's Kind is not [String] or if [Value.CanSet] returns false. +func (v Value) SetString(x string) { + v.mustBeAssignable() + v.mustBe(String) + *(*string)(v.ptr) = x +} + +// Slice returns v[i:j]. +// It panics if v's Kind is not [Array], [Slice] or [String], or if v is an unaddressable array, +// or if the indexes are out of bounds. +func (v Value) Slice(i, j int) Value { + var ( + cap int + typ *sliceType + base unsafe.Pointer + ) + switch kind := v.kind(); kind { + default: + panic(&ValueError{"reflect.Value.Slice", v.kind()}) + + case Array: + if v.flag&flagAddr == 0 { + panic("reflect.Value.Slice: slice of unaddressable array") + } + tt := (*arrayType)(unsafe.Pointer(v.typ())) + cap = int(tt.Len) + typ = (*sliceType)(unsafe.Pointer(tt.Slice)) + base = v.ptr + + case Slice: + typ = (*sliceType)(unsafe.Pointer(v.typ())) + s := (*unsafeheader.Slice)(v.ptr) + base = s.Data + cap = s.Cap + + case String: + s := (*unsafeheader.String)(v.ptr) + if i < 0 || j < i || j > s.Len { + panic("reflect.Value.Slice: string slice index out of bounds") + } + var t unsafeheader.String + if i < s.Len { + t = unsafeheader.String{Data: arrayAt(s.Data, i, 1, "i < s.Len"), Len: j - i} + } + return Value{v.typ(), unsafe.Pointer(&t), v.flag} + } + + if i < 0 || j < i || j > cap { + panic("reflect.Value.Slice: slice index out of bounds") + } + + // Declare slice so that gc can see the base pointer in it. + var x []unsafe.Pointer + + // Reinterpret as *unsafeheader.Slice to edit. 
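+ // (x is never used as a []unsafe.Pointer; it only provides
+ // pointer-typed backing storage for the header filled in below.)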
+ s := (*unsafeheader.Slice)(unsafe.Pointer(&x))
+ s.Len = j - i
+ s.Cap = cap - i
+ if cap-i > 0 {
+ s.Data = arrayAt(base, i, typ.Elem.Size(), "i < cap")
+ } else {
+ // do not advance pointer, to avoid pointing beyond end of slice
+ s.Data = base
+ }
+
+ fl := v.flag.ro() | flagIndir | flag(Slice)
+ return Value{typ.Common(), unsafe.Pointer(&x), fl}
+}
+
+// Slice3 is the 3-index form of the slice operation: it returns v[i:j:k].
+// It panics if v's Kind is not [Array] or [Slice], or if v is an unaddressable array,
+// or if the indexes are out of bounds.
+func (v Value) Slice3(i, j, k int) Value {
+ var (
+ cap int
+ typ *sliceType
+ base unsafe.Pointer
+ )
+ switch kind := v.kind(); kind {
+ default:
+ panic(&ValueError{"reflect.Value.Slice3", v.kind()})
+
+ case Array:
+ if v.flag&flagAddr == 0 {
+ panic("reflect.Value.Slice3: slice of unaddressable array")
+ }
+ tt := (*arrayType)(unsafe.Pointer(v.typ()))
+ cap = int(tt.Len)
+ typ = (*sliceType)(unsafe.Pointer(tt.Slice))
+ base = v.ptr
+
+ case Slice:
+ typ = (*sliceType)(unsafe.Pointer(v.typ()))
+ s := (*unsafeheader.Slice)(v.ptr)
+ base = s.Data
+ cap = s.Cap
+ }
+
+ if i < 0 || j < i || k < j || k > cap {
+ panic("reflect.Value.Slice3: slice index out of bounds")
+ }
+
+ // Declare slice so that the garbage collector
+ // can see the base pointer in it.
+ var x []unsafe.Pointer
+
+ // Reinterpret as *unsafeheader.Slice to edit.
+ s := (*unsafeheader.Slice)(unsafe.Pointer(&x))
+ s.Len = j - i
+ s.Cap = k - i
+ if k-i > 0 {
+ s.Data = arrayAt(base, i, typ.Elem.Size(), "i < k <= cap")
+ } else {
+ // do not advance pointer, to avoid pointing beyond end of slice
+ s.Data = base
+ }
+
+ fl := v.flag.ro() | flagIndir | flag(Slice)
+ return Value{typ.Common(), unsafe.Pointer(&x), fl}
+}
+
+// String returns v's underlying value, as a string.
+// String is a special case because of Go's String method convention.
+// Unlike the other getters, it does not panic if v's Kind is not [String].
+// Instead, it returns a string of the form "<T value>" where T is v's type.
+// The fmt package treats Values specially. It does not call their String
+// method implicitly but instead prints the concrete values they hold.
+func (v Value) String() string {
+ // stringNonString is split out to keep String inlineable for string kinds.
+ if v.kind() == String {
+ return *(*string)(v.ptr)
+ }
+ return v.stringNonString()
+}
+
+func (v Value) stringNonString() string {
+ if v.kind() == Invalid {
+ return "<invalid Value>"
+ }
+ // If you call String on a reflect.Value of other type, it's better to
+ // print something than to panic. Useful in debugging.
+ return "<" + v.Type().String() + " Value>"
+}
+
+// TryRecv attempts to receive a value from the channel v but will not block.
+// It panics if v's Kind is not [Chan].
+// If the receive delivers a value, x is the transferred value and ok is true.
+// If the receive cannot finish without blocking, x is the zero Value and ok is false.
+// If the channel is closed, x is the zero value for the channel's element type and ok is false.
+func (v Value) TryRecv() (x Value, ok bool) {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ return v.recv(true)
+}
+
+// TrySend attempts to send x on the channel v but will not block.
+// It panics if v's Kind is not [Chan].
+// It reports whether the value was sent.
+// As in Go, x's value must be assignable to the channel's element type.
+func (v Value) TrySend(x Value) bool {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ return v.send(x, true)
+}
+
+// Type returns v's type.
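+// Unlike Kind, Type preserves the identity of named types. For example
+// (illustrative sketch):
+//
+// type Celsius float64
+// v := reflect.ValueOf(Celsius(20))
+// v.Type().Name() // "Celsius"
+// v.Kind() // Float64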
+func (v Value) Type() Type { + if v.flag != 0 && v.flag&flagMethod == 0 { + return (*rtype)(noescape(unsafe.Pointer(v.typ_))) // inline of toRType(v.typ()), for own inlining in inline test + } + return v.typeSlow() +} + +func (v Value) typeSlow() Type { + if v.flag == 0 { + panic(&ValueError{"reflect.Value.Type", Invalid}) + } + + typ := v.typ() + if v.flag&flagMethod == 0 { + return toRType(v.typ()) + } + + // Method value. + // v.typ describes the receiver, not the method type. + i := int(v.flag) >> flagMethodShift + if v.typ().Kind() == abi.Interface { + // Method on interface. + tt := (*interfaceType)(unsafe.Pointer(typ)) + if uint(i) >= uint(len(tt.Methods)) { + panic("reflect: internal error: invalid method index") + } + m := &tt.Methods[i] + return toRType(typeOffFor(typ, m.Typ)) + } + // Method on concrete type. + ms := typ.ExportedMethods() + if uint(i) >= uint(len(ms)) { + panic("reflect: internal error: invalid method index") + } + m := ms[i] + return toRType(typeOffFor(typ, m.Mtyp)) +} + +// CanUint reports whether [Value.Uint] can be used without panicking. +func (v Value) CanUint() bool { + switch v.kind() { + case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr: + return true + default: + return false + } +} + +// Uint returns v's underlying value, as a uint64. +// It panics if v's Kind is not [Uint], [Uintptr], [Uint8], [Uint16], [Uint32], or [Uint64]. +func (v Value) Uint() uint64 { + k := v.kind() + p := v.ptr + switch k { + case Uint: + return uint64(*(*uint)(p)) + case Uint8: + return uint64(*(*uint8)(p)) + case Uint16: + return uint64(*(*uint16)(p)) + case Uint32: + return uint64(*(*uint32)(p)) + case Uint64: + return *(*uint64)(p) + case Uintptr: + return uint64(*(*uintptr)(p)) + } + panic(&ValueError{"reflect.Value.Uint", v.kind()}) +} + +//go:nocheckptr +// This prevents inlining Value.UnsafeAddr when -d=checkptr is enabled, +// which ensures cmd/compile can recognize unsafe.Pointer(v.UnsafeAddr()) +// and make an exception. + +// UnsafeAddr returns a pointer to v's data, as a uintptr. +// It panics if v is not addressable. +// +// It's preferred to use uintptr(Value.Addr().UnsafePointer()) to get the equivalent result. +func (v Value) UnsafeAddr() uintptr { + if v.typ() == nil { + panic(&ValueError{"reflect.Value.UnsafeAddr", Invalid}) + } + if v.flag&flagAddr == 0 { + panic("reflect.Value.UnsafeAddr of unaddressable value") + } + // The compiler loses track as it converts to uintptr. Force escape. + escapes(v.ptr) + return uintptr(v.ptr) +} + +// UnsafePointer returns v's value as a [unsafe.Pointer]. +// It panics if v's Kind is not [Chan], [Func], [Map], [Pointer], [Slice], or [UnsafePointer]. +// +// If v's Kind is [Func], the returned pointer is an underlying +// code pointer, but not necessarily enough to identify a +// single function uniquely. The only guarantee is that the +// result is zero if and only if v is a nil func Value. +// +// If v's Kind is [Slice], the returned pointer is to the first +// element of the slice. If the slice is nil the returned value +// is nil. If the slice is empty but non-nil the return value is non-nil. +func (v Value) UnsafePointer() unsafe.Pointer { + k := v.kind() + switch k { + case Pointer: + if v.typ().PtrBytes == 0 { + // Since it is a not-in-heap pointer, all pointers to the heap are + // forbidden! See comment in Value.Elem and issue #48399. 
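+ // verifyNotInHeapPtr rejects any word that could alias the Go heap
+ // before it is reinterpreted as an unsafe.Pointer below.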
+ if !verifyNotInHeapPtr(*(*uintptr)(v.ptr)) { + panic("reflect: reflect.Value.UnsafePointer on an invalid notinheap pointer") + } + return *(*unsafe.Pointer)(v.ptr) + } + fallthrough + case Chan, Map, UnsafePointer: + return v.pointer() + case Func: + if v.flag&flagMethod != 0 { + // As the doc comment says, the returned pointer is an + // underlying code pointer but not necessarily enough to + // identify a single function uniquely. All method expressions + // created via reflect have the same underlying code pointer, + // so their Pointers are equal. The function used here must + // match the one used in makeMethodValue. + code := methodValueCallCodePtr() + return *(*unsafe.Pointer)(unsafe.Pointer(&code)) + } + p := v.pointer() + // Non-nil func value points at data block. + // First word of data block is actual code. + if p != nil { + p = *(*unsafe.Pointer)(p) + } + return p + + case Slice: + return (*unsafeheader.Slice)(v.ptr).Data + } + panic(&ValueError{"reflect.Value.UnsafePointer", v.kind()}) +} + +// StringHeader is the runtime representation of a string. +// It cannot be used safely or portably and its representation may +// change in a later release. +// Moreover, the Data field is not sufficient to guarantee the data +// it references will not be garbage collected, so programs must keep +// a separate, correctly typed pointer to the underlying data. +// +// Deprecated: Use unsafe.String or unsafe.StringData instead. +type StringHeader struct { + Data uintptr + Len int +} + +// SliceHeader is the runtime representation of a slice. +// It cannot be used safely or portably and its representation may +// change in a later release. +// Moreover, the Data field is not sufficient to guarantee the data +// it references will not be garbage collected, so programs must keep +// a separate, correctly typed pointer to the underlying data. +// +// Deprecated: Use unsafe.Slice or unsafe.SliceData instead. +type SliceHeader struct { + Data uintptr + Len int + Cap int +} + +func typesMustMatch(what string, t1, t2 Type) { + if t1 != t2 { + panic(what + ": " + t1.String() + " != " + t2.String()) + } +} + +// arrayAt returns the i-th element of p, +// an array whose elements are eltSize bytes wide. +// The array pointed at by p must have at least i+1 elements: +// it is invalid (but impossible to check here) to pass i >= len, +// because then the result will point outside the array. +// whySafe must explain why i < len. (Passing "i < len" is fine; +// the benefit is to surface this assumption at the call site.) +func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer { + return add(p, uintptr(i)*eltSize, "i < len") +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. +// +// It panics if v's Kind is not a [Slice] or if n is negative or too large to +// allocate the memory. +func (v Value) Grow(n int) { + v.mustBeAssignable() + v.mustBe(Slice) + v.grow(n) +} + +// grow is identical to Grow but does not check for assignability. +func (v Value) grow(n int) { + p := (*unsafeheader.Slice)(v.ptr) + switch { + case n < 0: + panic("reflect.Value.Grow: negative len") + case p.Len+n < 0: + panic("reflect.Value.Grow: slice overflow") + case p.Len+n > p.Cap: + t := v.typ().Elem() + *p = growslice(t, *p, n) + } +} + +// extendSlice extends a slice by n elements. 
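+// It is the implementation shared by Append and AppendSlice.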
+// +// Unlike Value.grow, which modifies the slice in place and +// does not change the length of the slice in place, +// extendSlice returns a new slice value with the length +// incremented by the number of specified elements. +func (v Value) extendSlice(n int) Value { + v.mustBeExported() + v.mustBe(Slice) + + // Shallow copy the slice header to avoid mutating the source slice. + sh := *(*unsafeheader.Slice)(v.ptr) + s := &sh + v.ptr = unsafe.Pointer(s) + v.flag = flagIndir | flag(Slice) // equivalent flag to MakeSlice + + v.grow(n) // fine to treat as assignable since we allocate a new slice header + s.Len += n + return v +} + +// Clear clears the contents of a map or zeros the contents of a slice. +// +// It panics if v's Kind is not [Map] or [Slice]. +func (v Value) Clear() { + switch v.Kind() { + case Slice: + sh := *(*unsafeheader.Slice)(v.ptr) + st := (*sliceType)(unsafe.Pointer(v.typ())) + typedarrayclear(st.Elem, sh.Data, sh.Len) + case Map: + mapclear(v.typ(), v.pointer()) + default: + panic(&ValueError{"reflect.Value.Clear", v.Kind()}) + } +} + +// Append appends the values x to a slice s and returns the resulting slice. +// As in Go, each x's value must be assignable to the slice's element type. +func Append(s Value, x ...Value) Value { + s.mustBe(Slice) + n := s.Len() + s = s.extendSlice(len(x)) + for i, v := range x { + s.Index(n + i).Set(v) + } + return s +} + +// AppendSlice appends a slice t to a slice s and returns the resulting slice. +// The slices s and t must have the same element type. +func AppendSlice(s, t Value) Value { + s.mustBe(Slice) + t.mustBe(Slice) + typesMustMatch("reflect.AppendSlice", s.Type().Elem(), t.Type().Elem()) + ns := s.Len() + nt := t.Len() + s = s.extendSlice(nt) + Copy(s.Slice(ns, ns+nt), t) + return s +} + +// Copy copies the contents of src into dst until either +// dst has been filled or src has been exhausted. +// It returns the number of elements copied. +// Dst and src each must have kind [Slice] or [Array], and +// dst and src must have the same element type. +// +// As a special case, src can have kind [String] if the element type of dst is kind [Uint8]. +func Copy(dst, src Value) int { + dk := dst.kind() + if dk != Array && dk != Slice { + panic(&ValueError{"reflect.Copy", dk}) + } + if dk == Array { + dst.mustBeAssignable() + } + dst.mustBeExported() + + sk := src.kind() + var stringCopy bool + if sk != Array && sk != Slice { + stringCopy = sk == String && dst.typ().Elem().Kind() == abi.Uint8 + if !stringCopy { + panic(&ValueError{"reflect.Copy", sk}) + } + } + src.mustBeExported() + + de := dst.typ().Elem() + if !stringCopy { + se := src.typ().Elem() + typesMustMatch("reflect.Copy", toType(de), toType(se)) + } + + var ds, ss unsafeheader.Slice + if dk == Array { + ds.Data = dst.ptr + ds.Len = dst.Len() + ds.Cap = ds.Len + } else { + ds = *(*unsafeheader.Slice)(dst.ptr) + } + if sk == Array { + ss.Data = src.ptr + ss.Len = src.Len() + ss.Cap = ss.Len + } else if sk == Slice { + ss = *(*unsafeheader.Slice)(src.ptr) + } else { + sh := *(*unsafeheader.String)(src.ptr) + ss.Data = sh.Data + ss.Len = sh.Len + ss.Cap = sh.Len + } + + return typedslicecopy(de.Common(), ds, ss) +} + +// A runtimeSelect is a single case passed to rselect. 
+// This must match ../runtime/select.go:/runtimeSelect +type runtimeSelect struct { + dir SelectDir // SelectSend, SelectRecv or SelectDefault + typ *rtype // channel type + ch unsafe.Pointer // channel + val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir) +} + +// rselect runs a select. It returns the index of the chosen case. +// If the case was a receive, val is filled in with the received value. +// The conventional OK bool indicates whether the receive corresponds +// to a sent value. +// +// rselect generally doesn't escape the runtimeSelect slice, except +// that for the send case the value to send needs to escape. We don't +// have a way to represent that in the function signature. So we handle +// that with a forced escape in function Select. +// +//go:noescape +func rselect([]runtimeSelect) (chosen int, recvOK bool) + +// A SelectDir describes the communication direction of a select case. +type SelectDir int + +// NOTE: These values must match ../runtime/select.go:/selectDir. + +const ( + _ SelectDir = iota + SelectSend // case Chan <- Send + SelectRecv // case <-Chan: + SelectDefault // default +) + +// A SelectCase describes a single case in a select operation. +// The kind of case depends on Dir, the communication direction. +// +// If Dir is SelectDefault, the case represents a default case. +// Chan and Send must be zero Values. +// +// If Dir is SelectSend, the case represents a send operation. +// Normally Chan's underlying value must be a channel, and Send's underlying value must be +// assignable to the channel's element type. As a special case, if Chan is a zero Value, +// then the case is ignored, and the field Send will also be ignored and may be either zero +// or non-zero. +// +// If Dir is SelectRecv, the case represents a receive operation. +// Normally Chan's underlying value must be a channel and Send must be a zero Value. +// If Chan is a zero Value, then the case is ignored, but Send must still be a zero Value. +// When a receive operation is selected, the received Value is returned by Select. +type SelectCase struct { + Dir SelectDir // direction of case + Chan Value // channel to use (for send or receive) + Send Value // value to send (for send) +} + +// Select executes a select operation described by the list of cases. +// Like the Go select statement, it blocks until at least one of the cases +// can proceed, makes a uniform pseudo-random choice, +// and then executes that case. It returns the index of the chosen case +// and, if that case was a receive operation, the value received and a +// boolean indicating whether the value corresponds to a send on the channel +// (as opposed to a zero value received because the channel is closed). +// Select supports a maximum of 65536 cases. +func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) { + if len(cases) > 65536 { + panic("reflect.Select: too many cases (max 65536)") + } + // NOTE: Do not trust that caller is not modifying cases data underfoot. + // The range is safe because the caller cannot modify our copy of the len + // and each iteration makes its own copy of the value c. + var runcases []runtimeSelect + if len(cases) > 4 { + // Slice is heap allocated due to runtime dependent capacity. + runcases = make([]runtimeSelect, len(cases)) + } else { + // Slice can be stack allocated due to constant capacity. 
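+ // (The constant cap of 4 lets the backing array live on the
+ // stack for the common case of a select with few cases.)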
+ runcases = make([]runtimeSelect, len(cases), 4) + } + + haveDefault := false + for i, c := range cases { + rc := &runcases[i] + rc.dir = c.Dir + switch c.Dir { + default: + panic("reflect.Select: invalid Dir") + + case SelectDefault: // default + if haveDefault { + panic("reflect.Select: multiple default cases") + } + haveDefault = true + if c.Chan.IsValid() { + panic("reflect.Select: default case has Chan value") + } + if c.Send.IsValid() { + panic("reflect.Select: default case has Send value") + } + + case SelectSend: + ch := c.Chan + if !ch.IsValid() { + break + } + ch.mustBe(Chan) + ch.mustBeExported() + tt := (*chanType)(unsafe.Pointer(ch.typ())) + if ChanDir(tt.Dir)&SendDir == 0 { + panic("reflect.Select: SendDir case using recv-only channel") + } + rc.ch = ch.pointer() + rc.typ = toRType(&tt.Type) + v := c.Send + if !v.IsValid() { + panic("reflect.Select: SendDir case missing Send value") + } + v.mustBeExported() + v = v.assignTo("reflect.Select", tt.Elem, nil) + if v.flag&flagIndir != 0 { + rc.val = v.ptr + } else { + rc.val = unsafe.Pointer(&v.ptr) + } + // The value to send needs to escape. See the comment at rselect for + // why we need forced escape. + escapes(rc.val) + + case SelectRecv: + if c.Send.IsValid() { + panic("reflect.Select: RecvDir case has Send value") + } + ch := c.Chan + if !ch.IsValid() { + break + } + ch.mustBe(Chan) + ch.mustBeExported() + tt := (*chanType)(unsafe.Pointer(ch.typ())) + if ChanDir(tt.Dir)&RecvDir == 0 { + panic("reflect.Select: RecvDir case using send-only channel") + } + rc.ch = ch.pointer() + rc.typ = toRType(&tt.Type) + rc.val = unsafe_New(tt.Elem) + } + } + + chosen, recvOK = rselect(runcases) + if runcases[chosen].dir == SelectRecv { + tt := (*chanType)(unsafe.Pointer(runcases[chosen].typ)) + t := tt.Elem + p := runcases[chosen].val + fl := flag(t.Kind()) + if t.IfaceIndir() { + recv = Value{t, p, fl | flagIndir} + } else { + recv = Value{t, *(*unsafe.Pointer)(p), fl} + } + } + return chosen, recv, recvOK +} + +/* + * constructors + */ + +// implemented in package runtime + +//go:noescape +func unsafe_New(*abi.Type) unsafe.Pointer + +//go:noescape +func unsafe_NewArray(*abi.Type, int) unsafe.Pointer + +// MakeSlice creates a new zero-initialized slice value +// for the specified slice type, length, and capacity. +func MakeSlice(typ Type, len, cap int) Value { + if typ.Kind() != Slice { + panic("reflect.MakeSlice of non-slice type") + } + if len < 0 { + panic("reflect.MakeSlice: negative len") + } + if cap < 0 { + panic("reflect.MakeSlice: negative cap") + } + if len > cap { + panic("reflect.MakeSlice: len > cap") + } + + s := unsafeheader.Slice{Data: unsafe_NewArray(&(typ.Elem().(*rtype).t), cap), Len: len, Cap: cap} + return Value{&typ.(*rtype).t, unsafe.Pointer(&s), flagIndir | flag(Slice)} +} + +// MakeChan creates a new channel with the specified type and buffer size. +func MakeChan(typ Type, buffer int) Value { + if typ.Kind() != Chan { + panic("reflect.MakeChan of non-chan type") + } + if buffer < 0 { + panic("reflect.MakeChan: negative buffer size") + } + if typ.ChanDir() != BothDir { + panic("reflect.MakeChan: unidirectional channel type") + } + t := typ.common() + ch := makechan(t, buffer) + return Value{t, ch, flag(Chan)} +} + +// MakeMap creates a new map with the specified type. +func MakeMap(typ Type) Value { + return MakeMapWithSize(typ, 0) +} + +// MakeMapWithSize creates a new map with the specified type +// and initial space for approximately n elements. 
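+//
+// For example (illustrative sketch):
+//
+// t := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0))
+// m := reflect.MakeMapWithSize(t, 100) // like make(map[string]int, 100)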
+func MakeMapWithSize(typ Type, n int) Value { + if typ.Kind() != Map { + panic("reflect.MakeMapWithSize of non-map type") + } + t := typ.common() + m := makemap(t, n) + return Value{t, m, flag(Map)} +} + +// Indirect returns the value that v points to. +// If v is a nil pointer, Indirect returns a zero Value. +// If v is not a pointer, Indirect returns v. +func Indirect(v Value) Value { + if v.Kind() != Pointer { + return v + } + return v.Elem() +} + +// ValueOf returns a new Value initialized to the concrete value +// stored in the interface i. ValueOf(nil) returns the zero Value. +func ValueOf(i any) Value { + if i == nil { + return Value{} + } + return unpackEface(i) +} + +// Zero returns a Value representing the zero value for the specified type. +// The result is different from the zero value of the Value struct, +// which represents no value at all. +// For example, Zero(TypeOf(42)) returns a Value with Kind [Int] and value 0. +// The returned value is neither addressable nor settable. +func Zero(typ Type) Value { + if typ == nil { + panic("reflect: Zero(nil)") + } + t := &typ.(*rtype).t + fl := flag(t.Kind()) + if t.IfaceIndir() { + var p unsafe.Pointer + if t.Size() <= abi.ZeroValSize { + p = unsafe.Pointer(&zeroVal[0]) + } else { + p = unsafe_New(t) + } + return Value{t, p, fl | flagIndir} + } + return Value{t, nil, fl} +} + +//go:linkname zeroVal runtime.zeroVal +var zeroVal [abi.ZeroValSize]byte + +// New returns a Value representing a pointer to a new zero value +// for the specified type. That is, the returned Value's Type is PointerTo(typ). +func New(typ Type) Value { + if typ == nil { + panic("reflect: New(nil)") + } + t := &typ.(*rtype).t + pt := ptrTo(t) + if ifaceIndir(pt) { + // This is a pointer to a not-in-heap type. + panic("reflect: New of type that may not be allocated in heap (possibly undefined cgo C type)") + } + ptr := unsafe_New(t) + fl := flag(Pointer) + return Value{pt, ptr, fl} +} + +// NewAt returns a Value representing a pointer to a value of the +// specified type, using p as that pointer. +func NewAt(typ Type, p unsafe.Pointer) Value { + fl := flag(Pointer) + t := typ.(*rtype) + return Value{t.ptrTo(), p, fl} +} + +// assignTo returns a value v that can be assigned directly to dst. +// It panics if v is not assignable to dst. +// For a conversion to an interface type, target, if not nil, +// is a suggested scratch space to use. +// target must be initialized memory (or nil). +func (v Value) assignTo(context string, dst *abi.Type, target unsafe.Pointer) Value { + if v.flag&flagMethod != 0 { + v = makeMethodValue(context, v) + } + + switch { + case directlyAssignable(dst, v.typ()): + // Overwrite type so that they match. + // Same memory layout, so no harm done. + fl := v.flag&(flagAddr|flagIndir) | v.flag.ro() + fl |= flag(dst.Kind()) + return Value{dst, v.ptr, fl} + + case implements(dst, v.typ()): + if v.Kind() == Interface && v.IsNil() { + // A nil ReadWriter passed to nil Reader is OK, + // but using ifaceE2I below will panic. + // Avoid the panic by returning a nil dst (e.g., Reader) explicitly. + return Value{dst, nil, flag(Interface)} + } + x := valueInterface(v, false) + if target == nil { + target = unsafe_New(dst) + } + if dst.NumMethod() == 0 { + *(*any)(target) = x + } else { + ifaceE2I(dst, x, target) + } + return Value{dst, target, flagIndir | flag(Interface)} + } + + // Failed. 
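+ // (v's type is neither directly assignable to dst nor an
+ // implementation of it.)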
+ panic(context + ": value of type " + stringFor(v.typ()) + " is not assignable to type " + stringFor(dst))
+}
+
+// Convert returns the value v converted to type t.
+// If the usual Go conversion rules do not allow conversion
+// of the value v to type t, or if converting v to type t panics, Convert panics.
+func (v Value) Convert(t Type) Value {
+ if v.flag&flagMethod != 0 {
+ v = makeMethodValue("Convert", v)
+ }
+ op := convertOp(t.common(), v.typ())
+ if op == nil {
+ panic("reflect.Value.Convert: value of type " + stringFor(v.typ()) + " cannot be converted to type " + t.String())
+ }
+ return op(v, t)
+}
+
+// CanConvert reports whether the value v can be converted to type t.
+// If v.CanConvert(t) returns true then v.Convert(t) will not panic.
+func (v Value) CanConvert(t Type) bool {
+ vt := v.Type()
+ if !vt.ConvertibleTo(t) {
+ return false
+ }
+ // Converting from slice to array or to pointer-to-array can panic
+ // depending on the value.
+ switch {
+ case vt.Kind() == Slice && t.Kind() == Array:
+ if t.Len() > v.Len() {
+ return false
+ }
+ case vt.Kind() == Slice && t.Kind() == Pointer && t.Elem().Kind() == Array:
+ n := t.Elem().Len()
+ if n > v.Len() {
+ return false
+ }
+ }
+ return true
+}
+
+// Comparable reports whether the value v is comparable.
+// If the type of v is an interface, this checks the dynamic type.
+// If this reports true then v.Interface() == x will not panic for any x,
+// nor will v.Equal(u) for any Value u.
+func (v Value) Comparable() bool {
+ k := v.Kind()
+ switch k {
+ case Invalid:
+ return false
+
+ case Array:
+ switch v.Type().Elem().Kind() {
+ case Interface, Array, Struct:
+ for i := 0; i < v.Type().Len(); i++ {
+ if !v.Index(i).Comparable() {
+ return false
+ }
+ }
+ return true
+ }
+ return v.Type().Comparable()
+
+ case Interface:
+ return v.Elem().Comparable()
+
+ case Struct:
+ for i := 0; i < v.NumField(); i++ {
+ if !v.Field(i).Comparable() {
+ return false
+ }
+ }
+ return true
+
+ default:
+ return v.Type().Comparable()
+ }
+}
+
+// Equal reports true if v is equal to u.
+// For two invalid values, Equal will report true.
+// For an interface value, Equal will compare the value within the interface.
+// Otherwise, if the values have different types, Equal will report false.
+// Otherwise, for arrays and structs Equal will compare each element in order,
+// and report false if it finds non-equal elements.
+// During all comparisons, if values of the same type are compared,
+// and the type is not comparable, Equal will panic.
+func (v Value) Equal(u Value) bool {
+ if v.Kind() == Interface {
+ v = v.Elem()
+ }
+ if u.Kind() == Interface {
+ u = u.Elem()
+ }
+
+ if !v.IsValid() || !u.IsValid() {
+ return v.IsValid() == u.IsValid()
+ }
+
+ if v.Kind() != u.Kind() || v.Type() != u.Type() {
+ return false
+ }
+
+ // Handle each Kind directly rather than calling valueInterface
+ // to avoid allocating.
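+ // Func, Map, and Slice reach the panic below: Go defines no
+ // equality for them except comparison against nil.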
+ switch v.Kind() { + default: + panic("reflect.Value.Equal: invalid Kind") + case Bool: + return v.Bool() == u.Bool() + case Int, Int8, Int16, Int32, Int64: + return v.Int() == u.Int() + case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr: + return v.Uint() == u.Uint() + case Float32, Float64: + return v.Float() == u.Float() + case Complex64, Complex128: + return v.Complex() == u.Complex() + case String: + return v.String() == u.String() + case Chan, Pointer, UnsafePointer: + return v.Pointer() == u.Pointer() + case Array: + // u and v have the same type so they have the same length + vl := v.Len() + if vl == 0 { + // panic on [0]func() + if !v.Type().Elem().Comparable() { + break + } + return true + } + for i := 0; i < vl; i++ { + if !v.Index(i).Equal(u.Index(i)) { + return false + } + } + return true + case Struct: + // u and v have the same type so they have the same fields + nf := v.NumField() + for i := 0; i < nf; i++ { + if !v.Field(i).Equal(u.Field(i)) { + return false + } + } + return true + case Func, Map, Slice: + break + } + panic("reflect.Value.Equal: values of type " + v.Type().String() + " are not comparable") +} + +// convertOp returns the function to convert a value of type src +// to a value of type dst. If the conversion is illegal, convertOp returns nil. +func convertOp(dst, src *abi.Type) func(Value, Type) Value { + switch Kind(src.Kind()) { + case Int, Int8, Int16, Int32, Int64: + switch Kind(dst.Kind()) { + case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr: + return cvtInt + case Float32, Float64: + return cvtIntFloat + case String: + return cvtIntString + } + + case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr: + switch Kind(dst.Kind()) { + case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr: + return cvtUint + case Float32, Float64: + return cvtUintFloat + case String: + return cvtUintString + } + + case Float32, Float64: + switch Kind(dst.Kind()) { + case Int, Int8, Int16, Int32, Int64: + return cvtFloatInt + case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr: + return cvtFloatUint + case Float32, Float64: + return cvtFloat + } + + case Complex64, Complex128: + switch Kind(dst.Kind()) { + case Complex64, Complex128: + return cvtComplex + } + + case String: + if dst.Kind() == abi.Slice && pkgPathFor(dst.Elem()) == "" { + switch Kind(dst.Elem().Kind()) { + case Uint8: + return cvtStringBytes + case Int32: + return cvtStringRunes + } + } + + case Slice: + if dst.Kind() == abi.String && pkgPathFor(src.Elem()) == "" { + switch Kind(src.Elem().Kind()) { + case Uint8: + return cvtBytesString + case Int32: + return cvtRunesString + } + } + // "x is a slice, T is a pointer-to-array type, + // and the slice and array types have identical element types." + if dst.Kind() == abi.Pointer && dst.Elem().Kind() == abi.Array && src.Elem() == dst.Elem().Elem() { + return cvtSliceArrayPtr + } + // "x is a slice, T is an array type, + // and the slice and array types have identical element types." + if dst.Kind() == abi.Array && src.Elem() == dst.Elem() { + return cvtSliceArray + } + + case Chan: + if dst.Kind() == abi.Chan && specialChannelAssignability(dst, src) { + return cvtDirect + } + } + + // dst and src have same underlying type. + if haveIdenticalUnderlyingType(dst, src, false) { + return cvtDirect + } + + // dst and src are non-defined pointer types with same underlying base type. 
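+ // (For example, *MyInt converts to *int when MyInt is defined as
+ // int: both pointer types are unnamed and share a base type.)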
+ if dst.Kind() == abi.Pointer && nameFor(dst) == "" && + src.Kind() == abi.Pointer && nameFor(src) == "" && + haveIdenticalUnderlyingType(elem(dst), elem(src), false) { + return cvtDirect + } + + if implements(dst, src) { + if src.Kind() == abi.Interface { + return cvtI2I + } + return cvtT2I + } + + return nil +} + +// makeInt returns a Value of type t equal to bits (possibly truncated), +// where t is a signed or unsigned int type. +func makeInt(f flag, bits uint64, t Type) Value { + typ := t.common() + ptr := unsafe_New(typ) + switch typ.Size() { + case 1: + *(*uint8)(ptr) = uint8(bits) + case 2: + *(*uint16)(ptr) = uint16(bits) + case 4: + *(*uint32)(ptr) = uint32(bits) + case 8: + *(*uint64)(ptr) = bits + } + return Value{typ, ptr, f | flagIndir | flag(typ.Kind())} +} + +// makeFloat returns a Value of type t equal to v (possibly truncated to float32), +// where t is a float32 or float64 type. +func makeFloat(f flag, v float64, t Type) Value { + typ := t.common() + ptr := unsafe_New(typ) + switch typ.Size() { + case 4: + *(*float32)(ptr) = float32(v) + case 8: + *(*float64)(ptr) = v + } + return Value{typ, ptr, f | flagIndir | flag(typ.Kind())} +} + +// makeFloat32 returns a Value of type t equal to v, where t is a float32 type. +func makeFloat32(f flag, v float32, t Type) Value { + typ := t.common() + ptr := unsafe_New(typ) + *(*float32)(ptr) = v + return Value{typ, ptr, f | flagIndir | flag(typ.Kind())} +} + +// makeComplex returns a Value of type t equal to v (possibly truncated to complex64), +// where t is a complex64 or complex128 type. +func makeComplex(f flag, v complex128, t Type) Value { + typ := t.common() + ptr := unsafe_New(typ) + switch typ.Size() { + case 8: + *(*complex64)(ptr) = complex64(v) + case 16: + *(*complex128)(ptr) = v + } + return Value{typ, ptr, f | flagIndir | flag(typ.Kind())} +} + +func makeString(f flag, v string, t Type) Value { + ret := New(t).Elem() + ret.SetString(v) + ret.flag = ret.flag&^flagAddr | f + return ret +} + +func makeBytes(f flag, v []byte, t Type) Value { + ret := New(t).Elem() + ret.SetBytes(v) + ret.flag = ret.flag&^flagAddr | f + return ret +} + +func makeRunes(f flag, v []rune, t Type) Value { + ret := New(t).Elem() + ret.setRunes(v) + ret.flag = ret.flag&^flagAddr | f + return ret +} + +// These conversion functions are returned by convertOp +// for classes of conversions. For example, the first function, cvtInt, +// takes any value v of signed int type and returns the value converted +// to type t, where t is any signed or unsigned int type. + +// convertOp: intXX -> [u]intXX +func cvtInt(v Value, t Type) Value { + return makeInt(v.flag.ro(), uint64(v.Int()), t) +} + +// convertOp: uintXX -> [u]intXX +func cvtUint(v Value, t Type) Value { + return makeInt(v.flag.ro(), v.Uint(), t) +} + +// convertOp: floatXX -> intXX +func cvtFloatInt(v Value, t Type) Value { + return makeInt(v.flag.ro(), uint64(int64(v.Float())), t) +} + +// convertOp: floatXX -> uintXX +func cvtFloatUint(v Value, t Type) Value { + return makeInt(v.flag.ro(), uint64(v.Float()), t) +} + +// convertOp: intXX -> floatXX +func cvtIntFloat(v Value, t Type) Value { + return makeFloat(v.flag.ro(), float64(v.Int()), t) +} + +// convertOp: uintXX -> floatXX +func cvtUintFloat(v Value, t Type) Value { + return makeFloat(v.flag.ro(), float64(v.Uint()), t) +} + +// convertOp: floatXX -> floatXX +func cvtFloat(v Value, t Type) Value { + if v.Type().Kind() == Float32 && t.Kind() == Float32 { + // Don't do any conversion if both types have underlying type float32. 
+ // This avoids converting to float64 and back, which will + // convert a signaling NaN to a quiet NaN. See issue 36400. + return makeFloat32(v.flag.ro(), *(*float32)(v.ptr), t) + } + return makeFloat(v.flag.ro(), v.Float(), t) +} + +// convertOp: complexXX -> complexXX +func cvtComplex(v Value, t Type) Value { + return makeComplex(v.flag.ro(), v.Complex(), t) +} + +// convertOp: intXX -> string +func cvtIntString(v Value, t Type) Value { + s := "\uFFFD" + if x := v.Int(); int64(rune(x)) == x { + s = string(rune(x)) + } + return makeString(v.flag.ro(), s, t) +} + +// convertOp: uintXX -> string +func cvtUintString(v Value, t Type) Value { + s := "\uFFFD" + if x := v.Uint(); uint64(rune(x)) == x { + s = string(rune(x)) + } + return makeString(v.flag.ro(), s, t) +} + +// convertOp: []byte -> string +func cvtBytesString(v Value, t Type) Value { + return makeString(v.flag.ro(), string(v.Bytes()), t) +} + +// convertOp: string -> []byte +func cvtStringBytes(v Value, t Type) Value { + return makeBytes(v.flag.ro(), []byte(v.String()), t) +} + +// convertOp: []rune -> string +func cvtRunesString(v Value, t Type) Value { + return makeString(v.flag.ro(), string(v.runes()), t) +} + +// convertOp: string -> []rune +func cvtStringRunes(v Value, t Type) Value { + return makeRunes(v.flag.ro(), []rune(v.String()), t) +} + +// convertOp: []T -> *[N]T +func cvtSliceArrayPtr(v Value, t Type) Value { + n := t.Elem().Len() + if n > v.Len() { + panic("reflect: cannot convert slice with length " + itoa.Itoa(v.Len()) + " to pointer to array with length " + itoa.Itoa(n)) + } + h := (*unsafeheader.Slice)(v.ptr) + return Value{t.common(), h.Data, v.flag&^(flagIndir|flagAddr|flagKindMask) | flag(Pointer)} +} + +// convertOp: []T -> [N]T +func cvtSliceArray(v Value, t Type) Value { + n := t.Len() + if n > v.Len() { + panic("reflect: cannot convert slice with length " + itoa.Itoa(v.Len()) + " to array with length " + itoa.Itoa(n)) + } + h := (*unsafeheader.Slice)(v.ptr) + typ := t.common() + ptr := h.Data + c := unsafe_New(typ) + typedmemmove(typ, c, ptr) + ptr = c + + return Value{typ, ptr, v.flag&^(flagAddr|flagKindMask) | flag(Array)} +} + +// convertOp: direct copy +func cvtDirect(v Value, typ Type) Value { + f := v.flag + t := typ.common() + ptr := v.ptr + if f&flagAddr != 0 { + // indirect, mutable word - make a copy + c := unsafe_New(t) + typedmemmove(t, c, ptr) + ptr = c + f &^= flagAddr + } + return Value{t, ptr, v.flag.ro() | f} // v.flag.ro()|f == f? +} + +// convertOp: concrete -> interface +func cvtT2I(v Value, typ Type) Value { + target := unsafe_New(typ.common()) + x := valueInterface(v, false) + if typ.NumMethod() == 0 { + *(*any)(target) = x + } else { + ifaceE2I(typ.common(), x, target) + } + return Value{typ.common(), target, v.flag.ro() | flagIndir | flag(Interface)} +} + +// convertOp: interface -> interface +func cvtI2I(v Value, typ Type) Value { + if v.IsNil() { + ret := Zero(typ) + ret.flag |= v.flag.ro() + return ret + } + return cvtT2I(v.Elem(), typ) +} + +// implemented in ../runtime +// +//go:noescape +func chancap(ch unsafe.Pointer) int + +//go:noescape +func chanclose(ch unsafe.Pointer) + +//go:noescape +func chanlen(ch unsafe.Pointer) int + +// Note: some of the noescape annotations below are technically a lie, +// but safe in the context of this package. Functions like chansend0 +// and mapassign0 don't escape the referent, but may escape anything +// the referent points to (they do shallow copies of the referent). 
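+// That pointed-to data can be retained by the runtime, so it must
+// still be modeled as escaping.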
+// We add a 0 to their names and wrap them in functions with the +// proper escape behavior. + +//go:noescape +func chanrecv(ch unsafe.Pointer, nb bool, val unsafe.Pointer) (selected, received bool) + +//go:noescape +func chansend0(ch unsafe.Pointer, val unsafe.Pointer, nb bool) bool + +func chansend(ch unsafe.Pointer, val unsafe.Pointer, nb bool) bool { + contentEscapes(val) + return chansend0(ch, val, nb) +} + +func makechan(typ *abi.Type, size int) (ch unsafe.Pointer) +func makemap(t *abi.Type, cap int) (m unsafe.Pointer) + +//go:noescape +func mapaccess(t *abi.Type, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer) + +//go:noescape +func mapaccess_faststr(t *abi.Type, m unsafe.Pointer, key string) (val unsafe.Pointer) + +//go:noescape +func mapassign0(t *abi.Type, m unsafe.Pointer, key, val unsafe.Pointer) + +func mapassign(t *abi.Type, m unsafe.Pointer, key, val unsafe.Pointer) { + contentEscapes(key) + contentEscapes(val) + mapassign0(t, m, key, val) +} + +//go:noescape +func mapassign_faststr0(t *abi.Type, m unsafe.Pointer, key string, val unsafe.Pointer) + +func mapassign_faststr(t *abi.Type, m unsafe.Pointer, key string, val unsafe.Pointer) { + contentEscapes((*unsafeheader.String)(unsafe.Pointer(&key)).Data) + contentEscapes(val) + mapassign_faststr0(t, m, key, val) +} + +//go:noescape +func mapdelete(t *abi.Type, m unsafe.Pointer, key unsafe.Pointer) + +//go:noescape +func mapdelete_faststr(t *abi.Type, m unsafe.Pointer, key string) + +//go:noescape +func mapiterinit(t *abi.Type, m unsafe.Pointer, it *hiter) + +//go:noescape +func mapiterkey(it *hiter) (key unsafe.Pointer) + +//go:noescape +func mapiterelem(it *hiter) (elem unsafe.Pointer) + +//go:noescape +func mapiternext(it *hiter) + +//go:noescape +func maplen(m unsafe.Pointer) int + +func mapclear(t *abi.Type, m unsafe.Pointer) + +// call calls fn with "stackArgsSize" bytes of stack arguments laid out +// at stackArgs and register arguments laid out in regArgs. frameSize is +// the total amount of stack space that will be reserved by call, so this +// should include enough space to spill register arguments to the stack in +// case of preemption. +// +// After fn returns, call copies stackArgsSize-stackRetOffset result bytes +// back into stackArgs+stackRetOffset before returning, for any return +// values passed on the stack. Register-based return values will be found +// in the same regArgs structure. +// +// regArgs must also be prepared with an appropriate ReturnIsPtr bitmap +// indicating which registers will contain pointer-valued return values. The +// purpose of this bitmap is to keep pointers visible to the GC between +// returning from reflectcall and actually using them. +// +// If copying result bytes back from the stack, the caller must pass the +// argument frame type as stackArgsType, so that call can execute appropriate +// write barriers during the copy. +// +// Arguments passed through to call do not escape. The type is used only in a +// very limited callee of call, the stackArgs are copied, and regArgs is only +// used in the call frame. +// +//go:noescape +//go:linkname call runtime.reflectcall +func call(stackArgsType *abi.Type, f, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs) + +func ifaceE2I(t *abi.Type, src any, dst unsafe.Pointer) + +// memmove copies size bytes to dst from src. No write barriers are used. +// +//go:noescape +func memmove(dst, src unsafe.Pointer, size uintptr) + +// typedmemmove copies a value of type t to dst from src. 
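+// Unlike memmove above, it performs any write barriers required for
+// pointer fields inside t.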
+// +//go:noescape +func typedmemmove(t *abi.Type, dst, src unsafe.Pointer) + +// typedmemclr zeros the value at ptr of type t. +// +//go:noescape +func typedmemclr(t *abi.Type, ptr unsafe.Pointer) + +// typedmemclrpartial is like typedmemclr but assumes that +// dst points off bytes into the value and only clears size bytes. +// +//go:noescape +func typedmemclrpartial(t *abi.Type, ptr unsafe.Pointer, off, size uintptr) + +// typedslicecopy copies a slice of elemType values from src to dst, +// returning the number of elements copied. +// +//go:noescape +func typedslicecopy(t *abi.Type, dst, src unsafeheader.Slice) int + +// typedarrayclear zeroes the value at ptr of an array of elemType, +// only clears len elem. +// +//go:noescape +func typedarrayclear(elemType *abi.Type, ptr unsafe.Pointer, len int) + +//go:noescape +func typehash(t *abi.Type, p unsafe.Pointer, h uintptr) uintptr + +func verifyNotInHeapPtr(p uintptr) bool + +//go:noescape +func growslice(t *abi.Type, old unsafeheader.Slice, num int) unsafeheader.Slice + +// Dummy annotation marking that the value x escapes, +// for use in cases where the reflect code is so clever that +// the compiler cannot follow. +func escapes(x any) { + if dummy.b { + dummy.x = x + } +} + +var dummy struct { + b bool + x any +} + +// Dummy annotation marking that the content of value x +// escapes (i.e. modeling roughly heap=*x), +// for use in cases where the reflect code is so clever that +// the compiler cannot follow. +func contentEscapes(x unsafe.Pointer) { + if dummy.b { + escapes(*(*any)(x)) // the dereference may not always be safe, but never executed + } +} + +//go:nosplit +func noescape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} diff --git a/platform/dbops/binaries/go/go/src/reflect/visiblefields.go b/platform/dbops/binaries/go/go/src/reflect/visiblefields.go new file mode 100644 index 0000000000000000000000000000000000000000..9375faa11045a6b5b473b72dd189b654cd964276 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/visiblefields.go @@ -0,0 +1,105 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflect + +// VisibleFields returns all the visible fields in t, which must be a +// struct type. A field is defined as visible if it's accessible +// directly with a FieldByName call. The returned fields include fields +// inside anonymous struct members and unexported fields. They follow +// the same order found in the struct, with anonymous fields followed +// immediately by their promoted fields. +// +// For each element e of the returned slice, the corresponding field +// can be retrieved from a value v of type t by calling v.FieldByIndex(e.Index). +func VisibleFields(t Type) []StructField { + if t == nil { + panic("reflect: VisibleFields(nil)") + } + if t.Kind() != Struct { + panic("reflect.VisibleFields of non-struct type") + } + w := &visibleFieldsWalker{ + byName: make(map[string]int), + visiting: make(map[Type]bool), + fields: make([]StructField, 0, t.NumField()), + index: make([]int, 0, 2), + } + w.walk(t) + // Remove all the fields that have been hidden. + // Use an in-place removal that avoids copying in + // the common case that there are no hidden fields. + j := 0 + for i := range w.fields { + f := &w.fields[i] + if f.Name == "" { + continue + } + if i != j { + // A field has been removed. We need to shuffle + // all the subsequent elements up. 
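+ // (j trails i by the number of hidden fields seen so far.)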
+ w.fields[j] = *f + } + j++ + } + return w.fields[:j] +} + +type visibleFieldsWalker struct { + byName map[string]int + visiting map[Type]bool + fields []StructField + index []int +} + +// walk walks all the fields in the struct type t, visiting +// fields in index preorder and appending them to w.fields +// (this maintains the required ordering). +// Fields that have been overridden have their +// Name field cleared. +func (w *visibleFieldsWalker) walk(t Type) { + if w.visiting[t] { + return + } + w.visiting[t] = true + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + w.index = append(w.index, i) + add := true + if oldIndex, ok := w.byName[f.Name]; ok { + old := &w.fields[oldIndex] + if len(w.index) == len(old.Index) { + // Fields with the same name at the same depth + // cancel one another out. Set the field name + // to empty to signify that has happened, and + // there's no need to add this field. + old.Name = "" + add = false + } else if len(w.index) < len(old.Index) { + // The old field loses because it's deeper than the new one. + old.Name = "" + } else { + // The old field wins because it's shallower than the new one. + add = false + } + } + if add { + // Copy the index so that it's not overwritten + // by the other appends. + f.Index = append([]int(nil), w.index...) + w.byName[f.Name] = len(w.fields) + w.fields = append(w.fields, f) + } + if f.Anonymous { + if f.Type.Kind() == Pointer { + f.Type = f.Type.Elem() + } + if f.Type.Kind() == Struct { + w.walk(f.Type) + } + } + w.index = w.index[:len(w.index)-1] + } + delete(w.visiting, t) +} diff --git a/platform/dbops/binaries/go/go/src/reflect/visiblefields_test.go b/platform/dbops/binaries/go/go/src/reflect/visiblefields_test.go new file mode 100644 index 0000000000000000000000000000000000000000..66d545dd1f7c2cc185ab358f8114ec0c92412961 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/reflect/visiblefields_test.go @@ -0,0 +1,349 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflect_test + +import ( + . 
"reflect" + "strings" + "testing" +) + +type structField struct { + name string + index []int +} + +var fieldsTests = []struct { + testName string + val any + expect []structField +}{{ + testName: "SimpleStruct", + val: struct { + A int + B string + C bool + }{}, + expect: []structField{{ + name: "A", + index: []int{0}, + }, { + name: "B", + index: []int{1}, + }, { + name: "C", + index: []int{2}, + }}, +}, { + testName: "NonEmbeddedStructMember", + val: struct { + A struct { + X int + } + }{}, + expect: []structField{{ + name: "A", + index: []int{0}, + }}, +}, { + testName: "EmbeddedExportedStruct", + val: struct { + SFG + }{}, + expect: []structField{{ + name: "SFG", + index: []int{0}, + }, { + name: "F", + index: []int{0, 0}, + }, { + name: "G", + index: []int{0, 1}, + }}, +}, { + testName: "EmbeddedUnexportedStruct", + val: struct { + sFG + }{}, + expect: []structField{{ + name: "sFG", + index: []int{0}, + }, { + name: "F", + index: []int{0, 0}, + }, { + name: "G", + index: []int{0, 1}, + }}, +}, { + testName: "TwoEmbeddedStructsWithCancelingMembers", + val: struct { + SFG + SF + }{}, + expect: []structField{{ + name: "SFG", + index: []int{0}, + }, { + name: "G", + index: []int{0, 1}, + }, { + name: "SF", + index: []int{1}, + }}, +}, { + testName: "EmbeddedStructsWithSameFieldsAtDifferentDepths", + val: struct { + SFGH3 + SG1 + SFG2 + SF2 + L int + }{}, + expect: []structField{{ + name: "SFGH3", + index: []int{0}, + }, { + name: "SFGH2", + index: []int{0, 0}, + }, { + name: "SFGH1", + index: []int{0, 0, 0}, + }, { + name: "SFGH", + index: []int{0, 0, 0, 0}, + }, { + name: "H", + index: []int{0, 0, 0, 0, 2}, + }, { + name: "SG1", + index: []int{1}, + }, { + name: "SG", + index: []int{1, 0}, + }, { + name: "G", + index: []int{1, 0, 0}, + }, { + name: "SFG2", + index: []int{2}, + }, { + name: "SFG1", + index: []int{2, 0}, + }, { + name: "SFG", + index: []int{2, 0, 0}, + }, { + name: "SF2", + index: []int{3}, + }, { + name: "SF1", + index: []int{3, 0}, + }, { + name: "SF", + index: []int{3, 0, 0}, + }, { + name: "L", + index: []int{4}, + }}, +}, { + testName: "EmbeddedPointerStruct", + val: struct { + *SF + }{}, + expect: []structField{{ + name: "SF", + index: []int{0}, + }, { + name: "F", + index: []int{0, 0}, + }}, +}, { + testName: "EmbeddedNotAPointer", + val: struct { + M + }{}, + expect: []structField{{ + name: "M", + index: []int{0}, + }}, +}, { + testName: "RecursiveEmbedding", + val: Rec1{}, + expect: []structField{{ + name: "Rec2", + index: []int{0}, + }, { + name: "F", + index: []int{0, 0}, + }, { + name: "Rec1", + index: []int{0, 1}, + }}, +}, { + testName: "RecursiveEmbedding2", + val: Rec2{}, + expect: []structField{{ + name: "F", + index: []int{0}, + }, { + name: "Rec1", + index: []int{1}, + }, { + name: "Rec2", + index: []int{1, 0}, + }}, +}, { + testName: "RecursiveEmbedding3", + val: RS3{}, + expect: []structField{{ + name: "RS2", + index: []int{0}, + }, { + name: "RS1", + index: []int{1}, + }, { + name: "i", + index: []int{1, 0}, + }}, +}} + +type SFG struct { + F int + G int +} + +type SFG1 struct { + SFG +} + +type SFG2 struct { + SFG1 +} + +type SFGH struct { + F int + G int + H int +} + +type SFGH1 struct { + SFGH +} + +type SFGH2 struct { + SFGH1 +} + +type SFGH3 struct { + SFGH2 +} + +type SF struct { + F int +} + +type SF1 struct { + SF +} + +type SF2 struct { + SF1 +} + +type SG struct { + G int +} + +type SG1 struct { + SG +} + +type sFG struct { + F int + G int +} + +type RS1 struct { + i int +} + +type RS2 struct { + RS1 +} + +type RS3 struct { + RS2 + RS1 +} + 
+type M map[string]any + +type Rec1 struct { + *Rec2 +} + +type Rec2 struct { + F string + *Rec1 +} + +func TestFields(t *testing.T) { + for _, test := range fieldsTests { + test := test + t.Run(test.testName, func(t *testing.T) { + typ := TypeOf(test.val) + fields := VisibleFields(typ) + if got, want := len(fields), len(test.expect); got != want { + t.Fatalf("unexpected field count; got %d want %d", got, want) + } + + for j, field := range fields { + expect := test.expect[j] + t.Logf("field %d: %s", j, expect.name) + gotField := typ.FieldByIndex(field.Index) + // Unfortunately, FieldByIndex does not return + // a field with the same index that we passed in, + // so we set it to the expected value so that + // it can be compared later with the result of FieldByName. + gotField.Index = field.Index + expectField := typ.FieldByIndex(expect.index) + // ditto. + expectField.Index = expect.index + if !DeepEqual(gotField, expectField) { + t.Fatalf("unexpected field result\ngot %#v\nwant %#v", gotField, expectField) + } + + // Sanity check that we can actually access the field by the + // expected name. + gotField1, ok := typ.FieldByName(expect.name) + if !ok { + t.Fatalf("field %q not accessible by name", expect.name) + } + if !DeepEqual(gotField1, expectField) { + t.Fatalf("unexpected FieldByName result; got %#v want %#v", gotField1, expectField) + } + } + }) + } +} + +// Must not panic with nil embedded pointer. +func TestFieldByIndexErr(t *testing.T) { + type A struct { + S string + } + type B struct { + *A + } + v := ValueOf(B{}) + _, err := v.FieldByIndexErr([]int{0, 0}) + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), "embedded struct field A") { + t.Fatal(err) + } +} diff --git a/platform/dbops/binaries/go/go/src/regexp/all_test.go b/platform/dbops/binaries/go/go/src/regexp/all_test.go new file mode 100644 index 0000000000000000000000000000000000000000..124313d1af970e5a16396253cd2f352a126c83c5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/regexp/all_test.go @@ -0,0 +1,975 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package regexp + +import ( + "reflect" + "regexp/syntax" + "strings" + "testing" + "unicode/utf8" +) + +var goodRe = []string{ + ``, + `.`, + `^.$`, + `a`, + `a*`, + `a+`, + `a?`, + `a|b`, + `a*|b*`, + `(a*|b)(c*|d)`, + `[a-z]`, + `[a-abc-c\-\]\[]`, + `[a-z]+`, + `[abc]`, + `[^1234]`, + `[^\n]`, + `\!\\`, +} + +type stringError struct { + re string + err string +} + +var badRe = []stringError{ + {`*`, "missing argument to repetition operator: `*`"}, + {`+`, "missing argument to repetition operator: `+`"}, + {`?`, "missing argument to repetition operator: `?`"}, + {`(abc`, "missing closing ): `(abc`"}, + {`abc)`, "unexpected ): `abc)`"}, + {`x[a-z`, "missing closing ]: `[a-z`"}, + {`[z-a]`, "invalid character class range: `z-a`"}, + {`abc\`, "trailing backslash at end of expression"}, + {`a**`, "invalid nested repetition operator: `**`"}, + {`a*+`, "invalid nested repetition operator: `*+`"}, + {`\x`, "invalid escape sequence: `\\x`"}, + {strings.Repeat(`\pL`, 27000), "expression too large"}, +} + +func compileTest(t *testing.T, expr string, error string) *Regexp { + re, err := Compile(expr) + if error == "" && err != nil { + t.Error("compiling `", expr, "`; unexpected error: ", err.Error()) + } + if error != "" && err == nil { + t.Error("compiling `", expr, "`; missing error") + } else if error != "" && !strings.Contains(err.Error(), error) { + t.Error("compiling `", expr, "`; wrong error: ", err.Error(), "; want ", error) + } + return re +} + +func TestGoodCompile(t *testing.T) { + for i := 0; i < len(goodRe); i++ { + compileTest(t, goodRe[i], "") + } +} + +func TestBadCompile(t *testing.T) { + for i := 0; i < len(badRe); i++ { + compileTest(t, badRe[i].re, badRe[i].err) + } +} + +func matchTest(t *testing.T, test *FindTest) { + re := compileTest(t, test.pat, "") + if re == nil { + return + } + m := re.MatchString(test.text) + if m != (len(test.matches) > 0) { + t.Errorf("MatchString failure on %s: %t should be %t", test, m, len(test.matches) > 0) + } + // now try bytes + m = re.Match([]byte(test.text)) + if m != (len(test.matches) > 0) { + t.Errorf("Match failure on %s: %t should be %t", test, m, len(test.matches) > 0) + } +} + +func TestMatch(t *testing.T) { + for _, test := range findTests { + matchTest(t, &test) + } +} + +func matchFunctionTest(t *testing.T, test *FindTest) { + m, err := MatchString(test.pat, test.text) + if err == nil { + return + } + if m != (len(test.matches) > 0) { + t.Errorf("Match failure on %s: %t should be %t", test, m, len(test.matches) > 0) + } +} + +func TestMatchFunction(t *testing.T) { + for _, test := range findTests { + matchFunctionTest(t, &test) + } +} + +func copyMatchTest(t *testing.T, test *FindTest) { + re := compileTest(t, test.pat, "") + if re == nil { + return + } + m1 := re.MatchString(test.text) + m2 := re.Copy().MatchString(test.text) + if m1 != m2 { + t.Errorf("Copied Regexp match failure on %s: original gave %t; copy gave %t; should be %t", + test, m1, m2, len(test.matches) > 0) + } +} + +func TestCopyMatch(t *testing.T) { + for _, test := range findTests { + copyMatchTest(t, &test) + } +} + +type ReplaceTest struct { + pattern, replacement, input, output string +} + +var replaceTests = []ReplaceTest{ + // Test empty input and/or replacement, with pattern that matches the empty string. + {"", "", "", ""}, + {"", "x", "", "x"}, + {"", "", "abc", "abc"}, + {"", "x", "abc", "xaxbxcx"}, + + // Test empty input and/or replacement, with pattern that does not match the empty string. 
+ {"b", "", "", ""}, + {"b", "x", "", ""}, + {"b", "", "abc", "ac"}, + {"b", "x", "abc", "axc"}, + {"y", "", "", ""}, + {"y", "x", "", ""}, + {"y", "", "abc", "abc"}, + {"y", "x", "abc", "abc"}, + + // Multibyte characters -- verify that we don't try to match in the middle + // of a character. + {"[a-c]*", "x", "\u65e5", "x\u65e5x"}, + {"[^\u65e5]", "x", "abc\u65e5def", "xxx\u65e5xxx"}, + + // Start and end of a string. + {"^[a-c]*", "x", "abcdabc", "xdabc"}, + {"[a-c]*$", "x", "abcdabc", "abcdx"}, + {"^[a-c]*$", "x", "abcdabc", "abcdabc"}, + {"^[a-c]*", "x", "abc", "x"}, + {"[a-c]*$", "x", "abc", "x"}, + {"^[a-c]*$", "x", "abc", "x"}, + {"^[a-c]*", "x", "dabce", "xdabce"}, + {"[a-c]*$", "x", "dabce", "dabcex"}, + {"^[a-c]*$", "x", "dabce", "dabce"}, + {"^[a-c]*", "x", "", "x"}, + {"[a-c]*$", "x", "", "x"}, + {"^[a-c]*$", "x", "", "x"}, + + {"^[a-c]+", "x", "abcdabc", "xdabc"}, + {"[a-c]+$", "x", "abcdabc", "abcdx"}, + {"^[a-c]+$", "x", "abcdabc", "abcdabc"}, + {"^[a-c]+", "x", "abc", "x"}, + {"[a-c]+$", "x", "abc", "x"}, + {"^[a-c]+$", "x", "abc", "x"}, + {"^[a-c]+", "x", "dabce", "dabce"}, + {"[a-c]+$", "x", "dabce", "dabce"}, + {"^[a-c]+$", "x", "dabce", "dabce"}, + {"^[a-c]+", "x", "", ""}, + {"[a-c]+$", "x", "", ""}, + {"^[a-c]+$", "x", "", ""}, + + // Other cases. + {"abc", "def", "abcdefg", "defdefg"}, + {"bc", "BC", "abcbcdcdedef", "aBCBCdcdedef"}, + {"abc", "", "abcdabc", "d"}, + {"x", "xXx", "xxxXxxx", "xXxxXxxXxXxXxxXxxXx"}, + {"abc", "d", "", ""}, + {"abc", "d", "abc", "d"}, + {".+", "x", "abc", "x"}, + {"[a-c]*", "x", "def", "xdxexfx"}, + {"[a-c]+", "x", "abcbcdcdedef", "xdxdedef"}, + {"[a-c]*", "x", "abcbcdcdedef", "xdxdxexdxexfx"}, + + // Substitutions + {"a+", "($0)", "banana", "b(a)n(a)n(a)"}, + {"a+", "(${0})", "banana", "b(a)n(a)n(a)"}, + {"a+", "(${0})$0", "banana", "b(a)an(a)an(a)a"}, + {"a+", "(${0})$0", "banana", "b(a)an(a)an(a)a"}, + {"hello, (.+)", "goodbye, ${1}", "hello, world", "goodbye, world"}, + {"hello, (.+)", "goodbye, $1x", "hello, world", "goodbye, "}, + {"hello, (.+)", "goodbye, ${1}x", "hello, world", "goodbye, worldx"}, + {"hello, (.+)", "<$0><$1><$2><$3>", "hello, world", "<><>"}, + {"hello, (?P.+)", "goodbye, $noun!", "hello, world", "goodbye, world!"}, + {"hello, (?P.+)", "goodbye, ${noun}", "hello, world", "goodbye, world"}, + {"(?Phi)|(?Pbye)", "$x$x$x", "hi", "hihihi"}, + {"(?Phi)|(?Pbye)", "$x$x$x", "bye", "byebyebye"}, + {"(?Phi)|(?Pbye)", "$xyz", "hi", ""}, + {"(?Phi)|(?Pbye)", "${x}yz", "hi", "hiyz"}, + {"(?Phi)|(?Pbye)", "hello $$x", "hi", "hello $x"}, + {"a+", "${oops", "aaa", "${oops"}, + {"a+", "$$", "aaa", "$"}, + {"a+", "$", "aaa", "$"}, + + // Substitution when subexpression isn't found + {"(x)?", "$1", "123", "123"}, + {"abc", "$1", "123", "123"}, + + // Substitutions involving a (x){0} + {"(a)(b){0}(c)", ".$1|$3.", "xacxacx", "x.a|c.x.a|c.x"}, + {"(a)(((b))){0}c", ".$1.", "xacxacx", "x.a.x.a.x"}, + {"((a(b){0}){3}){5}(h)", "y caramb$2", "say aaaaaaaaaaaaaaaah", "say ay caramba"}, + {"((a(b){0}){3}){5}h", "y caramb$2", "say aaaaaaaaaaaaaaaah", "say ay caramba"}, +} + +var replaceLiteralTests = []ReplaceTest{ + // Substitutions + {"a+", "($0)", "banana", "b($0)n($0)n($0)"}, + {"a+", "(${0})", "banana", "b(${0})n(${0})n(${0})"}, + {"a+", "(${0})$0", "banana", "b(${0})$0n(${0})$0n(${0})$0"}, + {"a+", "(${0})$0", "banana", "b(${0})$0n(${0})$0n(${0})$0"}, + {"hello, (.+)", "goodbye, ${1}", "hello, world", "goodbye, ${1}"}, + {"hello, (?P.+)", "goodbye, $noun!", "hello, world", "goodbye, $noun!"}, + {"hello, (?P.+)", "goodbye, ${noun}", 
"hello, world", "goodbye, ${noun}"}, + {"(?Phi)|(?Pbye)", "$x$x$x", "hi", "$x$x$x"}, + {"(?Phi)|(?Pbye)", "$x$x$x", "bye", "$x$x$x"}, + {"(?Phi)|(?Pbye)", "$xyz", "hi", "$xyz"}, + {"(?Phi)|(?Pbye)", "${x}yz", "hi", "${x}yz"}, + {"(?Phi)|(?Pbye)", "hello $$x", "hi", "hello $$x"}, + {"a+", "${oops", "aaa", "${oops"}, + {"a+", "$$", "aaa", "$$"}, + {"a+", "$", "aaa", "$"}, +} + +type ReplaceFuncTest struct { + pattern string + replacement func(string) string + input, output string +} + +var replaceFuncTests = []ReplaceFuncTest{ + {"[a-c]", func(s string) string { return "x" + s + "y" }, "defabcdef", "defxayxbyxcydef"}, + {"[a-c]+", func(s string) string { return "x" + s + "y" }, "defabcdef", "defxabcydef"}, + {"[a-c]*", func(s string) string { return "x" + s + "y" }, "defabcdef", "xydxyexyfxabcydxyexyfxy"}, +} + +func TestReplaceAll(t *testing.T) { + for _, tc := range replaceTests { + re, err := Compile(tc.pattern) + if err != nil { + t.Errorf("Unexpected error compiling %q: %v", tc.pattern, err) + continue + } + actual := re.ReplaceAllString(tc.input, tc.replacement) + if actual != tc.output { + t.Errorf("%q.ReplaceAllString(%q,%q) = %q; want %q", + tc.pattern, tc.input, tc.replacement, actual, tc.output) + } + // now try bytes + actual = string(re.ReplaceAll([]byte(tc.input), []byte(tc.replacement))) + if actual != tc.output { + t.Errorf("%q.ReplaceAll(%q,%q) = %q; want %q", + tc.pattern, tc.input, tc.replacement, actual, tc.output) + } + } +} + +func TestReplaceAllLiteral(t *testing.T) { + // Run ReplaceAll tests that do not have $ expansions. + for _, tc := range replaceTests { + if strings.Contains(tc.replacement, "$") { + continue + } + re, err := Compile(tc.pattern) + if err != nil { + t.Errorf("Unexpected error compiling %q: %v", tc.pattern, err) + continue + } + actual := re.ReplaceAllLiteralString(tc.input, tc.replacement) + if actual != tc.output { + t.Errorf("%q.ReplaceAllLiteralString(%q,%q) = %q; want %q", + tc.pattern, tc.input, tc.replacement, actual, tc.output) + } + // now try bytes + actual = string(re.ReplaceAllLiteral([]byte(tc.input), []byte(tc.replacement))) + if actual != tc.output { + t.Errorf("%q.ReplaceAllLiteral(%q,%q) = %q; want %q", + tc.pattern, tc.input, tc.replacement, actual, tc.output) + } + } + + // Run literal-specific tests. 
+ for _, tc := range replaceLiteralTests { + re, err := Compile(tc.pattern) + if err != nil { + t.Errorf("Unexpected error compiling %q: %v", tc.pattern, err) + continue + } + actual := re.ReplaceAllLiteralString(tc.input, tc.replacement) + if actual != tc.output { + t.Errorf("%q.ReplaceAllLiteralString(%q,%q) = %q; want %q", + tc.pattern, tc.input, tc.replacement, actual, tc.output) + } + // now try bytes + actual = string(re.ReplaceAllLiteral([]byte(tc.input), []byte(tc.replacement))) + if actual != tc.output { + t.Errorf("%q.ReplaceAllLiteral(%q,%q) = %q; want %q", + tc.pattern, tc.input, tc.replacement, actual, tc.output) + } + } +} + +func TestReplaceAllFunc(t *testing.T) { + for _, tc := range replaceFuncTests { + re, err := Compile(tc.pattern) + if err != nil { + t.Errorf("Unexpected error compiling %q: %v", tc.pattern, err) + continue + } + actual := re.ReplaceAllStringFunc(tc.input, tc.replacement) + if actual != tc.output { + t.Errorf("%q.ReplaceFunc(%q,fn) = %q; want %q", + tc.pattern, tc.input, actual, tc.output) + } + // now try bytes + actual = string(re.ReplaceAllFunc([]byte(tc.input), func(s []byte) []byte { return []byte(tc.replacement(string(s))) })) + if actual != tc.output { + t.Errorf("%q.ReplaceFunc(%q,fn) = %q; want %q", + tc.pattern, tc.input, actual, tc.output) + } + } +} + +type MetaTest struct { + pattern, output, literal string + isLiteral bool +} + +var metaTests = []MetaTest{ + {``, ``, ``, true}, + {`foo`, `foo`, `foo`, true}, + {`日本語+`, `日本語\+`, `日本語`, false}, + {`foo\.\$`, `foo\\\.\\\$`, `foo.$`, true}, // has meta but no operator + {`foo.\$`, `foo\.\\\$`, `foo`, false}, // has escaped operators and real operators + {`!@#$%^&*()_+-=[{]}\|,<.>/?~`, `!@#\$%\^&\*\(\)_\+-=\[\{\]\}\\\|,<\.>/\?~`, `!@#`, false}, +} + +var literalPrefixTests = []MetaTest{ + // See golang.org/issue/11175. + // output is unused. + {`^0^0$`, ``, `0`, false}, + {`^0^`, ``, ``, false}, + {`^0$`, ``, `0`, true}, + {`$0^`, ``, ``, false}, + {`$0$`, ``, ``, false}, + {`^^0$$`, ``, ``, false}, + {`^$^$`, ``, ``, false}, + {`$$0^^`, ``, ``, false}, + {`a\x{fffd}b`, ``, `a`, false}, + {`\x{fffd}b`, ``, ``, false}, + {"\ufffd", ``, ``, false}, +} + +func TestQuoteMeta(t *testing.T) { + for _, tc := range metaTests { + // Verify that QuoteMeta returns the expected string. + quoted := QuoteMeta(tc.pattern) + if quoted != tc.output { + t.Errorf("QuoteMeta(`%s`) = `%s`; want `%s`", + tc.pattern, quoted, tc.output) + continue + } + + // Verify that the quoted string is in fact treated as expected + // by Compile -- i.e. that it matches the original, unquoted string. + if tc.pattern != "" { + re, err := Compile(quoted) + if err != nil { + t.Errorf("Unexpected error compiling QuoteMeta(`%s`): %v", tc.pattern, err) + continue + } + src := "abc" + tc.pattern + "def" + repl := "xyz" + replaced := re.ReplaceAllString(src, repl) + expected := "abcxyzdef" + if replaced != expected { + t.Errorf("QuoteMeta(`%s`).Replace(`%s`,`%s`) = `%s`; want `%s`", + tc.pattern, src, repl, replaced, expected) + } + } + } +} + +func TestLiteralPrefix(t *testing.T) { + for _, tc := range append(metaTests, literalPrefixTests...) { + // Literal method needs to scan the pattern. 
+		re := MustCompile(tc.pattern)
+		str, complete := re.LiteralPrefix()
+		if complete != tc.isLiteral {
+			t.Errorf("LiteralPrefix(`%s`) = %t; want %t", tc.pattern, complete, tc.isLiteral)
+		}
+		if str != tc.literal {
+			t.Errorf("LiteralPrefix(`%s`) = `%s`; want `%s`", tc.pattern, str, tc.literal)
+		}
+	}
+}
+
+type subexpIndex struct {
+	name  string
+	index int
+}
+
+type subexpCase struct {
+	input   string
+	num     int
+	names   []string
+	indices []subexpIndex
+}
+
+var emptySubexpIndices = []subexpIndex{{"", -1}, {"missing", -1}}
+
+var subexpCases = []subexpCase{
+	{``, 0, nil, emptySubexpIndices},
+	{`.*`, 0, nil, emptySubexpIndices},
+	{`abba`, 0, nil, emptySubexpIndices},
+	{`ab(b)a`, 1, []string{"", ""}, emptySubexpIndices},
+	{`ab(.*)a`, 1, []string{"", ""}, emptySubexpIndices},
+	{`(.*)ab(.*)a`, 2, []string{"", "", ""}, emptySubexpIndices},
+	{`(.*)(ab)(.*)a`, 3, []string{"", "", "", ""}, emptySubexpIndices},
+	{`(.*)((a)b)(.*)a`, 4, []string{"", "", "", "", ""}, emptySubexpIndices},
+	{`(.*)(\(ab)(.*)a`, 3, []string{"", "", "", ""}, emptySubexpIndices},
+	{`(.*)(\(a\)b)(.*)a`, 3, []string{"", "", "", ""}, emptySubexpIndices},
+	{`(?P<foo>.*)(?P<bar>(a)b)(?P<foo>.*)a`, 4, []string{"", "foo", "bar", "", "foo"}, []subexpIndex{{"", -1}, {"missing", -1}, {"foo", 1}, {"bar", 2}}},
+}
+
+func TestSubexp(t *testing.T) {
+	for _, c := range subexpCases {
+		re := MustCompile(c.input)
+		n := re.NumSubexp()
+		if n != c.num {
+			t.Errorf("%q: NumSubexp = %d, want %d", c.input, n, c.num)
+			continue
+		}
+		names := re.SubexpNames()
+		if len(names) != 1+n {
+			t.Errorf("%q: len(SubexpNames) = %d, want %d", c.input, len(names), n)
+			continue
+		}
+		if c.names != nil {
+			for i := 0; i < 1+n; i++ {
+				if names[i] != c.names[i] {
+					t.Errorf("%q: SubexpNames[%d] = %q, want %q", c.input, i, names[i], c.names[i])
+				}
+			}
+		}
+		for _, subexp := range c.indices {
+			index := re.SubexpIndex(subexp.name)
+			if index != subexp.index {
+				t.Errorf("%q: SubexpIndex(%q) = %d, want %d", c.input, subexp.name, index, subexp.index)
+			}
+		}
+	}
+}
+
+var splitTests = []struct {
+	s   string
+	r   string
+	n   int
+	out []string
+}{
+	{"foo:and:bar", ":", -1, []string{"foo", "and", "bar"}},
+	{"foo:and:bar", ":", 1, []string{"foo:and:bar"}},
+	{"foo:and:bar", ":", 2, []string{"foo", "and:bar"}},
+	{"foo:and:bar", "foo", -1, []string{"", ":and:bar"}},
+	{"foo:and:bar", "bar", -1, []string{"foo:and:", ""}},
+	{"foo:and:bar", "baz", -1, []string{"foo:and:bar"}},
+	{"baabaab", "a", -1, []string{"b", "", "b", "", "b"}},
+	{"baabaab", "a*", -1, []string{"b", "b", "b"}},
+	{"baabaab", "ba*", -1, []string{"", "", "", ""}},
+	{"foobar", "f*b*", -1, []string{"", "o", "o", "a", "r"}},
+	{"foobar", "f+.*b+", -1, []string{"", "ar"}},
+	{"foobooboar", "o{2}", -1, []string{"f", "b", "boar"}},
+	{"a,b,c,d,e,f", ",", 3, []string{"a", "b", "c,d,e,f"}},
+	{"a,b,c,d,e,f", ",", 0, nil},
+	{",", ",", -1, []string{"", ""}},
+	{",,,", ",", -1, []string{"", "", "", ""}},
+	{"", ",", -1, []string{""}},
+	{"", ".*", -1, []string{""}},
+	{"", ".+", -1, []string{""}},
+	{"", "", -1, []string{}},
+	{"foobar", "", -1, []string{"f", "o", "o", "b", "a", "r"}},
+	{"abaabaccadaaae", "a*", 5, []string{"", "b", "b", "c", "cadaaae"}},
+	{":x:y:z:", ":", -1, []string{"", "x", "y", "z", ""}},
+}
+
+func TestSplit(t *testing.T) {
+	for i, test := range splitTests {
+		re, err := Compile(test.r)
+		if err != nil {
+			t.Errorf("#%d: %q: compile error: %s", i, test.r, err.Error())
+			continue
+		}
+
+		split := re.Split(test.s, test.n)
+		if !reflect.DeepEqual(split, test.out) {
+			t.Errorf("#%d: %q: 
got %q; want %q", i, test.r, split, test.out) + } + + if QuoteMeta(test.r) == test.r { + strsplit := strings.SplitN(test.s, test.r, test.n) + if !reflect.DeepEqual(split, strsplit) { + t.Errorf("#%d: Split(%q, %q, %d): regexp vs strings mismatch\nregexp=%q\nstrings=%q", i, test.s, test.r, test.n, split, strsplit) + } + } + } +} + +// The following sequence of Match calls used to panic. See issue #12980. +func TestParseAndCompile(t *testing.T) { + expr := "a$" + s := "a\nb" + + for i, tc := range []struct { + reFlags syntax.Flags + expMatch bool + }{ + {syntax.Perl | syntax.OneLine, false}, + {syntax.Perl &^ syntax.OneLine, true}, + } { + parsed, err := syntax.Parse(expr, tc.reFlags) + if err != nil { + t.Fatalf("%d: parse: %v", i, err) + } + re, err := Compile(parsed.String()) + if err != nil { + t.Fatalf("%d: compile: %v", i, err) + } + if match := re.MatchString(s); match != tc.expMatch { + t.Errorf("%d: %q.MatchString(%q)=%t; expected=%t", i, re, s, match, tc.expMatch) + } + } +} + +// Check that one-pass cutoff does trigger. +func TestOnePassCutoff(t *testing.T) { + re, err := syntax.Parse(`^x{1,1000}y{1,1000}$`, syntax.Perl) + if err != nil { + t.Fatalf("parse: %v", err) + } + p, err := syntax.Compile(re.Simplify()) + if err != nil { + t.Fatalf("compile: %v", err) + } + if compileOnePass(p) != nil { + t.Fatalf("makeOnePass succeeded; wanted nil") + } +} + +// Check that the same machine can be used with the standard matcher +// and then the backtracker when there are no captures. +func TestSwitchBacktrack(t *testing.T) { + re := MustCompile(`a|b`) + long := make([]byte, maxBacktrackVector+1) + + // The following sequence of Match calls used to panic. See issue #10319. + re.Match(long) // triggers standard matcher + re.Match(long[:1]) // triggers backtracker +} + +func BenchmarkFind(b *testing.B) { + b.StopTimer() + re := MustCompile("a+b+") + wantSubs := "aaabb" + s := []byte("acbb" + wantSubs + "dd") + b.StartTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + subs := re.Find(s) + if string(subs) != wantSubs { + b.Fatalf("Find(%q) = %q; want %q", s, subs, wantSubs) + } + } +} + +func BenchmarkFindAllNoMatches(b *testing.B) { + re := MustCompile("a+b+") + s := []byte("acddee") + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + all := re.FindAll(s, -1) + if all != nil { + b.Fatalf("FindAll(%q) = %q; want nil", s, all) + } + } +} + +func BenchmarkFindString(b *testing.B) { + b.StopTimer() + re := MustCompile("a+b+") + wantSubs := "aaabb" + s := "acbb" + wantSubs + "dd" + b.StartTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + subs := re.FindString(s) + if subs != wantSubs { + b.Fatalf("FindString(%q) = %q; want %q", s, subs, wantSubs) + } + } +} + +func BenchmarkFindSubmatch(b *testing.B) { + b.StopTimer() + re := MustCompile("a(a+b+)b") + wantSubs := "aaabb" + s := []byte("acbb" + wantSubs + "dd") + b.StartTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + subs := re.FindSubmatch(s) + if string(subs[0]) != wantSubs { + b.Fatalf("FindSubmatch(%q)[0] = %q; want %q", s, subs[0], wantSubs) + } + if string(subs[1]) != "aab" { + b.Fatalf("FindSubmatch(%q)[1] = %q; want %q", s, subs[1], "aab") + } + } +} + +func BenchmarkFindStringSubmatch(b *testing.B) { + b.StopTimer() + re := MustCompile("a(a+b+)b") + wantSubs := "aaabb" + s := "acbb" + wantSubs + "dd" + b.StartTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + subs := re.FindStringSubmatch(s) + if subs[0] != wantSubs { + b.Fatalf("FindStringSubmatch(%q)[0] = %q; want %q", s, subs[0], wantSubs) 
+ } + if subs[1] != "aab" { + b.Fatalf("FindStringSubmatch(%q)[1] = %q; want %q", s, subs[1], "aab") + } + } +} + +func BenchmarkLiteral(b *testing.B) { + x := strings.Repeat("x", 50) + "y" + b.StopTimer() + re := MustCompile("y") + b.StartTimer() + for i := 0; i < b.N; i++ { + if !re.MatchString(x) { + b.Fatalf("no match!") + } + } +} + +func BenchmarkNotLiteral(b *testing.B) { + x := strings.Repeat("x", 50) + "y" + b.StopTimer() + re := MustCompile(".y") + b.StartTimer() + for i := 0; i < b.N; i++ { + if !re.MatchString(x) { + b.Fatalf("no match!") + } + } +} + +func BenchmarkMatchClass(b *testing.B) { + b.StopTimer() + x := strings.Repeat("xxxx", 20) + "w" + re := MustCompile("[abcdw]") + b.StartTimer() + for i := 0; i < b.N; i++ { + if !re.MatchString(x) { + b.Fatalf("no match!") + } + } +} + +func BenchmarkMatchClass_InRange(b *testing.B) { + b.StopTimer() + // 'b' is between 'a' and 'c', so the charclass + // range checking is no help here. + x := strings.Repeat("bbbb", 20) + "c" + re := MustCompile("[ac]") + b.StartTimer() + for i := 0; i < b.N; i++ { + if !re.MatchString(x) { + b.Fatalf("no match!") + } + } +} + +func BenchmarkReplaceAll(b *testing.B) { + x := "abcdefghijklmnopqrstuvwxyz" + b.StopTimer() + re := MustCompile("[cjrw]") + b.StartTimer() + for i := 0; i < b.N; i++ { + re.ReplaceAllString(x, "") + } +} + +func BenchmarkAnchoredLiteralShortNonMatch(b *testing.B) { + b.StopTimer() + x := []byte("abcdefghijklmnopqrstuvwxyz") + re := MustCompile("^zbc(d|e)") + b.StartTimer() + for i := 0; i < b.N; i++ { + re.Match(x) + } +} + +func BenchmarkAnchoredLiteralLongNonMatch(b *testing.B) { + b.StopTimer() + x := []byte("abcdefghijklmnopqrstuvwxyz") + for i := 0; i < 15; i++ { + x = append(x, x...) + } + re := MustCompile("^zbc(d|e)") + b.StartTimer() + for i := 0; i < b.N; i++ { + re.Match(x) + } +} + +func BenchmarkAnchoredShortMatch(b *testing.B) { + b.StopTimer() + x := []byte("abcdefghijklmnopqrstuvwxyz") + re := MustCompile("^.bc(d|e)") + b.StartTimer() + for i := 0; i < b.N; i++ { + re.Match(x) + } +} + +func BenchmarkAnchoredLongMatch(b *testing.B) { + b.StopTimer() + x := []byte("abcdefghijklmnopqrstuvwxyz") + for i := 0; i < 15; i++ { + x = append(x, x...) 
+ } + re := MustCompile("^.bc(d|e)") + b.StartTimer() + for i := 0; i < b.N; i++ { + re.Match(x) + } +} + +func BenchmarkOnePassShortA(b *testing.B) { + b.StopTimer() + x := []byte("abcddddddeeeededd") + re := MustCompile("^.bc(d|e)*$") + b.StartTimer() + for i := 0; i < b.N; i++ { + re.Match(x) + } +} + +func BenchmarkNotOnePassShortA(b *testing.B) { + b.StopTimer() + x := []byte("abcddddddeeeededd") + re := MustCompile(".bc(d|e)*$") + b.StartTimer() + for i := 0; i < b.N; i++ { + re.Match(x) + } +} + +func BenchmarkOnePassShortB(b *testing.B) { + b.StopTimer() + x := []byte("abcddddddeeeededd") + re := MustCompile("^.bc(?:d|e)*$") + b.StartTimer() + for i := 0; i < b.N; i++ { + re.Match(x) + } +} + +func BenchmarkNotOnePassShortB(b *testing.B) { + b.StopTimer() + x := []byte("abcddddddeeeededd") + re := MustCompile(".bc(?:d|e)*$") + b.StartTimer() + for i := 0; i < b.N; i++ { + re.Match(x) + } +} + +func BenchmarkOnePassLongPrefix(b *testing.B) { + b.StopTimer() + x := []byte("abcdefghijklmnopqrstuvwxyz") + re := MustCompile("^abcdefghijklmnopqrstuvwxyz.*$") + b.StartTimer() + for i := 0; i < b.N; i++ { + re.Match(x) + } +} + +func BenchmarkOnePassLongNotPrefix(b *testing.B) { + b.StopTimer() + x := []byte("abcdefghijklmnopqrstuvwxyz") + re := MustCompile("^.bcdefghijklmnopqrstuvwxyz.*$") + b.StartTimer() + for i := 0; i < b.N; i++ { + re.Match(x) + } +} + +func BenchmarkMatchParallelShared(b *testing.B) { + x := []byte("this is a long line that contains foo bar baz") + re := MustCompile("foo (ba+r)? baz") + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + re.Match(x) + } + }) +} + +func BenchmarkMatchParallelCopied(b *testing.B) { + x := []byte("this is a long line that contains foo bar baz") + re := MustCompile("foo (ba+r)? baz") + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + re := re.Copy() + for pb.Next() { + re.Match(x) + } + }) +} + +var sink string + +func BenchmarkQuoteMetaAll(b *testing.B) { + specials := make([]byte, 0) + for i := byte(0); i < utf8.RuneSelf; i++ { + if special(i) { + specials = append(specials, i) + } + } + s := string(specials) + b.SetBytes(int64(len(s))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + sink = QuoteMeta(s) + } +} + +func BenchmarkQuoteMetaNone(b *testing.B) { + s := "abcdefghijklmnopqrstuvwxyz" + b.SetBytes(int64(len(s))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + sink = QuoteMeta(s) + } +} + +var compileBenchData = []struct{ name, re string }{ + {"Onepass", `^a.[l-nA-Cg-j]?e$`}, + {"Medium", `^((a|b|[d-z0-9])*(日){4,5}.)+$`}, + {"Hard", strings.Repeat(`((abc)*|`, 50) + strings.Repeat(`)`, 50)}, +} + +func BenchmarkCompile(b *testing.B) { + for _, data := range compileBenchData { + b.Run(data.name, func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + if _, err := Compile(data.re); err != nil { + b.Fatal(err) + } + } + }) + } +} + +func TestDeepEqual(t *testing.T) { + re1 := MustCompile("a.*b.*c.*d") + re2 := MustCompile("a.*b.*c.*d") + if !reflect.DeepEqual(re1, re2) { // has always been true, since Go 1. 
+ t.Errorf("DeepEqual(re1, re2) = false, want true") + } + + re1.MatchString("abcdefghijklmn") + if !reflect.DeepEqual(re1, re2) { + t.Errorf("DeepEqual(re1, re2) = false, want true") + } + + re2.MatchString("abcdefghijklmn") + if !reflect.DeepEqual(re1, re2) { + t.Errorf("DeepEqual(re1, re2) = false, want true") + } + + re2.MatchString(strings.Repeat("abcdefghijklmn", 100)) + if !reflect.DeepEqual(re1, re2) { + t.Errorf("DeepEqual(re1, re2) = false, want true") + } +} + +var minInputLenTests = []struct { + Regexp string + min int +}{ + {``, 0}, + {`a`, 1}, + {`aa`, 2}, + {`(aa)a`, 3}, + {`(?:aa)a`, 3}, + {`a?a`, 1}, + {`(aaa)|(aa)`, 2}, + {`(aa)+a`, 3}, + {`(aa)*a`, 1}, + {`(aa){3,5}`, 6}, + {`[a-z]`, 1}, + {`日`, 3}, +} + +func TestMinInputLen(t *testing.T) { + for _, tt := range minInputLenTests { + re, _ := syntax.Parse(tt.Regexp, syntax.Perl) + m := minInputLen(re) + if m != tt.min { + t.Errorf("regexp %#q has minInputLen %d, should be %d", tt.Regexp, m, tt.min) + } + } +} + +func TestUnmarshalText(t *testing.T) { + unmarshaled := new(Regexp) + for i := range goodRe { + re := compileTest(t, goodRe[i], "") + marshaled, err := re.MarshalText() + if err != nil { + t.Errorf("regexp %#q failed to marshal: %s", re, err) + continue + } + if err := unmarshaled.UnmarshalText(marshaled); err != nil { + t.Errorf("regexp %#q failed to unmarshal: %s", re, err) + continue + } + if unmarshaled.String() != goodRe[i] { + t.Errorf("UnmarshalText returned unexpected value: %s", unmarshaled.String()) + } + } + t.Run("invalid pattern", func(t *testing.T) { + re := new(Regexp) + err := re.UnmarshalText([]byte(`\`)) + if err == nil { + t.Error("unexpected success") + } + }) +} diff --git a/platform/dbops/binaries/go/go/src/regexp/backtrack.go b/platform/dbops/binaries/go/go/src/regexp/backtrack.go new file mode 100644 index 0000000000000000000000000000000000000000..7c37c66a80c787e0ed1ea4ccc886eecb9df05d2d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/regexp/backtrack.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// backtrack is a regular expression search with submatch +// tracking for small regular expressions and texts. It allocates +// a bit vector with (length of input) * (length of prog) bits, +// to make sure it never explores the same (character position, instruction) +// state multiple times. This limits the search to run in time linear in +// the length of the test. +// +// backtrack is a fast replacement for the NFA code on small +// regexps when onepass cannot be used. + +package regexp + +import ( + "regexp/syntax" + "sync" +) + +// A job is an entry on the backtracker's job stack. It holds +// the instruction pc and the position in the input. +type job struct { + pc uint32 + arg bool + pos int +} + +const ( + visitedBits = 32 + maxBacktrackProg = 500 // len(prog.Inst) <= max + maxBacktrackVector = 256 * 1024 // bit vector size <= max (bits) +) + +// bitState holds state for the backtracker. +type bitState struct { + end int + cap []int + matchcap []int + jobs []job + visited []uint32 + + inputs inputs +} + +var bitStatePool sync.Pool + +func newBitState() *bitState { + b, ok := bitStatePool.Get().(*bitState) + if !ok { + b = new(bitState) + } + return b +} + +func freeBitState(b *bitState) { + b.inputs.clear() + bitStatePool.Put(b) +} + +// maxBitStateLen returns the maximum length of a string to search with +// the backtracker using prog. 
+
+// maxBitStateLen returns the maximum length of a string to search with
+// the backtracker using prog.
+func maxBitStateLen(prog *syntax.Prog) int {
+	if !shouldBacktrack(prog) {
+		return 0
+	}
+	return maxBacktrackVector / len(prog.Inst)
+}
+
+// shouldBacktrack reports whether the program is small enough
+// for the backtracker to run.
+func shouldBacktrack(prog *syntax.Prog) bool {
+	return len(prog.Inst) <= maxBacktrackProg
+}
+
+// reset resets the state of the backtracker.
+// end is the end position in the input.
+// ncap is the number of captures.
+func (b *bitState) reset(prog *syntax.Prog, end int, ncap int) {
+	b.end = end
+
+	if cap(b.jobs) == 0 {
+		b.jobs = make([]job, 0, 256)
+	} else {
+		b.jobs = b.jobs[:0]
+	}
+
+	visitedSize := (len(prog.Inst)*(end+1) + visitedBits - 1) / visitedBits
+	if cap(b.visited) < visitedSize {
+		b.visited = make([]uint32, visitedSize, maxBacktrackVector/visitedBits)
+	} else {
+		b.visited = b.visited[:visitedSize]
+		clear(b.visited) // set to 0
+	}
+
+	if cap(b.cap) < ncap {
+		b.cap = make([]int, ncap)
+	} else {
+		b.cap = b.cap[:ncap]
+	}
+	for i := range b.cap {
+		b.cap[i] = -1
+	}
+
+	if cap(b.matchcap) < ncap {
+		b.matchcap = make([]int, ncap)
+	} else {
+		b.matchcap = b.matchcap[:ncap]
+	}
+	for i := range b.matchcap {
+		b.matchcap[i] = -1
+	}
+}
+
+// shouldVisit reports whether the combination of (pc, pos) has not
+// been visited yet.
+func (b *bitState) shouldVisit(pc uint32, pos int) bool {
+	n := uint(int(pc)*(b.end+1) + pos)
+	if b.visited[n/visitedBits]&(1<<(n&(visitedBits-1))) != 0 {
+		return false
+	}
+	b.visited[n/visitedBits] |= 1 << (n & (visitedBits - 1))
+	return true
+}
+
+// push pushes (pc, pos, arg) onto the job stack if it should be
+// visited.
+func (b *bitState) push(re *Regexp, pc uint32, pos int, arg bool) {
+	// Only check shouldVisit when arg is false.
+	// When arg is true, we are continuing a previous visit.
+	if re.prog.Inst[pc].Op != syntax.InstFail && (arg || b.shouldVisit(pc, pos)) {
+		b.jobs = append(b.jobs, job{pc: pc, arg: arg, pos: pos})
+	}
+}
+
+// tryBacktrack runs a backtracking search starting at pos.
+func (re *Regexp) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool {
+	longest := re.longest
+
+	b.push(re, pc, pos, false)
+	for len(b.jobs) > 0 {
+		l := len(b.jobs) - 1
+		// Pop job off the stack.
+		pc := b.jobs[l].pc
+		pos := b.jobs[l].pos
+		arg := b.jobs[l].arg
+		b.jobs = b.jobs[:l]
+
+		// Optimization: rather than push and pop,
+		// code that is going to Push and continue
+		// the loop simply updates ip, p, and arg
+		// and jumps to CheckAndLoop. We have to
+		// do the ShouldVisit check that Push
+		// would have, but we avoid the stack
+		// manipulation.
+		goto Skip
+	CheckAndLoop:
+		if !b.shouldVisit(pc, pos) {
+			continue
+		}
+	Skip:
+
+		inst := &re.prog.Inst[pc]
+
+		switch inst.Op {
+		default:
+			panic("bad inst")
+		case syntax.InstFail:
+			panic("unexpected InstFail")
+		case syntax.InstAlt:
+			// Cannot just
+			//   b.push(inst.Out, pos, false)
+			//   b.push(inst.Arg, pos, false)
+			// If during the processing of inst.Out, we encounter
+			// inst.Arg via another path, we want to process it then.
+			// Pushing it here will inhibit that. Instead, re-push
+			// inst with arg==true as a reminder to push inst.Arg out
+			// later.
+			if arg {
+				// Finished inst.Out; try inst.Arg.
+				arg = false
+				pc = inst.Arg
+				goto CheckAndLoop
+			} else {
+				b.push(re, pc, pos, true)
+				pc = inst.Out
+				goto CheckAndLoop
+			}
+
+		case syntax.InstAltMatch:
+			// One opcode consumes runes; the other leads to match.
+			switch re.prog.Inst[inst.Out].Op {
+			case syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
+				// inst.Arg is the match.
+				b.push(re, inst.Arg, pos, false)
+				pc = inst.Arg
+				pos = b.end
+				goto CheckAndLoop
+			}
+			// inst.Out is the match - non-greedy
+			b.push(re, inst.Out, b.end, false)
+			pc = inst.Out
+			pos = b.end
+			goto CheckAndLoop
+
+		case syntax.InstRune:
+			r, width := i.step(pos)
+			if !inst.MatchRune(r) {
+				continue
+			}
+			pos += width
+			pc = inst.Out
+			goto CheckAndLoop
+
+		case syntax.InstRune1:
+			r, width := i.step(pos)
+			if r != inst.Rune[0] {
+				continue
+			}
+			pos += width
+			pc = inst.Out
+			goto CheckAndLoop
+
+		case syntax.InstRuneAnyNotNL:
+			r, width := i.step(pos)
+			if r == '\n' || r == endOfText {
+				continue
+			}
+			pos += width
+			pc = inst.Out
+			goto CheckAndLoop
+
+		case syntax.InstRuneAny:
+			r, width := i.step(pos)
+			if r == endOfText {
+				continue
+			}
+			pos += width
+			pc = inst.Out
+			goto CheckAndLoop
+
+		case syntax.InstCapture:
+			if arg {
+				// Finished inst.Out; restore the old value.
+				b.cap[inst.Arg] = pos
+				continue
+			} else {
+				if inst.Arg < uint32(len(b.cap)) {
+					// Capture pos to register, but save old value.
+					b.push(re, pc, b.cap[inst.Arg], true) // come back when we're done.
+					b.cap[inst.Arg] = pos
+				}
+				pc = inst.Out
+				goto CheckAndLoop
+			}
+
+		case syntax.InstEmptyWidth:
+			flag := i.context(pos)
+			if !flag.match(syntax.EmptyOp(inst.Arg)) {
+				continue
+			}
+			pc = inst.Out
+			goto CheckAndLoop
+
+		case syntax.InstNop:
+			pc = inst.Out
+			goto CheckAndLoop
+
+		case syntax.InstMatch:
+			// We found a match. If the caller doesn't care
+			// where the match is, no point going further.
+			if len(b.cap) == 0 {
+				return true
+			}
+
+			// Record best match so far.
+			// Only need to check end point, because this entire
+			// call is only considering one start position.
+			if len(b.cap) > 1 {
+				b.cap[1] = pos
+			}
+			if old := b.matchcap[1]; old == -1 || (longest && pos > 0 && pos > old) {
+				copy(b.matchcap, b.cap)
+			}
+
+			// If going for first match, we're done.
+			if !longest {
+				return true
+			}
+
+			// If we used the entire text, no longer match is possible.
+			if pos == b.end {
+				return true
+			}
+
+			// Otherwise, continue on in hope of a longer match.
+			continue
+		}
+	}
+
+	return longest && len(b.matchcap) > 1 && b.matchcap[1] >= 0
+}
+
+// backtrack runs a backtracking search of prog on the input starting at pos.
+func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []int) []int {
+	startCond := re.cond
+	if startCond == ^syntax.EmptyOp(0) { // impossible
+		return nil
+	}
+	if startCond&syntax.EmptyBeginText != 0 && pos != 0 {
+		// Anchored match, past beginning of text.
+		return nil
+	}
+
+	b := newBitState()
+	i, end := b.inputs.init(nil, ib, is)
+	b.reset(re.prog, end, ncap)
+
+	// Anchored search must start at the beginning of the input
+	if startCond&syntax.EmptyBeginText != 0 {
+		if len(b.cap) > 0 {
+			b.cap[0] = pos
+		}
+		if !re.tryBacktrack(b, i, uint32(re.prog.Start), pos) {
+			freeBitState(b)
+			return nil
+		}
+	} else {
+
+		// Unanchored search, starting from each possible text position.
+		// Notice that we have to try the empty string at the end of
+		// the text, so the loop condition is pos <= end, not pos < end.
+		// This looks like it's quadratic in the size of the text,
+		// but we are not clearing visited between calls to TrySearch,
+		// so no work is duplicated and it ends up still being linear.
+ width := -1 + for ; pos <= end && width != 0; pos += width { + if len(re.prefix) > 0 { + // Match requires literal prefix; fast search for it. + advance := i.index(re, pos) + if advance < 0 { + freeBitState(b) + return nil + } + pos += advance + } + + if len(b.cap) > 0 { + b.cap[0] = pos + } + if re.tryBacktrack(b, i, uint32(re.prog.Start), pos) { + // Match must be leftmost; done. + goto Match + } + _, width = i.step(pos) + } + freeBitState(b) + return nil + } + +Match: + dstCap = append(dstCap, b.matchcap...) + freeBitState(b) + return dstCap +} diff --git a/platform/dbops/binaries/go/go/src/regexp/example_test.go b/platform/dbops/binaries/go/go/src/regexp/example_test.go new file mode 100644 index 0000000000000000000000000000000000000000..707445f9ff02f70b463be4b359a08395f6c1a6bc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/regexp/example_test.go @@ -0,0 +1,447 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regexp_test + +import ( + "fmt" + "regexp" + "strings" +) + +func Example() { + // Compile the expression once, usually at init time. + // Use raw strings to avoid having to quote the backslashes. + var validID = regexp.MustCompile(`^[a-z]+\[[0-9]+\]$`) + + fmt.Println(validID.MatchString("adam[23]")) + fmt.Println(validID.MatchString("eve[7]")) + fmt.Println(validID.MatchString("Job[48]")) + fmt.Println(validID.MatchString("snakey")) + // Output: + // true + // true + // false + // false +} + +func ExampleMatch() { + matched, err := regexp.Match(`foo.*`, []byte(`seafood`)) + fmt.Println(matched, err) + matched, err = regexp.Match(`bar.*`, []byte(`seafood`)) + fmt.Println(matched, err) + matched, err = regexp.Match(`a(b`, []byte(`seafood`)) + fmt.Println(matched, err) + + // Output: + // true + // false + // false error parsing regexp: missing closing ): `a(b` +} + +func ExampleMatchString() { + matched, err := regexp.MatchString(`foo.*`, "seafood") + fmt.Println(matched, err) + matched, err = regexp.MatchString(`bar.*`, "seafood") + fmt.Println(matched, err) + matched, err = regexp.MatchString(`a(b`, "seafood") + fmt.Println(matched, err) + // Output: + // true + // false + // false error parsing regexp: missing closing ): `a(b` +} + +func ExampleQuoteMeta() { + fmt.Println(regexp.QuoteMeta(`Escaping symbols like: .+*?()|[]{}^$`)) + // Output: + // Escaping symbols like: \.\+\*\?\(\)\|\[\]\{\}\^\$ +} + +func ExampleRegexp_Find() { + re := regexp.MustCompile(`foo.?`) + fmt.Printf("%q\n", re.Find([]byte(`seafood fool`))) + + // Output: + // "food" +} + +func ExampleRegexp_FindAll() { + re := regexp.MustCompile(`foo.?`) + fmt.Printf("%q\n", re.FindAll([]byte(`seafood fool`), -1)) + + // Output: + // ["food" "fool"] +} + +func ExampleRegexp_FindAllSubmatch() { + re := regexp.MustCompile(`foo(.?)`) + fmt.Printf("%q\n", re.FindAllSubmatch([]byte(`seafood fool`), -1)) + + // Output: + // [["food" "d"] ["fool" "l"]] +} + +func ExampleRegexp_FindSubmatch() { + re := regexp.MustCompile(`foo(.?)`) + fmt.Printf("%q\n", re.FindSubmatch([]byte(`seafood fool`))) + + // Output: + // ["food" "d"] +} + +func ExampleRegexp_Match() { + re := regexp.MustCompile(`foo.?`) + fmt.Println(re.Match([]byte(`seafood fool`))) + fmt.Println(re.Match([]byte(`something else`))) + + // Output: + // true + // false +} + +func ExampleRegexp_FindString() { + re := regexp.MustCompile(`foo.?`) + fmt.Printf("%q\n", re.FindString("seafood fool")) + fmt.Printf("%q\n", 
re.FindString("meat")) + // Output: + // "food" + // "" +} + +func ExampleRegexp_FindStringIndex() { + re := regexp.MustCompile(`ab?`) + fmt.Println(re.FindStringIndex("tablett")) + fmt.Println(re.FindStringIndex("foo") == nil) + // Output: + // [1 3] + // true +} + +func ExampleRegexp_FindStringSubmatch() { + re := regexp.MustCompile(`a(x*)b(y|z)c`) + fmt.Printf("%q\n", re.FindStringSubmatch("-axxxbyc-")) + fmt.Printf("%q\n", re.FindStringSubmatch("-abzc-")) + // Output: + // ["axxxbyc" "xxx" "y"] + // ["abzc" "" "z"] +} + +func ExampleRegexp_FindAllString() { + re := regexp.MustCompile(`a.`) + fmt.Println(re.FindAllString("paranormal", -1)) + fmt.Println(re.FindAllString("paranormal", 2)) + fmt.Println(re.FindAllString("graal", -1)) + fmt.Println(re.FindAllString("none", -1)) + // Output: + // [ar an al] + // [ar an] + // [aa] + // [] +} + +func ExampleRegexp_FindAllStringSubmatch() { + re := regexp.MustCompile(`a(x*)b`) + fmt.Printf("%q\n", re.FindAllStringSubmatch("-ab-", -1)) + fmt.Printf("%q\n", re.FindAllStringSubmatch("-axxb-", -1)) + fmt.Printf("%q\n", re.FindAllStringSubmatch("-ab-axb-", -1)) + fmt.Printf("%q\n", re.FindAllStringSubmatch("-axxb-ab-", -1)) + // Output: + // [["ab" ""]] + // [["axxb" "xx"]] + // [["ab" ""] ["axb" "x"]] + // [["axxb" "xx"] ["ab" ""]] +} + +func ExampleRegexp_FindAllStringSubmatchIndex() { + re := regexp.MustCompile(`a(x*)b`) + // Indices: + // 01234567 012345678 + // -ab-axb- -axxb-ab- + fmt.Println(re.FindAllStringSubmatchIndex("-ab-", -1)) + fmt.Println(re.FindAllStringSubmatchIndex("-axxb-", -1)) + fmt.Println(re.FindAllStringSubmatchIndex("-ab-axb-", -1)) + fmt.Println(re.FindAllStringSubmatchIndex("-axxb-ab-", -1)) + fmt.Println(re.FindAllStringSubmatchIndex("-foo-", -1)) + // Output: + // [[1 3 2 2]] + // [[1 5 2 4]] + // [[1 3 2 2] [4 7 5 6]] + // [[1 5 2 4] [6 8 7 7]] + // [] +} + +func ExampleRegexp_FindSubmatchIndex() { + re := regexp.MustCompile(`a(x*)b`) + // Indices: + // 01234567 012345678 + // -ab-axb- -axxb-ab- + fmt.Println(re.FindSubmatchIndex([]byte("-ab-"))) + fmt.Println(re.FindSubmatchIndex([]byte("-axxb-"))) + fmt.Println(re.FindSubmatchIndex([]byte("-ab-axb-"))) + fmt.Println(re.FindSubmatchIndex([]byte("-axxb-ab-"))) + fmt.Println(re.FindSubmatchIndex([]byte("-foo-"))) + // Output: + // [1 3 2 2] + // [1 5 2 4] + // [1 3 2 2] + // [1 5 2 4] + // [] +} + +func ExampleRegexp_Longest() { + re := regexp.MustCompile(`a(|b)`) + fmt.Println(re.FindString("ab")) + re.Longest() + fmt.Println(re.FindString("ab")) + // Output: + // a + // ab +} + +func ExampleRegexp_MatchString() { + re := regexp.MustCompile(`(gopher){2}`) + fmt.Println(re.MatchString("gopher")) + fmt.Println(re.MatchString("gophergopher")) + fmt.Println(re.MatchString("gophergophergopher")) + // Output: + // false + // true + // true +} + +func ExampleRegexp_NumSubexp() { + re0 := regexp.MustCompile(`a.`) + fmt.Printf("%d\n", re0.NumSubexp()) + + re := regexp.MustCompile(`(.*)((a)b)(.*)a`) + fmt.Println(re.NumSubexp()) + // Output: + // 0 + // 4 +} + +func ExampleRegexp_ReplaceAll() { + re := regexp.MustCompile(`a(x*)b`) + fmt.Printf("%s\n", re.ReplaceAll([]byte("-ab-axxb-"), []byte("T"))) + fmt.Printf("%s\n", re.ReplaceAll([]byte("-ab-axxb-"), []byte("$1"))) + fmt.Printf("%s\n", re.ReplaceAll([]byte("-ab-axxb-"), []byte("$1W"))) + fmt.Printf("%s\n", re.ReplaceAll([]byte("-ab-axxb-"), []byte("${1}W"))) + + re2 := regexp.MustCompile(`a(?P<1W>x*)b`) + fmt.Printf("%s\n", re2.ReplaceAll([]byte("-ab-axxb-"), []byte("$1W"))) + fmt.Printf("%s\n", 
re2.ReplaceAll([]byte("-ab-axxb-"), []byte("${1}W"))) + + // Output: + // -T-T- + // --xx- + // --- + // -W-xxW- + // --xx- + // -W-xxW- +} + +func ExampleRegexp_ReplaceAllLiteralString() { + re := regexp.MustCompile(`a(x*)b`) + fmt.Println(re.ReplaceAllLiteralString("-ab-axxb-", "T")) + fmt.Println(re.ReplaceAllLiteralString("-ab-axxb-", "$1")) + fmt.Println(re.ReplaceAllLiteralString("-ab-axxb-", "${1}")) + // Output: + // -T-T- + // -$1-$1- + // -${1}-${1}- +} + +func ExampleRegexp_ReplaceAllString() { + re := regexp.MustCompile(`a(x*)b`) + fmt.Println(re.ReplaceAllString("-ab-axxb-", "T")) + fmt.Println(re.ReplaceAllString("-ab-axxb-", "$1")) + fmt.Println(re.ReplaceAllString("-ab-axxb-", "$1W")) + fmt.Println(re.ReplaceAllString("-ab-axxb-", "${1}W")) + + re2 := regexp.MustCompile(`a(?P<1W>x*)b`) + fmt.Printf("%s\n", re2.ReplaceAllString("-ab-axxb-", "$1W")) + fmt.Println(re.ReplaceAllString("-ab-axxb-", "${1}W")) + + // Output: + // -T-T- + // --xx- + // --- + // -W-xxW- + // --xx- + // -W-xxW- +} + +func ExampleRegexp_ReplaceAllStringFunc() { + re := regexp.MustCompile(`[^aeiou]`) + fmt.Println(re.ReplaceAllStringFunc("seafood fool", strings.ToUpper)) + // Output: + // SeaFooD FooL +} + +func ExampleRegexp_SubexpNames() { + re := regexp.MustCompile(`(?P[a-zA-Z]+) (?P[a-zA-Z]+)`) + fmt.Println(re.MatchString("Alan Turing")) + fmt.Printf("%q\n", re.SubexpNames()) + reversed := fmt.Sprintf("${%s} ${%s}", re.SubexpNames()[2], re.SubexpNames()[1]) + fmt.Println(reversed) + fmt.Println(re.ReplaceAllString("Alan Turing", reversed)) + // Output: + // true + // ["" "first" "last"] + // ${last} ${first} + // Turing Alan +} + +func ExampleRegexp_SubexpIndex() { + re := regexp.MustCompile(`(?P[a-zA-Z]+) (?P[a-zA-Z]+)`) + fmt.Println(re.MatchString("Alan Turing")) + matches := re.FindStringSubmatch("Alan Turing") + lastIndex := re.SubexpIndex("last") + fmt.Printf("last => %d\n", lastIndex) + fmt.Println(matches[lastIndex]) + // Output: + // true + // last => 2 + // Turing +} + +func ExampleRegexp_Split() { + a := regexp.MustCompile(`a`) + fmt.Println(a.Split("banana", -1)) + fmt.Println(a.Split("banana", 0)) + fmt.Println(a.Split("banana", 1)) + fmt.Println(a.Split("banana", 2)) + zp := regexp.MustCompile(`z+`) + fmt.Println(zp.Split("pizza", -1)) + fmt.Println(zp.Split("pizza", 0)) + fmt.Println(zp.Split("pizza", 1)) + fmt.Println(zp.Split("pizza", 2)) + // Output: + // [b n n ] + // [] + // [banana] + // [b nana] + // [pi a] + // [] + // [pizza] + // [pi a] +} + +func ExampleRegexp_Expand() { + content := []byte(` + # comment line + option1: value1 + option2: value2 + + # another comment line + option3: value3 +`) + + // Regex pattern captures "key: value" pair from the content. + pattern := regexp.MustCompile(`(?m)(?P\w+):\s+(?P\w+)$`) + + // Template to convert "key: value" to "key=value" by + // referencing the values captured by the regex pattern. + template := []byte("$key=$value\n") + + result := []byte{} + + // For each match of the regex in the content. + for _, submatches := range pattern.FindAllSubmatchIndex(content, -1) { + // Apply the captured submatches to the template and append the output + // to the result. 
+
+func ExampleRegexp_Expand() {
+	content := []byte(`
+	# comment line
+	option1: value1
+	option2: value2
+
+	# another comment line
+	option3: value3
+`)
+
+	// Regex pattern captures "key: value" pair from the content.
+	pattern := regexp.MustCompile(`(?m)(?P<key>\w+):\s+(?P<value>\w+)$`)
+
+	// Template to convert "key: value" to "key=value" by
+	// referencing the values captured by the regex pattern.
+	template := []byte("$key=$value\n")
+
+	result := []byte{}
+
+	// For each match of the regex in the content.
+	for _, submatches := range pattern.FindAllSubmatchIndex(content, -1) {
+		// Apply the captured submatches to the template and append the output
+		// to the result.
+		result = pattern.Expand(result, template, content, submatches)
+	}
+	fmt.Println(string(result))
+	// Output:
+	// option1=value1
+	// option2=value2
+	// option3=value3
+}
+
+func ExampleRegexp_ExpandString() {
+	content := `
+	# comment line
+	option1: value1
+	option2: value2
+
+	# another comment line
+	option3: value3
+`
+	// Regex pattern captures "key: value" pair from the content.
+	pattern := regexp.MustCompile(`(?m)(?P<key>\w+):\s+(?P<value>\w+)$`)
+
+	// Template to convert "key: value" to "key=value" by
+	// referencing the values captured by the regex pattern.
+	template := "$key=$value\n"
+
+	result := []byte{}
+
+	// For each match of the regex in the content.
+	for _, submatches := range pattern.FindAllStringSubmatchIndex(content, -1) {
+		// Apply the captured submatches to the template and append the output
+		// to the result.
+		result = pattern.ExpandString(result, template, content, submatches)
+	}
+	fmt.Println(string(result))
+	// Output:
+	// option1=value1
+	// option2=value2
+	// option3=value3
+}
+
+func ExampleRegexp_FindIndex() {
+	content := []byte(`
+	# comment line
+	option1: value1
+	option2: value2
+`)
+	// Regex pattern captures "key: value" pair from the content.
+	pattern := regexp.MustCompile(`(?m)(?P<key>\w+):\s+(?P<value>\w+)$`)
+
+	loc := pattern.FindIndex(content)
+	fmt.Println(loc)
+	fmt.Println(string(content[loc[0]:loc[1]]))
+	// Output:
+	// [18 33]
+	// option1: value1
+}
+
+func ExampleRegexp_FindAllSubmatchIndex() {
+	content := []byte(`
+	# comment line
+	option1: value1
+	option2: value2
+`)
+	// Regex pattern captures "key: value" pair from the content.
+	pattern := regexp.MustCompile(`(?m)(?P<key>\w+):\s+(?P<value>\w+)$`)
+	allIndexes := pattern.FindAllSubmatchIndex(content, -1)
+	for _, loc := range allIndexes {
+		fmt.Println(loc)
+		fmt.Println(string(content[loc[0]:loc[1]]))
+		fmt.Println(string(content[loc[2]:loc[3]]))
+		fmt.Println(string(content[loc[4]:loc[5]]))
+	}
+	// Output:
+	// [18 33 18 25 27 33]
+	// option1: value1
+	// option1
+	// value1
+	// [35 50 35 42 44 50]
+	// option2: value2
+	// option2
+	// value2
+}
+
+func ExampleRegexp_FindAllIndex() {
+	content := []byte("London")
+	re := regexp.MustCompile(`o.`)
+	fmt.Println(re.FindAllIndex(content, 1))
+	fmt.Println(re.FindAllIndex(content, -1))
+	// Output:
+	// [[1 3]]
+	// [[1 3] [4 6]]
+}
diff --git a/platform/dbops/binaries/go/go/src/regexp/exec.go b/platform/dbops/binaries/go/go/src/regexp/exec.go
new file mode 100644
index 0000000000000000000000000000000000000000..3fc4b684febd6034230df4a4c00a082961c7d0c9
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/regexp/exec.go
@@ -0,0 +1,554 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package regexp
+
+import (
+	"io"
+	"regexp/syntax"
+	"sync"
+)
+
+// A queue is a 'sparse array' holding pending threads of execution.
+// See https://research.swtch.com/2008/03/using-uninitialized-memory-for-fun-and.html
+type queue struct {
+	sparse []uint32
+	dense  []entry
+}
+
+// An entry is an entry on a queue.
+// It holds both the instruction pc and the actual thread.
+// Some queue entries are just place holders so that the machine
+// knows it has considered that pc. Such entries have t == nil.
+type entry struct {
+	pc uint32
+	t  *thread
+}
+
+// A thread is the state of a single path through the machine:
+// an instruction and a corresponding capture array.
+// See https://swtch.com/~rsc/regexp/regexp2.html +type thread struct { + inst *syntax.Inst + cap []int +} + +// A machine holds all the state during an NFA simulation for p. +type machine struct { + re *Regexp // corresponding Regexp + p *syntax.Prog // compiled program + q0, q1 queue // two queues for runq, nextq + pool []*thread // pool of available threads + matched bool // whether a match was found + matchcap []int // capture information for the match + + inputs inputs +} + +type inputs struct { + // cached inputs, to avoid allocation + bytes inputBytes + string inputString + reader inputReader +} + +func (i *inputs) newBytes(b []byte) input { + i.bytes.str = b + return &i.bytes +} + +func (i *inputs) newString(s string) input { + i.string.str = s + return &i.string +} + +func (i *inputs) newReader(r io.RuneReader) input { + i.reader.r = r + i.reader.atEOT = false + i.reader.pos = 0 + return &i.reader +} + +func (i *inputs) clear() { + // We need to clear 1 of these. + // Avoid the expense of clearing the others (pointer write barrier). + if i.bytes.str != nil { + i.bytes.str = nil + } else if i.reader.r != nil { + i.reader.r = nil + } else { + i.string.str = "" + } +} + +func (i *inputs) init(r io.RuneReader, b []byte, s string) (input, int) { + if r != nil { + return i.newReader(r), 0 + } + if b != nil { + return i.newBytes(b), len(b) + } + return i.newString(s), len(s) +} + +func (m *machine) init(ncap int) { + for _, t := range m.pool { + t.cap = t.cap[:ncap] + } + m.matchcap = m.matchcap[:ncap] +} + +// alloc allocates a new thread with the given instruction. +// It uses the free pool if possible. +func (m *machine) alloc(i *syntax.Inst) *thread { + var t *thread + if n := len(m.pool); n > 0 { + t = m.pool[n-1] + m.pool = m.pool[:n-1] + } else { + t = new(thread) + t.cap = make([]int, len(m.matchcap), cap(m.matchcap)) + } + t.inst = i + return t +} + +// A lazyFlag is a lazily-evaluated syntax.EmptyOp, +// for checking zero-width flags like ^ $ \A \z \B \b. +// It records the pair of relevant runes and does not +// determine the implied flags until absolutely necessary +// (most of the time, that means never). +type lazyFlag uint64 + +func newLazyFlag(r1, r2 rune) lazyFlag { + return lazyFlag(uint64(r1)<<32 | uint64(uint32(r2))) +} + +func (f lazyFlag) match(op syntax.EmptyOp) bool { + if op == 0 { + return true + } + r1 := rune(f >> 32) + if op&syntax.EmptyBeginLine != 0 { + if r1 != '\n' && r1 >= 0 { + return false + } + op &^= syntax.EmptyBeginLine + } + if op&syntax.EmptyBeginText != 0 { + if r1 >= 0 { + return false + } + op &^= syntax.EmptyBeginText + } + if op == 0 { + return true + } + r2 := rune(f) + if op&syntax.EmptyEndLine != 0 { + if r2 != '\n' && r2 >= 0 { + return false + } + op &^= syntax.EmptyEndLine + } + if op&syntax.EmptyEndText != 0 { + if r2 >= 0 { + return false + } + op &^= syntax.EmptyEndText + } + if op == 0 { + return true + } + if syntax.IsWordChar(r1) != syntax.IsWordChar(r2) { + op &^= syntax.EmptyWordBoundary + } else { + op &^= syntax.EmptyNoWordBoundary + } + return op == 0 +} + +// match runs the machine over the input starting at pos. +// It reports whether a match was found. +// If so, m.matchcap holds the submatch information. 
+func (m *machine) match(i input, pos int) bool { + startCond := m.re.cond + if startCond == ^syntax.EmptyOp(0) { // impossible + return false + } + m.matched = false + for i := range m.matchcap { + m.matchcap[i] = -1 + } + runq, nextq := &m.q0, &m.q1 + r, r1 := endOfText, endOfText + width, width1 := 0, 0 + r, width = i.step(pos) + if r != endOfText { + r1, width1 = i.step(pos + width) + } + var flag lazyFlag + if pos == 0 { + flag = newLazyFlag(-1, r) + } else { + flag = i.context(pos) + } + for { + if len(runq.dense) == 0 { + if startCond&syntax.EmptyBeginText != 0 && pos != 0 { + // Anchored match, past beginning of text. + break + } + if m.matched { + // Have match; finished exploring alternatives. + break + } + if len(m.re.prefix) > 0 && r1 != m.re.prefixRune && i.canCheckPrefix() { + // Match requires literal prefix; fast search for it. + advance := i.index(m.re, pos) + if advance < 0 { + break + } + pos += advance + r, width = i.step(pos) + r1, width1 = i.step(pos + width) + } + } + if !m.matched { + if len(m.matchcap) > 0 { + m.matchcap[0] = pos + } + m.add(runq, uint32(m.p.Start), pos, m.matchcap, &flag, nil) + } + flag = newLazyFlag(r, r1) + m.step(runq, nextq, pos, pos+width, r, &flag) + if width == 0 { + break + } + if len(m.matchcap) == 0 && m.matched { + // Found a match and not paying attention + // to where it is, so any match will do. + break + } + pos += width + r, width = r1, width1 + if r != endOfText { + r1, width1 = i.step(pos + width) + } + runq, nextq = nextq, runq + } + m.clear(nextq) + return m.matched +} + +// clear frees all threads on the thread queue. +func (m *machine) clear(q *queue) { + for _, d := range q.dense { + if d.t != nil { + m.pool = append(m.pool, d.t) + } + } + q.dense = q.dense[:0] +} + +// step executes one step of the machine, running each of the threads +// on runq and appending new threads to nextq. +// The step processes the rune c (which may be endOfText), +// which starts at position pos and ends at nextPos. +// nextCond gives the setting for the empty-width flags after c. +func (m *machine) step(runq, nextq *queue, pos, nextPos int, c rune, nextCond *lazyFlag) { + longest := m.re.longest + for j := 0; j < len(runq.dense); j++ { + d := &runq.dense[j] + t := d.t + if t == nil { + continue + } + if longest && m.matched && len(t.cap) > 0 && m.matchcap[0] < t.cap[0] { + m.pool = append(m.pool, t) + continue + } + i := t.inst + add := false + switch i.Op { + default: + panic("bad inst") + + case syntax.InstMatch: + if len(t.cap) > 0 && (!longest || !m.matched || m.matchcap[1] < pos) { + t.cap[1] = pos + copy(m.matchcap, t.cap) + } + if !longest { + // First-match mode: cut off all lower-priority threads. + for _, d := range runq.dense[j+1:] { + if d.t != nil { + m.pool = append(m.pool, d.t) + } + } + runq.dense = runq.dense[:0] + } + m.matched = true + + case syntax.InstRune: + add = i.MatchRune(c) + case syntax.InstRune1: + add = c == i.Rune[0] + case syntax.InstRuneAny: + add = true + case syntax.InstRuneAnyNotNL: + add = c != '\n' + } + if add { + t = m.add(nextq, i.Out, nextPos, t.cap, nextCond, t) + } + if t != nil { + m.pool = append(m.pool, t) + } + } + runq.dense = runq.dense[:0] +} + +// add adds an entry to q for pc, unless the q already has such an entry. +// It also recursively adds an entry for all instructions reachable from pc by following +// empty-width conditions satisfied by cond. pos gives the current position +// in the input. 
+func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond *lazyFlag, t *thread) *thread { +Again: + if pc == 0 { + return t + } + if j := q.sparse[pc]; j < uint32(len(q.dense)) && q.dense[j].pc == pc { + return t + } + + j := len(q.dense) + q.dense = q.dense[:j+1] + d := &q.dense[j] + d.t = nil + d.pc = pc + q.sparse[pc] = uint32(j) + + i := &m.p.Inst[pc] + switch i.Op { + default: + panic("unhandled") + case syntax.InstFail: + // nothing + case syntax.InstAlt, syntax.InstAltMatch: + t = m.add(q, i.Out, pos, cap, cond, t) + pc = i.Arg + goto Again + case syntax.InstEmptyWidth: + if cond.match(syntax.EmptyOp(i.Arg)) { + pc = i.Out + goto Again + } + case syntax.InstNop: + pc = i.Out + goto Again + case syntax.InstCapture: + if int(i.Arg) < len(cap) { + opos := cap[i.Arg] + cap[i.Arg] = pos + m.add(q, i.Out, pos, cap, cond, nil) + cap[i.Arg] = opos + } else { + pc = i.Out + goto Again + } + case syntax.InstMatch, syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: + if t == nil { + t = m.alloc(i) + } else { + t.inst = i + } + if len(cap) > 0 && &t.cap[0] != &cap[0] { + copy(t.cap, cap) + } + d.t = t + t = nil + } + return t +} + +type onePassMachine struct { + inputs inputs + matchcap []int +} + +var onePassPool sync.Pool + +func newOnePassMachine() *onePassMachine { + m, ok := onePassPool.Get().(*onePassMachine) + if !ok { + m = new(onePassMachine) + } + return m +} + +func freeOnePassMachine(m *onePassMachine) { + m.inputs.clear() + onePassPool.Put(m) +} + +// doOnePass implements r.doExecute using the one-pass execution engine. +func (re *Regexp) doOnePass(ir io.RuneReader, ib []byte, is string, pos, ncap int, dstCap []int) []int { + startCond := re.cond + if startCond == ^syntax.EmptyOp(0) { // impossible + return nil + } + + m := newOnePassMachine() + if cap(m.matchcap) < ncap { + m.matchcap = make([]int, ncap) + } else { + m.matchcap = m.matchcap[:ncap] + } + + matched := false + for i := range m.matchcap { + m.matchcap[i] = -1 + } + + i, _ := m.inputs.init(ir, ib, is) + + r, r1 := endOfText, endOfText + width, width1 := 0, 0 + r, width = i.step(pos) + if r != endOfText { + r1, width1 = i.step(pos + width) + } + var flag lazyFlag + if pos == 0 { + flag = newLazyFlag(-1, r) + } else { + flag = i.context(pos) + } + pc := re.onepass.Start + inst := &re.onepass.Inst[pc] + // If there is a simple literal prefix, skip over it. + if pos == 0 && flag.match(syntax.EmptyOp(inst.Arg)) && + len(re.prefix) > 0 && i.canCheckPrefix() { + // Match requires literal prefix; fast search for it. 
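+ // (Unlike (*machine).match, which scans forward for the prefix with
+ // i.index, a one-pass program is anchored at the beginning of text,
+ // so the prefix either matches right here or the match fails.)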
+ if !i.hasPrefix(re) { + goto Return + } + pos += len(re.prefix) + r, width = i.step(pos) + r1, width1 = i.step(pos + width) + flag = i.context(pos) + pc = int(re.prefixEnd) + } + for { + inst = &re.onepass.Inst[pc] + pc = int(inst.Out) + switch inst.Op { + default: + panic("bad inst") + case syntax.InstMatch: + matched = true + if len(m.matchcap) > 0 { + m.matchcap[0] = 0 + m.matchcap[1] = pos + } + goto Return + case syntax.InstRune: + if !inst.MatchRune(r) { + goto Return + } + case syntax.InstRune1: + if r != inst.Rune[0] { + goto Return + } + case syntax.InstRuneAny: + // Nothing + case syntax.InstRuneAnyNotNL: + if r == '\n' { + goto Return + } + // peek at the input rune to see which branch of the Alt to take + case syntax.InstAlt, syntax.InstAltMatch: + pc = int(onePassNext(inst, r)) + continue + case syntax.InstFail: + goto Return + case syntax.InstNop: + continue + case syntax.InstEmptyWidth: + if !flag.match(syntax.EmptyOp(inst.Arg)) { + goto Return + } + continue + case syntax.InstCapture: + if int(inst.Arg) < len(m.matchcap) { + m.matchcap[inst.Arg] = pos + } + continue + } + if width == 0 { + break + } + flag = newLazyFlag(r, r1) + pos += width + r, width = r1, width1 + if r != endOfText { + r1, width1 = i.step(pos + width) + } + } + +Return: + if !matched { + freeOnePassMachine(m) + return nil + } + + dstCap = append(dstCap, m.matchcap...) + freeOnePassMachine(m) + return dstCap +} + +// doMatch reports whether either r, b or s match the regexp. +func (re *Regexp) doMatch(r io.RuneReader, b []byte, s string) bool { + return re.doExecute(r, b, s, 0, 0, nil) != nil +} + +// doExecute finds the leftmost match in the input, appends the position +// of its subexpressions to dstCap and returns dstCap. +// +// nil is returned if no matches are found and non-nil if matches are found. +func (re *Regexp) doExecute(r io.RuneReader, b []byte, s string, pos int, ncap int, dstCap []int) []int { + if dstCap == nil { + // Make sure 'return dstCap' is non-nil. + dstCap = arrayNoInts[:0:0] + } + + if r == nil && len(b)+len(s) < re.minInputLen { + return nil + } + + if re.onepass != nil { + return re.doOnePass(r, b, s, pos, ncap, dstCap) + } + if r == nil && len(b)+len(s) < re.maxBitStateLen { + return re.backtrack(b, s, pos, ncap, dstCap) + } + + m := re.get() + i, _ := m.inputs.init(r, b, s) + + m.init(ncap) + if !m.match(i, pos) { + re.put(m) + return nil + } + + dstCap = append(dstCap, m.matchcap...) + re.put(m) + return dstCap +} + +// arrayNoInts is returned by doExecute match if nil dstCap is passed +// to it with ncap=0. +var arrayNoInts [0]int diff --git a/platform/dbops/binaries/go/go/src/regexp/exec2_test.go b/platform/dbops/binaries/go/go/src/regexp/exec2_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b6dac4a058f0cecdba4bca8f35fbd433c4c6a334 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/regexp/exec2_test.go @@ -0,0 +1,20 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package regexp + +import ( + "testing" +) + +// This test is excluded when running under the race detector because +// it is a very expensive test and takes too long. 
+func TestRE2Exhaustive(t *testing.T) { + if testing.Short() { + t.Skip("skipping TestRE2Exhaustive during short test") + } + testRE2(t, "testdata/re2-exhaustive.txt.bz2") +} diff --git a/platform/dbops/binaries/go/go/src/regexp/exec_test.go b/platform/dbops/binaries/go/go/src/regexp/exec_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1694230345177cb3c23b7d7d10b1875cf23e7b96 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/regexp/exec_test.go @@ -0,0 +1,747 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regexp + +import ( + "bufio" + "compress/bzip2" + "fmt" + "internal/testenv" + "io" + "os" + "path/filepath" + "regexp/syntax" + "strconv" + "strings" + "testing" + "unicode/utf8" +) + +// TestRE2 tests this package's regexp API against test cases +// considered during RE2's exhaustive tests, which run all possible +// regexps over a given set of atoms and operators, up to a given +// complexity, over all possible strings over a given alphabet, +// up to a given size. Rather than try to link with RE2, we read a +// log file containing the test cases and the expected matches. +// The log file, re2-exhaustive.txt, is generated by running 'make log' +// in the open source RE2 distribution https://github.com/google/re2/. +// +// The test file format is a sequence of stanzas like: +// +// strings +// "abc" +// "123x" +// regexps +// "[a-z]+" +// 0-3;0-3 +// -;- +// "([0-9])([0-9])([0-9])" +// -;- +// -;0-3 0-1 1-2 2-3 +// +// The stanza begins by defining a set of strings, quoted +// using Go double-quote syntax, one per line. Then the +// regexps section gives a sequence of regexps to run on +// the strings. In the block that follows a regexp, each line +// gives the semicolon-separated match results of running +// the regexp on the corresponding string. +// Each match result is either a single -, meaning no match, or a +// space-separated sequence of pairs giving the match and +// submatch indices. An unmatched subexpression formats +// its pair as a single - (not illustrated above). For now +// each regexp run produces two match results, one for a +// “full match” that restricts the regexp to matching the entire +// string or nothing, and one for a “partial match” that gives +// the leftmost first match found in the string. +// +// Lines beginning with # are comments. Lines beginning with +// a capital letter are test names printed during RE2's test suite +// and are echoed into t but otherwise ignored. +// +// At time of writing, re2-exhaustive.txt is 59 MB but compresses to 385 kB, +// so we store re2-exhaustive.txt.bz2 in the repository and decompress it on the fly. 
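+//
+// To make the encoding concrete, a worked reading of the stanza above:
+// for the regexp "([0-9])([0-9])([0-9])" run on the string "123x", the
+// partial-match result "0-3 0-1 1-2 2-3" corresponds to
+//
+//	re.FindStringSubmatchIndex("123x") == []int{0, 3, 0, 1, 1, 2, 2, 3}
+//
+// while the full-match result "-" records that \A(?:...)\z does not
+// match "123x" (the trailing 'x' is left over).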
+func TestRE2Search(t *testing.T) { + testRE2(t, "testdata/re2-search.txt") +} + +func testRE2(t *testing.T, file string) { + f, err := os.Open(file) + if err != nil { + t.Fatal(err) + } + defer f.Close() + var txt io.Reader + if strings.HasSuffix(file, ".bz2") { + z := bzip2.NewReader(f) + txt = z + file = file[:len(file)-len(".bz2")] // for error messages + } else { + txt = f + } + lineno := 0 + scanner := bufio.NewScanner(txt) + var ( + str []string + input []string + inStrings bool + re *Regexp + refull *Regexp + nfail int + ncase int + ) + for lineno := 1; scanner.Scan(); lineno++ { + line := scanner.Text() + switch { + case line == "": + t.Fatalf("%s:%d: unexpected blank line", file, lineno) + case line[0] == '#': + continue + case 'A' <= line[0] && line[0] <= 'Z': + // Test name. + t.Logf("%s\n", line) + continue + case line == "strings": + str = str[:0] + inStrings = true + case line == "regexps": + inStrings = false + case line[0] == '"': + q, err := strconv.Unquote(line) + if err != nil { + // Fatal because we'll get out of sync. + t.Fatalf("%s:%d: unquote %s: %v", file, lineno, line, err) + } + if inStrings { + str = append(str, q) + continue + } + // Is a regexp. + if len(input) != 0 { + t.Fatalf("%s:%d: out of sync: have %d strings left before %#q", file, lineno, len(input), q) + } + re, err = tryCompile(q) + if err != nil { + if err.Error() == "error parsing regexp: invalid escape sequence: `\\C`" { + // We don't and likely never will support \C; keep going. + continue + } + t.Errorf("%s:%d: compile %#q: %v", file, lineno, q, err) + if nfail++; nfail >= 100 { + t.Fatalf("stopping after %d errors", nfail) + } + continue + } + full := `\A(?:` + q + `)\z` + refull, err = tryCompile(full) + if err != nil { + // Fatal because q worked, so this should always work. + t.Fatalf("%s:%d: compile full %#q: %v", file, lineno, full, err) + } + input = str + case line[0] == '-' || '0' <= line[0] && line[0] <= '9': + // A sequence of match results. + ncase++ + if re == nil { + // Failed to compile: skip results. + continue + } + if len(input) == 0 { + t.Fatalf("%s:%d: out of sync: no input remaining", file, lineno) + } + var text string + text, input = input[0], input[1:] + if !isSingleBytes(text) && strings.Contains(re.String(), `\B`) { + // RE2's \B considers every byte position, + // so it sees 'not word boundary' in the + // middle of UTF-8 sequences. This package + // only considers the positions between runes, + // so it disagrees. Skip those cases. 
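+ // (Concretely: in the two-byte UTF-8 encoding of a rune such as
+ // 'é', RE2 reports a \B between the two bytes, a position this
+ // package never considers.)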
+ continue + } + res := strings.Split(line, ";") + if len(res) != len(run) { + t.Fatalf("%s:%d: have %d test results, want %d", file, lineno, len(res), len(run)) + } + for i := range res { + have, suffix := run[i](re, refull, text) + want := parseResult(t, file, lineno, res[i]) + if !same(have, want) { + t.Errorf("%s:%d: %#q%s.FindSubmatchIndex(%#q) = %v, want %v", file, lineno, re, suffix, text, have, want) + if nfail++; nfail >= 100 { + t.Fatalf("stopping after %d errors", nfail) + } + continue + } + b, suffix := match[i](re, refull, text) + if b != (want != nil) { + t.Errorf("%s:%d: %#q%s.MatchString(%#q) = %v, want %v", file, lineno, re, suffix, text, b, !b) + if nfail++; nfail >= 100 { + t.Fatalf("stopping after %d errors", nfail) + } + continue + } + } + + default: + t.Fatalf("%s:%d: out of sync: %s\n", file, lineno, line) + } + } + if err := scanner.Err(); err != nil { + t.Fatalf("%s:%d: %v", file, lineno, err) + } + if len(input) != 0 { + t.Fatalf("%s:%d: out of sync: have %d strings left at EOF", file, lineno, len(input)) + } + t.Logf("%d cases tested", ncase) +} + +var run = []func(*Regexp, *Regexp, string) ([]int, string){ + runFull, + runPartial, + runFullLongest, + runPartialLongest, +} + +func runFull(re, refull *Regexp, text string) ([]int, string) { + refull.longest = false + return refull.FindStringSubmatchIndex(text), "[full]" +} + +func runPartial(re, refull *Regexp, text string) ([]int, string) { + re.longest = false + return re.FindStringSubmatchIndex(text), "" +} + +func runFullLongest(re, refull *Regexp, text string) ([]int, string) { + refull.longest = true + return refull.FindStringSubmatchIndex(text), "[full,longest]" +} + +func runPartialLongest(re, refull *Regexp, text string) ([]int, string) { + re.longest = true + return re.FindStringSubmatchIndex(text), "[longest]" +} + +var match = []func(*Regexp, *Regexp, string) (bool, string){ + matchFull, + matchPartial, + matchFullLongest, + matchPartialLongest, +} + +func matchFull(re, refull *Regexp, text string) (bool, string) { + refull.longest = false + return refull.MatchString(text), "[full]" +} + +func matchPartial(re, refull *Regexp, text string) (bool, string) { + re.longest = false + return re.MatchString(text), "" +} + +func matchFullLongest(re, refull *Regexp, text string) (bool, string) { + refull.longest = true + return refull.MatchString(text), "[full,longest]" +} + +func matchPartialLongest(re, refull *Regexp, text string) (bool, string) { + re.longest = true + return re.MatchString(text), "[longest]" +} + +func isSingleBytes(s string) bool { + for _, c := range s { + if c >= utf8.RuneSelf { + return false + } + } + return true +} + +func tryCompile(s string) (re *Regexp, err error) { + // Protect against panic during Compile. + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("panic: %v", r) + } + }() + return Compile(s) +} + +func parseResult(t *testing.T, file string, lineno int, res string) []int { + // A single - indicates no match. + if res == "-" { + return nil + } + // Otherwise, a space-separated list of pairs. + n := 1 + for j := 0; j < len(res); j++ { + if res[j] == ' ' { + n++ + } + } + out := make([]int, 2*n) + i := 0 + n = 0 + for j := 0; j <= len(res); j++ { + if j == len(res) || res[j] == ' ' { + // Process a single pair. - means no submatch. 
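+ // For example, "1-2" parses to the pair (1,2) and "-" to
+ // (-1,-1), so a result "- 1-2" yields []int{-1, -1, 1, 2}.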
+ pair := res[i:j] + if pair == "-" { + out[n] = -1 + out[n+1] = -1 + } else { + loStr, hiStr, _ := strings.Cut(pair, "-") + lo, err1 := strconv.Atoi(loStr) + hi, err2 := strconv.Atoi(hiStr) + if err1 != nil || err2 != nil || lo > hi { + t.Fatalf("%s:%d: invalid pair %s", file, lineno, pair) + } + out[n] = lo + out[n+1] = hi + } + n += 2 + i = j + 1 + } + } + return out +} + +func same(x, y []int) bool { + if len(x) != len(y) { + return false + } + for i, xi := range x { + if xi != y[i] { + return false + } + } + return true +} + +// TestFowler runs this package's regexp API against the +// POSIX regular expression tests collected by Glenn Fowler +// at http://www2.research.att.com/~astopen/testregex/testregex.html. +func TestFowler(t *testing.T) { + files, err := filepath.Glob("testdata/*.dat") + if err != nil { + t.Fatal(err) + } + for _, file := range files { + t.Log(file) + testFowler(t, file) + } +} + +var notab = MustCompilePOSIX(`[^\t]+`) + +func testFowler(t *testing.T, file string) { + f, err := os.Open(file) + if err != nil { + t.Error(err) + return + } + defer f.Close() + b := bufio.NewReader(f) + lineno := 0 + lastRegexp := "" +Reading: + for { + lineno++ + line, err := b.ReadString('\n') + if err != nil { + if err != io.EOF { + t.Errorf("%s:%d: %v", file, lineno, err) + } + break Reading + } + + // http://www2.research.att.com/~astopen/man/man1/testregex.html + // + // INPUT FORMAT + // Input lines may be blank, a comment beginning with #, or a test + // specification. A specification is five fields separated by one + // or more tabs. NULL denotes the empty string and NIL denotes the + // 0 pointer. + if line[0] == '#' || line[0] == '\n' { + continue Reading + } + line = line[:len(line)-1] + field := notab.FindAllString(line, -1) + for i, f := range field { + if f == "NULL" { + field[i] = "" + } + if f == "NIL" { + t.Logf("%s:%d: skip: %s", file, lineno, line) + continue Reading + } + } + if len(field) == 0 { + continue Reading + } + + // Field 1: the regex(3) flags to apply, one character per REG_feature + // flag. The test is skipped if REG_feature is not supported by the + // implementation. If the first character is not [BEASKLP] then the + // specification is a global control line. One or more of [BEASKLP] may be + // specified; the test will be repeated for each mode. + // + // B basic BRE (grep, ed, sed) + // E REG_EXTENDED ERE (egrep) + // A REG_AUGMENTED ARE (egrep with negation) + // S REG_SHELL SRE (sh glob) + // K REG_SHELL|REG_AUGMENTED KRE (ksh glob) + // L REG_LITERAL LRE (fgrep) + // + // a REG_LEFT|REG_RIGHT implicit ^...$ + // b REG_NOTBOL lhs does not match ^ + // c REG_COMMENT ignore space and #...\n + // d REG_SHELL_DOT explicit leading . match + // e REG_NOTEOL rhs does not match $ + // f REG_MULTIPLE multiple \n separated patterns + // g FNM_LEADING_DIR testfnmatch only -- match until / + // h REG_MULTIREF multiple digit backref + // i REG_ICASE ignore case + // j REG_SPAN . matches \n + // k REG_ESCAPE \ to escape [...] delimiter + // l REG_LEFT implicit ^... + // m REG_MINIMAL minimal match + // n REG_NEWLINE explicit \n match + // o REG_ENCLOSED (|&) magic inside [@|&](...) + // p REG_SHELL_PATH explicit / match + // q REG_DELIMITED delimited pattern + // r REG_RIGHT implicit ...$ + // s REG_SHELL_ESCAPED \ not special + // t REG_MUSTDELIM all delimiters must be specified + // u standard unspecified behavior -- errors not counted + // v REG_CLASS_ESCAPE \ special inside [...] 
+ // w REG_NOSUB no subexpression match array + // x REG_LENIENT let some errors slide + // y REG_LEFT regexec() implicit ^... + // z REG_NULL NULL subexpressions ok + // $ expand C \c escapes in fields 2 and 3 + // / field 2 is a regsubcomp() expression + // = field 3 is a regdecomp() expression + // + // Field 1 control lines: + // + // C set LC_COLLATE and LC_CTYPE to locale in field 2 + // + // ?test ... output field 5 if passed and != EXPECTED, silent otherwise + // &test ... output field 5 if current and previous passed + // |test ... output field 5 if current passed and previous failed + // ; ... output field 2 if previous failed + // {test ... skip if failed until } + // } end of skip + // + // : comment comment copied as output NOTE + // :comment:test :comment: ignored + // N[OTE] comment comment copied as output NOTE + // T[EST] comment comment + // + // number use number for nmatch (20 by default) + flag := field[0] + switch flag[0] { + case '?', '&', '|', ';', '{', '}': + // Ignore all the control operators. + // Just run everything. + flag = flag[1:] + if flag == "" { + continue Reading + } + case ':': + var ok bool + if _, flag, ok = strings.Cut(flag[1:], ":"); !ok { + t.Logf("skip: %s", line) + continue Reading + } + case 'C', 'N', 'T', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + t.Logf("skip: %s", line) + continue Reading + } + + // Can check field count now that we've handled the myriad comment formats. + if len(field) < 4 { + t.Errorf("%s:%d: too few fields: %s", file, lineno, line) + continue Reading + } + + // Expand C escapes (a.k.a. Go escapes). + if strings.Contains(flag, "$") { + f := `"` + field[1] + `"` + if field[1], err = strconv.Unquote(f); err != nil { + t.Errorf("%s:%d: cannot unquote %s", file, lineno, f) + } + f = `"` + field[2] + `"` + if field[2], err = strconv.Unquote(f); err != nil { + t.Errorf("%s:%d: cannot unquote %s", file, lineno, f) + } + } + + // Field 2: the regular expression pattern; SAME uses the pattern from + // the previous specification. + // + if field[1] == "SAME" { + field[1] = lastRegexp + } + lastRegexp = field[1] + + // Field 3: the string to match. + text := field[2] + + // Field 4: the test outcome... + ok, shouldCompile, shouldMatch, pos := parseFowlerResult(field[3]) + if !ok { + t.Errorf("%s:%d: cannot parse result %#q", file, lineno, field[3]) + continue Reading + } + + // Field 5: optional comment appended to the report. + + Testing: + // Run test once for each specified capital letter mode that we support. 
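+ // (A hypothetical specification line in this format, with tabs
+ // between the five fields, would be:
+ //	E	a(b)c	abc	(0,3)(1,2)
+ // that is: ERE mode, pattern a(b)c, text abc, match at 0-3 with
+ // submatch 1 at 1-2.)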
+ for _, c := range flag { + pattern := field[1] + syn := syntax.POSIX | syntax.ClassNL + switch c { + default: + continue Testing + case 'E': + // extended regexp (what we support) + case 'L': + // literal + pattern = QuoteMeta(pattern) + } + + for _, c := range flag { + switch c { + case 'i': + syn |= syntax.FoldCase + } + } + + re, err := compile(pattern, syn, true) + if err != nil { + if shouldCompile { + t.Errorf("%s:%d: %#q did not compile", file, lineno, pattern) + } + continue Testing + } + if !shouldCompile { + t.Errorf("%s:%d: %#q should not compile", file, lineno, pattern) + continue Testing + } + match := re.MatchString(text) + if match != shouldMatch { + t.Errorf("%s:%d: %#q.Match(%#q) = %v, want %v", file, lineno, pattern, text, match, shouldMatch) + continue Testing + } + have := re.FindStringSubmatchIndex(text) + if (len(have) > 0) != match { + t.Errorf("%s:%d: %#q.Match(%#q) = %v, but %#q.FindSubmatchIndex(%#q) = %v", file, lineno, pattern, text, match, pattern, text, have) + continue Testing + } + if len(have) > len(pos) { + have = have[:len(pos)] + } + if !same(have, pos) { + t.Errorf("%s:%d: %#q.FindSubmatchIndex(%#q) = %v, want %v", file, lineno, pattern, text, have, pos) + } + } + } +} + +func parseFowlerResult(s string) (ok, compiled, matched bool, pos []int) { + // Field 4: the test outcome. This is either one of the posix error + // codes (with REG_ omitted) or the match array, a list of (m,n) + // entries with m and n being first and last+1 positions in the + // field 3 string, or NULL if REG_NOSUB is in effect and success + // is expected. BADPAT is acceptable in place of any regcomp(3) + // error code. The match[] array is initialized to (-2,-2) before + // each test. All array elements from 0 to nmatch-1 must be specified + // in the outcome. Unspecified endpoints (offset -1) are denoted by ?. + // Unset endpoints (offset -2) are denoted by X. {x}(o:n) denotes a + // matched (?{...}) expression, where x is the text enclosed by {...}, + // o is the expression ordinal counting from 1, and n is the length of + // the unmatched portion of the subject string. If x starts with a + // number then that is the return value of re_execf(), otherwise 0 is + // returned. + switch { + case s == "": + // Match with no position information. + ok = true + compiled = true + matched = true + return + case s == "NOMATCH": + // Match failure. + ok = true + compiled = true + matched = false + return + case 'A' <= s[0] && s[0] <= 'Z': + // All the other error codes are compile errors. + ok = true + compiled = false + return + } + compiled = true + + var x []int + for s != "" { + var end byte = ')' + if len(x)%2 == 0 { + if s[0] != '(' { + ok = false + return + } + s = s[1:] + end = ',' + } + i := 0 + for i < len(s) && s[i] != end { + i++ + } + if i == 0 || i == len(s) { + ok = false + return + } + var v = -1 + var err error + if s[:i] != "?" 
{ + v, err = strconv.Atoi(s[:i]) + if err != nil { + ok = false + return + } + } + x = append(x, v) + s = s[i+1:] + } + if len(x)%2 != 0 { + ok = false + return + } + ok = true + matched = true + pos = x + return +} + +var text []byte + +func makeText(n int) []byte { + if len(text) >= n { + return text[:n] + } + text = make([]byte, n) + x := ^uint32(0) + for i := range text { + x += x + x ^= 1 + if int32(x) < 0 { + x ^= 0x88888eef + } + if x%31 == 0 { + text[i] = '\n' + } else { + text[i] = byte(x%(0x7E+1-0x20) + 0x20) + } + } + return text +} + +func BenchmarkMatch(b *testing.B) { + isRaceBuilder := strings.HasSuffix(testenv.Builder(), "-race") + + for _, data := range benchData { + r := MustCompile(data.re) + for _, size := range benchSizes { + if (isRaceBuilder || testing.Short()) && size.n > 1<<10 { + continue + } + t := makeText(size.n) + b.Run(data.name+"/"+size.name, func(b *testing.B) { + b.SetBytes(int64(size.n)) + for i := 0; i < b.N; i++ { + if r.Match(t) { + b.Fatal("match!") + } + } + }) + } + } +} + +func BenchmarkMatch_onepass_regex(b *testing.B) { + isRaceBuilder := strings.HasSuffix(testenv.Builder(), "-race") + r := MustCompile(`(?s)\A.*\z`) + if r.onepass == nil { + b.Fatalf("want onepass regex, but %q is not onepass", r) + } + for _, size := range benchSizes { + if (isRaceBuilder || testing.Short()) && size.n > 1<<10 { + continue + } + t := makeText(size.n) + b.Run(size.name, func(b *testing.B) { + b.SetBytes(int64(size.n)) + b.ReportAllocs() + for i := 0; i < b.N; i++ { + if !r.Match(t) { + b.Fatal("not match!") + } + } + }) + } +} + +var benchData = []struct{ name, re string }{ + {"Easy0", "ABCDEFGHIJKLMNOPQRSTUVWXYZ$"}, + {"Easy0i", "(?i)ABCDEFGHIJklmnopqrstuvwxyz$"}, + {"Easy1", "A[AB]B[BC]C[CD]D[DE]E[EF]F[FG]G[GH]H[HI]I[IJ]J$"}, + {"Medium", "[XYZ]ABCDEFGHIJKLMNOPQRSTUVWXYZ$"}, + {"Hard", "[ -~]*ABCDEFGHIJKLMNOPQRSTUVWXYZ$"}, + {"Hard1", "ABCD|CDEF|EFGH|GHIJ|IJKL|KLMN|MNOP|OPQR|QRST|STUV|UVWX|WXYZ"}, +} + +var benchSizes = []struct { + name string + n int +}{ + {"16", 16}, + {"32", 32}, + {"1K", 1 << 10}, + {"32K", 32 << 10}, + {"1M", 1 << 20}, + {"32M", 32 << 20}, +} + +func TestLongest(t *testing.T) { + re, err := Compile(`a(|b)`) + if err != nil { + t.Fatal(err) + } + if g, w := re.FindString("ab"), "a"; g != w { + t.Errorf("first match was %q, want %q", g, w) + } + re.Longest() + if g, w := re.FindString("ab"), "ab"; g != w { + t.Errorf("longest match was %q, want %q", g, w) + } +} + +// TestProgramTooLongForBacktrack tests that a regex which is too long +// for the backtracker still executes properly. 
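+// ("Too long" means the compiled program exceeds the backtracker's
+// internal size limit, forcing doExecute onto the NFA engine; the exact
+// limit is an implementation detail of the backtracker, which is why the
+// pattern below is simply a very large alternation.)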
+func TestProgramTooLongForBacktrack(t *testing.T) { + longRegex := MustCompile(`(one|two|three|four|five|six|seven|eight|nine|ten|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|twenty|twentyone|twentytwo|twentythree|twentyfour|twentyfive|twentysix|twentyseven|twentyeight|twentynine|thirty|thirtyone|thirtytwo|thirtythree|thirtyfour|thirtyfive|thirtysix|thirtyseven|thirtyeight|thirtynine|forty|fortyone|fortytwo|fortythree|fortyfour|fortyfive|fortysix|fortyseven|fortyeight|fortynine|fifty|fiftyone|fiftytwo|fiftythree|fiftyfour|fiftyfive|fiftysix|fiftyseven|fiftyeight|fiftynine|sixty|sixtyone|sixtytwo|sixtythree|sixtyfour|sixtyfive|sixtysix|sixtyseven|sixtyeight|sixtynine|seventy|seventyone|seventytwo|seventythree|seventyfour|seventyfive|seventysix|seventyseven|seventyeight|seventynine|eighty|eightyone|eightytwo|eightythree|eightyfour|eightyfive|eightysix|eightyseven|eightyeight|eightynine|ninety|ninetyone|ninetytwo|ninetythree|ninetyfour|ninetyfive|ninetysix|ninetyseven|ninetyeight|ninetynine|onehundred)`) + if !longRegex.MatchString("two") { + t.Errorf("longRegex.MatchString(\"two\") was false, want true") + } + if longRegex.MatchString("xxx") { + t.Errorf("longRegex.MatchString(\"xxx\") was true, want false") + } +} diff --git a/platform/dbops/binaries/go/go/src/regexp/find_test.go b/platform/dbops/binaries/go/go/src/regexp/find_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2edbe9b86e61547d11c50040e8a39150378371ab --- /dev/null +++ b/platform/dbops/binaries/go/go/src/regexp/find_test.go @@ -0,0 +1,518 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regexp + +import ( + "fmt" + "strings" + "testing" +) + +// For each pattern/text pair, what is the expected output of each function? +// We can derive the textual results from the indexed results, the non-submatch +// results from the submatched results, the single results from the 'all' results, +// and the byte results from the string results. Therefore the table includes +// only the FindAllStringSubmatchIndex result. 
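+//
+// As a worked example, using an entry from the table below: for pat
+// `a(b*)` and text "abbaab" the stored result is
+//
+//	[][]int{{0, 3, 1, 3}, {3, 4, 4, 4}, {4, 6, 5, 6}}
+//
+// from which FindAllString derives ["abb", "a", "ab"], FindString
+// derives "abb", and FindStringIndex derives [0, 3].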
+type FindTest struct { + pat string + text string + matches [][]int +} + +func (t FindTest) String() string { + return fmt.Sprintf("pat: %#q text: %#q", t.pat, t.text) +} + +var findTests = []FindTest{ + {``, ``, build(1, 0, 0)}, + {`^abcdefg`, "abcdefg", build(1, 0, 7)}, + {`a+`, "baaab", build(1, 1, 4)}, + {"abcd..", "abcdef", build(1, 0, 6)}, + {`a`, "a", build(1, 0, 1)}, + {`x`, "y", nil}, + {`b`, "abc", build(1, 1, 2)}, + {`.`, "a", build(1, 0, 1)}, + {`.*`, "abcdef", build(1, 0, 6)}, + {`^`, "abcde", build(1, 0, 0)}, + {`$`, "abcde", build(1, 5, 5)}, + {`^abcd$`, "abcd", build(1, 0, 4)}, + {`^bcd'`, "abcdef", nil}, + {`^abcd$`, "abcde", nil}, + {`a+`, "baaab", build(1, 1, 4)}, + {`a*`, "baaab", build(3, 0, 0, 1, 4, 5, 5)}, + {`[a-z]+`, "abcd", build(1, 0, 4)}, + {`[^a-z]+`, "ab1234cd", build(1, 2, 6)}, + {`[a\-\]z]+`, "az]-bcz", build(2, 0, 4, 6, 7)}, + {`[^\n]+`, "abcd\n", build(1, 0, 4)}, + {`[日本語]+`, "日本語日本語", build(1, 0, 18)}, + {`日本語+`, "日本語", build(1, 0, 9)}, + {`日本語+`, "日本語語語語", build(1, 0, 18)}, + {`()`, "", build(1, 0, 0, 0, 0)}, + {`(a)`, "a", build(1, 0, 1, 0, 1)}, + {`(.)(.)`, "日a", build(1, 0, 4, 0, 3, 3, 4)}, + {`(.*)`, "", build(1, 0, 0, 0, 0)}, + {`(.*)`, "abcd", build(1, 0, 4, 0, 4)}, + {`(..)(..)`, "abcd", build(1, 0, 4, 0, 2, 2, 4)}, + {`(([^xyz]*)(d))`, "abcd", build(1, 0, 4, 0, 4, 0, 3, 3, 4)}, + {`((a|b|c)*(d))`, "abcd", build(1, 0, 4, 0, 4, 2, 3, 3, 4)}, + {`(((a|b|c)*)(d))`, "abcd", build(1, 0, 4, 0, 4, 0, 3, 2, 3, 3, 4)}, + {`\a\f\n\r\t\v`, "\a\f\n\r\t\v", build(1, 0, 6)}, + {`[\a\f\n\r\t\v]+`, "\a\f\n\r\t\v", build(1, 0, 6)}, + + {`a*(|(b))c*`, "aacc", build(1, 0, 4, 2, 2, -1, -1)}, + {`(.*).*`, "ab", build(1, 0, 2, 0, 2)}, + {`[.]`, ".", build(1, 0, 1)}, + {`/$`, "/abc/", build(1, 4, 5)}, + {`/$`, "/abc", nil}, + + // multiple matches + {`.`, "abc", build(3, 0, 1, 1, 2, 2, 3)}, + {`(.)`, "abc", build(3, 0, 1, 0, 1, 1, 2, 1, 2, 2, 3, 2, 3)}, + {`.(.)`, "abcd", build(2, 0, 2, 1, 2, 2, 4, 3, 4)}, + {`ab*`, "abbaab", build(3, 0, 3, 3, 4, 4, 6)}, + {`a(b*)`, "abbaab", build(3, 0, 3, 1, 3, 3, 4, 4, 4, 4, 6, 5, 6)}, + + // fixed bugs + {`ab$`, "cab", build(1, 1, 3)}, + {`axxb$`, "axxcb", nil}, + {`data`, "daXY data", build(1, 5, 9)}, + {`da(.)a$`, "daXY data", build(1, 5, 9, 7, 8)}, + {`zx+`, "zzx", build(1, 1, 3)}, + {`ab$`, "abcab", build(1, 3, 5)}, + {`(aa)*$`, "a", build(1, 1, 1, -1, -1)}, + {`(?:.|(?:.a))`, "", nil}, + {`(?:A(?:A|a))`, "Aa", build(1, 0, 2)}, + {`(?:A|(?:A|a))`, "a", build(1, 0, 1)}, + {`(a){0}`, "", build(1, 0, 0, -1, -1)}, + {`(?-s)(?:(?:^).)`, "\n", nil}, + {`(?s)(?:(?:^).)`, "\n", build(1, 0, 1)}, + {`(?:(?:^).)`, "\n", nil}, + {`\b`, "x", build(2, 0, 0, 1, 1)}, + {`\b`, "xx", build(2, 0, 0, 2, 2)}, + {`\b`, "x y", build(4, 0, 0, 1, 1, 2, 2, 3, 3)}, + {`\b`, "xx yy", build(4, 0, 0, 2, 2, 3, 3, 5, 5)}, + {`\B`, "x", nil}, + {`\B`, "xx", build(1, 1, 1)}, + {`\B`, "x y", nil}, + {`\B`, "xx yy", build(2, 1, 1, 4, 4)}, + {`(|a)*`, "aa", build(3, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2)}, + + // RE2 tests + {`[^\S\s]`, "abcd", nil}, + {`[^\S[:space:]]`, "abcd", nil}, + {`[^\D\d]`, "abcd", nil}, + {`[^\D[:digit:]]`, "abcd", nil}, + {`(?i)\W`, "x", nil}, + {`(?i)\W`, "k", nil}, + {`(?i)\W`, "s", nil}, + + // can backslash-escape any punctuation + {`\!\"\#\$\%\&\'\(\)\*\+\,\-\.\/\:\;\<\=\>\?\@\[\\\]\^\_\{\|\}\~`, + `!"#$%&'()*+,-./:;<=>?@[\]^_{|}~`, build(1, 0, 31)}, + {`[\!\"\#\$\%\&\'\(\)\*\+\,\-\.\/\:\;\<\=\>\?\@\[\\\]\^\_\{\|\}\~]+`, + `!"#$%&'()*+,-./:;<=>?@[\]^_{|}~`, build(1, 0, 31)}, + {"\\`", "`", build(1, 0, 1)}, + {"[\\`]+", "`", build(1, 0, 
1)}, + + {"\ufffd", "\xff", build(1, 0, 1)}, + {"\ufffd", "hello\xffworld", build(1, 5, 6)}, + {`.*`, "hello\xffworld", build(1, 0, 11)}, + {`\x{fffd}`, "\xc2\x00", build(1, 0, 1)}, + {"[\ufffd]", "\xff", build(1, 0, 1)}, + {`[\x{fffd}]`, "\xc2\x00", build(1, 0, 1)}, + + // long set of matches (longer than startSize) + { + ".", + "qwertyuiopasdfghjklzxcvbnm1234567890", + build(36, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, + 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, + 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, + 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36), + }, +} + +// build is a helper to construct a [][]int by extracting n sequences from x. +// This represents n matches with len(x)/n submatches each. +func build(n int, x ...int) [][]int { + ret := make([][]int, n) + runLength := len(x) / n + j := 0 + for i := range ret { + ret[i] = make([]int, runLength) + copy(ret[i], x[j:]) + j += runLength + if j > len(x) { + panic("invalid build entry") + } + } + return ret +} + +// First the simple cases. + +func TestFind(t *testing.T) { + for _, test := range findTests { + re := MustCompile(test.pat) + if re.String() != test.pat { + t.Errorf("String() = `%s`; should be `%s`", re.String(), test.pat) + } + result := re.Find([]byte(test.text)) + switch { + case len(test.matches) == 0 && len(result) == 0: + // ok + case test.matches == nil && result != nil: + t.Errorf("expected no match; got one: %s", test) + case test.matches != nil && result == nil: + t.Errorf("expected match; got none: %s", test) + case test.matches != nil && result != nil: + expect := test.text[test.matches[0][0]:test.matches[0][1]] + if len(result) != cap(result) { + t.Errorf("expected capacity %d got %d: %s", len(result), cap(result), test) + } + if expect != string(result) { + t.Errorf("expected %q got %q: %s", expect, result, test) + } + } + } +} + +func TestFindString(t *testing.T) { + for _, test := range findTests { + result := MustCompile(test.pat).FindString(test.text) + switch { + case len(test.matches) == 0 && len(result) == 0: + // ok + case test.matches == nil && result != "": + t.Errorf("expected no match; got one: %s", test) + case test.matches != nil && result == "": + // Tricky because an empty result has two meanings: no match or empty match. 
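+ // (For example, `a*` on "baaab": the first match is the empty
+ // string at position 0, so FindString returns "" even though
+ // the pattern matched.)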
+ if test.matches[0][0] != test.matches[0][1] { + t.Errorf("expected match; got none: %s", test) + } + case test.matches != nil && result != "": + expect := test.text[test.matches[0][0]:test.matches[0][1]] + if expect != result { + t.Errorf("expected %q got %q: %s", expect, result, test) + } + } + } +} + +func testFindIndex(test *FindTest, result []int, t *testing.T) { + switch { + case len(test.matches) == 0 && len(result) == 0: + // ok + case test.matches == nil && result != nil: + t.Errorf("expected no match; got one: %s", test) + case test.matches != nil && result == nil: + t.Errorf("expected match; got none: %s", test) + case test.matches != nil && result != nil: + expect := test.matches[0] + if expect[0] != result[0] || expect[1] != result[1] { + t.Errorf("expected %v got %v: %s", expect, result, test) + } + } +} + +func TestFindIndex(t *testing.T) { + for _, test := range findTests { + testFindIndex(&test, MustCompile(test.pat).FindIndex([]byte(test.text)), t) + } +} + +func TestFindStringIndex(t *testing.T) { + for _, test := range findTests { + testFindIndex(&test, MustCompile(test.pat).FindStringIndex(test.text), t) + } +} + +func TestFindReaderIndex(t *testing.T) { + for _, test := range findTests { + testFindIndex(&test, MustCompile(test.pat).FindReaderIndex(strings.NewReader(test.text)), t) + } +} + +// Now come the simple All cases. + +func TestFindAll(t *testing.T) { + for _, test := range findTests { + result := MustCompile(test.pat).FindAll([]byte(test.text), -1) + switch { + case test.matches == nil && result == nil: + // ok + case test.matches == nil && result != nil: + t.Errorf("expected no match; got one: %s", test) + case test.matches != nil && result == nil: + t.Fatalf("expected match; got none: %s", test) + case test.matches != nil && result != nil: + if len(test.matches) != len(result) { + t.Errorf("expected %d matches; got %d: %s", len(test.matches), len(result), test) + continue + } + for k, e := range test.matches { + got := result[k] + if len(got) != cap(got) { + t.Errorf("match %d: expected capacity %d got %d: %s", k, len(got), cap(got), test) + } + expect := test.text[e[0]:e[1]] + if expect != string(got) { + t.Errorf("match %d: expected %q got %q: %s", k, expect, got, test) + } + } + } + } +} + +func TestFindAllString(t *testing.T) { + for _, test := range findTests { + result := MustCompile(test.pat).FindAllString(test.text, -1) + switch { + case test.matches == nil && result == nil: + // ok + case test.matches == nil && result != nil: + t.Errorf("expected no match; got one: %s", test) + case test.matches != nil && result == nil: + t.Errorf("expected match; got none: %s", test) + case test.matches != nil && result != nil: + if len(test.matches) != len(result) { + t.Errorf("expected %d matches; got %d: %s", len(test.matches), len(result), test) + continue + } + for k, e := range test.matches { + expect := test.text[e[0]:e[1]] + if expect != result[k] { + t.Errorf("expected %q got %q: %s", expect, result, test) + } + } + } + } +} + +func testFindAllIndex(test *FindTest, result [][]int, t *testing.T) { + switch { + case test.matches == nil && result == nil: + // ok + case test.matches == nil && result != nil: + t.Errorf("expected no match; got one: %s", test) + case test.matches != nil && result == nil: + t.Errorf("expected match; got none: %s", test) + case test.matches != nil && result != nil: + if len(test.matches) != len(result) { + t.Errorf("expected %d matches; got %d: %s", len(test.matches), len(result), test) + return + } + for k, e := range 
test.matches { + if e[0] != result[k][0] || e[1] != result[k][1] { + t.Errorf("match %d: expected %v got %v: %s", k, e, result[k], test) + } + } + } +} + +func TestFindAllIndex(t *testing.T) { + for _, test := range findTests { + testFindAllIndex(&test, MustCompile(test.pat).FindAllIndex([]byte(test.text), -1), t) + } +} + +func TestFindAllStringIndex(t *testing.T) { + for _, test := range findTests { + testFindAllIndex(&test, MustCompile(test.pat).FindAllStringIndex(test.text, -1), t) + } +} + +// Now come the Submatch cases. + +func testSubmatchBytes(test *FindTest, n int, submatches []int, result [][]byte, t *testing.T) { + if len(submatches) != len(result)*2 { + t.Errorf("match %d: expected %d submatches; got %d: %s", n, len(submatches)/2, len(result), test) + return + } + for k := 0; k < len(submatches); k += 2 { + if submatches[k] == -1 { + if result[k/2] != nil { + t.Errorf("match %d: expected nil got %q: %s", n, result, test) + } + continue + } + got := result[k/2] + if len(got) != cap(got) { + t.Errorf("match %d: expected capacity %d got %d: %s", n, len(got), cap(got), test) + return + } + expect := test.text[submatches[k]:submatches[k+1]] + if expect != string(got) { + t.Errorf("match %d: expected %q got %q: %s", n, expect, got, test) + return + } + } +} + +func TestFindSubmatch(t *testing.T) { + for _, test := range findTests { + result := MustCompile(test.pat).FindSubmatch([]byte(test.text)) + switch { + case test.matches == nil && result == nil: + // ok + case test.matches == nil && result != nil: + t.Errorf("expected no match; got one: %s", test) + case test.matches != nil && result == nil: + t.Errorf("expected match; got none: %s", test) + case test.matches != nil && result != nil: + testSubmatchBytes(&test, 0, test.matches[0], result, t) + } + } +} + +func testSubmatchString(test *FindTest, n int, submatches []int, result []string, t *testing.T) { + if len(submatches) != len(result)*2 { + t.Errorf("match %d: expected %d submatches; got %d: %s", n, len(submatches)/2, len(result), test) + return + } + for k := 0; k < len(submatches); k += 2 { + if submatches[k] == -1 { + if result[k/2] != "" { + t.Errorf("match %d: expected nil got %q: %s", n, result, test) + } + continue + } + expect := test.text[submatches[k]:submatches[k+1]] + if expect != result[k/2] { + t.Errorf("match %d: expected %q got %q: %s", n, expect, result, test) + return + } + } +} + +func TestFindStringSubmatch(t *testing.T) { + for _, test := range findTests { + result := MustCompile(test.pat).FindStringSubmatch(test.text) + switch { + case test.matches == nil && result == nil: + // ok + case test.matches == nil && result != nil: + t.Errorf("expected no match; got one: %s", test) + case test.matches != nil && result == nil: + t.Errorf("expected match; got none: %s", test) + case test.matches != nil && result != nil: + testSubmatchString(&test, 0, test.matches[0], result, t) + } + } +} + +func testSubmatchIndices(test *FindTest, n int, expect, result []int, t *testing.T) { + if len(expect) != len(result) { + t.Errorf("match %d: expected %d matches; got %d: %s", n, len(expect)/2, len(result)/2, test) + return + } + for k, e := range expect { + if e != result[k] { + t.Errorf("match %d: submatch error: expected %v got %v: %s", n, expect, result, test) + } + } +} + +func testFindSubmatchIndex(test *FindTest, result []int, t *testing.T) { + switch { + case test.matches == nil && result == nil: + // ok + case test.matches == nil && result != nil: + t.Errorf("expected no match; got one: %s", test) + case test.matches 
!= nil && result == nil: + t.Errorf("expected match; got none: %s", test) + case test.matches != nil && result != nil: + testSubmatchIndices(test, 0, test.matches[0], result, t) + } +} + +func TestFindSubmatchIndex(t *testing.T) { + for _, test := range findTests { + testFindSubmatchIndex(&test, MustCompile(test.pat).FindSubmatchIndex([]byte(test.text)), t) + } +} + +func TestFindStringSubmatchIndex(t *testing.T) { + for _, test := range findTests { + testFindSubmatchIndex(&test, MustCompile(test.pat).FindStringSubmatchIndex(test.text), t) + } +} + +func TestFindReaderSubmatchIndex(t *testing.T) { + for _, test := range findTests { + testFindSubmatchIndex(&test, MustCompile(test.pat).FindReaderSubmatchIndex(strings.NewReader(test.text)), t) + } +} + +// Now come the monster AllSubmatch cases. + +func TestFindAllSubmatch(t *testing.T) { + for _, test := range findTests { + result := MustCompile(test.pat).FindAllSubmatch([]byte(test.text), -1) + switch { + case test.matches == nil && result == nil: + // ok + case test.matches == nil && result != nil: + t.Errorf("expected no match; got one: %s", test) + case test.matches != nil && result == nil: + t.Errorf("expected match; got none: %s", test) + case len(test.matches) != len(result): + t.Errorf("expected %d matches; got %d: %s", len(test.matches), len(result), test) + case test.matches != nil && result != nil: + for k, match := range test.matches { + testSubmatchBytes(&test, k, match, result[k], t) + } + } + } +} + +func TestFindAllStringSubmatch(t *testing.T) { + for _, test := range findTests { + result := MustCompile(test.pat).FindAllStringSubmatch(test.text, -1) + switch { + case test.matches == nil && result == nil: + // ok + case test.matches == nil && result != nil: + t.Errorf("expected no match; got one: %s", test) + case test.matches != nil && result == nil: + t.Errorf("expected match; got none: %s", test) + case len(test.matches) != len(result): + t.Errorf("expected %d matches; got %d: %s", len(test.matches), len(result), test) + case test.matches != nil && result != nil: + for k, match := range test.matches { + testSubmatchString(&test, k, match, result[k], t) + } + } + } +} + +func testFindAllSubmatchIndex(test *FindTest, result [][]int, t *testing.T) { + switch { + case test.matches == nil && result == nil: + // ok + case test.matches == nil && result != nil: + t.Errorf("expected no match; got one: %s", test) + case test.matches != nil && result == nil: + t.Errorf("expected match; got none: %s", test) + case len(test.matches) != len(result): + t.Errorf("expected %d matches; got %d: %s", len(test.matches), len(result), test) + case test.matches != nil && result != nil: + for k, match := range test.matches { + testSubmatchIndices(test, k, match, result[k], t) + } + } +} + +func TestFindAllSubmatchIndex(t *testing.T) { + for _, test := range findTests { + testFindAllSubmatchIndex(&test, MustCompile(test.pat).FindAllSubmatchIndex([]byte(test.text), -1), t) + } +} + +func TestFindAllStringSubmatchIndex(t *testing.T) { + for _, test := range findTests { + testFindAllSubmatchIndex(&test, MustCompile(test.pat).FindAllStringSubmatchIndex(test.text, -1), t) + } +} diff --git a/platform/dbops/binaries/go/go/src/regexp/onepass.go b/platform/dbops/binaries/go/go/src/regexp/onepass.go new file mode 100644 index 0000000000000000000000000000000000000000..b3066e88ee436bddc9f364d4c452f4f64c5c5613 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/regexp/onepass.go @@ -0,0 +1,507 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regexp + +import ( + "regexp/syntax" + "sort" + "strings" + "unicode" + "unicode/utf8" +) + +// "One-pass" regexp execution. +// Some regexps can be analyzed to determine that they never need +// backtracking: they are guaranteed to run in one pass over the string +// without bothering to save all the usual NFA state. +// Detect those and execute them more quickly. + +// A onePassProg is a compiled one-pass regular expression program. +// It is the same as syntax.Prog except for the use of onePassInst. +type onePassProg struct { + Inst []onePassInst + Start int // index of start instruction + NumCap int // number of InstCapture insts in re +} + +// A onePassInst is a single instruction in a one-pass regular expression program. +// It is the same as syntax.Inst except for the new 'Next' field. +type onePassInst struct { + syntax.Inst + Next []uint32 +} + +// onePassPrefix returns a literal string that all matches for the +// regexp must start with. Complete is true if the prefix +// is the entire match. Pc is the index of the last rune instruction +// in the string. The onePassPrefix skips over the mandatory +// EmptyBeginText. +func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { + i := &p.Inst[p.Start] + if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 { + return "", i.Op == syntax.InstMatch, uint32(p.Start) + } + pc = i.Out + i = &p.Inst[pc] + for i.Op == syntax.InstNop { + pc = i.Out + i = &p.Inst[pc] + } + // Avoid allocation of buffer if prefix is empty. + if iop(i) != syntax.InstRune || len(i.Rune) != 1 { + return "", i.Op == syntax.InstMatch, uint32(p.Start) + } + + // Have prefix; gather characters. + var buf strings.Builder + for iop(i) == syntax.InstRune && len(i.Rune) == 1 && syntax.Flags(i.Arg)&syntax.FoldCase == 0 && i.Rune[0] != utf8.RuneError { + buf.WriteRune(i.Rune[0]) + pc, i = i.Out, &p.Inst[i.Out] + } + if i.Op == syntax.InstEmptyWidth && + syntax.EmptyOp(i.Arg)&syntax.EmptyEndText != 0 && + p.Inst[i.Out].Op == syntax.InstMatch { + complete = true + } + return buf.String(), complete, pc +} + +// onePassNext selects the next actionable state of the prog, based on the input character. +// It should only be called when i.Op == InstAlt or InstAltMatch, and from the one-pass machine. +// One of the alternates may ultimately lead without input to end of line. If the instruction +// is InstAltMatch the path to the InstMatch is in i.Out, the normal node in i.Next. +func onePassNext(i *onePassInst, r rune) uint32 { + next := i.MatchRunePos(r) + if next >= 0 { + return i.Next[next] + } + if i.Op == syntax.InstAltMatch { + return i.Out + } + return 0 +} + +func iop(i *syntax.Inst) syntax.InstOp { + op := i.Op + switch op { + case syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: + op = syntax.InstRune + } + return op +} + +// Sparse Array implementation is used as a queueOnePass. 
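+// A value u is present iff sparse[u] < size && dense[sparse[u]] == u, so
+// insert, contains and clear are all O(1) and the sparse slice never needs
+// zeroing; see contains and insertNew below.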
+type queueOnePass struct { + sparse []uint32 + dense []uint32 + size, nextIndex uint32 +} + +func (q *queueOnePass) empty() bool { + return q.nextIndex >= q.size +} + +func (q *queueOnePass) next() (n uint32) { + n = q.dense[q.nextIndex] + q.nextIndex++ + return +} + +func (q *queueOnePass) clear() { + q.size = 0 + q.nextIndex = 0 +} + +func (q *queueOnePass) contains(u uint32) bool { + if u >= uint32(len(q.sparse)) { + return false + } + return q.sparse[u] < q.size && q.dense[q.sparse[u]] == u +} + +func (q *queueOnePass) insert(u uint32) { + if !q.contains(u) { + q.insertNew(u) + } +} + +func (q *queueOnePass) insertNew(u uint32) { + if u >= uint32(len(q.sparse)) { + return + } + q.sparse[u] = q.size + q.dense[q.size] = u + q.size++ +} + +func newQueue(size int) (q *queueOnePass) { + return &queueOnePass{ + sparse: make([]uint32, size), + dense: make([]uint32, size), + } +} + +// mergeRuneSets merges two non-intersecting runesets, and returns the merged result, +// and a NextIp array. The idea is that if a rune matches the OnePassRunes at index +// i, NextIp[i/2] is the target. If the input sets intersect, an empty runeset and a +// NextIp array with the single element mergeFailed is returned. +// The code assumes that both inputs contain ordered and non-intersecting rune pairs. +const mergeFailed = uint32(0xffffffff) + +var ( + noRune = []rune{} + noNext = []uint32{mergeFailed} +) + +func mergeRuneSets(leftRunes, rightRunes *[]rune, leftPC, rightPC uint32) ([]rune, []uint32) { + leftLen := len(*leftRunes) + rightLen := len(*rightRunes) + if leftLen&0x1 != 0 || rightLen&0x1 != 0 { + panic("mergeRuneSets odd length []rune") + } + var ( + lx, rx int + ) + merged := make([]rune, 0) + next := make([]uint32, 0) + ok := true + defer func() { + if !ok { + merged = nil + next = nil + } + }() + + ix := -1 + extend := func(newLow *int, newArray *[]rune, pc uint32) bool { + if ix > 0 && (*newArray)[*newLow] <= merged[ix] { + return false + } + merged = append(merged, (*newArray)[*newLow], (*newArray)[*newLow+1]) + *newLow += 2 + ix += 2 + next = append(next, pc) + return true + } + + for lx < leftLen || rx < rightLen { + switch { + case rx >= rightLen: + ok = extend(&lx, leftRunes, leftPC) + case lx >= leftLen: + ok = extend(&rx, rightRunes, rightPC) + case (*rightRunes)[rx] < (*leftRunes)[lx]: + ok = extend(&rx, rightRunes, rightPC) + default: + ok = extend(&lx, leftRunes, leftPC) + } + if !ok { + return noRune, noNext + } + } + return merged, next +} + +// cleanupOnePass drops working memory, and restores certain shortcut instructions. +func cleanupOnePass(prog *onePassProg, original *syntax.Prog) { + for ix, instOriginal := range original.Inst { + switch instOriginal.Op { + case syntax.InstAlt, syntax.InstAltMatch, syntax.InstRune: + case syntax.InstCapture, syntax.InstEmptyWidth, syntax.InstNop, syntax.InstMatch, syntax.InstFail: + prog.Inst[ix].Next = nil + case syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: + prog.Inst[ix].Next = nil + prog.Inst[ix] = onePassInst{Inst: instOriginal} + } + } +} + +// onePassCopy creates a copy of the original Prog, as we'll be modifying it. +func onePassCopy(prog *syntax.Prog) *onePassProg { + p := &onePassProg{ + Start: prog.Start, + NumCap: prog.NumCap, + Inst: make([]onePassInst, len(prog.Inst)), + } + for i, inst := range prog.Inst { + p.Inst[i] = onePassInst{Inst: inst} + } + + // rewrites one or more common Prog constructs that enable some otherwise + // non-onepass Progs to be onepass. 
A:BD (for example) means an InstAlt at + // ip A, that points to ips B & C. + // A:BC + B:DA => A:BC + B:CD + // A:BC + B:DC => A:DC + B:DC + for pc := range p.Inst { + switch p.Inst[pc].Op { + default: + continue + case syntax.InstAlt, syntax.InstAltMatch: + // A:Bx + B:Ay + p_A_Other := &p.Inst[pc].Out + p_A_Alt := &p.Inst[pc].Arg + // make sure a target is another Alt + instAlt := p.Inst[*p_A_Alt] + if !(instAlt.Op == syntax.InstAlt || instAlt.Op == syntax.InstAltMatch) { + p_A_Alt, p_A_Other = p_A_Other, p_A_Alt + instAlt = p.Inst[*p_A_Alt] + if !(instAlt.Op == syntax.InstAlt || instAlt.Op == syntax.InstAltMatch) { + continue + } + } + instOther := p.Inst[*p_A_Other] + // Analyzing both legs pointing to Alts is for another day + if instOther.Op == syntax.InstAlt || instOther.Op == syntax.InstAltMatch { + // too complicated + continue + } + // simple empty transition loop + // A:BC + B:DA => A:BC + B:DC + p_B_Alt := &p.Inst[*p_A_Alt].Out + p_B_Other := &p.Inst[*p_A_Alt].Arg + patch := false + if instAlt.Out == uint32(pc) { + patch = true + } else if instAlt.Arg == uint32(pc) { + patch = true + p_B_Alt, p_B_Other = p_B_Other, p_B_Alt + } + if patch { + *p_B_Alt = *p_A_Other + } + + // empty transition to common target + // A:BC + B:DC => A:DC + B:DC + if *p_A_Other == *p_B_Alt { + *p_A_Alt = *p_B_Other + } + } + } + return p +} + +// runeSlice exists to permit sorting the case-folded rune sets. +type runeSlice []rune + +func (p runeSlice) Len() int { return len(p) } +func (p runeSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p runeSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune} +var anyRune = []rune{0, unicode.MaxRune} + +// makeOnePass creates a onepass Prog, if possible. It is possible if at any alt, +// the match engine can always tell which branch to take. The routine may modify +// p if it is turned into a onepass Prog. If it isn't possible for this to be a +// onepass Prog, the Prog nil is returned. makeOnePass is recursive +// to the size of the Prog. +func makeOnePass(p *onePassProg) *onePassProg { + // If the machine is very long, it's not worth the time to check if we can use one pass. + if len(p.Inst) >= 1000 { + return nil + } + + var ( + instQueue = newQueue(len(p.Inst)) + visitQueue = newQueue(len(p.Inst)) + check func(uint32, []bool) bool + onePassRunes = make([][]rune, len(p.Inst)) + ) + + // check that paths from Alt instructions are unambiguous, and rebuild the new + // program as a onepass program + check = func(pc uint32, m []bool) (ok bool) { + ok = true + inst := &p.Inst[pc] + if visitQueue.contains(pc) { + return + } + visitQueue.insert(pc) + switch inst.Op { + case syntax.InstAlt, syntax.InstAltMatch: + ok = check(inst.Out, m) && check(inst.Arg, m) + // check no-input paths to InstMatch + matchOut := m[inst.Out] + matchArg := m[inst.Arg] + if matchOut && matchArg { + ok = false + break + } + // Match on empty goes in inst.Out + if matchArg { + inst.Out, inst.Arg = inst.Arg, inst.Out + matchOut, matchArg = matchArg, matchOut + } + if matchOut { + m[pc] = true + inst.Op = syntax.InstAltMatch + } + + // build a dispatch operator from the two legs of the alt. 
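+ // For example, if the Out leg matches the rune range [a,a] and
+ // the Arg leg matches [b,b], the merged dispatch is the set
+ // [a,a,b,b] with Next = [Out, Arg]; intersecting sets make
+ // mergeRuneSets report mergeFailed, and the prog is not one-pass.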
+ onePassRunes[pc], inst.Next = mergeRuneSets( + &onePassRunes[inst.Out], &onePassRunes[inst.Arg], inst.Out, inst.Arg) + if len(inst.Next) > 0 && inst.Next[0] == mergeFailed { + ok = false + break + } + case syntax.InstCapture, syntax.InstNop: + ok = check(inst.Out, m) + m[pc] = m[inst.Out] + // pass matching runes back through these no-ops. + onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...) + inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + case syntax.InstEmptyWidth: + ok = check(inst.Out, m) + m[pc] = m[inst.Out] + onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...) + inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + case syntax.InstMatch, syntax.InstFail: + m[pc] = inst.Op == syntax.InstMatch + case syntax.InstRune: + m[pc] = false + if len(inst.Next) > 0 { + break + } + instQueue.insert(inst.Out) + if len(inst.Rune) == 0 { + onePassRunes[pc] = []rune{} + inst.Next = []uint32{inst.Out} + break + } + runes := make([]rune, 0) + if len(inst.Rune) == 1 && syntax.Flags(inst.Arg)&syntax.FoldCase != 0 { + r0 := inst.Rune[0] + runes = append(runes, r0, r0) + for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { + runes = append(runes, r1, r1) + } + sort.Sort(runeSlice(runes)) + } else { + runes = append(runes, inst.Rune...) + } + onePassRunes[pc] = runes + inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + inst.Op = syntax.InstRune + case syntax.InstRune1: + m[pc] = false + if len(inst.Next) > 0 { + break + } + instQueue.insert(inst.Out) + runes := []rune{} + // expand case-folded runes + if syntax.Flags(inst.Arg)&syntax.FoldCase != 0 { + r0 := inst.Rune[0] + runes = append(runes, r0, r0) + for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { + runes = append(runes, r1, r1) + } + sort.Sort(runeSlice(runes)) + } else { + runes = append(runes, inst.Rune[0], inst.Rune[0]) + } + onePassRunes[pc] = runes + inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + inst.Op = syntax.InstRune + case syntax.InstRuneAny: + m[pc] = false + if len(inst.Next) > 0 { + break + } + instQueue.insert(inst.Out) + onePassRunes[pc] = append([]rune{}, anyRune...) + inst.Next = []uint32{inst.Out} + case syntax.InstRuneAnyNotNL: + m[pc] = false + if len(inst.Next) > 0 { + break + } + instQueue.insert(inst.Out) + onePassRunes[pc] = append([]rune{}, anyRuneNotNL...) + inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + } + return + } + + instQueue.clear() + instQueue.insert(uint32(p.Start)) + m := make([]bool, len(p.Inst)) + for !instQueue.empty() { + visitQueue.clear() + pc := instQueue.next() + if !check(pc, m) { + p = nil + break + } + } + if p != nil { + for i := range p.Inst { + p.Inst[i].Rune = onePassRunes[i] + } + } + return p +} + +// compileOnePass returns a new *syntax.Prog suitable for onePass execution if the original Prog +// can be recharacterized as a one-pass regexp program, or syntax.nil if the +// Prog cannot be converted. For a one pass prog, the fundamental condition that must +// be true is: at any InstAlt, there must be no ambiguity about what branch to take. 
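+// For example (cases drawn from this package's tests), `^abcd$` and
+// `^a(?:b?|c+)$` are one-pass, but `^a(?:b?|c?)$` is not: both
+// alternatives can match the empty string, so the next input rune cannot
+// disambiguate them.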
+func compileOnePass(prog *syntax.Prog) (p *onePassProg) {
+	if prog.Start == 0 {
+		return nil
+	}
+	// onepass regexp is anchored
+	if prog.Inst[prog.Start].Op != syntax.InstEmptyWidth ||
+		syntax.EmptyOp(prog.Inst[prog.Start].Arg)&syntax.EmptyBeginText != syntax.EmptyBeginText {
+		return nil
+	}
+	// every instruction leading to InstMatch must be EmptyEndText
+	for _, inst := range prog.Inst {
+		opOut := prog.Inst[inst.Out].Op
+		switch inst.Op {
+		default:
+			if opOut == syntax.InstMatch {
+				return nil
+			}
+		case syntax.InstAlt, syntax.InstAltMatch:
+			if opOut == syntax.InstMatch || prog.Inst[inst.Arg].Op == syntax.InstMatch {
+				return nil
+			}
+		case syntax.InstEmptyWidth:
+			if opOut == syntax.InstMatch {
+				if syntax.EmptyOp(inst.Arg)&syntax.EmptyEndText == syntax.EmptyEndText {
+					continue
+				}
+				return nil
+			}
+		}
+	}
+	// Creates a slightly optimized copy of the original Prog
+	// that cleans up some Prog idioms that block valid onepass programs
+	p = onePassCopy(prog)
+
+	// checkAmbiguity on InstAlts, build onepass Prog if possible
+	p = makeOnePass(p)
+
+	if p != nil {
+		cleanupOnePass(p, prog)
+	}
+	return p
+}
diff --git a/platform/dbops/binaries/go/go/src/regexp/onepass_test.go b/platform/dbops/binaries/go/go/src/regexp/onepass_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..6a42eda391c2c7ad09068c687ca1d84cbd95356f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/regexp/onepass_test.go
@@ -0,0 +1,225 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package regexp
+
+import (
+	"reflect"
+	"regexp/syntax"
+	"strings"
+	"testing"
+)
+
+var runeMergeTests = []struct {
+	left, right, merged []rune
+	next                []uint32
+	leftPC, rightPC     uint32
+}{
+	{
+		// empty rhs
+		[]rune{69, 69},
+		[]rune{},
+		[]rune{69, 69},
+		[]uint32{1},
+		1, 2,
+	},
+	{
+		// identical runes, identical targets
+		[]rune{69, 69},
+		[]rune{69, 69},
+		[]rune{},
+		[]uint32{mergeFailed},
+		1, 1,
+	},
+	{
+		// identical runes, different targets
+		[]rune{69, 69},
+		[]rune{69, 69},
+		[]rune{},
+		[]uint32{mergeFailed},
+		1, 2,
+	},
+	{
+		// append right-first
+		[]rune{69, 69},
+		[]rune{71, 71},
+		[]rune{69, 69, 71, 71},
+		[]uint32{1, 2},
+		1, 2,
+	},
+	{
+		// append, left-first
+		[]rune{71, 71},
+		[]rune{69, 69},
+		[]rune{69, 69, 71, 71},
+		[]uint32{2, 1},
+		1, 2,
+	},
+	{
+		// successful interleave
+		[]rune{60, 60, 71, 71, 101, 101},
+		[]rune{69, 69, 88, 88},
+		[]rune{60, 60, 69, 69, 71, 71, 88, 88, 101, 101},
+		[]uint32{1, 2, 1, 2, 1},
+		1, 2,
+	},
+	{
+		// left surrounds right
+		[]rune{69, 74},
+		[]rune{71, 71},
+		[]rune{},
+		[]uint32{mergeFailed},
+		1, 2,
+	},
+	{
+		// right surrounds left
+		[]rune{69, 74},
+		[]rune{68, 75},
+		[]rune{},
+		[]uint32{mergeFailed},
+		1, 2,
+	},
+	{
+		// overlap at interval begin
+		[]rune{69, 74},
+		[]rune{74, 75},
+		[]rune{},
+		[]uint32{mergeFailed},
+		1, 2,
+	},
+	{
+		// overlap at interval end
+		[]rune{69, 74},
+		[]rune{65, 69},
+		[]rune{},
+		[]uint32{mergeFailed},
+		1, 2,
+	},
+	{
+		// overlap from above
+		[]rune{69, 74},
+		[]rune{71, 74},
+		[]rune{},
+		[]uint32{mergeFailed},
+		1, 2,
+	},
+	{
+		// overlap from below
+		[]rune{69, 74},
+		[]rune{65, 71},
+		[]rune{},
+		[]uint32{mergeFailed},
+		1, 2,
+	},
+	{
+		// out of order []rune
+		[]rune{69, 74, 60, 65},
+		[]rune{66, 67},
+		[]rune{},
+		[]uint32{mergeFailed},
+		1, 2,
+	},
+}
+
+func TestMergeRuneSet(t *testing.T) {
+	for ix, test := range runeMergeTests {
+		merged, next :=
mergeRuneSets(&test.left, &test.right, test.leftPC, test.rightPC) + if !reflect.DeepEqual(merged, test.merged) { + t.Errorf("mergeRuneSet :%d (%v, %v) merged\n have\n%v\nwant\n%v", ix, test.left, test.right, merged, test.merged) + } + if !reflect.DeepEqual(next, test.next) { + t.Errorf("mergeRuneSet :%d(%v, %v) next\n have\n%v\nwant\n%v", ix, test.left, test.right, next, test.next) + } + } +} + +var onePassTests = []struct { + re string + isOnePass bool +}{ + {`^(?:a|(?:a*))$`, false}, + {`^(?:(a)|(?:a*))$`, false}, + {`^(?:(?:(?:.(?:$))?))$`, true}, + {`^abcd$`, true}, + {`^(?:(?:a{0,})*?)$`, false}, + {`^(?:(?:a+)*)$`, true}, + {`^(?:(?:a|(?:aa)))$`, true}, + {`^(?:[^\s\S])$`, true}, + {`^(?:(?:a{3,4}){0,})$`, false}, + {`^(?:(?:(?:a*)+))$`, true}, + {`^[a-c]+$`, true}, + {`^[a-c]*$`, true}, + {`^(?:a*)$`, true}, + {`^(?:(?:aa)|a)$`, true}, + {`^[a-c]*`, false}, + {`^...$`, true}, + {`^(?:a|(?:aa))$`, true}, + {`^a((b))c$`, true}, + {`^a.[l-nA-Cg-j]?e$`, true}, + {`^a((b))$`, true}, + {`^a(?:(b)|(c))c$`, true}, + {`^a(?:(b*)|(c))c$`, false}, + {`^a(?:b|c)$`, true}, + {`^a(?:b?|c)$`, true}, + {`^a(?:b?|c?)$`, false}, + {`^a(?:b?|c+)$`, true}, + {`^a(?:b+|(bc))d$`, false}, + {`^a(?:bc)+$`, true}, + {`^a(?:[bcd])+$`, true}, + {`^a((?:[bcd])+)$`, true}, + {`^a(:?b|c)*d$`, true}, + {`^.bc(d|e)*$`, true}, + {`^(?:(?:aa)|.)$`, false}, + {`^(?:(?:a{1,2}){1,2})$`, false}, + {`^l` + strings.Repeat("o", 2<<8) + `ng$`, true}, +} + +func TestCompileOnePass(t *testing.T) { + var ( + p *syntax.Prog + re *syntax.Regexp + err error + ) + for _, test := range onePassTests { + if re, err = syntax.Parse(test.re, syntax.Perl); err != nil { + t.Errorf("Parse(%q) got err:%s, want success", test.re, err) + continue + } + // needs to be done before compile... + re = re.Simplify() + if p, err = syntax.Compile(re); err != nil { + t.Errorf("Compile(%q) got err:%s, want success", test.re, err) + continue + } + isOnePass := compileOnePass(p) != nil + if isOnePass != test.isOnePass { + t.Errorf("CompileOnePass(%q) got isOnePass=%v, expected %v", test.re, isOnePass, test.isOnePass) + } + } +} + +// TODO(cespare): Unify with onePassTests and rationalize one-pass test cases. +var onePassTests1 = []struct { + re string + match string +}{ + {`^a(/b+(#c+)*)*$`, "a/b#c"}, // golang.org/issue/11905 +} + +func TestRunOnePass(t *testing.T) { + for _, test := range onePassTests1 { + re, err := Compile(test.re) + if err != nil { + t.Errorf("Compile(%q): got err: %s", test.re, err) + continue + } + if re.onepass == nil { + t.Errorf("Compile(%q): got nil, want one-pass", test.re) + continue + } + if !re.MatchString(test.match) { + t.Errorf("onepass %q did not match %q", test.re, test.match) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/regexp/regexp.go b/platform/dbops/binaries/go/go/src/regexp/regexp.go new file mode 100644 index 0000000000000000000000000000000000000000..462f235b1bb12fea85a2b538474e9217764cd5c7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/regexp/regexp.go @@ -0,0 +1,1304 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package regexp implements regular expression search. +// +// The syntax of the regular expressions accepted is the same +// general syntax used by Perl, Python, and other languages. +// More precisely, it is the syntax accepted by RE2 and described at +// https://golang.org/s/re2syntax, except for \C. 
+// For an overview of the syntax, see the [regexp/syntax] package. +// +// The regexp implementation provided by this package is +// guaranteed to run in time linear in the size of the input. +// (This is a property not guaranteed by most open source +// implementations of regular expressions.) For more information +// about this property, see +// +// https://swtch.com/~rsc/regexp/regexp1.html +// +// or any book about automata theory. +// +// All characters are UTF-8-encoded code points. +// Following [utf8.DecodeRune], each byte of an invalid UTF-8 sequence +// is treated as if it encoded utf8.RuneError (U+FFFD). +// +// There are 16 methods of [Regexp] that match a regular expression and identify +// the matched text. Their names are matched by this regular expression: +// +// Find(All)?(String)?(Submatch)?(Index)? +// +// If 'All' is present, the routine matches successive non-overlapping +// matches of the entire expression. Empty matches abutting a preceding +// match are ignored. The return value is a slice containing the successive +// return values of the corresponding non-'All' routine. These routines take +// an extra integer argument, n. If n >= 0, the function returns at most n +// matches/submatches; otherwise, it returns all of them. +// +// If 'String' is present, the argument is a string; otherwise it is a slice +// of bytes; return values are adjusted as appropriate. +// +// If 'Submatch' is present, the return value is a slice identifying the +// successive submatches of the expression. Submatches are matches of +// parenthesized subexpressions (also known as capturing groups) within the +// regular expression, numbered from left to right in order of opening +// parenthesis. Submatch 0 is the match of the entire expression, submatch 1 is +// the match of the first parenthesized subexpression, and so on. +// +// If 'Index' is present, matches and submatches are identified by byte index +// pairs within the input string: result[2*n:2*n+2] identifies the indexes of +// the nth submatch. The pair for n==0 identifies the match of the entire +// expression. If 'Index' is not present, the match is identified by the text +// of the match/submatch. If an index is negative or text is nil, it means that +// subexpression did not match any string in the input. For 'String' versions +// an empty string means either no match or an empty match. +// +// There is also a subset of the methods that can be applied to text read +// from a RuneReader: +// +// MatchReader, FindReaderIndex, FindReaderSubmatchIndex +// +// This set may grow. Note that regular expression matches may need to +// examine text beyond the text returned by a match, so the methods that +// match text from a RuneReader may read arbitrarily far into the input +// before returning. +// +// (There are a few other methods that do not match this pattern.) +package regexp + +import ( + "bytes" + "io" + "regexp/syntax" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Regexp is the representation of a compiled regular expression. +// A Regexp is safe for concurrent use by multiple goroutines, +// except for configuration methods, such as [Regexp.Longest]. 
+type Regexp struct { + expr string // as passed to Compile + prog *syntax.Prog // compiled program + onepass *onePassProg // onepass program or nil + numSubexp int + maxBitStateLen int + subexpNames []string + prefix string // required prefix in unanchored matches + prefixBytes []byte // prefix, as a []byte + prefixRune rune // first rune in prefix + prefixEnd uint32 // pc for last rune in prefix + mpool int // pool for machines + matchcap int // size of recorded match lengths + prefixComplete bool // prefix is the entire regexp + cond syntax.EmptyOp // empty-width conditions required at start of match + minInputLen int // minimum length of the input in bytes + + // This field can be modified by the Longest method, + // but it is otherwise read-only. + longest bool // whether regexp prefers leftmost-longest match +} + +// String returns the source text used to compile the regular expression. +func (re *Regexp) String() string { + return re.expr +} + +// Copy returns a new [Regexp] object copied from re. +// Calling [Regexp.Longest] on one copy does not affect another. +// +// Deprecated: In earlier releases, when using a [Regexp] in multiple goroutines, +// giving each goroutine its own copy helped to avoid lock contention. +// As of Go 1.12, using Copy is no longer necessary to avoid lock contention. +// Copy may still be appropriate if the reason for its use is to make +// two copies with different [Regexp.Longest] settings. +func (re *Regexp) Copy() *Regexp { + re2 := *re + return &re2 +} + +// Compile parses a regular expression and returns, if successful, +// a [Regexp] object that can be used to match against text. +// +// When matching against text, the regexp returns a match that +// begins as early as possible in the input (leftmost), and among those +// it chooses the one that a backtracking search would have found first. +// This so-called leftmost-first matching is the same semantics +// that Perl, Python, and other implementations use, although this +// package implements it without the expense of backtracking. +// For POSIX leftmost-longest matching, see [CompilePOSIX]. +func Compile(expr string) (*Regexp, error) { + return compile(expr, syntax.Perl, false) +} + +// CompilePOSIX is like [Compile] but restricts the regular expression +// to POSIX ERE (egrep) syntax and changes the match semantics to +// leftmost-longest. +// +// That is, when matching against text, the regexp returns a match that +// begins as early as possible in the input (leftmost), and among those +// it chooses a match that is as long as possible. +// This so-called leftmost-longest matching is the same semantics +// that early regular expression implementations used and that POSIX +// specifies. +// +// However, there can be multiple leftmost-longest matches, with different +// submatch choices, and here this package diverges from POSIX. +// Among the possible leftmost-longest matches, this package chooses +// the one that a backtracking search would have found first, while POSIX +// specifies that the match be chosen to maximize the length of the first +// subexpression, then the second, and so on from left to right. +// The POSIX rule is computationally prohibitive and not even well-defined. +// See https://swtch.com/~rsc/regexp/regexp2.html#posix for details. +func CompilePOSIX(expr string) (*Regexp, error) { + return compile(expr, syntax.POSIX, true) +} + +// Longest makes future searches prefer the leftmost-longest match. 
+// That is, when matching against text, the regexp returns a match that +// begins as early as possible in the input (leftmost), and among those +// it chooses a match that is as long as possible. +// This method modifies the [Regexp] and may not be called concurrently +// with any other methods. +func (re *Regexp) Longest() { + re.longest = true +} + +func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) { + re, err := syntax.Parse(expr, mode) + if err != nil { + return nil, err + } + maxCap := re.MaxCap() + capNames := re.CapNames() + + re = re.Simplify() + prog, err := syntax.Compile(re) + if err != nil { + return nil, err + } + matchcap := prog.NumCap + if matchcap < 2 { + matchcap = 2 + } + regexp := &Regexp{ + expr: expr, + prog: prog, + onepass: compileOnePass(prog), + numSubexp: maxCap, + subexpNames: capNames, + cond: prog.StartCond(), + longest: longest, + matchcap: matchcap, + minInputLen: minInputLen(re), + } + if regexp.onepass == nil { + regexp.prefix, regexp.prefixComplete = prog.Prefix() + regexp.maxBitStateLen = maxBitStateLen(prog) + } else { + regexp.prefix, regexp.prefixComplete, regexp.prefixEnd = onePassPrefix(prog) + } + if regexp.prefix != "" { + // TODO(rsc): Remove this allocation by adding + // IndexString to package bytes. + regexp.prefixBytes = []byte(regexp.prefix) + regexp.prefixRune, _ = utf8.DecodeRuneInString(regexp.prefix) + } + + n := len(prog.Inst) + i := 0 + for matchSize[i] != 0 && matchSize[i] < n { + i++ + } + regexp.mpool = i + + return regexp, nil +} + +// Pools of *machine for use during (*Regexp).doExecute, +// split up by the size of the execution queues. +// matchPool[i] machines have queue size matchSize[i]. +// On a 64-bit system each queue entry is 16 bytes, +// so matchPool[0] has 16*2*128 = 4kB queues, etc. +// The final matchPool is a catch-all for very large queues. +var ( + matchSize = [...]int{128, 512, 2048, 16384, 0} + matchPool [len(matchSize)]sync.Pool +) + +// get returns a machine to use for matching re. +// It uses the re's machine cache if possible, to avoid +// unnecessary allocation. +func (re *Regexp) get() *machine { + m, ok := matchPool[re.mpool].Get().(*machine) + if !ok { + m = new(machine) + } + m.re = re + m.p = re.prog + if cap(m.matchcap) < re.matchcap { + m.matchcap = make([]int, re.matchcap) + for _, t := range m.pool { + t.cap = make([]int, re.matchcap) + } + } + + // Allocate queues if needed. + // Or reallocate, for "large" match pool. + n := matchSize[re.mpool] + if n == 0 { // large pool + n = len(re.prog.Inst) + } + if len(m.q0.sparse) < n { + m.q0 = queue{make([]uint32, n), make([]entry, 0, n)} + m.q1 = queue{make([]uint32, n), make([]entry, 0, n)} + } + return m +} + +// put returns a machine to the correct machine pool. +func (re *Regexp) put(m *machine) { + m.re = nil + m.p = nil + m.inputs.clear() + matchPool[re.mpool].Put(m) +} + +// minInputLen walks the regexp to find the minimum length of any matchable input. 
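+// For example, it returns 3 for `a(bc)+` (a 1-byte literal concatenated
+// with an OpPlus over a 2-byte literal) and 1 for `a|bc` (the shortest
+// OpAlternate branch).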
+func minInputLen(re *syntax.Regexp) int {
+	switch re.Op {
+	default:
+		return 0
+	case syntax.OpAnyChar, syntax.OpAnyCharNotNL, syntax.OpCharClass:
+		return 1
+	case syntax.OpLiteral:
+		l := 0
+		for _, r := range re.Rune {
+			if r == utf8.RuneError {
+				l++
+			} else {
+				l += utf8.RuneLen(r)
+			}
+		}
+		return l
+	case syntax.OpCapture, syntax.OpPlus:
+		return minInputLen(re.Sub[0])
+	case syntax.OpRepeat:
+		return re.Min * minInputLen(re.Sub[0])
+	case syntax.OpConcat:
+		l := 0
+		for _, sub := range re.Sub {
+			l += minInputLen(sub)
+		}
+		return l
+	case syntax.OpAlternate:
+		l := minInputLen(re.Sub[0])
+		var lnext int
+		for _, sub := range re.Sub[1:] {
+			lnext = minInputLen(sub)
+			if lnext < l {
+				l = lnext
+			}
+		}
+		return l
+	}
+}
+
+// MustCompile is like [Compile] but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled regular
+// expressions.
+func MustCompile(str string) *Regexp {
+	regexp, err := Compile(str)
+	if err != nil {
+		panic(`regexp: Compile(` + quote(str) + `): ` + err.Error())
+	}
+	return regexp
+}
+
+// MustCompilePOSIX is like [CompilePOSIX] but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled regular
+// expressions.
+func MustCompilePOSIX(str string) *Regexp {
+	regexp, err := CompilePOSIX(str)
+	if err != nil {
+		panic(`regexp: CompilePOSIX(` + quote(str) + `): ` + err.Error())
+	}
+	return regexp
+}
+
+func quote(s string) string {
+	if strconv.CanBackquote(s) {
+		return "`" + s + "`"
+	}
+	return strconv.Quote(s)
+}
+
+// NumSubexp returns the number of parenthesized subexpressions in this [Regexp].
+func (re *Regexp) NumSubexp() int {
+	return re.numSubexp
+}
+
+// SubexpNames returns the names of the parenthesized subexpressions
+// in this [Regexp]. The name for the first sub-expression is names[1],
+// so that if m is a match slice, the name for m[i] is SubexpNames()[i].
+// Since the Regexp as a whole cannot be named, names[0] is always
+// the empty string. The slice should not be modified.
+func (re *Regexp) SubexpNames() []string {
+	return re.subexpNames
+}
+
+// SubexpIndex returns the index of the first subexpression with the given name,
+// or -1 if there is no subexpression with that name.
+//
+// Note that multiple subexpressions can be written using the same name, as in
+// (?P<bob>a+)(?P<bob>b+), which declares two subexpressions named "bob".
+// In this case, SubexpIndex returns the index of the leftmost such subexpression
+// in the regular expression.
+func (re *Regexp) SubexpIndex(name string) int {
+	if name != "" {
+		for i, s := range re.subexpNames {
+			if name == s {
+				return i
+			}
+		}
+	}
+	return -1
+}
+
+const endOfText rune = -1
+
+// input abstracts different representations of the input text. It provides
+// one-character lookahead.
+type input interface {
+	step(pos int) (r rune, width int) // advance one rune
+	canCheckPrefix() bool             // can we look ahead without losing info?
+	hasPrefix(re *Regexp) bool
+	index(re *Regexp, pos int) int
+	context(pos int) lazyFlag
+}
+
+// inputString scans a string.
+type inputString struct { + str string +} + +func (i *inputString) step(pos int) (rune, int) { + if pos < len(i.str) { + c := i.str[pos] + if c < utf8.RuneSelf { + return rune(c), 1 + } + return utf8.DecodeRuneInString(i.str[pos:]) + } + return endOfText, 0 +} + +func (i *inputString) canCheckPrefix() bool { + return true +} + +func (i *inputString) hasPrefix(re *Regexp) bool { + return strings.HasPrefix(i.str, re.prefix) +} + +func (i *inputString) index(re *Regexp, pos int) int { + return strings.Index(i.str[pos:], re.prefix) +} + +func (i *inputString) context(pos int) lazyFlag { + r1, r2 := endOfText, endOfText + // 0 < pos && pos <= len(i.str) + if uint(pos-1) < uint(len(i.str)) { + r1 = rune(i.str[pos-1]) + if r1 >= utf8.RuneSelf { + r1, _ = utf8.DecodeLastRuneInString(i.str[:pos]) + } + } + // 0 <= pos && pos < len(i.str) + if uint(pos) < uint(len(i.str)) { + r2 = rune(i.str[pos]) + if r2 >= utf8.RuneSelf { + r2, _ = utf8.DecodeRuneInString(i.str[pos:]) + } + } + return newLazyFlag(r1, r2) +} + +// inputBytes scans a byte slice. +type inputBytes struct { + str []byte +} + +func (i *inputBytes) step(pos int) (rune, int) { + if pos < len(i.str) { + c := i.str[pos] + if c < utf8.RuneSelf { + return rune(c), 1 + } + return utf8.DecodeRune(i.str[pos:]) + } + return endOfText, 0 +} + +func (i *inputBytes) canCheckPrefix() bool { + return true +} + +func (i *inputBytes) hasPrefix(re *Regexp) bool { + return bytes.HasPrefix(i.str, re.prefixBytes) +} + +func (i *inputBytes) index(re *Regexp, pos int) int { + return bytes.Index(i.str[pos:], re.prefixBytes) +} + +func (i *inputBytes) context(pos int) lazyFlag { + r1, r2 := endOfText, endOfText + // 0 < pos && pos <= len(i.str) + if uint(pos-1) < uint(len(i.str)) { + r1 = rune(i.str[pos-1]) + if r1 >= utf8.RuneSelf { + r1, _ = utf8.DecodeLastRune(i.str[:pos]) + } + } + // 0 <= pos && pos < len(i.str) + if uint(pos) < uint(len(i.str)) { + r2 = rune(i.str[pos]) + if r2 >= utf8.RuneSelf { + r2, _ = utf8.DecodeRune(i.str[pos:]) + } + } + return newLazyFlag(r1, r2) +} + +// inputReader scans a RuneReader. +type inputReader struct { + r io.RuneReader + atEOT bool + pos int +} + +func (i *inputReader) step(pos int) (rune, int) { + if !i.atEOT && pos != i.pos { + return endOfText, 0 + + } + r, w, err := i.r.ReadRune() + if err != nil { + i.atEOT = true + return endOfText, 0 + } + i.pos += w + return r, w +} + +func (i *inputReader) canCheckPrefix() bool { + return false +} + +func (i *inputReader) hasPrefix(re *Regexp) bool { + return false +} + +func (i *inputReader) index(re *Regexp, pos int) int { + return -1 +} + +func (i *inputReader) context(pos int) lazyFlag { + return 0 // not used +} + +// LiteralPrefix returns a literal string that must begin any match +// of the regular expression re. It returns the boolean true if the +// literal string comprises the entire regular expression. +func (re *Regexp) LiteralPrefix() (prefix string, complete bool) { + return re.prefix, re.prefixComplete +} + +// MatchReader reports whether the text returned by the [io.RuneReader] +// contains any match of the regular expression re. +func (re *Regexp) MatchReader(r io.RuneReader) bool { + return re.doMatch(r, nil, "") +} + +// MatchString reports whether the string s +// contains any match of the regular expression re. +func (re *Regexp) MatchString(s string) bool { + return re.doMatch(nil, nil, s) +} + +// Match reports whether the byte slice b +// contains any match of the regular expression re. 
+func (re *Regexp) Match(b []byte) bool { + return re.doMatch(nil, b, "") +} + +// MatchReader reports whether the text returned by the RuneReader +// contains any match of the regular expression pattern. +// More complicated queries need to use [Compile] and the full [Regexp] interface. +func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) { + re, err := Compile(pattern) + if err != nil { + return false, err + } + return re.MatchReader(r), nil +} + +// MatchString reports whether the string s +// contains any match of the regular expression pattern. +// More complicated queries need to use [Compile] and the full [Regexp] interface. +func MatchString(pattern string, s string) (matched bool, err error) { + re, err := Compile(pattern) + if err != nil { + return false, err + } + return re.MatchString(s), nil +} + +// Match reports whether the byte slice b +// contains any match of the regular expression pattern. +// More complicated queries need to use [Compile] and the full [Regexp] interface. +func Match(pattern string, b []byte) (matched bool, err error) { + re, err := Compile(pattern) + if err != nil { + return false, err + } + return re.Match(b), nil +} + +// ReplaceAllString returns a copy of src, replacing matches of the [Regexp] +// with the replacement string repl. +// Inside repl, $ signs are interpreted as in [Regexp.Expand]. +func (re *Regexp) ReplaceAllString(src, repl string) string { + n := 2 + if strings.Contains(repl, "$") { + n = 2 * (re.numSubexp + 1) + } + b := re.replaceAll(nil, src, n, func(dst []byte, match []int) []byte { + return re.expand(dst, repl, nil, src, match) + }) + return string(b) +} + +// ReplaceAllLiteralString returns a copy of src, replacing matches of the [Regexp] +// with the replacement string repl. The replacement repl is substituted directly, +// without using [Regexp.Expand]. +func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { + return string(re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte { + return append(dst, repl...) + })) +} + +// ReplaceAllStringFunc returns a copy of src in which all matches of the +// [Regexp] have been replaced by the return value of function repl applied +// to the matched substring. The replacement returned by repl is substituted +// directly, without using [Regexp.Expand]. +func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { + b := re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte { + return append(dst, repl(src[match[0]:match[1]])...) + }) + return string(b) +} + +func (re *Regexp) replaceAll(bsrc []byte, src string, nmatch int, repl func(dst []byte, m []int) []byte) []byte { + lastMatchEnd := 0 // end position of the most recent match + searchPos := 0 // position where we next look for a match + var buf []byte + var endPos int + if bsrc != nil { + endPos = len(bsrc) + } else { + endPos = len(src) + } + if nmatch > re.prog.NumCap { + nmatch = re.prog.NumCap + } + + var dstCap [2]int + for searchPos <= endPos { + a := re.doExecute(nil, bsrc, src, searchPos, nmatch, dstCap[:0]) + if len(a) == 0 { + break // no more matches + } + + // Copy the unmatched characters before this match. + if bsrc != nil { + buf = append(buf, bsrc[lastMatchEnd:a[0]]...) + } else { + buf = append(buf, src[lastMatchEnd:a[0]]...) + } + + // Now insert a copy of the replacement string, but not for a + // match of the empty string immediately after another match. 
+ // (Otherwise, we get double replacement for patterns that + // match both empty and nonempty strings.) + if a[1] > lastMatchEnd || a[0] == 0 { + buf = repl(buf, a) + } + lastMatchEnd = a[1] + + // Advance past this match; always advance at least one character. + var width int + if bsrc != nil { + _, width = utf8.DecodeRune(bsrc[searchPos:]) + } else { + _, width = utf8.DecodeRuneInString(src[searchPos:]) + } + if searchPos+width > a[1] { + searchPos += width + } else if searchPos+1 > a[1] { + // This clause is only needed at the end of the input + // string. In that case, DecodeRuneInString returns width=0. + searchPos++ + } else { + searchPos = a[1] + } + } + + // Copy the unmatched characters after the last match. + if bsrc != nil { + buf = append(buf, bsrc[lastMatchEnd:]...) + } else { + buf = append(buf, src[lastMatchEnd:]...) + } + + return buf +} + +// ReplaceAll returns a copy of src, replacing matches of the [Regexp] +// with the replacement text repl. +// Inside repl, $ signs are interpreted as in [Regexp.Expand]. +func (re *Regexp) ReplaceAll(src, repl []byte) []byte { + n := 2 + if bytes.IndexByte(repl, '$') >= 0 { + n = 2 * (re.numSubexp + 1) + } + srepl := "" + b := re.replaceAll(src, "", n, func(dst []byte, match []int) []byte { + if len(srepl) != len(repl) { + srepl = string(repl) + } + return re.expand(dst, srepl, src, "", match) + }) + return b +} + +// ReplaceAllLiteral returns a copy of src, replacing matches of the [Regexp] +// with the replacement bytes repl. The replacement repl is substituted directly, +// without using [Regexp.Expand]. +func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { + return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte { + return append(dst, repl...) + }) +} + +// ReplaceAllFunc returns a copy of src in which all matches of the +// [Regexp] have been replaced by the return value of function repl applied +// to the matched byte slice. The replacement returned by repl is substituted +// directly, without using [Regexp.Expand]. +func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { + return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte { + return append(dst, repl(src[match[0]:match[1]])...) + }) +} + +// Bitmap used by func special to check whether a character needs to be escaped. +var specialBytes [16]byte + +// special reports whether byte b needs to be escaped by QuoteMeta. +func special(b byte) bool { + return b < utf8.RuneSelf && specialBytes[b%16]&(1<<(b/16)) != 0 +} + +func init() { + for _, b := range []byte(`\.+*?()|[]{}^$`) { + specialBytes[b%16] |= 1 << (b / 16) + } +} + +// QuoteMeta returns a string that escapes all regular expression metacharacters +// inside the argument text; the returned string is a regular expression matching +// the literal text. +func QuoteMeta(s string) string { + // A byte loop is correct because all metacharacters are ASCII. + var i int + for i = 0; i < len(s); i++ { + if special(s[i]) { + break + } + } + // No meta characters found, so return original string. + if i >= len(s) { + return s + } + + b := make([]byte, 2*len(s)-i) + copy(b, s[:i]) + j := i + for ; i < len(s); i++ { + if special(s[i]) { + b[j] = '\\' + j++ + } + b[j] = s[i] + j++ + } + return string(b[:j]) +} + +// The number of capture values in the program may correspond +// to fewer capturing expressions than are in the regexp. 
+// For example, "(a){0}" turns into an empty program, so the +// maximum capture in the program is 0 but we need to return +// an expression for \1. Pad appends -1s to the slice a as needed. +func (re *Regexp) pad(a []int) []int { + if a == nil { + // No match. + return nil + } + n := (1 + re.numSubexp) * 2 + for len(a) < n { + a = append(a, -1) + } + return a +} + +// allMatches calls deliver at most n times +// with the location of successive matches in the input text. +// The input text is b if non-nil, otherwise s. +func (re *Regexp) allMatches(s string, b []byte, n int, deliver func([]int)) { + var end int + if b == nil { + end = len(s) + } else { + end = len(b) + } + + for pos, i, prevMatchEnd := 0, 0, -1; i < n && pos <= end; { + matches := re.doExecute(nil, b, s, pos, re.prog.NumCap, nil) + if len(matches) == 0 { + break + } + + accept := true + if matches[1] == pos { + // We've found an empty match. + if matches[0] == prevMatchEnd { + // We don't allow an empty match right + // after a previous match, so ignore it. + accept = false + } + var width int + if b == nil { + is := inputString{str: s} + _, width = is.step(pos) + } else { + ib := inputBytes{str: b} + _, width = ib.step(pos) + } + if width > 0 { + pos += width + } else { + pos = end + 1 + } + } else { + pos = matches[1] + } + prevMatchEnd = matches[1] + + if accept { + deliver(re.pad(matches)) + i++ + } + } +} + +// Find returns a slice holding the text of the leftmost match in b of the regular expression. +// A return value of nil indicates no match. +func (re *Regexp) Find(b []byte) []byte { + var dstCap [2]int + a := re.doExecute(nil, b, "", 0, 2, dstCap[:0]) + if a == nil { + return nil + } + return b[a[0]:a[1]:a[1]] +} + +// FindIndex returns a two-element slice of integers defining the location of +// the leftmost match in b of the regular expression. The match itself is at +// b[loc[0]:loc[1]]. +// A return value of nil indicates no match. +func (re *Regexp) FindIndex(b []byte) (loc []int) { + a := re.doExecute(nil, b, "", 0, 2, nil) + if a == nil { + return nil + } + return a[0:2] +} + +// FindString returns a string holding the text of the leftmost match in s of the regular +// expression. If there is no match, the return value is an empty string, +// but it will also be empty if the regular expression successfully matches +// an empty string. Use [Regexp.FindStringIndex] or [Regexp.FindStringSubmatch] if it is +// necessary to distinguish these cases. +func (re *Regexp) FindString(s string) string { + var dstCap [2]int + a := re.doExecute(nil, nil, s, 0, 2, dstCap[:0]) + if a == nil { + return "" + } + return s[a[0]:a[1]] +} + +// FindStringIndex returns a two-element slice of integers defining the +// location of the leftmost match in s of the regular expression. The match +// itself is at s[loc[0]:loc[1]]. +// A return value of nil indicates no match. +func (re *Regexp) FindStringIndex(s string) (loc []int) { + a := re.doExecute(nil, nil, s, 0, 2, nil) + if a == nil { + return nil + } + return a[0:2] +} + +// FindReaderIndex returns a two-element slice of integers defining the +// location of the leftmost match of the regular expression in text read from +// the [io.RuneReader]. The match text was found in the input stream at +// byte offset loc[0] through loc[1]-1. +// A return value of nil indicates no match. 
+func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) {
+	a := re.doExecute(r, nil, "", 0, 2, nil)
+	if a == nil {
+		return nil
+	}
+	return a[0:2]
+}
+
+// FindSubmatch returns a slice of slices holding the text of the leftmost
+// match of the regular expression in b and the matches, if any, of its
+// subexpressions, as defined by the 'Submatch' descriptions in the package
+// comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindSubmatch(b []byte) [][]byte {
+	var dstCap [4]int
+	a := re.doExecute(nil, b, "", 0, re.prog.NumCap, dstCap[:0])
+	if a == nil {
+		return nil
+	}
+	ret := make([][]byte, 1+re.numSubexp)
+	for i := range ret {
+		if 2*i < len(a) && a[2*i] >= 0 {
+			ret[i] = b[a[2*i]:a[2*i+1]:a[2*i+1]]
+		}
+	}
+	return ret
+}
+
+// Expand appends template to dst and returns the result; during the
+// append, Expand replaces variables in the template with corresponding
+// matches drawn from src. The match slice should have been returned by
+// [Regexp.FindSubmatchIndex].
+//
+// In the template, a variable is denoted by a substring of the form
+// $name or ${name}, where name is a non-empty sequence of letters,
+// digits, and underscores. A purely numeric name like $1 refers to
+// the submatch with the corresponding index; other names refer to
+// capturing parentheses named with the (?P<name>...) syntax. A
+// reference to an out of range or unmatched index or a name that is not
+// present in the regular expression is replaced with an empty slice.
+//
+// In the $name form, name is taken to be as long as possible: $1x is
+// equivalent to ${1x}, not ${1}x, and $10 is equivalent to ${10}, not ${1}0.
+//
+// To insert a literal $ in the output, use $$ in the template.
+func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) []byte {
+	return re.expand(dst, string(template), src, "", match)
+}
+
+// ExpandString is like [Regexp.Expand] but the template and source are strings.
+// It appends to and returns a byte slice in order to give the calling
+// code control over allocation.
+func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte {
+	return re.expand(dst, template, nil, src, match)
+}
+
+func (re *Regexp) expand(dst []byte, template string, bsrc []byte, src string, match []int) []byte {
+	for len(template) > 0 {
+		before, after, ok := strings.Cut(template, "$")
+		if !ok {
+			break
+		}
+		dst = append(dst, before...)
+		template = after
+		if template != "" && template[0] == '$' {
+			// Treat $$ as $.
+			dst = append(dst, '$')
+			template = template[1:]
+			continue
+		}
+		name, num, rest, ok := extract(template)
+		if !ok {
+			// Malformed; treat $ as raw text.
+			dst = append(dst, '$')
+			continue
+		}
+		template = rest
+		if num >= 0 {
+			if 2*num+1 < len(match) && match[2*num] >= 0 {
+				if bsrc != nil {
+					dst = append(dst, bsrc[match[2*num]:match[2*num+1]]...)
+				} else {
+					dst = append(dst, src[match[2*num]:match[2*num+1]]...)
+				}
+			}
+		} else {
+			for i, namei := range re.subexpNames {
+				if name == namei && 2*i+1 < len(match) && match[2*i] >= 0 {
+					if bsrc != nil {
+						dst = append(dst, bsrc[match[2*i]:match[2*i+1]]...)
+					} else {
+						dst = append(dst, src[match[2*i]:match[2*i+1]]...)
+					}
+					break
+				}
+			}
+		}
+	}
+	dst = append(dst, template...)
+	return dst
+}
+
+// extract returns the name from a leading "name" or "{name}" in str.
+// (The $ has already been removed by the caller.)
+// If it is a number, extract returns num set to that number; otherwise num = -1.
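+// For example, extract("{name}rest") returns ("name", -1, "rest", true)
+// and extract("10") returns ("10", 10, "", true).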
+func extract(str string) (name string, num int, rest string, ok bool) { + if str == "" { + return + } + brace := false + if str[0] == '{' { + brace = true + str = str[1:] + } + i := 0 + for i < len(str) { + rune, size := utf8.DecodeRuneInString(str[i:]) + if !unicode.IsLetter(rune) && !unicode.IsDigit(rune) && rune != '_' { + break + } + i += size + } + if i == 0 { + // empty name is not okay + return + } + name = str[:i] + if brace { + if i >= len(str) || str[i] != '}' { + // missing closing brace + return + } + i++ + } + + // Parse number. + num = 0 + for i := 0; i < len(name); i++ { + if name[i] < '0' || '9' < name[i] || num >= 1e8 { + num = -1 + break + } + num = num*10 + int(name[i]) - '0' + } + // Disallow leading zeros. + if name[0] == '0' && len(name) > 1 { + num = -1 + } + + rest = str[i:] + ok = true + return +} + +// FindSubmatchIndex returns a slice holding the index pairs identifying the +// leftmost match of the regular expression in b and the matches, if any, of +// its subexpressions, as defined by the 'Submatch' and 'Index' descriptions +// in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindSubmatchIndex(b []byte) []int { + return re.pad(re.doExecute(nil, b, "", 0, re.prog.NumCap, nil)) +} + +// FindStringSubmatch returns a slice of strings holding the text of the +// leftmost match of the regular expression in s and the matches, if any, of +// its subexpressions, as defined by the 'Submatch' description in the +// package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindStringSubmatch(s string) []string { + var dstCap [4]int + a := re.doExecute(nil, nil, s, 0, re.prog.NumCap, dstCap[:0]) + if a == nil { + return nil + } + ret := make([]string, 1+re.numSubexp) + for i := range ret { + if 2*i < len(a) && a[2*i] >= 0 { + ret[i] = s[a[2*i]:a[2*i+1]] + } + } + return ret +} + +// FindStringSubmatchIndex returns a slice holding the index pairs +// identifying the leftmost match of the regular expression in s and the +// matches, if any, of its subexpressions, as defined by the 'Submatch' and +// 'Index' descriptions in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindStringSubmatchIndex(s string) []int { + return re.pad(re.doExecute(nil, nil, s, 0, re.prog.NumCap, nil)) +} + +// FindReaderSubmatchIndex returns a slice holding the index pairs +// identifying the leftmost match of the regular expression of text read by +// the [io.RuneReader], and the matches, if any, of its subexpressions, as defined +// by the 'Submatch' and 'Index' descriptions in the package comment. A +// return value of nil indicates no match. +func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { + return re.pad(re.doExecute(r, nil, "", 0, re.prog.NumCap, nil)) +} + +const startSize = 10 // The size at which to start a slice in the 'All' routines. + +// FindAll is the 'All' version of Find; it returns a slice of all successive +// matches of the expression, as defined by the 'All' description in the +// package comment. +// A return value of nil indicates no match. 
+func (re *Regexp) FindAll(b []byte, n int) [][]byte { + if n < 0 { + n = len(b) + 1 + } + var result [][]byte + re.allMatches("", b, n, func(match []int) { + if result == nil { + result = make([][]byte, 0, startSize) + } + result = append(result, b[match[0]:match[1]:match[1]]) + }) + return result +} + +// FindAllIndex is the 'All' version of [Regexp.FindIndex]; it returns a slice of all +// successive matches of the expression, as defined by the 'All' description +// in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllIndex(b []byte, n int) [][]int { + if n < 0 { + n = len(b) + 1 + } + var result [][]int + re.allMatches("", b, n, func(match []int) { + if result == nil { + result = make([][]int, 0, startSize) + } + result = append(result, match[0:2]) + }) + return result +} + +// FindAllString is the 'All' version of [Regexp.FindString]; it returns a slice of all +// successive matches of the expression, as defined by the 'All' description +// in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllString(s string, n int) []string { + if n < 0 { + n = len(s) + 1 + } + var result []string + re.allMatches(s, nil, n, func(match []int) { + if result == nil { + result = make([]string, 0, startSize) + } + result = append(result, s[match[0]:match[1]]) + }) + return result +} + +// FindAllStringIndex is the 'All' version of [Regexp.FindStringIndex]; it returns a +// slice of all successive matches of the expression, as defined by the 'All' +// description in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllStringIndex(s string, n int) [][]int { + if n < 0 { + n = len(s) + 1 + } + var result [][]int + re.allMatches(s, nil, n, func(match []int) { + if result == nil { + result = make([][]int, 0, startSize) + } + result = append(result, match[0:2]) + }) + return result +} + +// FindAllSubmatch is the 'All' version of [Regexp.FindSubmatch]; it returns a slice +// of all successive matches of the expression, as defined by the 'All' +// description in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte { + if n < 0 { + n = len(b) + 1 + } + var result [][][]byte + re.allMatches("", b, n, func(match []int) { + if result == nil { + result = make([][][]byte, 0, startSize) + } + slice := make([][]byte, len(match)/2) + for j := range slice { + if match[2*j] >= 0 { + slice[j] = b[match[2*j]:match[2*j+1]:match[2*j+1]] + } + } + result = append(result, slice) + }) + return result +} + +// FindAllSubmatchIndex is the 'All' version of [Regexp.FindSubmatchIndex]; it returns +// a slice of all successive matches of the expression, as defined by the +// 'All' description in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int { + if n < 0 { + n = len(b) + 1 + } + var result [][]int + re.allMatches("", b, n, func(match []int) { + if result == nil { + result = make([][]int, 0, startSize) + } + result = append(result, match) + }) + return result +} + +// FindAllStringSubmatch is the 'All' version of [Regexp.FindStringSubmatch]; it +// returns a slice of all successive matches of the expression, as defined by +// the 'All' description in the package comment. +// A return value of nil indicates no match. 
+func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string { + if n < 0 { + n = len(s) + 1 + } + var result [][]string + re.allMatches(s, nil, n, func(match []int) { + if result == nil { + result = make([][]string, 0, startSize) + } + slice := make([]string, len(match)/2) + for j := range slice { + if match[2*j] >= 0 { + slice[j] = s[match[2*j]:match[2*j+1]] + } + } + result = append(result, slice) + }) + return result +} + +// FindAllStringSubmatchIndex is the 'All' version of +// [Regexp.FindStringSubmatchIndex]; it returns a slice of all successive matches of +// the expression, as defined by the 'All' description in the package +// comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int { + if n < 0 { + n = len(s) + 1 + } + var result [][]int + re.allMatches(s, nil, n, func(match []int) { + if result == nil { + result = make([][]int, 0, startSize) + } + result = append(result, match) + }) + return result +} + +// Split slices s into substrings separated by the expression and returns a slice of +// the substrings between those expression matches. +// +// The slice returned by this method consists of all the substrings of s +// not contained in the slice returned by [Regexp.FindAllString]. When called on an expression +// that contains no metacharacters, it is equivalent to [strings.SplitN]. +// +// Example: +// +// s := regexp.MustCompile("a*").Split("abaabaccadaaae", 5) +// // s: ["", "b", "b", "c", "cadaaae"] +// +// The count determines the number of substrings to return: +// +// n > 0: at most n substrings; the last substring will be the unsplit remainder. +// n == 0: the result is nil (zero substrings) +// n < 0: all substrings +func (re *Regexp) Split(s string, n int) []string { + + if n == 0 { + return nil + } + + if len(re.expr) > 0 && len(s) == 0 { + return []string{""} + } + + matches := re.FindAllStringIndex(s, n) + strings := make([]string, 0, len(matches)) + + beg := 0 + end := 0 + for _, match := range matches { + if n > 0 && len(strings) >= n-1 { + break + } + + end = match[0] + if match[1] != 0 { + strings = append(strings, s[beg:end]) + } + beg = match[1] + } + + if end != len(s) { + strings = append(strings, s[beg:]) + } + + return strings +} + +// MarshalText implements [encoding.TextMarshaler]. The output +// matches that of calling the [Regexp.String] method. +// +// Note that the output is lossy in some cases: This method does not indicate +// POSIX regular expressions (i.e. those compiled by calling [CompilePOSIX]), or +// those for which the [Regexp.Longest] method has been called. +func (re *Regexp) MarshalText() ([]byte, error) { + return []byte(re.String()), nil +} + +// UnmarshalText implements [encoding.TextUnmarshaler] by calling +// [Compile] on the encoded value. +func (re *Regexp) UnmarshalText(text []byte) error { + newRE, err := Compile(string(text)) + if err != nil { + return err + } + *re = *newRE + return nil +} diff --git a/platform/dbops/binaries/go/go/src/runtime/HACKING.md b/platform/dbops/binaries/go/go/src/runtime/HACKING.md new file mode 100644 index 0000000000000000000000000000000000000000..ce0b42a354ae4bbaa9804358ccf71e87fc08f81d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/HACKING.md @@ -0,0 +1,332 @@ +This is a living document and at times it will be out of date. It is +intended to articulate how programming in the Go runtime differs from +writing normal Go. It focuses on pervasive concepts rather than +details of particular interfaces. 
+ +Scheduler structures +==================== + +The scheduler manages three types of resources that pervade the +runtime: Gs, Ms, and Ps. It's important to understand these even if +you're not working on the scheduler. + +Gs, Ms, Ps +---------- + +A "G" is simply a goroutine. It's represented by type `g`. When a +goroutine exits, its `g` object is returned to a pool of free `g`s and +can later be reused for some other goroutine. + +An "M" is an OS thread that can be executing user Go code, runtime +code, a system call, or be idle. It's represented by type `m`. There +can be any number of Ms at a time since any number of threads may be +blocked in system calls. + +Finally, a "P" represents the resources required to execute user Go +code, such as scheduler and memory allocator state. It's represented +by type `p`. There are exactly `GOMAXPROCS` Ps. A P can be thought of +like a CPU in the OS scheduler and the contents of the `p` type like +per-CPU state. This is a good place to put state that needs to be +sharded for efficiency, but doesn't need to be per-thread or +per-goroutine. + +The scheduler's job is to match up a G (the code to execute), an M +(where to execute it), and a P (the rights and resources to execute +it). When an M stops executing user Go code, for example by entering a +system call, it returns its P to the idle P pool. In order to resume +executing user Go code, for example on return from a system call, it +must acquire a P from the idle pool. + +All `g`, `m`, and `p` objects are heap allocated, but are never freed, +so their memory remains type stable. As a result, the runtime can +avoid write barriers in the depths of the scheduler. + +`getg()` and `getg().m.curg` +---------------------------- + +To get the current user `g`, use `getg().m.curg`. + +`getg()` alone returns the current `g`, but when executing on the +system or signal stacks, this will return the current M's "g0" or +"gsignal", respectively. This is usually not what you want. + +To determine if you're running on the user stack or the system stack, +use `getg() == getg().m.curg`. + +Stacks +====== + +Every non-dead G has a *user stack* associated with it, which is what +user Go code executes on. User stacks start small (e.g., 2K) and grow +or shrink dynamically. + +Every M has a *system stack* associated with it (also known as the M's +"g0" stack because it's implemented as a stub G) and, on Unix +platforms, a *signal stack* (also known as the M's "gsignal" stack). +System and signal stacks cannot grow, but are large enough to execute +runtime and cgo code (8K in a pure Go binary; system-allocated in a +cgo binary). + +Runtime code often temporarily switches to the system stack using +`systemstack`, `mcall`, or `asmcgocall` to perform tasks that must not +be preempted, that must not grow the user stack, or that switch user +goroutines. Code running on the system stack is implicitly +non-preemptible and the garbage collector does not scan system stacks. +While running on the system stack, the current user stack is not used +for execution. + +nosplit functions +----------------- + +Most functions start with a prologue that inspects the stack pointer +and the current G's stack bound and calls `morestack` if the stack +needs to grow. + +Functions can be marked `//go:nosplit` (or `NOSPLIT` in assembly) to +indicate that they should not get this prologue. 
This has several +uses: + +- Functions that must run on the user stack, but must not call into + stack growth, for example because this would cause a deadlock, or + because they have untyped words on the stack. + +- Functions that must not be preempted on entry. + +- Functions that may run without a valid G. For example, functions + that run in early runtime start-up, or that may be entered from C + code such as cgo callbacks or the signal handler. + +Splittable functions ensure there's some amount of space on the stack +for nosplit functions to run in and the linker checks that any static +chain of nosplit function calls cannot exceed this bound. + +Any function with a `//go:nosplit` annotation should explain why it is +nosplit in its documentation comment. + +Error handling and reporting +============================ + +Errors that can reasonably be recovered from in user code should use +`panic` like usual. However, there are some situations where `panic` +will cause an immediate fatal error, such as when called on the system +stack or when called during `mallocgc`. + +Most errors in the runtime are not recoverable. For these, use +`throw`, which dumps the traceback and immediately terminates the +process. In general, `throw` should be passed a string constant to +avoid allocating in perilous situations. By convention, additional +details are printed before `throw` using `print` or `println` and the +messages are prefixed with "runtime:". + +For unrecoverable errors where user code is expected to be at fault for the +failure (such as racing map writes), use `fatal`. + +For runtime error debugging, it may be useful to run with `GOTRACEBACK=system` +or `GOTRACEBACK=crash`. The output of `panic` and `fatal` is as described by +`GOTRACEBACK`. The output of `throw` always includes runtime frames, metadata +and all goroutines regardless of `GOTRACEBACK` (i.e., equivalent to +`GOTRACEBACK=system`). Whether `throw` crashes or not is still controlled by +`GOTRACEBACK`. + +Synchronization +=============== + +The runtime has multiple synchronization mechanisms. They differ in +semantics and, in particular, in whether they interact with the +goroutine scheduler or the OS scheduler. + +The simplest is `mutex`, which is manipulated using `lock` and +`unlock`. This should be used to protect shared structures for short +periods. Blocking on a `mutex` directly blocks the M, without +interacting with the Go scheduler. This means it is safe to use from +the lowest levels of the runtime, but also prevents any associated G +and P from being rescheduled. `rwmutex` is similar. + +For one-shot notifications, use `note`, which provides `notesleep` and +`notewakeup`. Unlike traditional UNIX `sleep`/`wakeup`, `note`s are +race-free, so `notesleep` returns immediately if the `notewakeup` has +already happened. A `note` can be reset after use with `noteclear`, +which must not race with a sleep or wakeup. Like `mutex`, blocking on +a `note` blocks the M. However, there are different ways to sleep on a +`note`:`notesleep` also prevents rescheduling of any associated G and +P, while `notetsleepg` acts like a blocking system call that allows +the P to be reused to run another G. This is still less efficient than +blocking the G directly since it consumes an M. + +To interact directly with the goroutine scheduler, use `gopark` and +`goready`. `gopark` parks the current goroutine—putting it in the +"waiting" state and removing it from the scheduler's run queue—and +schedules another goroutine on the current M/P. 
`goready` puts a
+parked goroutine back in the "runnable" state and adds it to the run
+queue.
+
+In summary,
+
+<table>
+<tr><th></th><th colspan="3">Blocks</th></tr>
+<tr><th>Interface</th><th>G</th><th>M</th><th>P</th></tr>
+<tr><td>(rw)mutex</td><td>Y</td><td>Y</td><td>Y</td></tr>
+<tr><td>note</td><td>Y</td><td>Y</td><td>Y/N</td></tr>
+<tr><td>park</td><td>Y</td><td>N</td><td>N</td></tr>
+</table>
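+
+As a concrete illustration, the one-shot `note` pattern looks roughly like
+this (a schematic sketch of the runtime-internal calls, not copied from real
+runtime code):
+
+```go
+var done note
+
+func waiter() {
+	// Blocks this M until the wakeup arrives; returns immediately if
+	// notewakeup already happened. notetsleepg would instead let the P
+	// run other goroutines while this M sleeps.
+	notesleep(&done)
+}
+
+func waker() {
+	notewakeup(&done)
+}
+
+// noteclear(&done) may be used to reuse the note, but only after the
+// sleep/wakeup pair above has fully finished.
+```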
+ +Atomics +======= + +The runtime uses its own atomics package at `runtime/internal/atomic`. +This corresponds to `sync/atomic`, but functions have different names +for historical reasons and there are a few additional functions needed +by the runtime. + +In general, we think hard about the uses of atomics in the runtime and +try to avoid unnecessary atomic operations. If access to a variable is +sometimes protected by another synchronization mechanism, the +already-protected accesses generally don't need to be atomic. There +are several reasons for this: + +1. Using non-atomic or atomic access where appropriate makes the code + more self-documenting. Atomic access to a variable implies there's + somewhere else that may concurrently access the variable. + +2. Non-atomic access allows for automatic race detection. The runtime + doesn't currently have a race detector, but it may in the future. + Atomic access defeats the race detector, while non-atomic access + allows the race detector to check your assumptions. + +3. Non-atomic access may improve performance. + +Of course, any non-atomic access to a shared variable should be +documented to explain how that access is protected. + +Some common patterns that mix atomic and non-atomic access are: + +* Read-mostly variables where updates are protected by a lock. Within + the locked region, reads do not need to be atomic, but the write + does. Outside the locked region, reads need to be atomic. + +* Reads that only happen during STW, where no writes can happen during + STW, do not need to be atomic. + +That said, the advice from the Go memory model stands: "Don't be +[too] clever." The performance of the runtime matters, but its +robustness matters more. + +Unmanaged memory +================ + +In general, the runtime tries to use regular heap allocation. However, +in some cases the runtime must allocate objects outside of the garbage +collected heap, in *unmanaged memory*. This is necessary if the +objects are part of the memory manager itself or if they must be +allocated in situations where the caller may not have a P. + +There are three mechanisms for allocating unmanaged memory: + +* sysAlloc obtains memory directly from the OS. This comes in whole + multiples of the system page size, but it can be freed with sysFree. + +* persistentalloc combines multiple smaller allocations into a single + sysAlloc to avoid fragmentation. However, there is no way to free + persistentalloced objects (hence the name). + +* fixalloc is a SLAB-style allocator that allocates objects of a fixed + size. fixalloced objects can be freed, but this memory can only be + reused by the same fixalloc pool, so it can only be reused for + objects of the same type. + +In general, types that are allocated using any of these should be +marked as not in heap by embedding `runtime/internal/sys.NotInHeap`. + +Objects that are allocated in unmanaged memory **must not** contain +heap pointers unless the following rules are also obeyed: + +1. Any pointers from unmanaged memory to the heap must be garbage + collection roots. More specifically, any pointer must either be + accessible through a global variable or be added as an explicit + garbage collection root in `runtime.markroot`. + +2. If the memory is reused, the heap pointers must be zero-initialized + before they become visible as GC roots. Otherwise, the GC may + observe stale heap pointers. See "Zero-initialization versus + zeroing". 
+ +Zero-initialization versus zeroing +================================== + +There are two types of zeroing in the runtime, depending on whether +the memory is already initialized to a type-safe state. + +If memory is not in a type-safe state, meaning it potentially contains +"garbage" because it was just allocated and it is being initialized +for first use, then it must be *zero-initialized* using +`memclrNoHeapPointers` or non-pointer writes. This does not perform +write barriers. + +If memory is already in a type-safe state and is simply being set to +the zero value, this must be done using regular writes, `typedmemclr`, +or `memclrHasPointers`. This performs write barriers. + +Runtime-only compiler directives +================================ + +In addition to the "//go:" directives documented in "go doc compile", +the compiler supports additional directives only in the runtime. + +go:systemstack +-------------- + +`go:systemstack` indicates that a function must run on the system +stack. This is checked dynamically by a special function prologue. + +go:nowritebarrier +----------------- + +`go:nowritebarrier` directs the compiler to emit an error if the +following function contains any write barriers. (It *does not* +suppress the generation of write barriers; it is simply an assertion.) + +Usually you want `go:nowritebarrierrec`. `go:nowritebarrier` is +primarily useful in situations where it's "nice" not to have write +barriers, but not required for correctness. + +go:nowritebarrierrec and go:yeswritebarrierrec +---------------------------------------------- + +`go:nowritebarrierrec` directs the compiler to emit an error if the +following function or any function it calls recursively, up to a +`go:yeswritebarrierrec`, contains a write barrier. + +Logically, the compiler floods the call graph starting from each +`go:nowritebarrierrec` function and produces an error if it encounters +a function containing a write barrier. This flood stops at +`go:yeswritebarrierrec` functions. + +`go:nowritebarrierrec` is used in the implementation of the write +barrier to prevent infinite loops. + +Both directives are used in the scheduler. The write barrier requires +an active P (`getg().m.p != nil`) and scheduler code often runs +without an active P. In this case, `go:nowritebarrierrec` is used on +functions that release the P or may run without a P and +`go:yeswritebarrierrec` is used when code re-acquires an active P. +Since these are function-level annotations, code that releases or +acquires a P may need to be split across two functions. + +go:uintptrkeepalive +------------------- + +The //go:uintptrkeepalive directive must be followed by a function declaration. + +It specifies that the function's uintptr arguments may be pointer values that +have been converted to uintptr and must be kept alive for the duration of the +call, even though from the types alone it would appear that the object is no +longer needed during the call. + +This directive is similar to //go:uintptrescapes, but it does not force +arguments to escape. Since stack growth does not understand these arguments, +this directive must be used with //go:nosplit (in the marked function and all +transitive calls) to prevent stack growth. + +The conversion from pointer to uintptr must appear in the argument list of any +call to this function. This directive is used for some low-level system call +implementations. 
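+
+As a hedged example, a declaration using this pair of directives looks
+roughly like the following; the function names are invented, only the
+directives themselves are real:
+
+```go
+//go:uintptrkeepalive
+//go:nosplit
+func lowlevelCall(fd, buf uintptr) uintptr {
+	// buf may really be a pointer the caller converted to uintptr; the
+	// directive keeps its referent alive for the duration of the call.
+	return rawCall(fd, buf) // hypothetical nosplit helper
+}
+```
+
+At the call site, the pointer-to-uintptr conversion must appear directly
+in the argument list, e.g. `lowlevelCall(fd, uintptr(unsafe.Pointer(p)))`,
+so that the compiler can see the conversion and keep `p` alive.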
diff --git a/platform/dbops/binaries/go/go/src/runtime/Makefile b/platform/dbops/binaries/go/go/src/runtime/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..55087def044236037c11981b7cea4c60f89e079a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/Makefile @@ -0,0 +1,5 @@ +# Copyright 2009 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +include ../Make.dist diff --git a/platform/dbops/binaries/go/go/src/runtime/abi_test.go b/platform/dbops/binaries/go/go/src/runtime/abi_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d7039e758a4bb503d54c94b0da9ab5f0b28858ec --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/abi_test.go @@ -0,0 +1,112 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.regabiargs + +// This file contains tests specific to making sure the register ABI +// works in a bunch of contexts in the runtime. + +package runtime_test + +import ( + "internal/abi" + "internal/testenv" + "os" + "os/exec" + "runtime" + "strings" + "testing" + "time" +) + +var regConfirmRun chan int + +//go:registerparams +func regFinalizerPointer(v *Tint) (int, float32, [10]byte) { + regConfirmRun <- *(*int)(v) + return 5151, 4.0, [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} +} + +//go:registerparams +func regFinalizerIface(v Tinter) (int, float32, [10]byte) { + regConfirmRun <- *(*int)(v.(*Tint)) + return 5151, 4.0, [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} +} + +func TestFinalizerRegisterABI(t *testing.T) { + testenv.MustHaveExec(t) + + // Actually run the test in a subprocess because we don't want + // finalizers from other tests interfering. + if os.Getenv("TEST_FINALIZER_REGABI") != "1" { + cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=^TestFinalizerRegisterABI$", "-test.v")) + cmd.Env = append(cmd.Env, "TEST_FINALIZER_REGABI=1") + out, err := cmd.CombinedOutput() + if !strings.Contains(string(out), "PASS\n") || err != nil { + t.Fatalf("%s\n(exit status %v)", string(out), err) + } + return + } + + // Optimistically clear any latent finalizers from e.g. the testing + // package before continuing. + // + // It's possible that a finalizer only becomes available to run + // after this point, which would interfere with the test and could + // cause a crash, but because we're running in a separate process + // it's extremely unlikely. + runtime.GC() + runtime.GC() + + // fing will only pick the new IntRegArgs up if it's currently + // sleeping and wakes up, so wait for it to go to sleep. + success := false + for i := 0; i < 100; i++ { + if runtime.FinalizerGAsleep() { + success = true + break + } + time.Sleep(20 * time.Millisecond) + } + if !success { + t.Fatal("finalizer not asleep?") + } + + argRegsBefore := runtime.SetIntArgRegs(abi.IntArgRegs) + defer runtime.SetIntArgRegs(argRegsBefore) + + tests := []struct { + name string + fin any + confirmValue int + }{ + {"Pointer", regFinalizerPointer, -1}, + {"Interface", regFinalizerIface, -2}, + } + for i := range tests { + test := &tests[i] + t.Run(test.name, func(t *testing.T) { + regConfirmRun = make(chan int) + + x := new(Tint) + *x = (Tint)(test.confirmValue) + runtime.SetFinalizer(x, test.fin) + + runtime.KeepAlive(x) + + // Queue the finalizer. 
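+			// (One cycle marks x unreachable and queues its finalizer;
+			// a second cycle is run for good measure, since the
+			// finalizer goroutine runs asynchronously.)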
+ runtime.GC() + runtime.GC() + + select { + case <-time.After(time.Second): + t.Fatal("finalizer failed to execute") + case gotVal := <-regConfirmRun: + if gotVal != test.confirmValue { + t.Fatalf("wrong finalizer executed? got %d, want %d", gotVal, test.confirmValue) + } + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/alg.go b/platform/dbops/binaries/go/go/src/runtime/alg.go new file mode 100644 index 0000000000000000000000000000000000000000..ef4f859c231eca172f66d59d771600c8c4a660f9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/alg.go @@ -0,0 +1,423 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/abi" + "internal/cpu" + "internal/goarch" + "unsafe" +) + +const ( + c0 = uintptr((8-goarch.PtrSize)/4*2860486313 + (goarch.PtrSize-4)/4*33054211828000289) + c1 = uintptr((8-goarch.PtrSize)/4*3267000013 + (goarch.PtrSize-4)/4*23344194077549503) +) + +func memhash0(p unsafe.Pointer, h uintptr) uintptr { + return h +} + +func memhash8(p unsafe.Pointer, h uintptr) uintptr { + return memhash(p, h, 1) +} + +func memhash16(p unsafe.Pointer, h uintptr) uintptr { + return memhash(p, h, 2) +} + +func memhash128(p unsafe.Pointer, h uintptr) uintptr { + return memhash(p, h, 16) +} + +//go:nosplit +func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr { + ptr := getclosureptr() + size := *(*uintptr)(unsafe.Pointer(ptr + unsafe.Sizeof(h))) + return memhash(p, h, size) +} + +// runtime variable to check if the processor we're running on +// actually supports the instructions used by the AES-based +// hash implementation. +var useAeshash bool + +// in asm_*.s +func memhash(p unsafe.Pointer, h, s uintptr) uintptr +func memhash32(p unsafe.Pointer, h uintptr) uintptr +func memhash64(p unsafe.Pointer, h uintptr) uintptr +func strhash(p unsafe.Pointer, h uintptr) uintptr + +func strhashFallback(a unsafe.Pointer, h uintptr) uintptr { + x := (*stringStruct)(a) + return memhashFallback(x.str, h, uintptr(x.len)) +} + +// NOTE: Because NaN != NaN, a map can contain any +// number of (mostly useless) entries keyed with NaNs. +// To avoid long hash chains, we assign a random number +// as the hash value for a NaN. + +func f32hash(p unsafe.Pointer, h uintptr) uintptr { + f := *(*float32)(p) + switch { + case f == 0: + return c1 * (c0 ^ h) // +0, -0 + case f != f: + return c1 * (c0 ^ h ^ uintptr(rand())) // any kind of NaN + default: + return memhash(p, h, 4) + } +} + +func f64hash(p unsafe.Pointer, h uintptr) uintptr { + f := *(*float64)(p) + switch { + case f == 0: + return c1 * (c0 ^ h) // +0, -0 + case f != f: + return c1 * (c0 ^ h ^ uintptr(rand())) // any kind of NaN + default: + return memhash(p, h, 8) + } +} + +func c64hash(p unsafe.Pointer, h uintptr) uintptr { + x := (*[2]float32)(p) + return f32hash(unsafe.Pointer(&x[1]), f32hash(unsafe.Pointer(&x[0]), h)) +} + +func c128hash(p unsafe.Pointer, h uintptr) uintptr { + x := (*[2]float64)(p) + return f64hash(unsafe.Pointer(&x[1]), f64hash(unsafe.Pointer(&x[0]), h)) +} + +func interhash(p unsafe.Pointer, h uintptr) uintptr { + a := (*iface)(p) + tab := a.tab + if tab == nil { + return h + } + t := tab._type + if t.Equal == nil { + // Check hashability here. We could do this check inside + // typehash, but we want to report the topmost type in + // the error text (e.g. in a struct with a field of slice type + // we want to report the struct, not the slice). 
+ panic(errorString("hash of unhashable type " + toRType(t).string())) + } + if isDirectIface(t) { + return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0) + } else { + return c1 * typehash(t, a.data, h^c0) + } +} + +func nilinterhash(p unsafe.Pointer, h uintptr) uintptr { + a := (*eface)(p) + t := a._type + if t == nil { + return h + } + if t.Equal == nil { + // See comment in interhash above. + panic(errorString("hash of unhashable type " + toRType(t).string())) + } + if isDirectIface(t) { + return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0) + } else { + return c1 * typehash(t, a.data, h^c0) + } +} + +// typehash computes the hash of the object of type t at address p. +// h is the seed. +// This function is seldom used. Most maps use for hashing either +// fixed functions (e.g. f32hash) or compiler-generated functions +// (e.g. for a type like struct { x, y string }). This implementation +// is slower but more general and is used for hashing interface types +// (called from interhash or nilinterhash, above) or for hashing in +// maps generated by reflect.MapOf (reflect_typehash, below). +// Note: this function must match the compiler generated +// functions exactly. See issue 37716. +func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr { + if t.TFlag&abi.TFlagRegularMemory != 0 { + // Handle ptr sizes specially, see issue 37086. + switch t.Size_ { + case 4: + return memhash32(p, h) + case 8: + return memhash64(p, h) + default: + return memhash(p, h, t.Size_) + } + } + switch t.Kind_ & kindMask { + case kindFloat32: + return f32hash(p, h) + case kindFloat64: + return f64hash(p, h) + case kindComplex64: + return c64hash(p, h) + case kindComplex128: + return c128hash(p, h) + case kindString: + return strhash(p, h) + case kindInterface: + i := (*interfacetype)(unsafe.Pointer(t)) + if len(i.Methods) == 0 { + return nilinterhash(p, h) + } + return interhash(p, h) + case kindArray: + a := (*arraytype)(unsafe.Pointer(t)) + for i := uintptr(0); i < a.Len; i++ { + h = typehash(a.Elem, add(p, i*a.Elem.Size_), h) + } + return h + case kindStruct: + s := (*structtype)(unsafe.Pointer(t)) + for _, f := range s.Fields { + if f.Name.IsBlank() { + continue + } + h = typehash(f.Typ, add(p, f.Offset), h) + } + return h + default: + // Should never happen, as typehash should only be called + // with comparable types. 
+ panic(errorString("hash of unhashable type " + toRType(t).string())) + } +} + +func mapKeyError(t *maptype, p unsafe.Pointer) error { + if !t.HashMightPanic() { + return nil + } + return mapKeyError2(t.Key, p) +} + +func mapKeyError2(t *_type, p unsafe.Pointer) error { + if t.TFlag&abi.TFlagRegularMemory != 0 { + return nil + } + switch t.Kind_ & kindMask { + case kindFloat32, kindFloat64, kindComplex64, kindComplex128, kindString: + return nil + case kindInterface: + i := (*interfacetype)(unsafe.Pointer(t)) + var t *_type + var pdata *unsafe.Pointer + if len(i.Methods) == 0 { + a := (*eface)(p) + t = a._type + if t == nil { + return nil + } + pdata = &a.data + } else { + a := (*iface)(p) + if a.tab == nil { + return nil + } + t = a.tab._type + pdata = &a.data + } + + if t.Equal == nil { + return errorString("hash of unhashable type " + toRType(t).string()) + } + + if isDirectIface(t) { + return mapKeyError2(t, unsafe.Pointer(pdata)) + } else { + return mapKeyError2(t, *pdata) + } + case kindArray: + a := (*arraytype)(unsafe.Pointer(t)) + for i := uintptr(0); i < a.Len; i++ { + if err := mapKeyError2(a.Elem, add(p, i*a.Elem.Size_)); err != nil { + return err + } + } + return nil + case kindStruct: + s := (*structtype)(unsafe.Pointer(t)) + for _, f := range s.Fields { + if f.Name.IsBlank() { + continue + } + if err := mapKeyError2(f.Typ, add(p, f.Offset)); err != nil { + return err + } + } + return nil + default: + // Should never happen, keep this case for robustness. + return errorString("hash of unhashable type " + toRType(t).string()) + } +} + +//go:linkname reflect_typehash reflect.typehash +func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr { + return typehash(t, p, h) +} + +func memequal0(p, q unsafe.Pointer) bool { + return true +} +func memequal8(p, q unsafe.Pointer) bool { + return *(*int8)(p) == *(*int8)(q) +} +func memequal16(p, q unsafe.Pointer) bool { + return *(*int16)(p) == *(*int16)(q) +} +func memequal32(p, q unsafe.Pointer) bool { + return *(*int32)(p) == *(*int32)(q) +} +func memequal64(p, q unsafe.Pointer) bool { + return *(*int64)(p) == *(*int64)(q) +} +func memequal128(p, q unsafe.Pointer) bool { + return *(*[2]int64)(p) == *(*[2]int64)(q) +} +func f32equal(p, q unsafe.Pointer) bool { + return *(*float32)(p) == *(*float32)(q) +} +func f64equal(p, q unsafe.Pointer) bool { + return *(*float64)(p) == *(*float64)(q) +} +func c64equal(p, q unsafe.Pointer) bool { + return *(*complex64)(p) == *(*complex64)(q) +} +func c128equal(p, q unsafe.Pointer) bool { + return *(*complex128)(p) == *(*complex128)(q) +} +func strequal(p, q unsafe.Pointer) bool { + return *(*string)(p) == *(*string)(q) +} +func interequal(p, q unsafe.Pointer) bool { + x := *(*iface)(p) + y := *(*iface)(q) + return x.tab == y.tab && ifaceeq(x.tab, x.data, y.data) +} +func nilinterequal(p, q unsafe.Pointer) bool { + x := *(*eface)(p) + y := *(*eface)(q) + return x._type == y._type && efaceeq(x._type, x.data, y.data) +} +func efaceeq(t *_type, x, y unsafe.Pointer) bool { + if t == nil { + return true + } + eq := t.Equal + if eq == nil { + panic(errorString("comparing uncomparable type " + toRType(t).string())) + } + if isDirectIface(t) { + // Direct interface types are ptr, chan, map, func, and single-element structs/arrays thereof. + // Maps and funcs are not comparable, so they can't reach here. + // Ptrs, chans, and single-element items can be compared directly using ==. 
+ return x == y + } + return eq(x, y) +} +func ifaceeq(tab *itab, x, y unsafe.Pointer) bool { + if tab == nil { + return true + } + t := tab._type + eq := t.Equal + if eq == nil { + panic(errorString("comparing uncomparable type " + toRType(t).string())) + } + if isDirectIface(t) { + // See comment in efaceeq. + return x == y + } + return eq(x, y) +} + +// Testing adapters for hash quality tests (see hash_test.go) +func stringHash(s string, seed uintptr) uintptr { + return strhash(noescape(unsafe.Pointer(&s)), seed) +} + +func bytesHash(b []byte, seed uintptr) uintptr { + s := (*slice)(unsafe.Pointer(&b)) + return memhash(s.array, seed, uintptr(s.len)) +} + +func int32Hash(i uint32, seed uintptr) uintptr { + return memhash32(noescape(unsafe.Pointer(&i)), seed) +} + +func int64Hash(i uint64, seed uintptr) uintptr { + return memhash64(noescape(unsafe.Pointer(&i)), seed) +} + +func efaceHash(i any, seed uintptr) uintptr { + return nilinterhash(noescape(unsafe.Pointer(&i)), seed) +} + +func ifaceHash(i interface { + F() +}, seed uintptr) uintptr { + return interhash(noescape(unsafe.Pointer(&i)), seed) +} + +const hashRandomBytes = goarch.PtrSize / 4 * 64 + +// used in asm_{386,amd64,arm64}.s to seed the hash function +var aeskeysched [hashRandomBytes]byte + +// used in hash{32,64}.go to seed the hash function +var hashkey [4]uintptr + +func alginit() { + // Install AES hash algorithms if the instructions needed are present. + if (GOARCH == "386" || GOARCH == "amd64") && + cpu.X86.HasAES && // AESENC + cpu.X86.HasSSSE3 && // PSHUFB + cpu.X86.HasSSE41 { // PINSR{D,Q} + initAlgAES() + return + } + if GOARCH == "arm64" && cpu.ARM64.HasAES { + initAlgAES() + return + } + for i := range hashkey { + hashkey[i] = uintptr(bootstrapRand()) | 1 // make sure these numbers are odd + } +} + +func initAlgAES() { + useAeshash = true + // Initialize with random data so hash collisions will be hard to engineer. + key := (*[hashRandomBytes / 8]uint64)(unsafe.Pointer(&aeskeysched)) + for i := range key { + key[i] = bootstrapRand() + } +} + +// Note: These routines perform the read with a native endianness. +func readUnaligned32(p unsafe.Pointer) uint32 { + q := (*[4]byte)(p) + if goarch.BigEndian { + return uint32(q[3]) | uint32(q[2])<<8 | uint32(q[1])<<16 | uint32(q[0])<<24 + } + return uint32(q[0]) | uint32(q[1])<<8 | uint32(q[2])<<16 | uint32(q[3])<<24 +} + +func readUnaligned64(p unsafe.Pointer) uint64 { + q := (*[8]byte)(p) + if goarch.BigEndian { + return uint64(q[7]) | uint64(q[6])<<8 | uint64(q[5])<<16 | uint64(q[4])<<24 | + uint64(q[3])<<32 | uint64(q[2])<<40 | uint64(q[1])<<48 | uint64(q[0])<<56 + } + return uint64(q[0]) | uint64(q[1])<<8 | uint64(q[2])<<16 | uint64(q[3])<<24 | uint64(q[4])<<32 | uint64(q[5])<<40 | uint64(q[6])<<48 | uint64(q[7])<<56 +} diff --git a/platform/dbops/binaries/go/go/src/runtime/align_runtime_test.go b/platform/dbops/binaries/go/go/src/runtime/align_runtime_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d78b0b2d39b26150f96658311a14ceb2ab786f20 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/align_runtime_test.go @@ -0,0 +1,51 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file lives in the runtime package +// so we can get access to the runtime guts. +// The rest of the implementation of this test is in align_test.go. 
+ +package runtime + +import "unsafe" + +// AtomicFields is the set of fields on which we perform 64-bit atomic +// operations (all the *64 operations in runtime/internal/atomic). +var AtomicFields = []uintptr{ + unsafe.Offsetof(m{}.procid), + unsafe.Offsetof(p{}.gcFractionalMarkTime), + unsafe.Offsetof(profBuf{}.overflow), + unsafe.Offsetof(profBuf{}.overflowTime), + unsafe.Offsetof(heapStatsDelta{}.tinyAllocCount), + unsafe.Offsetof(heapStatsDelta{}.smallAllocCount), + unsafe.Offsetof(heapStatsDelta{}.smallFreeCount), + unsafe.Offsetof(heapStatsDelta{}.largeAlloc), + unsafe.Offsetof(heapStatsDelta{}.largeAllocCount), + unsafe.Offsetof(heapStatsDelta{}.largeFree), + unsafe.Offsetof(heapStatsDelta{}.largeFreeCount), + unsafe.Offsetof(heapStatsDelta{}.committed), + unsafe.Offsetof(heapStatsDelta{}.released), + unsafe.Offsetof(heapStatsDelta{}.inHeap), + unsafe.Offsetof(heapStatsDelta{}.inStacks), + unsafe.Offsetof(heapStatsDelta{}.inPtrScalarBits), + unsafe.Offsetof(heapStatsDelta{}.inWorkBufs), + unsafe.Offsetof(lfnode{}.next), + unsafe.Offsetof(mstats{}.last_gc_nanotime), + unsafe.Offsetof(mstats{}.last_gc_unix), + unsafe.Offsetof(workType{}.bytesMarked), +} + +// AtomicVariables is the set of global variables on which we perform +// 64-bit atomic operations. +var AtomicVariables = []unsafe.Pointer{ + unsafe.Pointer(&ncgocall), + unsafe.Pointer(&test_z64), + unsafe.Pointer(&blockprofilerate), + unsafe.Pointer(&mutexprofilerate), + unsafe.Pointer(&gcController), + unsafe.Pointer(&memstats), + unsafe.Pointer(&sched), + unsafe.Pointer(&ticks), + unsafe.Pointer(&work), +} diff --git a/platform/dbops/binaries/go/go/src/runtime/align_test.go b/platform/dbops/binaries/go/go/src/runtime/align_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2bad5b141c638fcb7b05b1fbe75cc7af29b34223 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/align_test.go @@ -0,0 +1,200 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "go/ast" + "go/build" + "go/importer" + "go/parser" + "go/printer" + "go/token" + "go/types" + "internal/testenv" + "os" + "regexp" + "runtime" + "strings" + "testing" +) + +// Check that 64-bit fields on which we apply atomic operations +// are aligned to 8 bytes. This can be a problem on 32-bit systems. +func TestAtomicAlignment(t *testing.T) { + testenv.MustHaveGoBuild(t) // go command needed to resolve std .a files for importer.Default(). + + // Read the code making the tables above, to see which fields and + // variables we are currently checking. + checked := map[string]bool{} + x, err := os.ReadFile("./align_runtime_test.go") + if err != nil { + t.Fatalf("read failed: %v", err) + } + fieldDesc := map[int]string{} + r := regexp.MustCompile(`unsafe[.]Offsetof[(](\w+){}[.](\w+)[)]`) + matches := r.FindAllStringSubmatch(string(x), -1) + for i, v := range matches { + checked["field runtime."+v[1]+"."+v[2]] = true + fieldDesc[i] = v[1] + "." + v[2] + } + varDesc := map[int]string{} + r = regexp.MustCompile(`unsafe[.]Pointer[(]&(\w+)[)]`) + matches = r.FindAllStringSubmatch(string(x), -1) + for i, v := range matches { + checked["var "+v[1]] = true + varDesc[i] = v[1] + } + + // Check all of our alignments. This is the actual core of the test. 
+ for i, d := range runtime.AtomicFields { + if d%8 != 0 { + t.Errorf("field alignment of %s failed: offset is %d", fieldDesc[i], d) + } + } + for i, p := range runtime.AtomicVariables { + if uintptr(p)%8 != 0 { + t.Errorf("variable alignment of %s failed: address is %x", varDesc[i], p) + } + } + + // The code above is the actual test. The code below attempts to check + // that the tables used by the code above are exhaustive. + + // Parse the whole runtime package, checking that arguments of + // appropriate atomic operations are in the list above. + fset := token.NewFileSet() + m, err := parser.ParseDir(fset, ".", nil, 0) + if err != nil { + t.Fatalf("parsing runtime failed: %v", err) + } + pkg := m["runtime"] // Note: ignore runtime_test and main packages + + // Filter files by those for the current architecture/os being tested. + fileMap := map[string]bool{} + for _, f := range buildableFiles(t, ".") { + fileMap[f] = true + } + var files []*ast.File + for fname, f := range pkg.Files { + if fileMap[fname] { + files = append(files, f) + } + } + + // Call go/types to analyze the runtime package. + var info types.Info + info.Types = map[ast.Expr]types.TypeAndValue{} + conf := types.Config{Importer: importer.Default()} + _, err = conf.Check("runtime", fset, files, &info) + if err != nil { + t.Fatalf("typechecking runtime failed: %v", err) + } + + // Analyze all atomic.*64 callsites. + v := Visitor{t: t, fset: fset, types: info.Types, checked: checked} + ast.Walk(&v, pkg) +} + +type Visitor struct { + fset *token.FileSet + types map[ast.Expr]types.TypeAndValue + checked map[string]bool + t *testing.T +} + +func (v *Visitor) Visit(n ast.Node) ast.Visitor { + c, ok := n.(*ast.CallExpr) + if !ok { + return v + } + f, ok := c.Fun.(*ast.SelectorExpr) + if !ok { + return v + } + p, ok := f.X.(*ast.Ident) + if !ok { + return v + } + if p.Name != "atomic" { + return v + } + if !strings.HasSuffix(f.Sel.Name, "64") { + return v + } + + a := c.Args[0] + + // This is a call to atomic.XXX64(a, ...). Make sure a is aligned to 8 bytes. + // XXX = one of Load, Store, Cas, etc. + // The arg we care about the alignment of is always the first one. + + if u, ok := a.(*ast.UnaryExpr); ok && u.Op == token.AND { + v.checkAddr(u.X) + return v + } + + // Other cases there's nothing we can check. Assume we're ok. + v.t.Logf("unchecked atomic operation %s %v", v.fset.Position(n.Pos()), v.print(n)) + + return v +} + +// checkAddr checks to make sure n is a properly aligned address for a 64-bit atomic operation. +func (v *Visitor) checkAddr(n ast.Node) { + switch n := n.(type) { + case *ast.IndexExpr: + // Alignment of an array element is the same as the whole array. + v.checkAddr(n.X) + return + case *ast.Ident: + key := "var " + v.print(n) + if !v.checked[key] { + v.t.Errorf("unchecked variable %s %s", v.fset.Position(n.Pos()), key) + } + return + case *ast.SelectorExpr: + t := v.types[n.X].Type + if t == nil { + // Not sure what is happening here, go/types fails to + // type the selector arg on some platforms. + return + } + if p, ok := t.(*types.Pointer); ok { + // Note: we assume here that the pointer p in p.foo is properly + // aligned. We just check that foo is at a properly aligned offset. + t = p.Elem() + } else { + v.checkAddr(n.X) + } + if t.Underlying() == t { + v.t.Errorf("analysis can't handle unnamed type %s %v", v.fset.Position(n.Pos()), t) + } + key := "field " + t.String() + "." 
+ n.Sel.Name + if !v.checked[key] { + v.t.Errorf("unchecked field %s %s", v.fset.Position(n.Pos()), key) + } + default: + v.t.Errorf("unchecked atomic address %s %v", v.fset.Position(n.Pos()), v.print(n)) + + } +} + +func (v *Visitor) print(n ast.Node) string { + var b strings.Builder + printer.Fprint(&b, v.fset, n) + return b.String() +} + +// buildableFiles returns the list of files in the given directory +// that are actually used for the build, given GOOS/GOARCH restrictions. +func buildableFiles(t *testing.T, dir string) []string { + ctxt := build.Default + ctxt.CgoEnabled = true + pkg, err := ctxt.ImportDir(dir, 0) + if err != nil { + t.Fatalf("can't find buildable files: %v", err) + } + return pkg.GoFiles +} diff --git a/platform/dbops/binaries/go/go/src/runtime/arena.go b/platform/dbops/binaries/go/go/src/runtime/arena.go new file mode 100644 index 0000000000000000000000000000000000000000..e943817ceeabf7830f3582656b793eeb986fc134 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/arena.go @@ -0,0 +1,935 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Implementation of (safe) user arenas. +// +// This file contains the implementation of user arenas wherein Go values can +// be manually allocated and freed in bulk. The act of manually freeing memory, +// potentially before a GC cycle, means that a garbage collection cycle can be +// delayed, improving efficiency by reducing GC cycle frequency. There are other +// potential efficiency benefits, such as improved locality and access to a more +// efficient allocation strategy. +// +// What makes the arenas here safe is that once they are freed, accessing the +// arena's memory will cause an explicit program fault, and the arena's address +// space will not be reused until no more pointers into it are found. There's one +// exception to this: if an arena allocated memory that isn't exhausted, it's placed +// back into a pool for reuse. This means that a crash is not always guaranteed. +// +// While this may seem unsafe, it still prevents memory corruption, and is in fact +// necessary in order to make new(T) a valid implementation of arenas. Such a property +// is desirable to allow for a trivial implementation. (It also avoids complexities +// that arise from synchronization with the GC when trying to set the arena chunks to +// fault while the GC is active.) +// +// The implementation works in layers. At the bottom, arenas are managed in chunks. +// Each chunk must be a multiple of the heap arena size, or the heap arena size must +// be divisible by the arena chunks. The address space for each chunk, and each +// corresponding heapArena for that address space, are eternally reserved for use as +// arena chunks. That is, they can never be used for the general heap. Each chunk +// is also represented by a single mspan, and is modeled as a single large heap +// allocation. It must be, because each chunk contains ordinary Go values that may +// point into the heap, so it must be scanned just like any other object. Any +// pointer into a chunk will therefore always cause the whole chunk to be scanned +// while its corresponding arena is still live. +// +// Chunks may be allocated either from new memory mapped by the OS on our behalf, +// or by reusing old freed chunks. 
When chunks are freed, their underlying memory
+// is returned to the OS, set to fault on access, and may not be reused until the
+// program doesn't point into the chunk anymore (the code refers to this state as
+// "quarantined"), a property checked by the GC.
+//
+// The sweeper handles moving chunks out of this quarantine state to be ready for
+// reuse. When the chunk is placed into the quarantine state, its corresponding
+// span is marked as noscan so that the GC doesn't try to scan memory that would
+// cause a fault.
+//
+// At the next layer are the user arenas themselves. They consist of a single
+// active chunk which new Go values are bump-allocated into and a list of chunks
+// that were exhausted when allocating into the arena. Once the arena is freed,
+// it frees all full chunks it references, and places the active one onto a reuse
+// list for a future arena to use. Each arena keeps its list of referenced chunks
+// explicitly live until it is freed. Each user arena also maps to an object which
+// has a finalizer attached that ensures the arena's chunks are all freed even if
+// the arena itself is never explicitly freed.
+//
+// Pointer-ful memory is bump-allocated from low addresses to high addresses in each
+// chunk, while pointer-free memory is bump-allocated from high addresses to low
+// addresses. The reason for this is to take advantage of a GC optimization wherein
+// the GC will stop scanning an object when there are no more pointers in it, which
+// also allows us to elide clearing the heap bitmap for pointer-free Go values
+// allocated into arenas.
+//
+// Note that arenas are not safe to use concurrently.
+//
+// In summary, there are 2 resources: arenas, and arena chunks. They exist in the
+// following lifecycle:
+//
+// (1) A new arena is created via newArena.
+// (2) Chunks are allocated to hold memory allocated into the arena with new or slice.
+//     (a) Chunks are first allocated from the reuse list of partially-used chunks.
+//     (b) If there are no such chunks, then chunks on the ready list are taken.
+//     (c) Failing all the above, memory for a new chunk is mapped.
+// (3) The arena is freed, or all references to it are dropped, triggering its finalizer.
+//     (a) If the GC is not active, exhausted chunks are set to fault and placed on a
+//         quarantine list.
+//     (b) If the GC is active, exhausted chunks are placed on a fault list and will
+//         go through step (a) at a later point in time.
+//     (c) Any remaining partially-used chunk is placed on a reuse list.
+// (4) Once no more pointers are found into quarantined arena chunks, the sweeper
+//     takes these chunks out of quarantine and places them on the ready list.
+
+package runtime
+
+import (
+	"internal/goarch"
+	"internal/goexperiment"
+	"runtime/internal/atomic"
+	"runtime/internal/math"
+	"unsafe"
+)
+
+// Functions starting with arena_ are meant to be exported to downstream users
+// of arenas. They should wrap these functions in a higher-level API.
+//
+// The underlying arena and its resources are managed through an opaque unsafe.Pointer.
+
+// arena_newArena is a wrapper around newUserArena.
+//
+//go:linkname arena_newArena arena.runtime_arena_newArena
+func arena_newArena() unsafe.Pointer {
+	return unsafe.Pointer(newUserArena())
+}
+
+// arena_arena_New is a wrapper around (*userArena).new, except that typ
+// is an any (must be a *_type, still) and typ must be a type descriptor
+// for a pointer to the type to actually be allocated, i.e. pass a *T
+// to allocate a T.
This is necessary because this function returns a *T. +// +//go:linkname arena_arena_New arena.runtime_arena_arena_New +func arena_arena_New(arena unsafe.Pointer, typ any) any { + t := (*_type)(efaceOf(&typ).data) + if t.Kind_&kindMask != kindPtr { + throw("arena_New: non-pointer type") + } + te := (*ptrtype)(unsafe.Pointer(t)).Elem + x := ((*userArena)(arena)).new(te) + var result any + e := efaceOf(&result) + e._type = t + e.data = x + return result +} + +// arena_arena_Slice is a wrapper around (*userArena).slice. +// +//go:linkname arena_arena_Slice arena.runtime_arena_arena_Slice +func arena_arena_Slice(arena unsafe.Pointer, slice any, cap int) { + ((*userArena)(arena)).slice(slice, cap) +} + +// arena_arena_Free is a wrapper around (*userArena).free. +// +//go:linkname arena_arena_Free arena.runtime_arena_arena_Free +func arena_arena_Free(arena unsafe.Pointer) { + ((*userArena)(arena)).free() +} + +// arena_heapify takes a value that lives in an arena and makes a copy +// of it on the heap. Values that don't live in an arena are returned unmodified. +// +//go:linkname arena_heapify arena.runtime_arena_heapify +func arena_heapify(s any) any { + var v unsafe.Pointer + e := efaceOf(&s) + t := e._type + switch t.Kind_ & kindMask { + case kindString: + v = stringStructOf((*string)(e.data)).str + case kindSlice: + v = (*slice)(e.data).array + case kindPtr: + v = e.data + default: + panic("arena: Clone only supports pointers, slices, and strings") + } + span := spanOf(uintptr(v)) + if span == nil || !span.isUserArenaChunk { + // Not stored in a user arena chunk. + return s + } + // Heap-allocate storage for a copy. + var x any + switch t.Kind_ & kindMask { + case kindString: + s1 := s.(string) + s2, b := rawstring(len(s1)) + copy(b, s1) + x = s2 + case kindSlice: + len := (*slice)(e.data).len + et := (*slicetype)(unsafe.Pointer(t)).Elem + sl := new(slice) + *sl = slice{makeslicecopy(et, len, len, (*slice)(e.data).array), len, len} + xe := efaceOf(&x) + xe._type = t + xe.data = unsafe.Pointer(sl) + case kindPtr: + et := (*ptrtype)(unsafe.Pointer(t)).Elem + e2 := newobject(et) + typedmemmove(et, e2, e.data) + xe := efaceOf(&x) + xe._type = t + xe.data = e2 + } + return x +} + +const ( + // userArenaChunkBytes is the size of a user arena chunk. + userArenaChunkBytesMax = 8 << 20 + userArenaChunkBytes = uintptr(int64(userArenaChunkBytesMax-heapArenaBytes)&(int64(userArenaChunkBytesMax-heapArenaBytes)>>63) + heapArenaBytes) // min(userArenaChunkBytesMax, heapArenaBytes) + + // userArenaChunkPages is the number of pages a user arena chunk uses. + userArenaChunkPages = userArenaChunkBytes / pageSize + + // userArenaChunkMaxAllocBytes is the maximum size of an object that can + // be allocated from an arena. This number is chosen to cap worst-case + // fragmentation of user arenas to 25%. Larger allocations are redirected + // to the heap. 
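+	// (Any single arena allocation is at most a quarter of a chunk, so an
+	// allocation can only fail when less than a quarter of the chunk is
+	// still free; at most 25% of a retired chunk is therefore wasted.)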
+	userArenaChunkMaxAllocBytes = userArenaChunkBytes / 4
+)
+
+func init() {
+	if userArenaChunkPages*pageSize != userArenaChunkBytes {
+		throw("user arena chunk size is not a multiple of the page size")
+	}
+	if userArenaChunkBytes%physPageSize != 0 {
+		throw("user arena chunk size is not a multiple of the physical page size")
+	}
+	if userArenaChunkBytes < heapArenaBytes {
+		if heapArenaBytes%userArenaChunkBytes != 0 {
+			throw("user arena chunk size is smaller than a heap arena, but doesn't divide it")
+		}
+	} else {
+		if userArenaChunkBytes%heapArenaBytes != 0 {
+			throw("user arena chunk size is larger than a heap arena, but not a multiple")
+		}
+	}
+	lockInit(&userArenaState.lock, lockRankUserArenaState)
+}
+
+// userArenaChunkReserveBytes returns the number of additional bytes to reserve for
+// heap metadata.
+func userArenaChunkReserveBytes() uintptr {
+	if goexperiment.AllocHeaders {
+		// In the allocation headers experiment, we reserve the end of the chunk for
+		// a pointer/scalar bitmap. We also reserve space for a dummy _type that
+		// refers to the bitmap. The PtrBytes field of the dummy _type indicates how
+		// many of those bits are valid.
+		return userArenaChunkBytes/goarch.PtrSize/8 + unsafe.Sizeof(_type{})
+	}
+	return 0
+}
+
+type userArena struct {
+	// fullList is a list of full chunks that don't have enough free memory
+	// left, and that we'll free once this user arena is freed.
+	//
+	// Can't use mSpanList here because it's not-in-heap.
+	fullList *mspan
+
+	// active is the user arena chunk we're currently allocating into.
+	active *mspan
+
+	// refs is a set of references to the arena chunks so that they're kept alive.
+	//
+	// The last reference in the list always refers to active, while the rest of
+	// them correspond to fullList. Specifically, the head of fullList is the
+	// second-to-last one, fullList.next is the third-to-last, and so on.
+	//
+	// In other words, every time a new chunk becomes active, it's appended to this
+	// list.
+	refs []unsafe.Pointer
+
+	// defunct is true if free has been called on this arena.
+	//
+	// This is just a best-effort way to discover a concurrent allocation
+	// and free. Also used to detect a double-free.
+	defunct atomic.Bool
+}
+
+// newUserArena creates a new userArena ready to be used.
+func newUserArena() *userArena {
+	a := new(userArena)
+	SetFinalizer(a, func(a *userArena) {
+		// If the arena handle is dropped without being freed, call free
+		// on the arena ourselves; otherwise the arena chunks would never
+		// be reclaimed, since the garbage collector doesn't manage them.
+		a.free()
+	})
+	a.refill()
+	return a
+}
+
+// new allocates a new object of the provided type into the arena, and returns
+// its pointer.
+//
+// This operation is not safe to call concurrently with other operations on the
+// same arena.
+func (a *userArena) new(typ *_type) unsafe.Pointer {
+	return a.alloc(typ, -1)
+}
+
+// slice allocates a new slice backing store. slice must be a pointer to a slice
+// (i.e. *[]T), because userArenaSlice will update the slice directly.
+//
+// cap determines the capacity of the slice backing store and must be non-negative.
+//
+// This operation is not safe to call concurrently with other operations on the
+// same arena.
+func (a *userArena) slice(sl any, cap int) { + if cap < 0 { + panic("userArena.slice: negative cap") + } + i := efaceOf(&sl) + typ := i._type + if typ.Kind_&kindMask != kindPtr { + panic("slice result of non-ptr type") + } + typ = (*ptrtype)(unsafe.Pointer(typ)).Elem + if typ.Kind_&kindMask != kindSlice { + panic("slice of non-ptr-to-slice type") + } + typ = (*slicetype)(unsafe.Pointer(typ)).Elem + // t is now the element type of the slice we want to allocate. + + *((*slice)(i.data)) = slice{a.alloc(typ, cap), cap, cap} +} + +// free returns the userArena's chunks back to mheap and marks it as defunct. +// +// Must be called at most once for any given arena. +// +// This operation is not safe to call concurrently with other operations on the +// same arena. +func (a *userArena) free() { + // Check for a double-free. + if a.defunct.Load() { + panic("arena double free") + } + + // Mark ourselves as defunct. + a.defunct.Store(true) + SetFinalizer(a, nil) + + // Free all the full arenas. + // + // The refs on this list are in reverse order from the second-to-last. + s := a.fullList + i := len(a.refs) - 2 + for s != nil { + a.fullList = s.next + s.next = nil + freeUserArenaChunk(s, a.refs[i]) + s = a.fullList + i-- + } + if a.fullList != nil || i >= 0 { + // There's still something left on the full list, or we + // failed to actually iterate over the entire refs list. + throw("full list doesn't match refs list in length") + } + + // Put the active chunk onto the reuse list. + // + // Note that active's reference is always the last reference in refs. + s = a.active + if s != nil { + if raceenabled || msanenabled || asanenabled { + // Don't reuse arenas with sanitizers enabled. We want to catch + // any use-after-free errors aggressively. + freeUserArenaChunk(s, a.refs[len(a.refs)-1]) + } else { + lock(&userArenaState.lock) + userArenaState.reuse = append(userArenaState.reuse, liveUserArenaChunk{s, a.refs[len(a.refs)-1]}) + unlock(&userArenaState.lock) + } + } + // nil out a.active so that a race with freeing will more likely cause a crash. + a.active = nil + a.refs = nil +} + +// alloc reserves space in the current chunk or calls refill and reserves space +// in a new chunk. If cap is negative, the type will be taken literally, otherwise +// it will be considered as an element type for a slice backing store with capacity +// cap. +func (a *userArena) alloc(typ *_type, cap int) unsafe.Pointer { + s := a.active + var x unsafe.Pointer + for { + x = s.userArenaNextFree(typ, cap) + if x != nil { + break + } + s = a.refill() + } + return x +} + +// refill inserts the current arena chunk onto the full list and obtains a new +// one, either from the partial list or allocating a new one, both from mheap. +func (a *userArena) refill() *mspan { + // If there's an active chunk, assume it's full. + s := a.active + if s != nil { + if s.userArenaChunkFree.size() > userArenaChunkMaxAllocBytes { + // It's difficult to tell when we're actually out of memory + // in a chunk because the allocation that failed may still leave + // some free space available. However, that amount of free space + // should never exceed the maximum allocation size. + throw("wasted too much memory in an arena chunk") + } + s.next = a.fullList + a.fullList = s + a.active = nil + s = nil + } + var x unsafe.Pointer + + // Check the partially-used list. + lock(&userArenaState.lock) + if len(userArenaState.reuse) > 0 { + // Pick off the last arena chunk from the list. 
+ n := len(userArenaState.reuse) - 1 + x = userArenaState.reuse[n].x + s = userArenaState.reuse[n].mspan + userArenaState.reuse[n].x = nil + userArenaState.reuse[n].mspan = nil + userArenaState.reuse = userArenaState.reuse[:n] + } + unlock(&userArenaState.lock) + if s == nil { + // Allocate a new one. + x, s = newUserArenaChunk() + if s == nil { + throw("out of memory") + } + } + a.refs = append(a.refs, x) + a.active = s + return s +} + +type liveUserArenaChunk struct { + *mspan // Must represent a user arena chunk. + + // Reference to mspan.base() to keep the chunk alive. + x unsafe.Pointer +} + +var userArenaState struct { + lock mutex + + // reuse contains a list of partially-used and already-live + // user arena chunks that can be quickly reused for another + // arena. + // + // Protected by lock. + reuse []liveUserArenaChunk + + // fault contains full user arena chunks that need to be faulted. + // + // Protected by lock. + fault []liveUserArenaChunk +} + +// userArenaNextFree reserves space in the user arena for an item of the specified +// type. If cap is not -1, this is for an array of cap elements of type t. +func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer { + size := typ.Size_ + if cap > 0 { + if size > ^uintptr(0)/uintptr(cap) { + // Overflow. + throw("out of memory") + } + size *= uintptr(cap) + } + if size == 0 || cap == 0 { + return unsafe.Pointer(&zerobase) + } + if size > userArenaChunkMaxAllocBytes { + // Redirect allocations that don't fit into a chunk well directly + // from the heap. + if cap >= 0 { + return newarray(typ, cap) + } + return newobject(typ) + } + + // Prevent preemption as we set up the space for a new object. + // + // Act like we're allocating. + mp := acquirem() + if mp.mallocing != 0 { + throw("malloc deadlock") + } + if mp.gsignal == getg() { + throw("malloc during signal") + } + mp.mallocing = 1 + + var ptr unsafe.Pointer + if typ.PtrBytes == 0 { + // Allocate pointer-less objects from the tail end of the chunk. + v, ok := s.userArenaChunkFree.takeFromBack(size, typ.Align_) + if ok { + ptr = unsafe.Pointer(v) + } + } else { + v, ok := s.userArenaChunkFree.takeFromFront(size, typ.Align_) + if ok { + ptr = unsafe.Pointer(v) + } + } + if ptr == nil { + // Failed to allocate. + mp.mallocing = 0 + releasem(mp) + return nil + } + if s.needzero != 0 { + throw("arena chunk needs zeroing, but should already be zeroed") + } + // Set up heap bitmap and do extra accounting. + if typ.PtrBytes != 0 { + if cap >= 0 { + userArenaHeapBitsSetSliceType(typ, cap, ptr, s) + } else { + userArenaHeapBitsSetType(typ, ptr, s) + } + c := getMCache(mp) + if c == nil { + throw("mallocgc called without a P or outside bootstrapping") + } + if cap > 0 { + c.scanAlloc += size - (typ.Size_ - typ.PtrBytes) + } else { + c.scanAlloc += typ.PtrBytes + } + } + + // Ensure that the stores above that initialize x to + // type-safe memory and set the heap bits occur before + // the caller can make ptr observable to the garbage + // collector. Otherwise, on weakly ordered machines, + // the garbage collector could follow a pointer to x, + // but see uninitialized memory or stale heap bits. + publicationBarrier() + + mp.mallocing = 0 + releasem(mp) + + return ptr +} + +// userArenaHeapBitsSetSliceType is the equivalent of heapBitsSetType but for +// Go slice backing store values allocated in a user arena chunk. It sets up the +// heap bitmap for n consecutive values with type typ allocated at address ptr. 
+func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, s *mspan) { + mem, overflow := math.MulUintptr(typ.Size_, uintptr(n)) + if overflow || n < 0 || mem > maxAlloc { + panic(plainError("runtime: allocation size out of range")) + } + for i := 0; i < n; i++ { + userArenaHeapBitsSetType(typ, add(ptr, uintptr(i)*typ.Size_), s) + } +} + +// newUserArenaChunk allocates a user arena chunk, which maps to a single +// heap arena and single span. Returns a pointer to the base of the chunk +// (this is really important: we need to keep the chunk alive) and the span. +func newUserArenaChunk() (unsafe.Pointer, *mspan) { + if gcphase == _GCmarktermination { + throw("newUserArenaChunk called with gcphase == _GCmarktermination") + } + + // Deduct assist credit. Because user arena chunks are modeled as one + // giant heap object which counts toward heapLive, we're obligated to + // assist the GC proportionally (and it's worth noting that the arena + // does represent additional work for the GC, but we also have no idea + // what that looks like until we actually allocate things into the + // arena). + deductAssistCredit(userArenaChunkBytes) + + // Set mp.mallocing to keep from being preempted by GC. + mp := acquirem() + if mp.mallocing != 0 { + throw("malloc deadlock") + } + if mp.gsignal == getg() { + throw("malloc during signal") + } + mp.mallocing = 1 + + // Allocate a new user arena. + var span *mspan + systemstack(func() { + span = mheap_.allocUserArenaChunk() + }) + if span == nil { + throw("out of memory") + } + x := unsafe.Pointer(span.base()) + + // Allocate black during GC. + // All slots hold nil so no scanning is needed. + // This may be racing with GC so do it atomically if there can be + // a race marking the bit. + if gcphase != _GCoff { + gcmarknewobject(span, span.base()) + } + + if raceenabled { + // TODO(mknyszek): Track individual objects. + racemalloc(unsafe.Pointer(span.base()), span.elemsize) + } + + if msanenabled { + // TODO(mknyszek): Track individual objects. + msanmalloc(unsafe.Pointer(span.base()), span.elemsize) + } + + if asanenabled { + // TODO(mknyszek): Track individual objects. + rzSize := computeRZlog(span.elemsize) + span.elemsize -= rzSize + if goexperiment.AllocHeaders { + span.largeType.Size_ = span.elemsize + } + rzStart := span.base() + span.elemsize + span.userArenaChunkFree = makeAddrRange(span.base(), rzStart) + asanpoison(unsafe.Pointer(rzStart), span.limit-rzStart) + asanunpoison(unsafe.Pointer(span.base()), span.elemsize) + } + + if rate := MemProfileRate; rate > 0 { + c := getMCache(mp) + if c == nil { + throw("newUserArenaChunk called without a P or outside bootstrapping") + } + // Note cache c only valid while m acquired; see #47302 + if rate != 1 && userArenaChunkBytes < c.nextSample { + c.nextSample -= userArenaChunkBytes + } else { + profilealloc(mp, unsafe.Pointer(span.base()), userArenaChunkBytes) + } + } + mp.mallocing = 0 + releasem(mp) + + // Again, because this chunk counts toward heapLive, potentially trigger a GC. + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + + if debug.malloc { + if debug.allocfreetrace != 0 { + tracealloc(unsafe.Pointer(span.base()), userArenaChunkBytes, nil) + } + + if inittrace.active && inittrace.id == getg().goid { + // Init functions are executed sequentially in a single goroutine. + inittrace.bytes += uint64(userArenaChunkBytes) + } + } + + // Double-check it's aligned to the physical page size. 
Based on the current + // implementation this is trivially true, but it need not be in the future. + // However, if it's not aligned to the physical page size then we can't properly + // set it to fault later. + if uintptr(x)%physPageSize != 0 { + throw("user arena chunk is not aligned to the physical page size") + } + + return x, span +} + +// isUnusedUserArenaChunk indicates that the arena chunk has been set to fault +// and doesn't contain any scannable memory anymore. However, it might still be +// mSpanInUse as it sits on the quarantine list, since it needs to be swept. +// +// This is not safe to execute unless the caller has ownership of the mspan or +// the world is stopped (preemption is prevented while the relevant state changes). +// +// This is really only meant to be used by accounting tests in the runtime to +// distinguish when a span shouldn't be counted (since mSpanInUse might not be +// enough). +func (s *mspan) isUnusedUserArenaChunk() bool { + return s.isUserArenaChunk && s.spanclass == makeSpanClass(0, true) +} + +// setUserArenaChunkToFault sets the address space for the user arena chunk to fault +// and releases any underlying memory resources. +// +// Must be in a non-preemptible state to ensure the consistency of statistics +// exported to MemStats. +func (s *mspan) setUserArenaChunkToFault() { + if !s.isUserArenaChunk { + throw("invalid span in heapArena for user arena") + } + if s.npages*pageSize != userArenaChunkBytes { + throw("span on userArena.faultList has invalid size") + } + + // Update the span class to be noscan. What we want to happen is that + // any pointer into the span keeps it from getting recycled, so we want + // the mark bit to get set, but we're about to set the address space to fault, + // so we have to prevent the GC from scanning this memory. + // + // It's OK to set it here because (1) a GC isn't in progress, so the scanning code + // won't make a bad decision, (2) we're currently non-preemptible and in the runtime, + // so a GC is blocked from starting. We might race with sweeping, which could + // put it on the "wrong" sweep list, but really don't care because the chunk is + // treated as a large object span and there's no meaningful difference between scan + // and noscan large objects in the sweeper. The STW at the start of the GC acts as a + // barrier for this update. + s.spanclass = makeSpanClass(0, true) + + // Actually set the arena chunk to fault, so we'll get dangling pointer errors. + // sysFault currently uses a method on each OS that forces it to evacuate all + // memory backing the chunk. + sysFault(unsafe.Pointer(s.base()), s.npages*pageSize) + + // Everything on the list is counted as in-use, however sysFault transitions to + // Reserved, not Prepared, so we skip updating heapFree or heapReleased and just + // remove the memory from the total altogether; it's just address space now. + gcController.heapInUse.add(-int64(s.npages * pageSize)) + + // Count this as a free of an object right now as opposed to when + // the span gets off the quarantine list. The main reason is so that the + // amount of bytes allocated doesn't exceed how much is counted as + // "mapped ready," which could cause a deadlock in the pacer. + gcController.totalFree.Add(int64(s.elemsize)) + + // Update consistent stats to match. + // + // We're non-preemptible, so it's safe to update consistent stats (our P + // won't change out from under us). 
+	stats := memstats.heapStats.acquire()
+	atomic.Xaddint64(&stats.committed, -int64(s.npages*pageSize))
+	atomic.Xaddint64(&stats.inHeap, -int64(s.npages*pageSize))
+	atomic.Xadd64(&stats.largeFreeCount, 1)
+	atomic.Xadd64(&stats.largeFree, int64(s.elemsize))
+	memstats.heapStats.release()
+
+	// This counts as a free, so update heapLive.
+	gcController.update(-int64(s.elemsize), 0)
+
+	// Mark it as free for the race detector.
+	if raceenabled {
+		racefree(unsafe.Pointer(s.base()), s.elemsize)
+	}
+
+	systemstack(func() {
+		// Add the user arena to the quarantine list.
+		lock(&mheap_.lock)
+		mheap_.userArena.quarantineList.insert(s)
+		unlock(&mheap_.lock)
+	})
+}
+
+// inUserArenaChunk returns true if p points to a user arena chunk.
+func inUserArenaChunk(p uintptr) bool {
+	s := spanOf(p)
+	if s == nil {
+		return false
+	}
+	return s.isUserArenaChunk
+}
+
+// freeUserArenaChunk releases the user arena represented by s back to the runtime.
+//
+// x must be a live pointer within s.
+//
+// The runtime will set the user arena to fault once it's safe (the GC is no longer running)
+// and then once the user arena is no longer referenced by the application, will allow it to
+// be reused.
+func freeUserArenaChunk(s *mspan, x unsafe.Pointer) {
+	if !s.isUserArenaChunk {
+		throw("span is not for a user arena")
+	}
+	if s.npages*pageSize != userArenaChunkBytes {
+		throw("invalid user arena span size")
+	}
+
+	// Mark the region as free to various sanitizers immediately instead
+	// of handling them at sweep time.
+	if raceenabled {
+		racefree(unsafe.Pointer(s.base()), s.elemsize)
+	}
+	if msanenabled {
+		msanfree(unsafe.Pointer(s.base()), s.elemsize)
+	}
+	if asanenabled {
+		asanpoison(unsafe.Pointer(s.base()), s.elemsize)
+	}
+
+	// Make ourselves non-preemptible as we manipulate state and statistics.
+	//
+	// Also required by setUserArenaChunkToFault.
+	mp := acquirem()
+
+	// We can only set user arenas to fault if we're in the _GCoff phase.
+	if gcphase == _GCoff {
+		lock(&userArenaState.lock)
+		faultList := userArenaState.fault
+		userArenaState.fault = nil
+		unlock(&userArenaState.lock)
+
+		s.setUserArenaChunkToFault()
+		for _, lc := range faultList {
+			lc.mspan.setUserArenaChunkToFault()
+		}
+
+		// Until the chunks are set to fault, keep them alive via the fault list.
+		KeepAlive(x)
+		KeepAlive(faultList)
+	} else {
+		// Put the user arena on the fault list.
+		lock(&userArenaState.lock)
+		userArenaState.fault = append(userArenaState.fault, liveUserArenaChunk{s, x})
+		unlock(&userArenaState.lock)
+	}
+	releasem(mp)
+}
+
+// allocUserArenaChunk attempts to reuse a free user arena chunk represented
+// as a span.
+//
+// Must be in a non-preemptible state to ensure the consistency of statistics
+// exported to MemStats.
+//
+// Acquires the heap lock. Must run on the system stack for that reason.
+//
+//go:systemstack
+func (h *mheap) allocUserArenaChunk() *mspan {
+	var s *mspan
+	var base uintptr
+
+	// First check the free list.
+	lock(&h.lock)
+	if !h.userArena.readyList.isEmpty() {
+		s = h.userArena.readyList.first
+		h.userArena.readyList.remove(s)
+		base = s.base()
+	} else {
+		// Free list was empty, so allocate a new arena.
+		hintList := &h.userArena.arenaHints
+		if raceenabled {
+			// In race mode just use the regular heap hints. We might fragment
+			// the address space, but the race detector requires that the heap
+			// is mapped contiguously.
+ hintList = &h.arenaHints + } + v, size := h.sysAlloc(userArenaChunkBytes, hintList, false) + if size%userArenaChunkBytes != 0 { + throw("sysAlloc size is not divisible by userArenaChunkBytes") + } + if size > userArenaChunkBytes { + // We got more than we asked for. This can happen if + // heapArenaSize > userArenaChunkSize, or if sysAlloc just returns + // some extra as a result of trying to find an aligned region. + // + // Divide it up and put it on the ready list. + for i := userArenaChunkBytes; i < size; i += userArenaChunkBytes { + s := h.allocMSpanLocked() + s.init(uintptr(v)+i, userArenaChunkPages) + h.userArena.readyList.insertBack(s) + } + size = userArenaChunkBytes + } + base = uintptr(v) + if base == 0 { + // Out of memory. + unlock(&h.lock) + return nil + } + s = h.allocMSpanLocked() + } + unlock(&h.lock) + + // sysAlloc returns Reserved address space, and any span we're + // reusing is set to fault (so, also Reserved), so transition + // it to Prepared and then Ready. + // + // Unlike (*mheap).grow, just map in everything that we + // asked for. We're likely going to use it all. + sysMap(unsafe.Pointer(base), userArenaChunkBytes, &gcController.heapReleased) + sysUsed(unsafe.Pointer(base), userArenaChunkBytes, userArenaChunkBytes) + + // Model the user arena as a heap span for a large object. + spc := makeSpanClass(0, false) + h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages) + s.isUserArenaChunk = true + s.elemsize -= userArenaChunkReserveBytes() + s.limit = s.base() + s.elemsize + s.freeindex = 1 + s.allocCount = 1 + + // Account for this new arena chunk memory. + gcController.heapInUse.add(int64(userArenaChunkBytes)) + gcController.heapReleased.add(-int64(userArenaChunkBytes)) + + stats := memstats.heapStats.acquire() + atomic.Xaddint64(&stats.inHeap, int64(userArenaChunkBytes)) + atomic.Xaddint64(&stats.committed, int64(userArenaChunkBytes)) + + // Model the arena as a single large malloc. + atomic.Xadd64(&stats.largeAlloc, int64(s.elemsize)) + atomic.Xadd64(&stats.largeAllocCount, 1) + memstats.heapStats.release() + + // Count the alloc in inconsistent, internal stats. + gcController.totalAlloc.Add(int64(s.elemsize)) + + // Update heapLive. + gcController.update(int64(s.elemsize), 0) + + // This must clear the entire heap bitmap so that it's safe + // to allocate noscan data without writing anything out. + s.initHeapBits(true) + + // Clear the span preemptively. It's an arena chunk, so let's assume + // everything is going to be used. + // + // This also seems to make a massive difference as to whether or + // not Linux decides to back this memory with transparent huge + // pages. There's latency involved in this zeroing, but the hugepage + // gains are almost always worth it. Note: it's important that we + // clear even if it's freshly mapped and we know there's no point + // to zeroing as *that* is the critical signal to use huge pages. + memclrNoHeapPointers(unsafe.Pointer(s.base()), s.elemsize) + s.needzero = 0 + + s.freeIndexForScan = 1 + + // Set up the range for allocation. + s.userArenaChunkFree = makeAddrRange(base, base+s.elemsize) + + // Put the large span in the mcentral swept list so that it's + // visible to the background sweeper. + h.central[spc].mcentral.fullSwept(h.sweepgen).push(s) + + if goexperiment.AllocHeaders { + // Set up an allocation header. Avoid write barriers here because this type + // is not a real type, and it exists in an invalid location. 
+ *(*uintptr)(unsafe.Pointer(&s.largeType)) = uintptr(unsafe.Pointer(s.limit)) + *(*uintptr)(unsafe.Pointer(&s.largeType.GCData)) = s.limit + unsafe.Sizeof(_type{}) + s.largeType.PtrBytes = 0 + s.largeType.Size_ = s.elemsize + } + return s +} diff --git a/platform/dbops/binaries/go/go/src/runtime/arena_test.go b/platform/dbops/binaries/go/go/src/runtime/arena_test.go new file mode 100644 index 0000000000000000000000000000000000000000..018c42371256325d50ce86d86422ebf9986433a1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/arena_test.go @@ -0,0 +1,526 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "internal/goarch" + "reflect" + . "runtime" + "runtime/debug" + "runtime/internal/atomic" + "testing" + "time" + "unsafe" +) + +type smallScalar struct { + X uintptr +} +type smallPointer struct { + X *smallPointer +} +type smallPointerMix struct { + A *smallPointer + B byte + C *smallPointer + D [11]byte +} +type mediumScalarEven [8192]byte +type mediumScalarOdd [3321]byte +type mediumPointerEven [1024]*smallPointer +type mediumPointerOdd [1023]*smallPointer + +type largeScalar [UserArenaChunkBytes + 1]byte +type largePointer [UserArenaChunkBytes/unsafe.Sizeof(&smallPointer{}) + 1]*smallPointer + +func TestUserArena(t *testing.T) { + // Set GOMAXPROCS to 2 so we don't run too many of these + // tests in parallel. + defer GOMAXPROCS(GOMAXPROCS(2)) + + // Start a subtest so that we can clean up after any parallel tests within. + t.Run("Alloc", func(t *testing.T) { + ss := &smallScalar{5} + runSubTestUserArenaNew(t, ss, true) + + sp := &smallPointer{new(smallPointer)} + runSubTestUserArenaNew(t, sp, true) + + spm := &smallPointerMix{sp, 5, nil, [11]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}} + runSubTestUserArenaNew(t, spm, true) + + mse := new(mediumScalarEven) + for i := range mse { + mse[i] = 121 + } + runSubTestUserArenaNew(t, mse, true) + + mso := new(mediumScalarOdd) + for i := range mso { + mso[i] = 122 + } + runSubTestUserArenaNew(t, mso, true) + + mpe := new(mediumPointerEven) + for i := range mpe { + mpe[i] = sp + } + runSubTestUserArenaNew(t, mpe, true) + + mpo := new(mediumPointerOdd) + for i := range mpo { + mpo[i] = sp + } + runSubTestUserArenaNew(t, mpo, true) + + ls := new(largeScalar) + for i := range ls { + ls[i] = 123 + } + // Not in parallel because we don't want to hold this large allocation live. + runSubTestUserArenaNew(t, ls, false) + + lp := new(largePointer) + for i := range lp { + lp[i] = sp + } + // Not in parallel because we don't want to hold this large allocation live. + runSubTestUserArenaNew(t, lp, false) + + sss := make([]smallScalar, 25) + for i := range sss { + sss[i] = smallScalar{12} + } + runSubTestUserArenaSlice(t, sss, true) + + mpos := make([]mediumPointerOdd, 5) + for i := range mpos { + mpos[i] = *mpo + } + runSubTestUserArenaSlice(t, mpos, true) + + sps := make([]smallPointer, UserArenaChunkBytes/unsafe.Sizeof(smallPointer{})+1) + for i := range sps { + sps[i] = *sp + } + // Not in parallel because we don't want to hold this large allocation live. + runSubTestUserArenaSlice(t, sps, false) + + // Test zero-sized types. 
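+		// The runtime is expected to hand back its zerobase sentinel for
+		// these rather than consuming arena space; the subtests compare
+		// against ZeroBase to check that.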
+ t.Run("struct{}", func(t *testing.T) { + arena := NewUserArena() + var x any + x = (*struct{})(nil) + arena.New(&x) + if v := unsafe.Pointer(x.(*struct{})); v != ZeroBase { + t.Errorf("expected zero-sized type to be allocated as zerobase: got %x, want %x", v, ZeroBase) + } + arena.Free() + }) + t.Run("[]struct{}", func(t *testing.T) { + arena := NewUserArena() + var sl []struct{} + arena.Slice(&sl, 10) + if v := unsafe.Pointer(&sl[0]); v != ZeroBase { + t.Errorf("expected zero-sized type to be allocated as zerobase: got %x, want %x", v, ZeroBase) + } + arena.Free() + }) + t.Run("[]int (cap 0)", func(t *testing.T) { + arena := NewUserArena() + var sl []int + arena.Slice(&sl, 0) + if len(sl) != 0 { + t.Errorf("expected requested zero-sized slice to still have zero length: got %x, want 0", len(sl)) + } + arena.Free() + }) + }) + + // Run a GC cycle to get any arenas off the quarantine list. + GC() + + if n := GlobalWaitingArenaChunks(); n != 0 { + t.Errorf("expected zero waiting arena chunks, found %d", n) + } +} + +func runSubTestUserArenaNew[S comparable](t *testing.T, value *S, parallel bool) { + t.Run(reflect.TypeOf(value).Elem().Name(), func(t *testing.T) { + if parallel { + t.Parallel() + } + + // Allocate and write data, enough to exhaust the arena. + // + // This is an underestimate, likely leaving some space in the arena. That's a good thing, + // because it gives us coverage of boundary cases. + n := int(UserArenaChunkBytes / unsafe.Sizeof(*value)) + if n == 0 { + n = 1 + } + + // Create a new arena and do a bunch of operations on it. + arena := NewUserArena() + + arenaValues := make([]*S, 0, n) + for j := 0; j < n; j++ { + var x any + x = (*S)(nil) + arena.New(&x) + s := x.(*S) + *s = *value + arenaValues = append(arenaValues, s) + } + // Check integrity of allocated data. + for _, s := range arenaValues { + if *s != *value { + t.Errorf("failed integrity check: got %#v, want %#v", *s, *value) + } + } + + // Release the arena. + arena.Free() + }) +} + +func runSubTestUserArenaSlice[S comparable](t *testing.T, value []S, parallel bool) { + t.Run("[]"+reflect.TypeOf(value).Elem().Name(), func(t *testing.T) { + if parallel { + t.Parallel() + } + + // Allocate and write data, enough to exhaust the arena. + // + // This is an underestimate, likely leaving some space in the arena. That's a good thing, + // because it gives us coverage of boundary cases. + n := int(UserArenaChunkBytes / (unsafe.Sizeof(*new(S)) * uintptr(cap(value)))) + if n == 0 { + n = 1 + } + + // Create a new arena and do a bunch of operations on it. + arena := NewUserArena() + + arenaValues := make([][]S, 0, n) + for j := 0; j < n; j++ { + var sl []S + arena.Slice(&sl, cap(value)) + copy(sl, value) + arenaValues = append(arenaValues, sl) + } + // Check integrity of allocated data. + for _, sl := range arenaValues { + for i := range sl { + got := sl[i] + want := value[i] + if got != want { + t.Errorf("failed integrity check: got %#v, want %#v at index %d", got, want, i) + } + } + } + + // Release the arena. + arena.Free() + }) +} + +func TestUserArenaLiveness(t *testing.T) { + t.Run("Free", func(t *testing.T) { + testUserArenaLiveness(t, false) + }) + t.Run("Finalizer", func(t *testing.T) { + testUserArenaLiveness(t, true) + }) +} + +func testUserArenaLiveness(t *testing.T, useArenaFinalizer bool) { + // Disable the GC so that there's zero chance we try doing anything arena related *during* + // a mark phase, since otherwise a bunch of arenas could end up on the fault list. 
+ defer debug.SetGCPercent(debug.SetGCPercent(-1)) + + // Defensively ensure that any full arena chunks leftover from previous tests have been cleared. + GC() + GC() + + arena := NewUserArena() + + // Allocate a few pointer-ful but un-initialized objects so that later we can + // place a reference to heap object at a more interesting location. + for i := 0; i < 3; i++ { + var x any + x = (*mediumPointerOdd)(nil) + arena.New(&x) + } + + var x any + x = (*smallPointerMix)(nil) + arena.New(&x) + v := x.(*smallPointerMix) + + var safeToFinalize atomic.Bool + var finalized atomic.Bool + v.C = new(smallPointer) + SetFinalizer(v.C, func(_ *smallPointer) { + if !safeToFinalize.Load() { + t.Error("finalized arena-referenced object unexpectedly") + } + finalized.Store(true) + }) + + // Make sure it stays alive. + GC() + GC() + + // In order to ensure the object can be freed, we now need to make sure to use + // the entire arena. Exhaust the rest of the arena. + + for i := 0; i < int(UserArenaChunkBytes/unsafe.Sizeof(mediumScalarEven{})); i++ { + var x any + x = (*mediumScalarEven)(nil) + arena.New(&x) + } + + // Make sure it stays alive again. + GC() + GC() + + v = nil + + safeToFinalize.Store(true) + if useArenaFinalizer { + arena = nil + + // Try to queue the arena finalizer. + GC() + GC() + + // In order for the finalizer we actually want to run to execute, + // we need to make sure this one runs first. + if !BlockUntilEmptyFinalizerQueue(int64(2 * time.Second)) { + t.Fatal("finalizer queue was never emptied") + } + } else { + // Free the arena explicitly. + arena.Free() + } + + // Try to queue the object's finalizer that we set earlier. + GC() + GC() + + if !BlockUntilEmptyFinalizerQueue(int64(2 * time.Second)) { + t.Fatal("finalizer queue was never emptied") + } + if !finalized.Load() { + t.Error("expected arena-referenced object to be finalized") + } +} + +func TestUserArenaClearsPointerBits(t *testing.T) { + // This is a regression test for a serious issue wherein if pointer bits + // aren't properly cleared, it's possible to allocate scalar data down + // into a previously pointer-ful area, causing misinterpretation by the GC. + + // Create a large object, grab a pointer into it, and free it. + x := new([8 << 20]byte) + xp := uintptr(unsafe.Pointer(&x[124])) + var finalized atomic.Bool + SetFinalizer(x, func(_ *[8 << 20]byte) { + finalized.Store(true) + }) + + // Write three chunks worth of pointer data. Three gives us a + // high likelihood that when we write 2 later, we'll get the behavior + // we want. + a := NewUserArena() + for i := 0; i < int(UserArenaChunkBytes/goarch.PtrSize*3); i++ { + var x any + x = (*smallPointer)(nil) + a.New(&x) + } + a.Free() + + // Recycle the arena chunks. + GC() + GC() + + a = NewUserArena() + for i := 0; i < int(UserArenaChunkBytes/goarch.PtrSize*2); i++ { + var x any + x = (*smallScalar)(nil) + a.New(&x) + v := x.(*smallScalar) + // Write a pointer that should not keep x alive. + *v = smallScalar{xp} + } + KeepAlive(x) + x = nil + + // Try to free x. + GC() + GC() + + if !BlockUntilEmptyFinalizerQueue(int64(2 * time.Second)) { + t.Fatal("finalizer queue was never emptied") + } + if !finalized.Load() { + t.Fatal("heap allocation kept alive through non-pointer reference") + } + + // Clean up the arena. 
+	a.Free()
+	GC()
+	GC()
+}
+
+func TestUserArenaCloneString(t *testing.T) {
+	a := NewUserArena()
+
+	// A static string (not on heap or arena)
+	var s = "abcdefghij"
+
+	// Create a byte slice in the arena, initialize it with s
+	var b []byte
+	a.Slice(&b, len(s))
+	copy(b, s)
+
+	// Create a string as using the same memory as the byte slice, hence in
+	// the arena. This could be an arena API, but hasn't really been needed
+	// yet.
+	as := unsafe.String(&b[0], len(b))
+
+	// Clone should make a copy of as, since it is in the arena.
+	asCopy := UserArenaClone(as)
+	if unsafe.StringData(as) == unsafe.StringData(asCopy) {
+		t.Error("Clone did not make a copy")
+	}
+
+	// Clone should make a copy of subAs, since subAs is just part of as and so is in the arena.
+	subAs := as[1:3]
+	subAsCopy := UserArenaClone(subAs)
+	if unsafe.StringData(subAs) == unsafe.StringData(subAsCopy) {
+		t.Error("Clone did not make a copy")
+	}
+	if len(subAs) != len(subAsCopy) {
+		t.Errorf("Clone made an incorrect copy (bad length): %d -> %d", len(subAs), len(subAsCopy))
+	} else {
+		for i := range subAs {
+			if subAs[i] != subAsCopy[i] {
+				t.Errorf("Clone made an incorrect copy (data at index %d): %d -> %d", i, subAs[i], subAsCopy[i])
+			}
+		}
+	}
+
+	// Clone should not make a copy of doubleAs, since doubleAs will be on the heap.
+	doubleAs := as + as
+	doubleAsCopy := UserArenaClone(doubleAs)
+	if unsafe.StringData(doubleAs) != unsafe.StringData(doubleAsCopy) {
+		t.Error("Clone should not have made a copy")
+	}
+
+	// Clone should not make a copy of s, since s is a static string.
+	sCopy := UserArenaClone(s)
+	if unsafe.StringData(s) != unsafe.StringData(sCopy) {
+		t.Error("Clone should not have made a copy")
+	}
+
+	a.Free()
+}
+
+func TestUserArenaClonePointer(t *testing.T) {
+	a := NewUserArena()
+
+	// Clone should not make a copy of a heap-allocated smallScalar.
+	x := Escape(new(smallScalar))
+	xCopy := UserArenaClone(x)
+	if unsafe.Pointer(x) != unsafe.Pointer(xCopy) {
+		t.Errorf("Clone should not have made a copy: %#v -> %#v", x, xCopy)
+	}
+
+	// Clone should make a copy of an arena-allocated smallScalar.
+	var i any
+	i = (*smallScalar)(nil)
+	a.New(&i)
+	xArena := i.(*smallScalar)
+	xArenaCopy := UserArenaClone(xArena)
+	if unsafe.Pointer(xArena) == unsafe.Pointer(xArenaCopy) {
+		t.Errorf("Clone should have made a copy: %#v -> %#v", xArena, xArenaCopy)
+	}
+	if *xArena != *xArenaCopy {
+		t.Errorf("Clone made an incorrect copy: %#v -> %#v", *xArena, *xArenaCopy)
+	}
+
+	a.Free()
+}
+
+func TestUserArenaCloneSlice(t *testing.T) {
+	a := NewUserArena()
+
+	// A static string (not on heap or arena)
+	var s = "klmnopqrstuv"
+
+	// Create a byte slice in the arena, initialize it with s
+	var b []byte
+	a.Slice(&b, len(s))
+	copy(b, s)
+
+	// Clone should make a copy of b, since it is in the arena.
+	bCopy := UserArenaClone(b)
+	if unsafe.Pointer(&b[0]) == unsafe.Pointer(&bCopy[0]) {
+		t.Errorf("Clone did not make a copy: %#v -> %#v", b, bCopy)
+	}
+	if len(b) != len(bCopy) {
+		t.Errorf("Clone made an incorrect copy (bad length): %d -> %d", len(b), len(bCopy))
+	} else {
+		for i := range b {
+			if b[i] != bCopy[i] {
+				t.Errorf("Clone made an incorrect copy (data at index %d): %d -> %d", i, b[i], bCopy[i])
+			}
+		}
+	}
+
+	// Clone should make a copy of bSub, since bSub is just part of b and so is in the arena.
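+	// (What matters is that bSub's backing memory lies inside an arena
+	// chunk's address range, not that it starts at an allocation base.)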
+ bSub := b[1:3] + bSubCopy := UserArenaClone(bSub) + if unsafe.Pointer(&bSub[0]) == unsafe.Pointer(&bSubCopy[0]) { + t.Errorf("Clone did not make a copy: %#v -> %#v", bSub, bSubCopy) + } + if len(bSub) != len(bSubCopy) { + t.Errorf("Clone made an incorrect copy (bad length): %d -> %d", len(bSub), len(bSubCopy)) + } else { + for i := range bSub { + if bSub[i] != bSubCopy[i] { + t.Errorf("Clone made an incorrect copy (data at index %d): %d -> %d", i, bSub[i], bSubCopy[i]) + } + } + } + + // Clone should not make a copy of bNotArena, since it will not be in an arena. + bNotArena := make([]byte, len(s)) + copy(bNotArena, s) + bNotArenaCopy := UserArenaClone(bNotArena) + if unsafe.Pointer(&bNotArena[0]) != unsafe.Pointer(&bNotArenaCopy[0]) { + t.Error("Clone should not have made a copy") + } + + a.Free() +} + +func TestUserArenaClonePanic(t *testing.T) { + var s string + func() { + x := smallScalar{2} + defer func() { + if v := recover(); v != nil { + s = v.(string) + } + }() + UserArenaClone(x) + }() + if s == "" { + t.Errorf("expected panic from Clone") + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/asan.go b/platform/dbops/binaries/go/go/src/runtime/asan.go new file mode 100644 index 0000000000000000000000000000000000000000..25b83277e6d7a1ef638e136ec06a52cd094c2cf7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asan.go @@ -0,0 +1,67 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build asan + +package runtime + +import ( + "unsafe" +) + +// Public address sanitizer API. +func ASanRead(addr unsafe.Pointer, len int) { + sp := getcallersp() + pc := getcallerpc() + doasanread(addr, uintptr(len), sp, pc) +} + +func ASanWrite(addr unsafe.Pointer, len int) { + sp := getcallersp() + pc := getcallerpc() + doasanwrite(addr, uintptr(len), sp, pc) +} + +// Private interface for the runtime. +const asanenabled = true + +// asan{read,write} are nosplit because they may be called between +// fork and exec, when the stack must not grow. See issue #50391. + +//go:nosplit +func asanread(addr unsafe.Pointer, sz uintptr) { + sp := getcallersp() + pc := getcallerpc() + doasanread(addr, sz, sp, pc) +} + +//go:nosplit +func asanwrite(addr unsafe.Pointer, sz uintptr) { + sp := getcallersp() + pc := getcallerpc() + doasanwrite(addr, sz, sp, pc) +} + +//go:noescape +func doasanread(addr unsafe.Pointer, sz, sp, pc uintptr) + +//go:noescape +func doasanwrite(addr unsafe.Pointer, sz, sp, pc uintptr) + +//go:noescape +func asanunpoison(addr unsafe.Pointer, sz uintptr) + +//go:noescape +func asanpoison(addr unsafe.Pointer, sz uintptr) + +//go:noescape +func asanregisterglobals(addr unsafe.Pointer, n uintptr) + +// These are called from asan_GOARCH.s +// +//go:cgo_import_static __asan_read_go +//go:cgo_import_static __asan_write_go +//go:cgo_import_static __asan_unpoison_go +//go:cgo_import_static __asan_poison_go +//go:cgo_import_static __asan_register_globals_go diff --git a/platform/dbops/binaries/go/go/src/runtime/asan0.go b/platform/dbops/binaries/go/go/src/runtime/asan0.go new file mode 100644 index 0000000000000000000000000000000000000000..bcfd96f1ab9160fc91dc90b673a1b66d2e511d13 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asan0.go @@ -0,0 +1,23 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !asan + +// Dummy ASan support API, used when not built with -asan. + +package runtime + +import ( + "unsafe" +) + +const asanenabled = false + +// Because asanenabled is false, none of these functions should be called. + +func asanread(addr unsafe.Pointer, sz uintptr) { throw("asan") } +func asanwrite(addr unsafe.Pointer, sz uintptr) { throw("asan") } +func asanunpoison(addr unsafe.Pointer, sz uintptr) { throw("asan") } +func asanpoison(addr unsafe.Pointer, sz uintptr) { throw("asan") } +func asanregisterglobals(addr unsafe.Pointer, sz uintptr) { throw("asan") } diff --git a/platform/dbops/binaries/go/go/src/runtime/asan_amd64.s b/platform/dbops/binaries/go/go/src/runtime/asan_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..195faf4e6da1b4cc7bece42955ecd3244ff9d800 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asan_amd64.s @@ -0,0 +1,91 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build asan + +#include "go_asm.h" +#include "go_tls.h" +#include "funcdata.h" +#include "textflag.h" + +// This is like race_amd64.s, but for the asan calls. +// See race_amd64.s for detailed comments. + +#ifdef GOOS_windows +#define RARG0 CX +#define RARG1 DX +#define RARG2 R8 +#define RARG3 R9 +#else +#define RARG0 DI +#define RARG1 SI +#define RARG2 DX +#define RARG3 CX +#endif + +// Called from instrumented code. +// func runtime·doasanread(addr unsafe.Pointer, sz, sp, pc uintptr) +TEXT runtime·doasanread(SB), NOSPLIT, $0-32 + MOVQ addr+0(FP), RARG0 + MOVQ sz+8(FP), RARG1 + MOVQ sp+16(FP), RARG2 + MOVQ pc+24(FP), RARG3 + // void __asan_read_go(void *addr, uintptr_t sz, void *sp, void *pc); + MOVQ $__asan_read_go(SB), AX + JMP asancall<>(SB) + +// func runtime·doasanwrite(addr unsafe.Pointer, sz, sp, pc uintptr) +TEXT runtime·doasanwrite(SB), NOSPLIT, $0-32 + MOVQ addr+0(FP), RARG0 + MOVQ sz+8(FP), RARG1 + MOVQ sp+16(FP), RARG2 + MOVQ pc+24(FP), RARG3 + // void __asan_write_go(void *addr, uintptr_t sz, void *sp, void *pc); + MOVQ $__asan_write_go(SB), AX + JMP asancall<>(SB) + +// func runtime·asanunpoison(addr unsafe.Pointer, sz uintptr) +TEXT runtime·asanunpoison(SB), NOSPLIT, $0-16 + MOVQ addr+0(FP), RARG0 + MOVQ sz+8(FP), RARG1 + // void __asan_unpoison_go(void *addr, uintptr_t sz); + MOVQ $__asan_unpoison_go(SB), AX + JMP asancall<>(SB) + +// func runtime·asanpoison(addr unsafe.Pointer, sz uintptr) +TEXT runtime·asanpoison(SB), NOSPLIT, $0-16 + MOVQ addr+0(FP), RARG0 + MOVQ sz+8(FP), RARG1 + // void __asan_poison_go(void *addr, uintptr_t sz); + MOVQ $__asan_poison_go(SB), AX + JMP asancall<>(SB) + +// func runtime·asanregisterglobals(addr unsafe.Pointer, n uintptr) +TEXT runtime·asanregisterglobals(SB), NOSPLIT, $0-16 + MOVQ addr+0(FP), RARG0 + MOVQ n+8(FP), RARG1 + // void __asan_register_globals_go(void *addr, uintptr_t n); + MOVQ $__asan_register_globals_go(SB), AX + JMP asancall<>(SB) + +// Switches SP to g0 stack and calls (AX). Arguments already set. +TEXT asancall<>(SB), NOSPLIT, $0-0 + get_tls(R12) + MOVQ g(R12), R14 + MOVQ SP, R12 // callee-saved, preserved across the CALL + CMPQ R14, $0 + JE call // no g; still on a system stack + + MOVQ g_m(R14), R13 + // Switch to g0 stack. 
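+	// g0's saved scheduler SP marks the system stack, which gives the
+	// C sanitizer hooks a large, fixed stack to run on.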
+ MOVQ m_g0(R13), R10 + CMPQ R10, R14 + JE call // already on g0 + + MOVQ (g_sched+gobuf_sp)(R10), SP +call: + ANDQ $~15, SP // alignment for gcc ABI + CALL AX + MOVQ R12, SP + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/asan_arm64.s b/platform/dbops/binaries/go/go/src/runtime/asan_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..dfa3f81bf23f8e6827bc55d49188b68d8ad6a0e3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asan_arm64.s @@ -0,0 +1,76 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build asan + +#include "go_asm.h" +#include "textflag.h" + +#define RARG0 R0 +#define RARG1 R1 +#define RARG2 R2 +#define RARG3 R3 +#define FARG R4 + +// Called from instrumented code. +// func runtime·doasanread(addr unsafe.Pointer, sz, sp, pc uintptr) +TEXT runtime·doasanread(SB), NOSPLIT, $0-32 + MOVD addr+0(FP), RARG0 + MOVD sz+8(FP), RARG1 + MOVD sp+16(FP), RARG2 + MOVD pc+24(FP), RARG3 + // void __asan_read_go(void *addr, uintptr_t sz, void *sp, void *pc); + MOVD $__asan_read_go(SB), FARG + JMP asancall<>(SB) + +// func runtime·doasanwrite(addr unsafe.Pointer, sz, sp, pc uintptr) +TEXT runtime·doasanwrite(SB), NOSPLIT, $0-32 + MOVD addr+0(FP), RARG0 + MOVD sz+8(FP), RARG1 + MOVD sp+16(FP), RARG2 + MOVD pc+24(FP), RARG3 + // void __asan_write_go(void *addr, uintptr_t sz, void *sp, void *pc); + MOVD $__asan_write_go(SB), FARG + JMP asancall<>(SB) + +// func runtime·asanunpoison(addr unsafe.Pointer, sz uintptr) +TEXT runtime·asanunpoison(SB), NOSPLIT, $0-16 + MOVD addr+0(FP), RARG0 + MOVD sz+8(FP), RARG1 + // void __asan_unpoison_go(void *addr, uintptr_t sz); + MOVD $__asan_unpoison_go(SB), FARG + JMP asancall<>(SB) + +// func runtime·asanpoison(addr unsafe.Pointer, sz uintptr) +TEXT runtime·asanpoison(SB), NOSPLIT, $0-16 + MOVD addr+0(FP), RARG0 + MOVD sz+8(FP), RARG1 + // void __asan_poison_go(void *addr, uintptr_t sz); + MOVD $__asan_poison_go(SB), FARG + JMP asancall<>(SB) + +// func runtime·asanregisterglobals(addr unsafe.Pointer, n uintptr) +TEXT runtime·asanregisterglobals(SB), NOSPLIT, $0-16 + MOVD addr+0(FP), RARG0 + MOVD n+8(FP), RARG1 + // void __asan_register_globals_go(void *addr, uintptr_t n); + MOVD $__asan_register_globals_go(SB), FARG + JMP asancall<>(SB) + +// Switches SP to g0 stack and calls (FARG). Arguments already set. +TEXT asancall<>(SB), NOSPLIT, $0-0 + MOVD RSP, R19 // callee-saved + CBZ g, g0stack // no g, still on a system stack + MOVD g_m(g), R10 + MOVD m_g0(R10), R11 + CMP R11, g + BEQ g0stack + + MOVD (g_sched+gobuf_sp)(R11), R5 + MOVD R5, RSP + +g0stack: + BL (FARG) + MOVD R19, RSP + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/asan_loong64.s b/platform/dbops/binaries/go/go/src/runtime/asan_loong64.s new file mode 100644 index 0000000000000000000000000000000000000000..0034a316876ee39787e213d1efc5935eb238ba4a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asan_loong64.s @@ -0,0 +1,75 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build asan + +#include "go_asm.h" +#include "textflag.h" + +#define RARG0 R4 +#define RARG1 R5 +#define RARG2 R6 +#define RARG3 R7 +#define FARG R8 + +// Called from instrumented code. 
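+// RARG0-RARG3 above are R4-R7, the first integer argument registers in
+// the LoongArch64 C calling convention, so each wrapper can stage its Go
+// frame arguments and tail-jump straight into the C-side hook.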
+// func runtime·doasanread(addr unsafe.Pointer, sz, sp, pc uintptr) +TEXT runtime·doasanread(SB), NOSPLIT, $0-32 + MOVV addr+0(FP), RARG0 + MOVV sz+8(FP), RARG1 + MOVV sp+16(FP), RARG2 + MOVV pc+24(FP), RARG3 + // void __asan_read_go(void *addr, uintptr_t sz, void *sp, void *pc); + MOVV $__asan_read_go(SB), FARG + JMP asancall<>(SB) + +// func runtime·doasanwrite(addr unsafe.Pointer, sz, sp, pc uintptr) +TEXT runtime·doasanwrite(SB), NOSPLIT, $0-32 + MOVV addr+0(FP), RARG0 + MOVV sz+8(FP), RARG1 + MOVV sp+16(FP), RARG2 + MOVV pc+24(FP), RARG3 + // void __asan_write_go(void *addr, uintptr_t sz, void *sp, void *pc); + MOVV $__asan_write_go(SB), FARG + JMP asancall<>(SB) + +// func runtime·asanunpoison(addr unsafe.Pointer, sz uintptr) +TEXT runtime·asanunpoison(SB), NOSPLIT, $0-16 + MOVV addr+0(FP), RARG0 + MOVV sz+8(FP), RARG1 + // void __asan_unpoison_go(void *addr, uintptr_t sz); + MOVV $__asan_unpoison_go(SB), FARG + JMP asancall<>(SB) + +// func runtime·asanpoison(addr unsafe.Pointer, sz uintptr) +TEXT runtime·asanpoison(SB), NOSPLIT, $0-16 + MOVV addr+0(FP), RARG0 + MOVV sz+8(FP), RARG1 + // void __asan_poison_go(void *addr, uintptr_t sz); + MOVV $__asan_poison_go(SB), FARG + JMP asancall<>(SB) + +// func runtime·asanregisterglobals(addr unsafe.Pointer, n uintptr) +TEXT runtime·asanregisterglobals(SB), NOSPLIT, $0-16 + MOVV addr+0(FP), RARG0 + MOVV n+8(FP), RARG1 + // void __asan_register_globals_go(void *addr, uintptr_t n); + MOVV $__asan_register_globals_go(SB), FARG + JMP asancall<>(SB) + +// Switches SP to g0 stack and calls (FARG). Arguments already set. +TEXT asancall<>(SB), NOSPLIT, $0-0 + MOVV R3, R23 // callee-saved + BEQ g, g0stack // no g, still on a system stack + MOVV g_m(g), R14 + MOVV m_g0(R14), R15 + BEQ R15, g, g0stack + + MOVV (g_sched+gobuf_sp)(R15), R9 + MOVV R9, R3 + +g0stack: + JAL (FARG) + MOVV R23, R3 + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/asan_ppc64le.s b/platform/dbops/binaries/go/go/src/runtime/asan_ppc64le.s new file mode 100644 index 0000000000000000000000000000000000000000..d13301a1b1d249a51863120adb398b9a97ec942c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asan_ppc64le.s @@ -0,0 +1,87 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build asan + +#include "go_asm.h" +#include "textflag.h" + +#define RARG0 R3 +#define RARG1 R4 +#define RARG2 R5 +#define RARG3 R6 +#define FARG R12 + +// Called from instrumented code. 
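+// On ppc64le the callee's address is staged in R12 (FARG) because the
+// ELFv2 ABI expects R12 to hold the entry point for indirect calls;
+// asancall moves it into CTR before branching.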
+// func runtime·doasanread(addr unsafe.Pointer, sz, sp, pc uintptr) +TEXT runtime·doasanread(SB),NOSPLIT|NOFRAME,$0-32 + MOVD addr+0(FP), RARG0 + MOVD sz+8(FP), RARG1 + MOVD sp+16(FP), RARG2 + MOVD pc+24(FP), RARG3 + // void __asan_read_go(void *addr, uintptr_t sz, void *sp, void *pc); + MOVD $__asan_read_go(SB), FARG + BR asancall<>(SB) + +// func runtime·doasanwrite(addr unsafe.Pointer, sz, sp, pc uintptr) +TEXT runtime·doasanwrite(SB),NOSPLIT|NOFRAME,$0-32 + MOVD addr+0(FP), RARG0 + MOVD sz+8(FP), RARG1 + MOVD sp+16(FP), RARG2 + MOVD pc+24(FP), RARG3 + // void __asan_write_go(void *addr, uintptr_t sz, void *sp, void *pc); + MOVD $__asan_write_go(SB), FARG + BR asancall<>(SB) + +// func runtime·asanunpoison(addr unsafe.Pointer, sz uintptr) +TEXT runtime·asanunpoison(SB),NOSPLIT|NOFRAME,$0-16 + MOVD addr+0(FP), RARG0 + MOVD sz+8(FP), RARG1 + // void __asan_unpoison_go(void *addr, uintptr_t sz); + MOVD $__asan_unpoison_go(SB), FARG + BR asancall<>(SB) + +// func runtime·asanpoison(addr unsafe.Pointer, sz uintptr) +TEXT runtime·asanpoison(SB),NOSPLIT|NOFRAME,$0-16 + MOVD addr+0(FP), RARG0 + MOVD sz+8(FP), RARG1 + // void __asan_poison_go(void *addr, uintptr_t sz); + MOVD $__asan_poison_go(SB), FARG + BR asancall<>(SB) + +// func runtime·asanregisterglobals(addr unsafe.Pointer, n uintptr) +TEXT runtime·asanregisterglobals(SB),NOSPLIT|NOFRAME,$0-16 + MOVD addr+0(FP), RARG0 + MOVD n+8(FP), RARG1 + // void __asan_register_globals_go(void *addr, uintptr_t n); + MOVD $__asan_register_globals_go(SB), FARG + BR asancall<>(SB) + +// Switches SP to g0 stack and calls (FARG). Arguments already set. +TEXT asancall<>(SB), NOSPLIT, $0-0 + // LR saved in generated prologue + // Get info from the current goroutine + MOVD runtime·tls_g(SB), R10 // g offset in TLS + MOVD 0(R10), g + MOVD g_m(g), R7 // m for g + MOVD R1, R16 // callee-saved, preserved across C call + MOVD m_g0(R7), R10 // g0 for m + CMP R10, g // same g0? + BEQ call // already on g0 + MOVD (g_sched+gobuf_sp)(R10), R1 // switch R1 +call: + // prepare frame for C ABI + SUB $32, R1 // create frame for callee saving LR, CR, R2 etc. + RLDCR $0, R1, $~15, R1 // align SP to 16 bytes + MOVD FARG, CTR // address of function to be called + MOVD R0, 0(R1) // clear back chain pointer + BL (CTR) + MOVD $0, R0 // C code can clobber R0 set it back to 0 + MOVD R16, R1 // restore R1; + MOVD runtime·tls_g(SB), R10 // find correct g + MOVD 0(R10), g + RET + +// tls_g, g value for each thread in TLS +GLOBL runtime·tls_g+0(SB), TLSBSS+DUPOK, $8 diff --git a/platform/dbops/binaries/go/go/src/runtime/asan_riscv64.s b/platform/dbops/binaries/go/go/src/runtime/asan_riscv64.s new file mode 100644 index 0000000000000000000000000000000000000000..6fcd94d4b1fcf083eccb8ad405c010601a69345d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asan_riscv64.s @@ -0,0 +1,68 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build asan + +#include "go_asm.h" +#include "textflag.h" + +// Called from instrumented code. 
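+// Arguments go in X10-X13 (a0-a3 in the RISC-V calling convention) and
+// the hook's address rides in X14; each wrapper then tail-jumps to
+// asancall so the Go frame stays out of the way.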
+// func runtime·doasanread(addr unsafe.Pointer, sz, sp, pc uintptr) +TEXT runtime·doasanread(SB), NOSPLIT, $0-32 + MOV addr+0(FP), X10 + MOV sz+8(FP), X11 + MOV sp+16(FP), X12 + MOV pc+24(FP), X13 + // void __asan_read_go(void *addr, uintptr_t sz); + MOV $__asan_read_go(SB), X14 + JMP asancall<>(SB) + +// func runtime·doasanwrite(addr unsafe.Pointer, sz, sp, pc uintptr) +TEXT runtime·doasanwrite(SB), NOSPLIT, $0-32 + MOV addr+0(FP), X10 + MOV sz+8(FP), X11 + MOV sp+16(FP), X12 + MOV pc+24(FP), X13 + // void __asan_write_go(void *addr, uintptr_t sz); + MOV $__asan_write_go(SB), X14 + JMP asancall<>(SB) + +// func runtime·asanunpoison(addr unsafe.Pointer, sz uintptr) +TEXT runtime·asanunpoison(SB), NOSPLIT, $0-16 + MOV addr+0(FP), X10 + MOV sz+8(FP), X11 + // void __asan_unpoison_go(void *addr, uintptr_t sz); + MOV $__asan_unpoison_go(SB), X14 + JMP asancall<>(SB) + +// func runtime·asanpoison(addr unsafe.Pointer, sz uintptr) +TEXT runtime·asanpoison(SB), NOSPLIT, $0-16 + MOV addr+0(FP), X10 + MOV sz+8(FP), X11 + // void __asan_poison_go(void *addr, uintptr_t sz); + MOV $__asan_poison_go(SB), X14 + JMP asancall<>(SB) + +// func runtime·asanregisterglobals(addr unsafe.Pointer, n uintptr) +TEXT runtime·asanregisterglobals(SB), NOSPLIT, $0-16 + MOV addr+0(FP), X10 + MOV n+8(FP), X11 + // void __asan_register_globals_go(void *addr, uintptr_t n); + MOV $__asan_register_globals_go(SB), X14 + JMP asancall<>(SB) + +// Switches SP to g0 stack and calls (X14). Arguments already set. +TEXT asancall<>(SB), NOSPLIT, $0-0 + MOV X2, X8 // callee-saved + BEQZ g, g0stack // no g, still on a system stack + MOV g_m(g), X21 + MOV m_g0(X21), X21 + BEQ X21, g, g0stack + + MOV (g_sched+gobuf_sp)(X21), X2 + +g0stack: + JALR RA, X14 + MOV X8, X2 + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/asm.s b/platform/dbops/binaries/go/go/src/runtime/asm.s new file mode 100644 index 0000000000000000000000000000000000000000..24cd0c95db64ad21bdd4298c4fcdc428e60da954 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asm.s @@ -0,0 +1,34 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +#ifndef GOARCH_amd64 +TEXT ·sigpanic0(SB),NOSPLIT,$0-0 + JMP ·sigpanic(SB) +#endif + +// See map.go comment on the need for this routine. +TEXT ·mapinitnoop(SB),NOSPLIT,$0-0 + RET + +#ifndef GOARCH_amd64 +#ifndef GOARCH_arm64 +#ifndef GOARCH_mips64 +#ifndef GOARCH_mips64le +#ifndef GOARCH_ppc64 +#ifndef GOARCH_ppc64le +#ifndef GOARCH_riscv64 +#ifndef GOARCH_wasm +// stub to appease shared build mode. +TEXT ·switchToCrashStack0(SB),NOSPLIT,$0-0 + UNDEF +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif diff --git a/platform/dbops/binaries/go/go/src/runtime/asm_386.s b/platform/dbops/binaries/go/go/src/runtime/asm_386.s new file mode 100644 index 0000000000000000000000000000000000000000..67ffc243539e984a11aa1cc3d0bea6e3d2b7c69f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asm_386.s @@ -0,0 +1,1653 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "go_tls.h" +#include "funcdata.h" +#include "textflag.h" + +// _rt0_386 is common startup code for most 386 systems when using +// internal linking. This is the entry point for the program from the +// kernel for an ordinary -buildmode=exe program. 
The stack holds the +// number of arguments and the C-style argv. +TEXT _rt0_386(SB),NOSPLIT,$8 + MOVL 8(SP), AX // argc + LEAL 12(SP), BX // argv + MOVL AX, 0(SP) + MOVL BX, 4(SP) + JMP runtime·rt0_go(SB) + +// _rt0_386_lib is common startup code for most 386 systems when +// using -buildmode=c-archive or -buildmode=c-shared. The linker will +// arrange to invoke this function as a global constructor (for +// c-archive) or when the shared library is loaded (for c-shared). +// We expect argc and argv to be passed on the stack following the +// usual C ABI. +TEXT _rt0_386_lib(SB),NOSPLIT,$0 + PUSHL BP + MOVL SP, BP + PUSHL BX + PUSHL SI + PUSHL DI + + MOVL 8(BP), AX + MOVL AX, _rt0_386_lib_argc<>(SB) + MOVL 12(BP), AX + MOVL AX, _rt0_386_lib_argv<>(SB) + + // Synchronous initialization. + CALL runtime·libpreinit(SB) + + SUBL $8, SP + + // Create a new thread to do the runtime initialization. + MOVL _cgo_sys_thread_create(SB), AX + TESTL AX, AX + JZ nocgo + + // Align stack to call C function. + // We moved SP to BP above, but BP was clobbered by the libpreinit call. + MOVL SP, BP + ANDL $~15, SP + + MOVL $_rt0_386_lib_go(SB), BX + MOVL BX, 0(SP) + MOVL $0, 4(SP) + + CALL AX + + MOVL BP, SP + + JMP restore + +nocgo: + MOVL $0x800000, 0(SP) // stacksize = 8192KB + MOVL $_rt0_386_lib_go(SB), AX + MOVL AX, 4(SP) // fn + CALL runtime·newosproc0(SB) + +restore: + ADDL $8, SP + POPL DI + POPL SI + POPL BX + POPL BP + RET + +// _rt0_386_lib_go initializes the Go runtime. +// This is started in a separate thread by _rt0_386_lib. +TEXT _rt0_386_lib_go(SB),NOSPLIT,$8 + MOVL _rt0_386_lib_argc<>(SB), AX + MOVL AX, 0(SP) + MOVL _rt0_386_lib_argv<>(SB), AX + MOVL AX, 4(SP) + JMP runtime·rt0_go(SB) + +DATA _rt0_386_lib_argc<>(SB)/4, $0 +GLOBL _rt0_386_lib_argc<>(SB),NOPTR, $4 +DATA _rt0_386_lib_argv<>(SB)/4, $0 +GLOBL _rt0_386_lib_argv<>(SB),NOPTR, $4 + +TEXT runtime·rt0_go(SB),NOSPLIT|NOFRAME|TOPFRAME,$0 + // Copy arguments forward on an even stack. + // Users of this function jump to it, they don't call it. + MOVL 0(SP), AX + MOVL 4(SP), BX + SUBL $128, SP // plenty of scratch + ANDL $~15, SP + MOVL AX, 120(SP) // save argc, argv away + MOVL BX, 124(SP) + + // set default stack bounds. + // _cgo_init may update stackguard. + MOVL $runtime·g0(SB), BP + LEAL (-64*1024+104)(SP), BX + MOVL BX, g_stackguard0(BP) + MOVL BX, g_stackguard1(BP) + MOVL BX, (g_stack+stack_lo)(BP) + MOVL SP, (g_stack+stack_hi)(BP) + + // find out information about the processor we're on + // first see if CPUID instruction is supported. + PUSHFL + PUSHFL + XORL $(1<<21), 0(SP) // flip ID bit + POPFL + PUSHFL + POPL AX + XORL 0(SP), AX + POPFL // restore EFLAGS + TESTL $(1<<21), AX + JNE has_cpuid + +bad_proc: // show that the program requires MMX. + MOVL $2, 0(SP) + MOVL $bad_proc_msg<>(SB), 4(SP) + MOVL $0x3d, 8(SP) + CALL runtime·write(SB) + MOVL $1, 0(SP) + CALL runtime·exit(SB) + CALL runtime·abort(SB) + +has_cpuid: + MOVL $0, AX + CPUID + MOVL AX, SI + CMPL AX, $0 + JE nocpuinfo + + CMPL BX, $0x756E6547 // "Genu" + JNE notintel + CMPL DX, $0x49656E69 // "ineI" + JNE notintel + CMPL CX, $0x6C65746E // "ntel" + JNE notintel + MOVB $1, runtime·isIntel(SB) +notintel: + + // Load EAX=1 cpuid flags + MOVL $1, AX + CPUID + MOVL CX, DI // Move to global variable clobbers CX when generating PIC + MOVL AX, runtime·processorVersionInfo(SB) + + // Check for MMX support + TESTL $(1<<23), DX // MMX + JZ bad_proc + +nocpuinfo: + // if there is an _cgo_init, call it to let it + // initialize and to set up GS. 
if not, + // we set up GS ourselves. + MOVL _cgo_init(SB), AX + TESTL AX, AX + JZ needtls +#ifdef GOOS_android + // arg 4: TLS base, stored in slot 0 (Android's TLS_SLOT_SELF). + // Compensate for tls_g (+8). + MOVL -8(TLS), BX + MOVL BX, 12(SP) + MOVL $runtime·tls_g(SB), 8(SP) // arg 3: &tls_g +#else + MOVL $0, BX + MOVL BX, 12(SP) // arg 4: not used when using platform's TLS +#ifdef GOOS_windows + MOVL $runtime·tls_g(SB), 8(SP) // arg 3: &tls_g +#else + MOVL BX, 8(SP) // arg 3: not used when using platform's TLS +#endif +#endif + MOVL $setg_gcc<>(SB), BX + MOVL BX, 4(SP) // arg 2: setg_gcc + MOVL BP, 0(SP) // arg 1: g0 + CALL AX + + // update stackguard after _cgo_init + MOVL $runtime·g0(SB), CX + MOVL (g_stack+stack_lo)(CX), AX + ADDL $const_stackGuard, AX + MOVL AX, g_stackguard0(CX) + MOVL AX, g_stackguard1(CX) + +#ifndef GOOS_windows + // skip runtime·ldt0setup(SB) and tls test after _cgo_init for non-windows + JMP ok +#endif +needtls: +#ifdef GOOS_openbsd + // skip runtime·ldt0setup(SB) and tls test on OpenBSD in all cases + JMP ok +#endif +#ifdef GOOS_plan9 + // skip runtime·ldt0setup(SB) and tls test on Plan 9 in all cases + JMP ok +#endif + + // set up %gs + CALL ldt0setup<>(SB) + + // store through it, to make sure it works + get_tls(BX) + MOVL $0x123, g(BX) + MOVL runtime·m0+m_tls(SB), AX + CMPL AX, $0x123 + JEQ ok + MOVL AX, 0 // abort +ok: + // set up m and g "registers" + get_tls(BX) + LEAL runtime·g0(SB), DX + MOVL DX, g(BX) + LEAL runtime·m0(SB), AX + + // save m->g0 = g0 + MOVL DX, m_g0(AX) + // save g0->m = m0 + MOVL AX, g_m(DX) + + CALL runtime·emptyfunc(SB) // fault if stack check is wrong + + // convention is D is always cleared + CLD + + CALL runtime·check(SB) + + // saved argc, argv + MOVL 120(SP), AX + MOVL AX, 0(SP) + MOVL 124(SP), AX + MOVL AX, 4(SP) + CALL runtime·args(SB) + CALL runtime·osinit(SB) + CALL runtime·schedinit(SB) + + // create a new goroutine to start program + PUSHL $runtime·mainPC(SB) // entry + CALL runtime·newproc(SB) + POPL AX + + // start this M + CALL runtime·mstart(SB) + + CALL runtime·abort(SB) + RET + +DATA bad_proc_msg<>+0x00(SB)/61, $"This program can only be run on processors with MMX support.\n" +GLOBL bad_proc_msg<>(SB), RODATA, $61 + +DATA runtime·mainPC+0(SB)/4,$runtime·main(SB) +GLOBL runtime·mainPC(SB),RODATA,$4 + +TEXT runtime·breakpoint(SB),NOSPLIT,$0-0 + INT $3 + RET + +TEXT runtime·asminit(SB),NOSPLIT,$0-0 + // Linux and MinGW start the FPU in extended double precision. + // Other operating systems use double precision. + // Change to double precision to match them, + // and to match other hardware that only has double. + FLDCW runtime·controlWord64(SB) + RET + +TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0 + CALL runtime·mstart0(SB) + RET // not reached + +/* + * go-routine + */ + +// void gogo(Gobuf*) +// restore state from Gobuf; longjmp +TEXT runtime·gogo(SB), NOSPLIT, $0-4 + MOVL buf+0(FP), BX // gobuf + MOVL gobuf_g(BX), DX + MOVL 0(DX), CX // make sure g != nil + JMP gogo<>(SB) + +TEXT gogo<>(SB), NOSPLIT, $0 + get_tls(CX) + MOVL DX, g(CX) + MOVL gobuf_sp(BX), SP // restore SP + MOVL gobuf_ret(BX), AX + MOVL gobuf_ctxt(BX), DX + MOVL $0, gobuf_sp(BX) // clear to help garbage collector + MOVL $0, gobuf_ret(BX) + MOVL $0, gobuf_ctxt(BX) + MOVL gobuf_pc(BX), BX + JMP BX + +// func mcall(fn func(*g)) +// Switch to m->g0's stack, call fn(g). +// Fn must never return. It should gogo(&g->sched) +// to keep running g. 
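+//
+// A minimal sketch of the calling pattern (using gopark's park_m as the
+// canonical example; any fn must follow the same shape):
+//
+//	mcall(park_m)	// park_m runs on g0, parks gp, then calls
+//	            	// schedule() and never returns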
+TEXT runtime·mcall(SB), NOSPLIT, $0-4 + MOVL fn+0(FP), DI + + get_tls(DX) + MOVL g(DX), AX // save state in g->sched + MOVL 0(SP), BX // caller's PC + MOVL BX, (g_sched+gobuf_pc)(AX) + LEAL fn+0(FP), BX // caller's SP + MOVL BX, (g_sched+gobuf_sp)(AX) + + // switch to m->g0 & its stack, call fn + MOVL g(DX), BX + MOVL g_m(BX), BX + MOVL m_g0(BX), SI + CMPL SI, AX // if g == m->g0 call badmcall + JNE 3(PC) + MOVL $runtime·badmcall(SB), AX + JMP AX + MOVL SI, g(DX) // g = m->g0 + MOVL (g_sched+gobuf_sp)(SI), SP // sp = m->g0->sched.sp + PUSHL AX + MOVL DI, DX + MOVL 0(DI), DI + CALL DI + POPL AX + MOVL $runtime·badmcall2(SB), AX + JMP AX + RET + +// systemstack_switch is a dummy routine that systemstack leaves at the bottom +// of the G stack. We need to distinguish the routine that +// lives at the bottom of the G stack from the one that lives +// at the top of the system stack because the one at the top of +// the system stack terminates the stack walk (see topofstack()). +TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0 + RET + +// func systemstack(fn func()) +TEXT runtime·systemstack(SB), NOSPLIT, $0-4 + MOVL fn+0(FP), DI // DI = fn + get_tls(CX) + MOVL g(CX), AX // AX = g + MOVL g_m(AX), BX // BX = m + + CMPL AX, m_gsignal(BX) + JEQ noswitch + + MOVL m_g0(BX), DX // DX = g0 + CMPL AX, DX + JEQ noswitch + + CMPL AX, m_curg(BX) + JNE bad + + // switch stacks + // save our state in g->sched. Pretend to + // be systemstack_switch if the G stack is scanned. + CALL gosave_systemstack_switch<>(SB) + + // switch to g0 + get_tls(CX) + MOVL DX, g(CX) + MOVL (g_sched+gobuf_sp)(DX), BX + MOVL BX, SP + + // call target function + MOVL DI, DX + MOVL 0(DI), DI + CALL DI + + // switch back to g + get_tls(CX) + MOVL g(CX), AX + MOVL g_m(AX), BX + MOVL m_curg(BX), AX + MOVL AX, g(CX) + MOVL (g_sched+gobuf_sp)(AX), SP + MOVL $0, (g_sched+gobuf_sp)(AX) + RET + +noswitch: + // already on system stack; tail call the function + // Using a tail call here cleans up tracebacks since we won't stop + // at an intermediate systemstack. + MOVL DI, DX + MOVL 0(DI), DI + JMP DI + +bad: + // Bad: g is not gsignal, not g0, not curg. What is it? + // Hide call from linker nosplit analysis. + MOVL $runtime·badsystemstack(SB), AX + CALL AX + INT $3 + +/* + * support for morestack + */ + +// Called during function prolog when more stack is needed. +// +// The traceback routines see morestack on a g0 as being +// the top of a stack (for example, morestack calling newstack +// calling the scheduler calling newm calling gc), so we must +// record an argument size. For that purpose, it has no arguments. +TEXT runtime·morestack(SB),NOSPLIT,$0-0 + // Cannot grow scheduler stack (m->g0). + get_tls(CX) + MOVL g(CX), BX + MOVL g_m(BX), BX + MOVL m_g0(BX), SI + CMPL g(CX), SI + JNE 3(PC) + CALL runtime·badmorestackg0(SB) + CALL runtime·abort(SB) + + // Cannot grow signal stack. + MOVL m_gsignal(BX), SI + CMPL g(CX), SI + JNE 3(PC) + CALL runtime·badmorestackgsignal(SB) + CALL runtime·abort(SB) + + // Called from f. + // Set m->morebuf to f's caller. + NOP SP // tell vet SP changed - stop checking offsets + MOVL 4(SP), DI // f's caller's PC + MOVL DI, (m_morebuf+gobuf_pc)(BX) + LEAL 8(SP), CX // f's caller's SP + MOVL CX, (m_morebuf+gobuf_sp)(BX) + get_tls(CX) + MOVL g(CX), SI + MOVL SI, (m_morebuf+gobuf_g)(BX) + + // Set g->sched to context in f. 
+ MOVL 0(SP), AX // f's PC + MOVL AX, (g_sched+gobuf_pc)(SI) + LEAL 4(SP), AX // f's SP + MOVL AX, (g_sched+gobuf_sp)(SI) + MOVL DX, (g_sched+gobuf_ctxt)(SI) + + // Call newstack on m->g0's stack. + MOVL m_g0(BX), BP + MOVL BP, g(CX) + MOVL (g_sched+gobuf_sp)(BP), AX + MOVL -4(AX), BX // fault if CALL would, before smashing SP + MOVL AX, SP + CALL runtime·newstack(SB) + CALL runtime·abort(SB) // crash if newstack returns + RET + +TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0 + MOVL $0, DX + JMP runtime·morestack(SB) + +// reflectcall: call a function with the given argument list +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). +// we don't have variable-sized frames, so we use a small number +// of constant-sized-frame functions to encode a few bits of size in the pc. +// Caution: ugly multiline assembly macros in your future! + +#define DISPATCH(NAME,MAXSIZE) \ + CMPL CX, $MAXSIZE; \ + JA 3(PC); \ + MOVL $NAME(SB), AX; \ + JMP AX +// Note: can't just "JMP NAME(SB)" - bad inlining results. + +TEXT ·reflectcall(SB), NOSPLIT, $0-28 + MOVL frameSize+20(FP), CX + DISPATCH(runtime·call16, 16) + DISPATCH(runtime·call32, 32) + DISPATCH(runtime·call64, 64) + DISPATCH(runtime·call128, 128) + DISPATCH(runtime·call256, 256) + DISPATCH(runtime·call512, 512) + DISPATCH(runtime·call1024, 1024) + DISPATCH(runtime·call2048, 2048) + DISPATCH(runtime·call4096, 4096) + DISPATCH(runtime·call8192, 8192) + DISPATCH(runtime·call16384, 16384) + DISPATCH(runtime·call32768, 32768) + DISPATCH(runtime·call65536, 65536) + DISPATCH(runtime·call131072, 131072) + DISPATCH(runtime·call262144, 262144) + DISPATCH(runtime·call524288, 524288) + DISPATCH(runtime·call1048576, 1048576) + DISPATCH(runtime·call2097152, 2097152) + DISPATCH(runtime·call4194304, 4194304) + DISPATCH(runtime·call8388608, 8388608) + DISPATCH(runtime·call16777216, 16777216) + DISPATCH(runtime·call33554432, 33554432) + DISPATCH(runtime·call67108864, 67108864) + DISPATCH(runtime·call134217728, 134217728) + DISPATCH(runtime·call268435456, 268435456) + DISPATCH(runtime·call536870912, 536870912) + DISPATCH(runtime·call1073741824, 1073741824) + MOVL $runtime·badreflectcall(SB), AX + JMP AX + +#define CALLFN(NAME,MAXSIZE) \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-28; \ + NO_LOCAL_POINTERS; \ + /* copy arguments to stack */ \ + MOVL stackArgs+8(FP), SI; \ + MOVL stackArgsSize+12(FP), CX; \ + MOVL SP, DI; \ + REP;MOVSB; \ + /* call function */ \ + MOVL f+4(FP), DX; \ + MOVL (DX), AX; \ + PCDATA $PCDATA_StackMapIndex, $0; \ + CALL AX; \ + /* copy return values back */ \ + MOVL stackArgsType+0(FP), DX; \ + MOVL stackArgs+8(FP), DI; \ + MOVL stackArgsSize+12(FP), CX; \ + MOVL stackRetOffset+16(FP), BX; \ + MOVL SP, SI; \ + ADDL BX, DI; \ + ADDL BX, SI; \ + SUBL BX, CX; \ + CALL callRet<>(SB); \ + RET + +// callRet copies return values back at the end of call*. This is a +// separate function so it can allocate stack space for the arguments +// to reflectcallmove. It does not follow the Go ABI; it expects its +// arguments in registers. 
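+// Concretely: DX is the return type, DI the destination in the caller's
+// frame, SI the source on the stack, and CX the byte count, matching the
+// reflectcallmove(typ, dst, src, size, regs) call it makes.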
+TEXT callRet<>(SB), NOSPLIT, $20-0 + MOVL DX, 0(SP) + MOVL DI, 4(SP) + MOVL SI, 8(SP) + MOVL CX, 12(SP) + MOVL $0, 16(SP) + CALL runtime·reflectcallmove(SB) + RET + +CALLFN(·call16, 16) +CALLFN(·call32, 32) +CALLFN(·call64, 64) +CALLFN(·call128, 128) +CALLFN(·call256, 256) +CALLFN(·call512, 512) +CALLFN(·call1024, 1024) +CALLFN(·call2048, 2048) +CALLFN(·call4096, 4096) +CALLFN(·call8192, 8192) +CALLFN(·call16384, 16384) +CALLFN(·call32768, 32768) +CALLFN(·call65536, 65536) +CALLFN(·call131072, 131072) +CALLFN(·call262144, 262144) +CALLFN(·call524288, 524288) +CALLFN(·call1048576, 1048576) +CALLFN(·call2097152, 2097152) +CALLFN(·call4194304, 4194304) +CALLFN(·call8388608, 8388608) +CALLFN(·call16777216, 16777216) +CALLFN(·call33554432, 33554432) +CALLFN(·call67108864, 67108864) +CALLFN(·call134217728, 134217728) +CALLFN(·call268435456, 268435456) +CALLFN(·call536870912, 536870912) +CALLFN(·call1073741824, 1073741824) + +TEXT runtime·procyield(SB),NOSPLIT,$0-0 + MOVL cycles+0(FP), AX +again: + PAUSE + SUBL $1, AX + JNZ again + RET + +TEXT ·publicationBarrier(SB),NOSPLIT,$0-0 + // Stores are already ordered on x86, so this is just a + // compile barrier. + RET + +// Save state of caller into g->sched, +// but using fake PC from systemstack_switch. +// Must only be called from functions with no locals ($0) +// or else unwinding from systemstack_switch is incorrect. +TEXT gosave_systemstack_switch<>(SB),NOSPLIT,$0 + PUSHL AX + PUSHL BX + get_tls(BX) + MOVL g(BX), BX + LEAL arg+0(FP), AX + MOVL AX, (g_sched+gobuf_sp)(BX) + MOVL $runtime·systemstack_switch(SB), AX + MOVL AX, (g_sched+gobuf_pc)(BX) + MOVL $0, (g_sched+gobuf_ret)(BX) + // Assert ctxt is zero. See func save. + MOVL (g_sched+gobuf_ctxt)(BX), AX + TESTL AX, AX + JZ 2(PC) + CALL runtime·abort(SB) + POPL BX + POPL AX + RET + +// func asmcgocall_no_g(fn, arg unsafe.Pointer) +// Call fn(arg) aligned appropriately for the gcc ABI. +// Called on a system stack, and there may be no g yet (during needm). +TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-8 + MOVL fn+0(FP), AX + MOVL arg+4(FP), BX + MOVL SP, DX + SUBL $32, SP + ANDL $~15, SP // alignment, perhaps unnecessary + MOVL DX, 8(SP) // save old SP + MOVL BX, 0(SP) // first argument in x86-32 ABI + CALL AX + MOVL 8(SP), DX + MOVL DX, SP + RET + +// func asmcgocall(fn, arg unsafe.Pointer) int32 +// Call fn(arg) on the scheduler stack, +// aligned appropriately for the gcc ABI. +// See cgocall.go for more details. +TEXT ·asmcgocall(SB),NOSPLIT,$0-12 + MOVL fn+0(FP), AX + MOVL arg+4(FP), BX + + MOVL SP, DX + + // Figure out if we need to switch to m->g0 stack. + // We get called to create new OS threads too, and those + // come in on the m->g0 stack already. Or we might already + // be on the m->gsignal stack. + get_tls(CX) + MOVL g(CX), DI + CMPL DI, $0 + JEQ nosave // Don't even have a G yet. + MOVL g_m(DI), BP + CMPL DI, m_gsignal(BP) + JEQ noswitch + MOVL m_g0(BP), SI + CMPL DI, SI + JEQ noswitch + CALL gosave_systemstack_switch<>(SB) + get_tls(CX) + MOVL SI, g(CX) + MOVL (g_sched+gobuf_sp)(SI), SP + +noswitch: + // Now on a scheduling stack (a pthread-created stack). + SUBL $32, SP + ANDL $~15, SP // alignment, perhaps unnecessary + MOVL DI, 8(SP) // save g + MOVL (g_stack+stack_hi)(DI), DI + SUBL DX, DI + MOVL DI, 4(SP) // save depth in stack (can't just save SP, as stack might be copied during a callback) + MOVL BX, 0(SP) // first argument in x86-32 ABI + CALL AX + + // Restore registers, g, stack pointer. 
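+	// SP is rebuilt from the saved depth below stack_hi because the g
+	// stack may have been copied while the C call ran a callback.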
+ get_tls(CX) + MOVL 8(SP), DI + MOVL (g_stack+stack_hi)(DI), SI + SUBL 4(SP), SI + MOVL DI, g(CX) + MOVL SI, SP + + MOVL AX, ret+8(FP) + RET +nosave: + // Now on a scheduling stack (a pthread-created stack). + SUBL $32, SP + ANDL $~15, SP // alignment, perhaps unnecessary + MOVL DX, 4(SP) // save original stack pointer + MOVL BX, 0(SP) // first argument in x86-32 ABI + CALL AX + + MOVL 4(SP), CX // restore original stack pointer + MOVL CX, SP + MOVL AX, ret+8(FP) + RET + +// cgocallback(fn, frame unsafe.Pointer, ctxt uintptr) +// See cgocall.go for more details. +TEXT ·cgocallback(SB),NOSPLIT,$12-12 // Frame size must match commented places below + NO_LOCAL_POINTERS + + // Skip cgocallbackg, just dropm when fn is nil, and frame is the saved g. + // It is used to dropm while thread is exiting. + MOVL fn+0(FP), AX + CMPL AX, $0 + JNE loadg + // Restore the g from frame. + get_tls(CX) + MOVL frame+4(FP), BX + MOVL BX, g(CX) + JMP dropm + +loadg: + // If g is nil, Go did not create the current thread, + // or if this thread never called into Go on pthread platforms. + // Call needm to obtain one for temporary use. + // In this case, we're running on the thread stack, so there's + // lots of space, but the linker doesn't know. Hide the call from + // the linker analysis by using an indirect call through AX. + get_tls(CX) +#ifdef GOOS_windows + MOVL $0, BP + CMPL CX, $0 + JEQ 2(PC) // TODO +#endif + MOVL g(CX), BP + CMPL BP, $0 + JEQ needm + MOVL g_m(BP), BP + MOVL BP, savedm-4(SP) // saved copy of oldm + JMP havem +needm: + MOVL $runtime·needAndBindM(SB), AX + CALL AX + MOVL $0, savedm-4(SP) + get_tls(CX) + MOVL g(CX), BP + MOVL g_m(BP), BP + + // Set m->sched.sp = SP, so that if a panic happens + // during the function we are about to execute, it will + // have a valid SP to run on the g0 stack. + // The next few lines (after the havem label) + // will save this SP onto the stack and then write + // the same SP back to m->sched.sp. That seems redundant, + // but if an unrecovered panic happens, unwindm will + // restore the g->sched.sp from the stack location + // and then systemstack will try to use it. If we don't set it here, + // that restored SP will be uninitialized (typically 0) and + // will not be usable. + MOVL m_g0(BP), SI + MOVL SP, (g_sched+gobuf_sp)(SI) + +havem: + // Now there's a valid m, and we're running on its m->g0. + // Save current m->g0->sched.sp on stack and then set it to SP. + // Save current sp in m->g0->sched.sp in preparation for + // switch back to m->curg stack. + // NOTE: unwindm knows that the saved g->sched.sp is at 0(SP). + MOVL m_g0(BP), SI + MOVL (g_sched+gobuf_sp)(SI), AX + MOVL AX, 0(SP) + MOVL SP, (g_sched+gobuf_sp)(SI) + + // Switch to m->curg stack and call runtime.cgocallbackg. + // Because we are taking over the execution of m->curg + // but *not* resuming what had been running, we need to + // save that information (m->curg->sched) so we can restore it. + // We can restore m->curg->sched.sp easily, because calling + // runtime.cgocallbackg leaves SP unchanged upon return. + // To save m->curg->sched.pc, we push it onto the curg stack and + // open a frame the same size as cgocallback's g0 frame. + // Once we switch to the curg stack, the pushed PC will appear + // to be the return PC of cgocallback, so that the traceback + // will seamlessly trace back into the earlier calls. 
+ MOVL m_curg(BP), SI + MOVL SI, g(CX) + MOVL (g_sched+gobuf_sp)(SI), DI // prepare stack as DI + MOVL (g_sched+gobuf_pc)(SI), BP + MOVL BP, -4(DI) // "push" return PC on the g stack + // Gather our arguments into registers. + MOVL fn+0(FP), AX + MOVL frame+4(FP), BX + MOVL ctxt+8(FP), CX + LEAL -(4+12)(DI), SP // Must match declared frame size + MOVL AX, 0(SP) + MOVL BX, 4(SP) + MOVL CX, 8(SP) + CALL runtime·cgocallbackg(SB) + + // Restore g->sched (== m->curg->sched) from saved values. + get_tls(CX) + MOVL g(CX), SI + MOVL 12(SP), BP // Must match declared frame size + MOVL BP, (g_sched+gobuf_pc)(SI) + LEAL (12+4)(SP), DI // Must match declared frame size + MOVL DI, (g_sched+gobuf_sp)(SI) + + // Switch back to m->g0's stack and restore m->g0->sched.sp. + // (Unlike m->curg, the g0 goroutine never uses sched.pc, + // so we do not have to restore it.) + MOVL g(CX), BP + MOVL g_m(BP), BP + MOVL m_g0(BP), SI + MOVL SI, g(CX) + MOVL (g_sched+gobuf_sp)(SI), SP + MOVL 0(SP), AX + MOVL AX, (g_sched+gobuf_sp)(SI) + + // If the m on entry was nil, we called needm above to borrow an m, + // 1. for the duration of the call on non-pthread platforms, + // 2. or the duration of the C thread alive on pthread platforms. + // If the m on entry wasn't nil, + // 1. the thread might be a Go thread, + // 2. or it wasn't the first call from a C thread on pthread platforms, + // since then we skip dropm to reuse the m in the first call. + MOVL savedm-4(SP), DX + CMPL DX, $0 + JNE droppedm + + // Skip dropm to reuse it in the next call, when a pthread key has been created. + MOVL _cgo_pthread_key_created(SB), DX + // It means cgo is disabled when _cgo_pthread_key_created is a nil pointer, need dropm. + CMPL DX, $0 + JEQ dropm + CMPL (DX), $0 + JNE droppedm + +dropm: + MOVL $runtime·dropm(SB), AX + CALL AX +droppedm: + + // Done! + RET + +// void setg(G*); set g. for use by needm. +TEXT runtime·setg(SB), NOSPLIT, $0-4 + MOVL gg+0(FP), BX +#ifdef GOOS_windows + MOVL runtime·tls_g(SB), CX + CMPL BX, $0 + JNE settls + MOVL $0, 0(CX)(FS) + RET +settls: + MOVL g_m(BX), AX + LEAL m_tls(AX), AX + MOVL AX, 0(CX)(FS) +#endif + get_tls(CX) + MOVL BX, g(CX) + RET + +// void setg_gcc(G*); set g. for use by gcc +TEXT setg_gcc<>(SB), NOSPLIT, $0 + get_tls(AX) + MOVL gg+0(FP), DX + MOVL DX, g(AX) + RET + +TEXT runtime·abort(SB),NOSPLIT,$0-0 + INT $3 +loop: + JMP loop + +// check that SP is in range [g->stack.lo, g->stack.hi) +TEXT runtime·stackcheck(SB), NOSPLIT, $0-0 + get_tls(CX) + MOVL g(CX), AX + CMPL (g_stack+stack_hi)(AX), SP + JHI 2(PC) + CALL runtime·abort(SB) + CMPL SP, (g_stack+stack_lo)(AX) + JHI 2(PC) + CALL runtime·abort(SB) + RET + +// func cputicks() int64 +TEXT runtime·cputicks(SB),NOSPLIT,$0-8 + // LFENCE/MFENCE instruction support is dependent on SSE2. + // When no SSE2 support is present do not enforce any serialization + // since using CPUID to serialize the instruction stream is + // very costly. +#ifdef GO386_softfloat + JMP rdtsc // no fence instructions available +#endif + CMPB internal∕cpu·X86+const_offsetX86HasRDTSCP(SB), $1 + JNE fences + // Instruction stream serializing RDTSCP is supported. + // RDTSCP is supported by Intel Nehalem (2008) and + // AMD K8 Rev. F (2006) and newer. + RDTSCP +done: + MOVL AX, ret_lo+0(FP) + MOVL DX, ret_hi+4(FP) + RET +fences: + // MFENCE is instruction stream serializing and flushes the + // store buffers on AMD. The serialization semantics of LFENCE on AMD + // are dependent on MSR C001_1029 and CPU generation. 
+	// LFENCE on Intel does wait for all previous instructions to have executed.
+	// Intel recommends MFENCE;LFENCE in its manuals before RDTSC to have all
+	// previous instructions executed and all previous loads and stores to be
+	// globally visible.
+	// Using MFENCE;LFENCE here aligns the serializing properties without
+	// runtime detection of CPU manufacturer.
+	MFENCE
+	LFENCE
+rdtsc:
+	RDTSC
+	JMP done
+
+TEXT ldt0setup<>(SB),NOSPLIT,$16-0
+#ifdef GOOS_windows
+	CALL runtime·wintls(SB)
+#endif
+	// set up ldt 7 to point at m0.tls
+	// ldt 1 would be fine on Linux, but on OS X, 7 is as low as we can go.
+	// the entry number is just a hint. setldt will set up GS with what it used.
+	MOVL $7, 0(SP)
+	LEAL runtime·m0+m_tls(SB), AX
+	MOVL AX, 4(SP)
+	MOVL $32, 8(SP)	// sizeof(tls array)
+	CALL runtime·setldt(SB)
+	RET
+
+TEXT runtime·emptyfunc(SB),0,$0-0
+	RET
+
+// hash function using AES hardware instructions
+TEXT runtime·memhash(SB),NOSPLIT,$0-16
+	CMPB runtime·useAeshash(SB), $0
+	JEQ noaes
+	MOVL p+0(FP), AX	// ptr to data
+	MOVL s+8(FP), BX	// size
+	LEAL ret+12(FP), DX
+	JMP aeshashbody<>(SB)
+noaes:
+	JMP runtime·memhashFallback(SB)
+
+TEXT runtime·strhash(SB),NOSPLIT,$0-12
+	CMPB runtime·useAeshash(SB), $0
+	JEQ noaes
+	MOVL p+0(FP), AX	// ptr to string object
+	MOVL 4(AX), BX	// length of string
+	MOVL (AX), AX	// string data
+	LEAL ret+8(FP), DX
+	JMP aeshashbody<>(SB)
+noaes:
+	JMP runtime·strhashFallback(SB)
+
+// AX: data
+// BX: length
+// DX: address to put return value
+TEXT aeshashbody<>(SB),NOSPLIT,$0-0
+	MOVL h+4(FP), X0	// 32 bits of per-table hash seed
+	PINSRW $4, BX, X0	// 16 bits of length
+	PSHUFHW $0, X0, X0	// replace size with its low 2 bytes repeated 4 times
+	MOVO X0, X1	// save unscrambled seed
+	PXOR runtime·aeskeysched(SB), X0	// xor in per-process seed
+	AESENC X0, X0	// scramble seed
+
+	CMPL BX, $16
+	JB aes0to15
+	JE aes16
+	CMPL BX, $32
+	JBE aes17to32
+	CMPL BX, $64
+	JBE aes33to64
+	JMP aes65plus
+
+aes0to15:
+	TESTL BX, BX
+	JE aes0
+
+	ADDL $16, AX
+	TESTW $0xff0, AX
+	JE endofpage
+
+	// 16 bytes loaded at this address won't cross
+	// a page boundary, so we can load it directly.
+	MOVOU -16(AX), X1
+	ADDL BX, BX
+	PAND masks<>(SB)(BX*8), X1
+
+final1:
+	PXOR X0, X1	// xor data with seed
+	AESENC X1, X1	// scramble combo 3 times
+	AESENC X1, X1
+	AESENC X1, X1
+	MOVL X1, (DX)
+	RET
+
+endofpage:
+	// address ends in 1111xxxx. Might be up against
+	// a page boundary, so load ending at last byte.
+	// Then shift bytes down using pshufb.
+ MOVOU -32(AX)(BX*1), X1 + ADDL BX, BX + PSHUFB shifts<>(SB)(BX*8), X1 + JMP final1 + +aes0: + // Return scrambled input seed + AESENC X0, X0 + MOVL X0, (DX) + RET + +aes16: + MOVOU (AX), X1 + JMP final1 + +aes17to32: + // make second starting seed + PXOR runtime·aeskeysched+16(SB), X1 + AESENC X1, X1 + + // load data to be hashed + MOVOU (AX), X2 + MOVOU -16(AX)(BX*1), X3 + + // xor with seed + PXOR X0, X2 + PXOR X1, X3 + + // scramble 3 times + AESENC X2, X2 + AESENC X3, X3 + AESENC X2, X2 + AESENC X3, X3 + AESENC X2, X2 + AESENC X3, X3 + + // combine results + PXOR X3, X2 + MOVL X2, (DX) + RET + +aes33to64: + // make 3 more starting seeds + MOVO X1, X2 + MOVO X1, X3 + PXOR runtime·aeskeysched+16(SB), X1 + PXOR runtime·aeskeysched+32(SB), X2 + PXOR runtime·aeskeysched+48(SB), X3 + AESENC X1, X1 + AESENC X2, X2 + AESENC X3, X3 + + MOVOU (AX), X4 + MOVOU 16(AX), X5 + MOVOU -32(AX)(BX*1), X6 + MOVOU -16(AX)(BX*1), X7 + + PXOR X0, X4 + PXOR X1, X5 + PXOR X2, X6 + PXOR X3, X7 + + AESENC X4, X4 + AESENC X5, X5 + AESENC X6, X6 + AESENC X7, X7 + + AESENC X4, X4 + AESENC X5, X5 + AESENC X6, X6 + AESENC X7, X7 + + AESENC X4, X4 + AESENC X5, X5 + AESENC X6, X6 + AESENC X7, X7 + + PXOR X6, X4 + PXOR X7, X5 + PXOR X5, X4 + MOVL X4, (DX) + RET + +aes65plus: + // make 3 more starting seeds + MOVO X1, X2 + MOVO X1, X3 + PXOR runtime·aeskeysched+16(SB), X1 + PXOR runtime·aeskeysched+32(SB), X2 + PXOR runtime·aeskeysched+48(SB), X3 + AESENC X1, X1 + AESENC X2, X2 + AESENC X3, X3 + + // start with last (possibly overlapping) block + MOVOU -64(AX)(BX*1), X4 + MOVOU -48(AX)(BX*1), X5 + MOVOU -32(AX)(BX*1), X6 + MOVOU -16(AX)(BX*1), X7 + + // scramble state once + AESENC X0, X4 + AESENC X1, X5 + AESENC X2, X6 + AESENC X3, X7 + + // compute number of remaining 64-byte blocks + DECL BX + SHRL $6, BX + +aesloop: + // scramble state, xor in a block + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + AESENC X0, X4 + AESENC X1, X5 + AESENC X2, X6 + AESENC X3, X7 + + // scramble state + AESENC X4, X4 + AESENC X5, X5 + AESENC X6, X6 + AESENC X7, X7 + + ADDL $64, AX + DECL BX + JNE aesloop + + // 3 more scrambles to finish + AESENC X4, X4 + AESENC X5, X5 + AESENC X6, X6 + AESENC X7, X7 + + AESENC X4, X4 + AESENC X5, X5 + AESENC X6, X6 + AESENC X7, X7 + + AESENC X4, X4 + AESENC X5, X5 + AESENC X6, X6 + AESENC X7, X7 + + PXOR X6, X4 + PXOR X7, X5 + PXOR X5, X4 + MOVL X4, (DX) + RET + +TEXT runtime·memhash32(SB),NOSPLIT,$0-12 + CMPB runtime·useAeshash(SB), $0 + JEQ noaes + MOVL p+0(FP), AX // ptr to data + MOVL h+4(FP), X0 // seed + PINSRD $1, (AX), X0 // data + AESENC runtime·aeskeysched+0(SB), X0 + AESENC runtime·aeskeysched+16(SB), X0 + AESENC runtime·aeskeysched+32(SB), X0 + MOVL X0, ret+8(FP) + RET +noaes: + JMP runtime·memhash32Fallback(SB) + +TEXT runtime·memhash64(SB),NOSPLIT,$0-12 + CMPB runtime·useAeshash(SB), $0 + JEQ noaes + MOVL p+0(FP), AX // ptr to data + MOVQ (AX), X0 // data + PINSRD $2, h+4(FP), X0 // seed + AESENC runtime·aeskeysched+0(SB), X0 + AESENC runtime·aeskeysched+16(SB), X0 + AESENC runtime·aeskeysched+32(SB), X0 + MOVL X0, ret+8(FP) + RET +noaes: + JMP runtime·memhash64Fallback(SB) + +// simple mask to get rid of data in the high part of the register. 
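+// Each 16-byte entry n keeps the low n bytes and zeroes the rest. The code
+// above doubles the length before indexing because the scaled index (BX*8)
+// steps in 8-byte units while entries are 16 bytes wide. The effect of PAND
+// with entry n, as a Go sketch (illustrative helper, not runtime code):
+//
+//	func maskLow(block [16]byte, n int) [16]byte {
+//		var out [16]byte
+//		copy(out[:], block[:n]) // bytes n..15 stay zero, as PAND leaves them
+//		return out
+//	}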
+DATA masks<>+0x00(SB)/4, $0x00000000 +DATA masks<>+0x04(SB)/4, $0x00000000 +DATA masks<>+0x08(SB)/4, $0x00000000 +DATA masks<>+0x0c(SB)/4, $0x00000000 + +DATA masks<>+0x10(SB)/4, $0x000000ff +DATA masks<>+0x14(SB)/4, $0x00000000 +DATA masks<>+0x18(SB)/4, $0x00000000 +DATA masks<>+0x1c(SB)/4, $0x00000000 + +DATA masks<>+0x20(SB)/4, $0x0000ffff +DATA masks<>+0x24(SB)/4, $0x00000000 +DATA masks<>+0x28(SB)/4, $0x00000000 +DATA masks<>+0x2c(SB)/4, $0x00000000 + +DATA masks<>+0x30(SB)/4, $0x00ffffff +DATA masks<>+0x34(SB)/4, $0x00000000 +DATA masks<>+0x38(SB)/4, $0x00000000 +DATA masks<>+0x3c(SB)/4, $0x00000000 + +DATA masks<>+0x40(SB)/4, $0xffffffff +DATA masks<>+0x44(SB)/4, $0x00000000 +DATA masks<>+0x48(SB)/4, $0x00000000 +DATA masks<>+0x4c(SB)/4, $0x00000000 + +DATA masks<>+0x50(SB)/4, $0xffffffff +DATA masks<>+0x54(SB)/4, $0x000000ff +DATA masks<>+0x58(SB)/4, $0x00000000 +DATA masks<>+0x5c(SB)/4, $0x00000000 + +DATA masks<>+0x60(SB)/4, $0xffffffff +DATA masks<>+0x64(SB)/4, $0x0000ffff +DATA masks<>+0x68(SB)/4, $0x00000000 +DATA masks<>+0x6c(SB)/4, $0x00000000 + +DATA masks<>+0x70(SB)/4, $0xffffffff +DATA masks<>+0x74(SB)/4, $0x00ffffff +DATA masks<>+0x78(SB)/4, $0x00000000 +DATA masks<>+0x7c(SB)/4, $0x00000000 + +DATA masks<>+0x80(SB)/4, $0xffffffff +DATA masks<>+0x84(SB)/4, $0xffffffff +DATA masks<>+0x88(SB)/4, $0x00000000 +DATA masks<>+0x8c(SB)/4, $0x00000000 + +DATA masks<>+0x90(SB)/4, $0xffffffff +DATA masks<>+0x94(SB)/4, $0xffffffff +DATA masks<>+0x98(SB)/4, $0x000000ff +DATA masks<>+0x9c(SB)/4, $0x00000000 + +DATA masks<>+0xa0(SB)/4, $0xffffffff +DATA masks<>+0xa4(SB)/4, $0xffffffff +DATA masks<>+0xa8(SB)/4, $0x0000ffff +DATA masks<>+0xac(SB)/4, $0x00000000 + +DATA masks<>+0xb0(SB)/4, $0xffffffff +DATA masks<>+0xb4(SB)/4, $0xffffffff +DATA masks<>+0xb8(SB)/4, $0x00ffffff +DATA masks<>+0xbc(SB)/4, $0x00000000 + +DATA masks<>+0xc0(SB)/4, $0xffffffff +DATA masks<>+0xc4(SB)/4, $0xffffffff +DATA masks<>+0xc8(SB)/4, $0xffffffff +DATA masks<>+0xcc(SB)/4, $0x00000000 + +DATA masks<>+0xd0(SB)/4, $0xffffffff +DATA masks<>+0xd4(SB)/4, $0xffffffff +DATA masks<>+0xd8(SB)/4, $0xffffffff +DATA masks<>+0xdc(SB)/4, $0x000000ff + +DATA masks<>+0xe0(SB)/4, $0xffffffff +DATA masks<>+0xe4(SB)/4, $0xffffffff +DATA masks<>+0xe8(SB)/4, $0xffffffff +DATA masks<>+0xec(SB)/4, $0x0000ffff + +DATA masks<>+0xf0(SB)/4, $0xffffffff +DATA masks<>+0xf4(SB)/4, $0xffffffff +DATA masks<>+0xf8(SB)/4, $0xffffffff +DATA masks<>+0xfc(SB)/4, $0x00ffffff + +GLOBL masks<>(SB),RODATA,$256 + +// these are arguments to pshufb. They move data down from +// the high bytes of the register to the low bytes of the register. +// index is how many bytes to move. 
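+// A 0xff index byte clears the destination byte, so entry n moves the top
+// n bytes of the source down to the bottom and zeroes everything above.
+// PSHUFB itself behaves like this Go sketch (illustrative only):
+//
+//	func pshufb(src, idx [16]byte) (out [16]byte) {
+//		for i, j := range idx {
+//			if j&0x80 == 0 { // an index with the high bit set selects zero
+//				out[i] = src[j&0x0f]
+//			}
+//		}
+//		return
+//	}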
+DATA shifts<>+0x00(SB)/4, $0x00000000 +DATA shifts<>+0x04(SB)/4, $0x00000000 +DATA shifts<>+0x08(SB)/4, $0x00000000 +DATA shifts<>+0x0c(SB)/4, $0x00000000 + +DATA shifts<>+0x10(SB)/4, $0xffffff0f +DATA shifts<>+0x14(SB)/4, $0xffffffff +DATA shifts<>+0x18(SB)/4, $0xffffffff +DATA shifts<>+0x1c(SB)/4, $0xffffffff + +DATA shifts<>+0x20(SB)/4, $0xffff0f0e +DATA shifts<>+0x24(SB)/4, $0xffffffff +DATA shifts<>+0x28(SB)/4, $0xffffffff +DATA shifts<>+0x2c(SB)/4, $0xffffffff + +DATA shifts<>+0x30(SB)/4, $0xff0f0e0d +DATA shifts<>+0x34(SB)/4, $0xffffffff +DATA shifts<>+0x38(SB)/4, $0xffffffff +DATA shifts<>+0x3c(SB)/4, $0xffffffff + +DATA shifts<>+0x40(SB)/4, $0x0f0e0d0c +DATA shifts<>+0x44(SB)/4, $0xffffffff +DATA shifts<>+0x48(SB)/4, $0xffffffff +DATA shifts<>+0x4c(SB)/4, $0xffffffff + +DATA shifts<>+0x50(SB)/4, $0x0e0d0c0b +DATA shifts<>+0x54(SB)/4, $0xffffff0f +DATA shifts<>+0x58(SB)/4, $0xffffffff +DATA shifts<>+0x5c(SB)/4, $0xffffffff + +DATA shifts<>+0x60(SB)/4, $0x0d0c0b0a +DATA shifts<>+0x64(SB)/4, $0xffff0f0e +DATA shifts<>+0x68(SB)/4, $0xffffffff +DATA shifts<>+0x6c(SB)/4, $0xffffffff + +DATA shifts<>+0x70(SB)/4, $0x0c0b0a09 +DATA shifts<>+0x74(SB)/4, $0xff0f0e0d +DATA shifts<>+0x78(SB)/4, $0xffffffff +DATA shifts<>+0x7c(SB)/4, $0xffffffff + +DATA shifts<>+0x80(SB)/4, $0x0b0a0908 +DATA shifts<>+0x84(SB)/4, $0x0f0e0d0c +DATA shifts<>+0x88(SB)/4, $0xffffffff +DATA shifts<>+0x8c(SB)/4, $0xffffffff + +DATA shifts<>+0x90(SB)/4, $0x0a090807 +DATA shifts<>+0x94(SB)/4, $0x0e0d0c0b +DATA shifts<>+0x98(SB)/4, $0xffffff0f +DATA shifts<>+0x9c(SB)/4, $0xffffffff + +DATA shifts<>+0xa0(SB)/4, $0x09080706 +DATA shifts<>+0xa4(SB)/4, $0x0d0c0b0a +DATA shifts<>+0xa8(SB)/4, $0xffff0f0e +DATA shifts<>+0xac(SB)/4, $0xffffffff + +DATA shifts<>+0xb0(SB)/4, $0x08070605 +DATA shifts<>+0xb4(SB)/4, $0x0c0b0a09 +DATA shifts<>+0xb8(SB)/4, $0xff0f0e0d +DATA shifts<>+0xbc(SB)/4, $0xffffffff + +DATA shifts<>+0xc0(SB)/4, $0x07060504 +DATA shifts<>+0xc4(SB)/4, $0x0b0a0908 +DATA shifts<>+0xc8(SB)/4, $0x0f0e0d0c +DATA shifts<>+0xcc(SB)/4, $0xffffffff + +DATA shifts<>+0xd0(SB)/4, $0x06050403 +DATA shifts<>+0xd4(SB)/4, $0x0a090807 +DATA shifts<>+0xd8(SB)/4, $0x0e0d0c0b +DATA shifts<>+0xdc(SB)/4, $0xffffff0f + +DATA shifts<>+0xe0(SB)/4, $0x05040302 +DATA shifts<>+0xe4(SB)/4, $0x09080706 +DATA shifts<>+0xe8(SB)/4, $0x0d0c0b0a +DATA shifts<>+0xec(SB)/4, $0xffff0f0e + +DATA shifts<>+0xf0(SB)/4, $0x04030201 +DATA shifts<>+0xf4(SB)/4, $0x08070605 +DATA shifts<>+0xf8(SB)/4, $0x0c0b0a09 +DATA shifts<>+0xfc(SB)/4, $0xff0f0e0d + +GLOBL shifts<>(SB),RODATA,$256 + +TEXT ·checkASM(SB),NOSPLIT,$0-1 + // check that masks<>(SB) and shifts<>(SB) are aligned to 16-byte + MOVL $masks<>(SB), AX + MOVL $shifts<>(SB), BX + ORL BX, AX + TESTL $15, AX + SETEQ ret+0(FP) + RET + +TEXT runtime·return0(SB), NOSPLIT, $0 + MOVL $0, AX + RET + +// Called from cgo wrappers, this function returns g->m->curg.stack.hi. +// Must obey the gcc calling convention. +TEXT _cgo_topofstack(SB),NOSPLIT,$0 + get_tls(CX) + MOVL g(CX), AX + MOVL g_m(AX), AX + MOVL m_curg(AX), AX + MOVL (g_stack+stack_hi)(AX), AX + RET + +// The top-most function running on a goroutine +// returns to goexit+PCQuantum. +TEXT runtime·goexit(SB),NOSPLIT|TOPFRAME,$0-0 + BYTE $0x90 // NOP + CALL runtime·goexit1(SB) // does not return + // traceback from goexit1 must hit code range of goexit + BYTE $0x90 // NOP + +// Add a module's moduledata to the linked list of moduledata objects. 
This +// is called from .init_array by a function generated in the linker and so +// follows the platform ABI wrt register preservation -- it only touches AX, +// CX (implicitly) and DX, but it does not follow the ABI wrt arguments: +// instead the pointer to the moduledata is passed in AX. +TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0 + MOVL runtime·lastmoduledatap(SB), DX + MOVL AX, moduledata_next(DX) + MOVL AX, runtime·lastmoduledatap(SB) + RET + +TEXT runtime·uint32tofloat64(SB),NOSPLIT,$8-12 + MOVL a+0(FP), AX + MOVL AX, 0(SP) + MOVL $0, 4(SP) + FMOVV 0(SP), F0 + FMOVDP F0, ret+4(FP) + RET + +TEXT runtime·float64touint32(SB),NOSPLIT,$12-12 + FMOVD a+0(FP), F0 + FSTCW 0(SP) + FLDCW runtime·controlWord64trunc(SB) + FMOVVP F0, 4(SP) + FLDCW 0(SP) + MOVL 4(SP), AX + MOVL AX, ret+8(FP) + RET + +// gcWriteBarrier informs the GC about heap pointer writes. +// +// gcWriteBarrier returns space in a write barrier buffer which +// should be filled in by the caller. +// gcWriteBarrier does NOT follow the Go ABI. It accepts the +// number of bytes of buffer needed in DI, and returns a pointer +// to the buffer space in DI. +// It clobbers FLAGS. It does not clobber any general-purpose registers, +// but may clobber others (e.g., SSE registers). +// Typical use would be, when doing *(CX+88) = AX +// CMPL $0, runtime.writeBarrier(SB) +// JEQ dowrite +// CALL runtime.gcBatchBarrier2(SB) +// MOVL AX, (DI) +// MOVL 88(CX), DX +// MOVL DX, 4(DI) +// dowrite: +// MOVL AX, 88(CX) +TEXT gcWriteBarrier<>(SB),NOSPLIT,$28 + // Save the registers clobbered by the fast path. This is slightly + // faster than having the caller spill these. + MOVL CX, 20(SP) + MOVL BX, 24(SP) +retry: + // TODO: Consider passing g.m.p in as an argument so they can be shared + // across a sequence of write barriers. + get_tls(BX) + MOVL g(BX), BX + MOVL g_m(BX), BX + MOVL m_p(BX), BX + // Get current buffer write position. + MOVL (p_wbBuf+wbBuf_next)(BX), CX // original next position + ADDL DI, CX // new next position + // Is the buffer full? + CMPL CX, (p_wbBuf+wbBuf_end)(BX) + JA flush + // Commit to the larger buffer. + MOVL CX, (p_wbBuf+wbBuf_next)(BX) + // Make return value (the original next position) + SUBL DI, CX + MOVL CX, DI + // Restore registers. + MOVL 20(SP), CX + MOVL 24(SP), BX + RET + +flush: + // Save all general purpose registers since these could be + // clobbered by wbBufFlush and were not saved by the caller. + MOVL DI, 0(SP) + MOVL AX, 4(SP) + // BX already saved + // CX already saved + MOVL DX, 8(SP) + MOVL BP, 12(SP) + MOVL SI, 16(SP) + // DI already saved + + CALL runtime·wbBufFlush(SB) + + MOVL 0(SP), DI + MOVL 4(SP), AX + MOVL 8(SP), DX + MOVL 12(SP), BP + MOVL 16(SP), SI + JMP retry + +TEXT runtime·gcWriteBarrier1(SB),NOSPLIT,$0 + MOVL $4, DI + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier2(SB),NOSPLIT,$0 + MOVL $8, DI + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier3(SB),NOSPLIT,$0 + MOVL $12, DI + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier4(SB),NOSPLIT,$0 + MOVL $16, DI + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier5(SB),NOSPLIT,$0 + MOVL $20, DI + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier6(SB),NOSPLIT,$0 + MOVL $24, DI + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier7(SB),NOSPLIT,$0 + MOVL $28, DI + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0 + MOVL $32, DI + JMP gcWriteBarrier<>(SB) + +// Note: these functions use a special calling convention to save generated code space. 
+// Arguments are passed in registers, but the space for those arguments are allocated +// in the caller's stack frame. These stubs write the args into that stack space and +// then tail call to the corresponding runtime handler. +// The tail call makes these stubs disappear in backtraces. +TEXT runtime·panicIndex(SB),NOSPLIT,$0-8 + MOVL AX, x+0(FP) + MOVL CX, y+4(FP) + JMP runtime·goPanicIndex(SB) +TEXT runtime·panicIndexU(SB),NOSPLIT,$0-8 + MOVL AX, x+0(FP) + MOVL CX, y+4(FP) + JMP runtime·goPanicIndexU(SB) +TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-8 + MOVL CX, x+0(FP) + MOVL DX, y+4(FP) + JMP runtime·goPanicSliceAlen(SB) +TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-8 + MOVL CX, x+0(FP) + MOVL DX, y+4(FP) + JMP runtime·goPanicSliceAlenU(SB) +TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-8 + MOVL CX, x+0(FP) + MOVL DX, y+4(FP) + JMP runtime·goPanicSliceAcap(SB) +TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-8 + MOVL CX, x+0(FP) + MOVL DX, y+4(FP) + JMP runtime·goPanicSliceAcapU(SB) +TEXT runtime·panicSliceB(SB),NOSPLIT,$0-8 + MOVL AX, x+0(FP) + MOVL CX, y+4(FP) + JMP runtime·goPanicSliceB(SB) +TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-8 + MOVL AX, x+0(FP) + MOVL CX, y+4(FP) + JMP runtime·goPanicSliceBU(SB) +TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-8 + MOVL DX, x+0(FP) + MOVL BX, y+4(FP) + JMP runtime·goPanicSlice3Alen(SB) +TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-8 + MOVL DX, x+0(FP) + MOVL BX, y+4(FP) + JMP runtime·goPanicSlice3AlenU(SB) +TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-8 + MOVL DX, x+0(FP) + MOVL BX, y+4(FP) + JMP runtime·goPanicSlice3Acap(SB) +TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-8 + MOVL DX, x+0(FP) + MOVL BX, y+4(FP) + JMP runtime·goPanicSlice3AcapU(SB) +TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-8 + MOVL CX, x+0(FP) + MOVL DX, y+4(FP) + JMP runtime·goPanicSlice3B(SB) +TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-8 + MOVL CX, x+0(FP) + MOVL DX, y+4(FP) + JMP runtime·goPanicSlice3BU(SB) +TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-8 + MOVL AX, x+0(FP) + MOVL CX, y+4(FP) + JMP runtime·goPanicSlice3C(SB) +TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-8 + MOVL AX, x+0(FP) + MOVL CX, y+4(FP) + JMP runtime·goPanicSlice3CU(SB) +TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-8 + MOVL DX, x+0(FP) + MOVL BX, y+4(FP) + JMP runtime·goPanicSliceConvert(SB) + +// Extended versions for 64-bit indexes. 
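+// On 386 a 64-bit index arrives split across two registers (hi and lo
+// halves); the Go-side handlers reassemble it roughly as in this sketch
+// (hypothetical helper, for illustration):
+//
+//	func extendIndex(hi, lo uint32) int64 { return int64(hi)<<32 | int64(lo) }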
+TEXT runtime·panicExtendIndex(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL AX, lo+4(FP) + MOVL CX, y+8(FP) + JMP runtime·goPanicExtendIndex(SB) +TEXT runtime·panicExtendIndexU(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL AX, lo+4(FP) + MOVL CX, y+8(FP) + JMP runtime·goPanicExtendIndexU(SB) +TEXT runtime·panicExtendSliceAlen(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL CX, lo+4(FP) + MOVL DX, y+8(FP) + JMP runtime·goPanicExtendSliceAlen(SB) +TEXT runtime·panicExtendSliceAlenU(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL CX, lo+4(FP) + MOVL DX, y+8(FP) + JMP runtime·goPanicExtendSliceAlenU(SB) +TEXT runtime·panicExtendSliceAcap(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL CX, lo+4(FP) + MOVL DX, y+8(FP) + JMP runtime·goPanicExtendSliceAcap(SB) +TEXT runtime·panicExtendSliceAcapU(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL CX, lo+4(FP) + MOVL DX, y+8(FP) + JMP runtime·goPanicExtendSliceAcapU(SB) +TEXT runtime·panicExtendSliceB(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL AX, lo+4(FP) + MOVL CX, y+8(FP) + JMP runtime·goPanicExtendSliceB(SB) +TEXT runtime·panicExtendSliceBU(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL AX, lo+4(FP) + MOVL CX, y+8(FP) + JMP runtime·goPanicExtendSliceBU(SB) +TEXT runtime·panicExtendSlice3Alen(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL DX, lo+4(FP) + MOVL BX, y+8(FP) + JMP runtime·goPanicExtendSlice3Alen(SB) +TEXT runtime·panicExtendSlice3AlenU(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL DX, lo+4(FP) + MOVL BX, y+8(FP) + JMP runtime·goPanicExtendSlice3AlenU(SB) +TEXT runtime·panicExtendSlice3Acap(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL DX, lo+4(FP) + MOVL BX, y+8(FP) + JMP runtime·goPanicExtendSlice3Acap(SB) +TEXT runtime·panicExtendSlice3AcapU(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL DX, lo+4(FP) + MOVL BX, y+8(FP) + JMP runtime·goPanicExtendSlice3AcapU(SB) +TEXT runtime·panicExtendSlice3B(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL CX, lo+4(FP) + MOVL DX, y+8(FP) + JMP runtime·goPanicExtendSlice3B(SB) +TEXT runtime·panicExtendSlice3BU(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL CX, lo+4(FP) + MOVL DX, y+8(FP) + JMP runtime·goPanicExtendSlice3BU(SB) +TEXT runtime·panicExtendSlice3C(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL AX, lo+4(FP) + MOVL CX, y+8(FP) + JMP runtime·goPanicExtendSlice3C(SB) +TEXT runtime·panicExtendSlice3CU(SB),NOSPLIT,$0-12 + MOVL SI, hi+0(FP) + MOVL AX, lo+4(FP) + MOVL CX, y+8(FP) + JMP runtime·goPanicExtendSlice3CU(SB) + +#ifdef GOOS_android +// Use the free TLS_SLOT_APP slot #2 on Android Q. +// Earlier androids are set up in gcc_android.c. +DATA runtime·tls_g+0(SB)/4, $8 +GLOBL runtime·tls_g+0(SB), NOPTR, $4 +#endif +#ifdef GOOS_windows +GLOBL runtime·tls_g+0(SB), NOPTR, $4 +#endif diff --git a/platform/dbops/binaries/go/go/src/runtime/asm_amd64.h b/platform/dbops/binaries/go/go/src/runtime/asm_amd64.h new file mode 100644 index 0000000000000000000000000000000000000000..b263ade8022a7786d8ee7b5ba185ebd02adef0e9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asm_amd64.h @@ -0,0 +1,28 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Define features that are guaranteed to be supported by setting the AMD64 variable. +// If a feature is supported, there's no need to check it at runtime every time. 
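+// For illustration only (not code from this header): an assembly routine
+// built at a high enough GOAMD64 level can use a guarded instruction
+// unconditionally,
+//
+//	#ifdef hasPOPCNT
+//		POPCNTQ AX, AX	// guaranteed present, no CPUID test needed
+//	#endif
+//
+// leaving runtime feature detection to builds below that level.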
+ +#ifdef GOAMD64_v2 +#define hasPOPCNT +#define hasSSE42 +#endif + +#ifdef GOAMD64_v3 +#define hasAVX +#define hasAVX2 +#define hasPOPCNT +#define hasSSE42 +#endif + +#ifdef GOAMD64_v4 +#define hasAVX +#define hasAVX2 +#define hasAVX512F +#define hasAVX512BW +#define hasAVX512VL +#define hasPOPCNT +#define hasSSE42 +#endif diff --git a/platform/dbops/binaries/go/go/src/runtime/asm_amd64.s b/platform/dbops/binaries/go/go/src/runtime/asm_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..1071d270c1b85cf66c833b6b63ec8ebe21cbc656 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asm_amd64.s @@ -0,0 +1,2138 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "go_tls.h" +#include "funcdata.h" +#include "textflag.h" +#include "cgo/abi_amd64.h" + +// _rt0_amd64 is common startup code for most amd64 systems when using +// internal linking. This is the entry point for the program from the +// kernel for an ordinary -buildmode=exe program. The stack holds the +// number of arguments and the C-style argv. +TEXT _rt0_amd64(SB),NOSPLIT,$-8 + MOVQ 0(SP), DI // argc + LEAQ 8(SP), SI // argv + JMP runtime·rt0_go(SB) + +// main is common startup code for most amd64 systems when using +// external linking. The C startup code will call the symbol "main" +// passing argc and argv in the usual C ABI registers DI and SI. +TEXT main(SB),NOSPLIT,$-8 + JMP runtime·rt0_go(SB) + +// _rt0_amd64_lib is common startup code for most amd64 systems when +// using -buildmode=c-archive or -buildmode=c-shared. The linker will +// arrange to invoke this function as a global constructor (for +// c-archive) or when the shared library is loaded (for c-shared). +// We expect argc and argv to be passed in the usual C ABI registers +// DI and SI. +TEXT _rt0_amd64_lib(SB),NOSPLIT|NOFRAME,$0 + // Transition from C ABI to Go ABI. + PUSH_REGS_HOST_TO_ABI0() + + MOVQ DI, _rt0_amd64_lib_argc<>(SB) + MOVQ SI, _rt0_amd64_lib_argv<>(SB) + + // Synchronous initialization. + CALL runtime·libpreinit(SB) + + // Create a new thread to finish Go runtime initialization. + MOVQ _cgo_sys_thread_create(SB), AX + TESTQ AX, AX + JZ nocgo + + // We're calling back to C. + // Align stack per ELF ABI requirements. + MOVQ SP, BX // Callee-save in C ABI + ANDQ $~15, SP + MOVQ $_rt0_amd64_lib_go(SB), DI + MOVQ $0, SI + CALL AX + MOVQ BX, SP + JMP restore + +nocgo: + ADJSP $16 + MOVQ $0x800000, 0(SP) // stacksize + MOVQ $_rt0_amd64_lib_go(SB), AX + MOVQ AX, 8(SP) // fn + CALL runtime·newosproc0(SB) + ADJSP $-16 + +restore: + POP_REGS_HOST_TO_ABI0() + RET + +// _rt0_amd64_lib_go initializes the Go runtime. +// This is started in a separate thread by _rt0_amd64_lib. 
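+// For context, a -buildmode=c-archive or -buildmode=c-shared library is
+// produced from Go code along these lines (illustrative example; the
+// exported name Add is hypothetical):
+//
+//	package main
+//
+//	import "C"
+//
+//	//export Add
+//	func Add(a, b C.int) C.int { return a + b }
+//
+//	func main() {} // required by the toolchain, never run by the C host
+//
+// The first call from the C host into an exported function enters the Go
+// runtime through the startup path above.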
+TEXT _rt0_amd64_lib_go(SB),NOSPLIT,$0
+ MOVQ _rt0_amd64_lib_argc<>(SB), DI
+ MOVQ _rt0_amd64_lib_argv<>(SB), SI
+ JMP runtime·rt0_go(SB)
+
+DATA _rt0_amd64_lib_argc<>(SB)/8, $0
+GLOBL _rt0_amd64_lib_argc<>(SB),NOPTR, $8
+DATA _rt0_amd64_lib_argv<>(SB)/8, $0
+GLOBL _rt0_amd64_lib_argv<>(SB),NOPTR, $8
+
+#ifdef GOAMD64_v2
+DATA bad_cpu_msg<>+0x00(SB)/84, $"This program can only be run on AMD64 processors with v2 microarchitecture support.\n"
+#endif
+
+#ifdef GOAMD64_v3
+DATA bad_cpu_msg<>+0x00(SB)/84, $"This program can only be run on AMD64 processors with v3 microarchitecture support.\n"
+#endif
+
+#ifdef GOAMD64_v4
+DATA bad_cpu_msg<>+0x00(SB)/84, $"This program can only be run on AMD64 processors with v4 microarchitecture support.\n"
+#endif
+
+GLOBL bad_cpu_msg<>(SB), RODATA, $84
+
+// Define a list of AMD64 microarchitecture level features
+// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
+
+ // SSE3 SSSE3 CMPXCHG16B SSE4.1 SSE4.2 POPCNT
+#define V2_FEATURES_CX (1 << 0 | 1 << 9 | 1 << 13 | 1 << 19 | 1 << 20 | 1 << 23)
+ // LAHF/SAHF
+#define V2_EXT_FEATURES_CX (1 << 0)
+ // FMA MOVBE OSXSAVE AVX F16C
+#define V3_FEATURES_CX (V2_FEATURES_CX | 1 << 12 | 1 << 22 | 1 << 27 | 1 << 28 | 1 << 29)
+ // ABM (FOR LZCNT)
+#define V3_EXT_FEATURES_CX (V2_EXT_FEATURES_CX | 1 << 5)
+ // BMI1 AVX2 BMI2
+#define V3_EXT_FEATURES_BX (1 << 3 | 1 << 5 | 1 << 8)
+ // XMM YMM
+#define V3_OS_SUPPORT_AX (1 << 1 | 1 << 2)
+
+#define V4_FEATURES_CX V3_FEATURES_CX
+
+#define V4_EXT_FEATURES_CX V3_EXT_FEATURES_CX
+ // AVX512F AVX512DQ AVX512CD AVX512BW AVX512VL
+#define V4_EXT_FEATURES_BX (V3_EXT_FEATURES_BX | 1 << 16 | 1 << 17 | 1 << 28 | 1 << 30 | 1 << 31)
+ // OPMASK ZMM
+#define V4_OS_SUPPORT_AX (V3_OS_SUPPORT_AX | 1 << 5 | (1 << 6 | 1 << 7))
+
+#ifdef GOAMD64_v2
+#define NEED_MAX_CPUID 0x80000001
+#define NEED_FEATURES_CX V2_FEATURES_CX
+#define NEED_EXT_FEATURES_CX V2_EXT_FEATURES_CX
+#endif
+
+#ifdef GOAMD64_v3
+#define NEED_MAX_CPUID 0x80000001
+#define NEED_FEATURES_CX V3_FEATURES_CX
+#define NEED_EXT_FEATURES_CX V3_EXT_FEATURES_CX
+#define NEED_EXT_FEATURES_BX V3_EXT_FEATURES_BX
+#define NEED_OS_SUPPORT_AX V3_OS_SUPPORT_AX
+#endif
+
+#ifdef GOAMD64_v4
+#define NEED_MAX_CPUID 0x80000001
+#define NEED_FEATURES_CX V4_FEATURES_CX
+#define NEED_EXT_FEATURES_CX V4_EXT_FEATURES_CX
+#define NEED_EXT_FEATURES_BX V4_EXT_FEATURES_BX
+
+// Darwin requires a different approach to check AVX512 support, see CL 285572.
+#ifdef GOOS_darwin
+#define NEED_OS_SUPPORT_AX V3_OS_SUPPORT_AX
+// These values are from:
+// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h
+#define commpage64_base_address 0x00007fffffe00000
+#define commpage64_cpu_capabilities64 (commpage64_base_address+0x010)
+#define commpage64_version (commpage64_base_address+0x01E)
+#define AVX512F 0x0000004000000000
+#define AVX512CD 0x0000008000000000
+#define AVX512DQ 0x0000010000000000
+#define AVX512BW 0x0000020000000000
+#define AVX512VL 0x0000100000000000
+#define NEED_DARWIN_SUPPORT (AVX512F | AVX512DQ | AVX512CD | AVX512BW | AVX512VL)
+#else
+#define NEED_OS_SUPPORT_AX V4_OS_SUPPORT_AX
+#endif
+
+#endif
+
+TEXT runtime·rt0_go(SB),NOSPLIT|NOFRAME|TOPFRAME,$0
+ // copy arguments forward on an even stack
+ MOVQ DI, AX // argc
+ MOVQ SI, BX // argv
+ SUBQ $(5*8), SP // 3args 2auto
+ ANDQ $~15, SP
+ MOVQ AX, 24(SP)
+ MOVQ BX, 32(SP)
+
+ // create istack out of the given (operating system) stack.
+ // _cgo_init may update stackguard. 
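+ // In Go terms, the next few instructions amount to (sketch):
+ //	g0.stack.hi = SP
+ //	g0.stack.lo = SP - 64*1024
+ //	g0.stackguard0 = g0.stack.lo // raised to lo+stackGuard after _cgo_init
+ // carving a 64 KB system stack for g0 out of the OS-provided stack.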
+ MOVQ $runtime·g0(SB), DI + LEAQ (-64*1024)(SP), BX + MOVQ BX, g_stackguard0(DI) + MOVQ BX, g_stackguard1(DI) + MOVQ BX, (g_stack+stack_lo)(DI) + MOVQ SP, (g_stack+stack_hi)(DI) + + // find out information about the processor we're on + MOVL $0, AX + CPUID + CMPL AX, $0 + JE nocpuinfo + + CMPL BX, $0x756E6547 // "Genu" + JNE notintel + CMPL DX, $0x49656E69 // "ineI" + JNE notintel + CMPL CX, $0x6C65746E // "ntel" + JNE notintel + MOVB $1, runtime·isIntel(SB) + +notintel: + // Load EAX=1 cpuid flags + MOVL $1, AX + CPUID + MOVL AX, runtime·processorVersionInfo(SB) + +nocpuinfo: + // if there is an _cgo_init, call it. + MOVQ _cgo_init(SB), AX + TESTQ AX, AX + JZ needtls + // arg 1: g0, already in DI + MOVQ $setg_gcc<>(SB), SI // arg 2: setg_gcc + MOVQ $0, DX // arg 3, 4: not used when using platform's TLS + MOVQ $0, CX +#ifdef GOOS_android + MOVQ $runtime·tls_g(SB), DX // arg 3: &tls_g + // arg 4: TLS base, stored in slot 0 (Android's TLS_SLOT_SELF). + // Compensate for tls_g (+16). + MOVQ -16(TLS), CX +#endif +#ifdef GOOS_windows + MOVQ $runtime·tls_g(SB), DX // arg 3: &tls_g + // Adjust for the Win64 calling convention. + MOVQ CX, R9 // arg 4 + MOVQ DX, R8 // arg 3 + MOVQ SI, DX // arg 2 + MOVQ DI, CX // arg 1 +#endif + CALL AX + + // update stackguard after _cgo_init + MOVQ $runtime·g0(SB), CX + MOVQ (g_stack+stack_lo)(CX), AX + ADDQ $const_stackGuard, AX + MOVQ AX, g_stackguard0(CX) + MOVQ AX, g_stackguard1(CX) + +#ifndef GOOS_windows + JMP ok +#endif +needtls: +#ifdef GOOS_plan9 + // skip TLS setup on Plan 9 + JMP ok +#endif +#ifdef GOOS_solaris + // skip TLS setup on Solaris + JMP ok +#endif +#ifdef GOOS_illumos + // skip TLS setup on illumos + JMP ok +#endif +#ifdef GOOS_darwin + // skip TLS setup on Darwin + JMP ok +#endif +#ifdef GOOS_openbsd + // skip TLS setup on OpenBSD + JMP ok +#endif + +#ifdef GOOS_windows + CALL runtime·wintls(SB) +#endif + + LEAQ runtime·m0+m_tls(SB), DI + CALL runtime·settls(SB) + + // store through it, to make sure it works + get_tls(BX) + MOVQ $0x123, g(BX) + MOVQ runtime·m0+m_tls(SB), AX + CMPQ AX, $0x123 + JEQ 2(PC) + CALL runtime·abort(SB) +ok: + // set the per-goroutine and per-mach "registers" + get_tls(BX) + LEAQ runtime·g0(SB), CX + MOVQ CX, g(BX) + LEAQ runtime·m0(SB), AX + + // save m->g0 = g0 + MOVQ CX, m_g0(AX) + // save m0 to g0->m + MOVQ AX, g_m(CX) + + CLD // convention is D is always left cleared + + // Check GOAMD64 requirements + // We need to do this after setting up TLS, so that + // we can report an error if there is a failure. See issue 49586. 
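+ // In Go pseudo-terms the checks below amount to (illustrative; cpuid and
+ // xgetbv stand for the corresponding instructions):
+ //	_, _, ecx, _ := cpuid(1, 0)
+ //	if ecx&NEED_FEATURES_CX != NEED_FEATURES_CX { fail }
+ //	eax, _ := xgetbv(0) // OS-enabled register state (XMM/YMM/ZMM)
+ //	if eax&NEED_OS_SUPPORT_AX != NEED_OS_SUPPORT_AX { fail }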
+#ifdef NEED_FEATURES_CX + MOVL $0, AX + CPUID + CMPL AX, $0 + JE bad_cpu + MOVL $1, AX + CPUID + ANDL $NEED_FEATURES_CX, CX + CMPL CX, $NEED_FEATURES_CX + JNE bad_cpu +#endif + +#ifdef NEED_MAX_CPUID + MOVL $0x80000000, AX + CPUID + CMPL AX, $NEED_MAX_CPUID + JL bad_cpu +#endif + +#ifdef NEED_EXT_FEATURES_BX + MOVL $7, AX + MOVL $0, CX + CPUID + ANDL $NEED_EXT_FEATURES_BX, BX + CMPL BX, $NEED_EXT_FEATURES_BX + JNE bad_cpu +#endif + +#ifdef NEED_EXT_FEATURES_CX + MOVL $0x80000001, AX + CPUID + ANDL $NEED_EXT_FEATURES_CX, CX + CMPL CX, $NEED_EXT_FEATURES_CX + JNE bad_cpu +#endif + +#ifdef NEED_OS_SUPPORT_AX + XORL CX, CX + XGETBV + ANDL $NEED_OS_SUPPORT_AX, AX + CMPL AX, $NEED_OS_SUPPORT_AX + JNE bad_cpu +#endif + +#ifdef NEED_DARWIN_SUPPORT + MOVQ $commpage64_version, BX + CMPW (BX), $13 // cpu_capabilities64 undefined in versions < 13 + JL bad_cpu + MOVQ $commpage64_cpu_capabilities64, BX + MOVQ (BX), BX + MOVQ $NEED_DARWIN_SUPPORT, CX + ANDQ CX, BX + CMPQ BX, CX + JNE bad_cpu +#endif + + CALL runtime·check(SB) + + MOVL 24(SP), AX // copy argc + MOVL AX, 0(SP) + MOVQ 32(SP), AX // copy argv + MOVQ AX, 8(SP) + CALL runtime·args(SB) + CALL runtime·osinit(SB) + CALL runtime·schedinit(SB) + + // create a new goroutine to start program + MOVQ $runtime·mainPC(SB), AX // entry + PUSHQ AX + CALL runtime·newproc(SB) + POPQ AX + + // start this M + CALL runtime·mstart(SB) + + CALL runtime·abort(SB) // mstart should never return + RET + +bad_cpu: // show that the program requires a certain microarchitecture level. + MOVQ $2, 0(SP) + MOVQ $bad_cpu_msg<>(SB), AX + MOVQ AX, 8(SP) + MOVQ $84, 16(SP) + CALL runtime·write(SB) + MOVQ $1, 0(SP) + CALL runtime·exit(SB) + CALL runtime·abort(SB) + RET + + // Prevent dead-code elimination of debugCallV2, which is + // intended to be called by debuggers. + MOVQ $runtime·debugCallV2(SB), AX + RET + +// mainPC is a function value for runtime.main, to be passed to newproc. +// The reference to runtime.main is made via ABIInternal, since the +// actual function (not the ABI0 wrapper) is needed by newproc. +DATA runtime·mainPC+0(SB)/8,$runtime·main(SB) +GLOBL runtime·mainPC(SB),RODATA,$8 + +TEXT runtime·breakpoint(SB),NOSPLIT,$0-0 + BYTE $0xcc + RET + +TEXT runtime·asminit(SB),NOSPLIT,$0-0 + // No per-thread init. + RET + +TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME|NOFRAME,$0 + CALL runtime·mstart0(SB) + RET // not reached + +/* + * go-routine + */ + +// func gogo(buf *gobuf) +// restore state from Gobuf; longjmp +TEXT runtime·gogo(SB), NOSPLIT, $0-8 + MOVQ buf+0(FP), BX // gobuf + MOVQ gobuf_g(BX), DX + MOVQ 0(DX), CX // make sure g != nil + JMP gogo<>(SB) + +TEXT gogo<>(SB), NOSPLIT, $0 + get_tls(CX) + MOVQ DX, g(CX) + MOVQ DX, R14 // set the g register + MOVQ gobuf_sp(BX), SP // restore SP + MOVQ gobuf_ret(BX), AX + MOVQ gobuf_ctxt(BX), DX + MOVQ gobuf_bp(BX), BP + MOVQ $0, gobuf_sp(BX) // clear to help garbage collector + MOVQ $0, gobuf_ret(BX) + MOVQ $0, gobuf_ctxt(BX) + MOVQ $0, gobuf_bp(BX) + MOVQ gobuf_pc(BX), BX + JMP BX + +// func mcall(fn func(*g)) +// Switch to m->g0's stack, call fn(g). +// Fn must never return. It should gogo(&g->sched) +// to keep running g. +TEXT runtime·mcall(SB), NOSPLIT, $0-8 + MOVQ AX, DX // DX = fn + + // Save state in g->sched. The caller's SP and PC are restored by gogo to + // resume execution in the caller's frame (implicit return). The caller's BP + // is also restored to support frame pointer unwinding. 
+ MOVQ SP, BX // hide (SP) reads from vet + MOVQ 8(BX), BX // caller's PC + MOVQ BX, (g_sched+gobuf_pc)(R14) + LEAQ fn+0(FP), BX // caller's SP + MOVQ BX, (g_sched+gobuf_sp)(R14) + // Get the caller's frame pointer by dereferencing BP. Storing BP as it is + // can cause a frame pointer cycle, see CL 476235. + MOVQ (BP), BX // caller's BP + MOVQ BX, (g_sched+gobuf_bp)(R14) + + // switch to m->g0 & its stack, call fn + MOVQ g_m(R14), BX + MOVQ m_g0(BX), SI // SI = g.m.g0 + CMPQ SI, R14 // if g == m->g0 call badmcall + JNE goodm + JMP runtime·badmcall(SB) +goodm: + MOVQ R14, AX // AX (and arg 0) = g + MOVQ SI, R14 // g = g.m.g0 + get_tls(CX) // Set G in TLS + MOVQ R14, g(CX) + MOVQ (g_sched+gobuf_sp)(R14), SP // sp = g0.sched.sp + PUSHQ AX // open up space for fn's arg spill slot + MOVQ 0(DX), R12 + CALL R12 // fn(g) + POPQ AX + JMP runtime·badmcall2(SB) + RET + +// systemstack_switch is a dummy routine that systemstack leaves at the bottom +// of the G stack. We need to distinguish the routine that +// lives at the bottom of the G stack from the one that lives +// at the top of the system stack because the one at the top of +// the system stack terminates the stack walk (see topofstack()). +// The frame layout needs to match systemstack +// so that it can pretend to be systemstack_switch. +TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0 + UNDEF + // Make sure this function is not leaf, + // so the frame is saved. + CALL runtime·abort(SB) + RET + +// func systemstack(fn func()) +TEXT runtime·systemstack(SB), NOSPLIT, $0-8 + MOVQ fn+0(FP), DI // DI = fn + get_tls(CX) + MOVQ g(CX), AX // AX = g + MOVQ g_m(AX), BX // BX = m + + CMPQ AX, m_gsignal(BX) + JEQ noswitch + + MOVQ m_g0(BX), DX // DX = g0 + CMPQ AX, DX + JEQ noswitch + + CMPQ AX, m_curg(BX) + JNE bad + + // Switch stacks. + // The original frame pointer is stored in BP, + // which is useful for stack unwinding. + // Save our state in g->sched. Pretend to + // be systemstack_switch if the G stack is scanned. + CALL gosave_systemstack_switch<>(SB) + + // switch to g0 + MOVQ DX, g(CX) + MOVQ DX, R14 // set the g register + MOVQ (g_sched+gobuf_sp)(DX), SP + + // call target function + MOVQ DI, DX + MOVQ 0(DI), DI + CALL DI + + // switch back to g + get_tls(CX) + MOVQ g(CX), AX + MOVQ g_m(AX), BX + MOVQ m_curg(BX), AX + MOVQ AX, g(CX) + MOVQ (g_sched+gobuf_sp)(AX), SP + MOVQ (g_sched+gobuf_bp)(AX), BP + MOVQ $0, (g_sched+gobuf_sp)(AX) + MOVQ $0, (g_sched+gobuf_bp)(AX) + RET + +noswitch: + // already on m stack; tail call the function + // Using a tail call here cleans up tracebacks since we won't stop + // at an intermediate systemstack. + MOVQ DI, DX + MOVQ 0(DI), DI + // The function epilogue is not called on a tail call. + // Pop BP from the stack to simulate it. + POPQ BP + JMP DI + +bad: + // Bad: g is not gsignal, not g0, not curg. What is it? + MOVQ $runtime·badsystemstack(SB), AX + CALL AX + INT $3 + +// func switchToCrashStack0(fn func()) +TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-8 + MOVQ g_m(R14), BX // curm + + // set g to gcrash + LEAQ runtime·gcrash(SB), R14 // g = &gcrash + MOVQ BX, g_m(R14) // g.m = curm + MOVQ R14, m_g0(BX) // curm.g0 = g + get_tls(CX) + MOVQ R14, g(CX) + + // switch to crashstack + MOVQ (g_stack+stack_hi)(R14), BX + SUBQ $(4*8), BX + MOVQ BX, SP + + // call target function + MOVQ AX, DX + MOVQ 0(AX), AX + CALL AX + + // should never return + CALL runtime·abort(SB) + UNDEF + +/* + * support for morestack + */ + +// Called during function prolog when more stack is needed. 
+// +// The traceback routines see morestack on a g0 as being +// the top of a stack (for example, morestack calling newstack +// calling the scheduler calling newm calling gc), so we must +// record an argument size. For that purpose, it has no arguments. +TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 + // Cannot grow scheduler stack (m->g0). + get_tls(CX) + MOVQ g(CX), DI // DI = g + MOVQ g_m(DI), BX // BX = m + + // Set g->sched to context in f. + MOVQ 0(SP), AX // f's PC + MOVQ AX, (g_sched+gobuf_pc)(DI) + LEAQ 8(SP), AX // f's SP + MOVQ AX, (g_sched+gobuf_sp)(DI) + MOVQ BP, (g_sched+gobuf_bp)(DI) + MOVQ DX, (g_sched+gobuf_ctxt)(DI) + + MOVQ m_g0(BX), SI // SI = m.g0 + CMPQ DI, SI + JNE 3(PC) + CALL runtime·badmorestackg0(SB) + CALL runtime·abort(SB) + + // Cannot grow signal stack (m->gsignal). + MOVQ m_gsignal(BX), SI + CMPQ DI, SI + JNE 3(PC) + CALL runtime·badmorestackgsignal(SB) + CALL runtime·abort(SB) + + // Called from f. + // Set m->morebuf to f's caller. + NOP SP // tell vet SP changed - stop checking offsets + MOVQ 8(SP), AX // f's caller's PC + MOVQ AX, (m_morebuf+gobuf_pc)(BX) + LEAQ 16(SP), AX // f's caller's SP + MOVQ AX, (m_morebuf+gobuf_sp)(BX) + MOVQ DI, (m_morebuf+gobuf_g)(BX) + + // Call newstack on m->g0's stack. + MOVQ m_g0(BX), BX + MOVQ BX, g(CX) + MOVQ (g_sched+gobuf_sp)(BX), SP + MOVQ (g_sched+gobuf_bp)(BX), BP + CALL runtime·newstack(SB) + CALL runtime·abort(SB) // crash if newstack returns + RET + +// morestack but not preserving ctxt. +TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0 + MOVL $0, DX + JMP runtime·morestack(SB) + +// spillArgs stores return values from registers to a *internal/abi.RegArgs in R12. +TEXT ·spillArgs(SB),NOSPLIT,$0-0 + MOVQ AX, 0(R12) + MOVQ BX, 8(R12) + MOVQ CX, 16(R12) + MOVQ DI, 24(R12) + MOVQ SI, 32(R12) + MOVQ R8, 40(R12) + MOVQ R9, 48(R12) + MOVQ R10, 56(R12) + MOVQ R11, 64(R12) + MOVQ X0, 72(R12) + MOVQ X1, 80(R12) + MOVQ X2, 88(R12) + MOVQ X3, 96(R12) + MOVQ X4, 104(R12) + MOVQ X5, 112(R12) + MOVQ X6, 120(R12) + MOVQ X7, 128(R12) + MOVQ X8, 136(R12) + MOVQ X9, 144(R12) + MOVQ X10, 152(R12) + MOVQ X11, 160(R12) + MOVQ X12, 168(R12) + MOVQ X13, 176(R12) + MOVQ X14, 184(R12) + RET + +// unspillArgs loads args into registers from a *internal/abi.RegArgs in R12. +TEXT ·unspillArgs(SB),NOSPLIT,$0-0 + MOVQ 0(R12), AX + MOVQ 8(R12), BX + MOVQ 16(R12), CX + MOVQ 24(R12), DI + MOVQ 32(R12), SI + MOVQ 40(R12), R8 + MOVQ 48(R12), R9 + MOVQ 56(R12), R10 + MOVQ 64(R12), R11 + MOVQ 72(R12), X0 + MOVQ 80(R12), X1 + MOVQ 88(R12), X2 + MOVQ 96(R12), X3 + MOVQ 104(R12), X4 + MOVQ 112(R12), X5 + MOVQ 120(R12), X6 + MOVQ 128(R12), X7 + MOVQ 136(R12), X8 + MOVQ 144(R12), X9 + MOVQ 152(R12), X10 + MOVQ 160(R12), X11 + MOVQ 168(R12), X12 + MOVQ 176(R12), X13 + MOVQ 184(R12), X14 + RET + +// reflectcall: call a function with the given argument list +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). +// we don't have variable-sized frames, so we use a small number +// of constant-sized-frame functions to encode a few bits of size in the pc. +// Caution: ugly multiline assembly macros in your future! + +#define DISPATCH(NAME,MAXSIZE) \ + CMPQ CX, $MAXSIZE; \ + JA 3(PC); \ + MOVQ $NAME(SB), AX; \ + JMP AX +// Note: can't just "JMP NAME(SB)" - bad inlining results. 
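+// The DISPATCH chain in reflectcall below is a linear scan over power-of-two
+// frame sizes; conceptually (Go sketch, with callN standing in for a tail
+// call to the matching runtime·call<N>):
+//
+//	for size := uint32(16); size <= 1<<30; size *= 2 {
+//		if frameSize <= size {
+//			callN(size)
+//			return
+//		}
+//	}
+//	badreflectcall()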
+ +TEXT ·reflectcall(SB), NOSPLIT, $0-48 + MOVLQZX frameSize+32(FP), CX + DISPATCH(runtime·call16, 16) + DISPATCH(runtime·call32, 32) + DISPATCH(runtime·call64, 64) + DISPATCH(runtime·call128, 128) + DISPATCH(runtime·call256, 256) + DISPATCH(runtime·call512, 512) + DISPATCH(runtime·call1024, 1024) + DISPATCH(runtime·call2048, 2048) + DISPATCH(runtime·call4096, 4096) + DISPATCH(runtime·call8192, 8192) + DISPATCH(runtime·call16384, 16384) + DISPATCH(runtime·call32768, 32768) + DISPATCH(runtime·call65536, 65536) + DISPATCH(runtime·call131072, 131072) + DISPATCH(runtime·call262144, 262144) + DISPATCH(runtime·call524288, 524288) + DISPATCH(runtime·call1048576, 1048576) + DISPATCH(runtime·call2097152, 2097152) + DISPATCH(runtime·call4194304, 4194304) + DISPATCH(runtime·call8388608, 8388608) + DISPATCH(runtime·call16777216, 16777216) + DISPATCH(runtime·call33554432, 33554432) + DISPATCH(runtime·call67108864, 67108864) + DISPATCH(runtime·call134217728, 134217728) + DISPATCH(runtime·call268435456, 268435456) + DISPATCH(runtime·call536870912, 536870912) + DISPATCH(runtime·call1073741824, 1073741824) + MOVQ $runtime·badreflectcall(SB), AX + JMP AX + +#define CALLFN(NAME,MAXSIZE) \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ + NO_LOCAL_POINTERS; \ + /* copy arguments to stack */ \ + MOVQ stackArgs+16(FP), SI; \ + MOVLQZX stackArgsSize+24(FP), CX; \ + MOVQ SP, DI; \ + REP;MOVSB; \ + /* set up argument registers */ \ + MOVQ regArgs+40(FP), R12; \ + CALL ·unspillArgs(SB); \ + /* call function */ \ + MOVQ f+8(FP), DX; \ + PCDATA $PCDATA_StackMapIndex, $0; \ + MOVQ (DX), R12; \ + CALL R12; \ + /* copy register return values back */ \ + MOVQ regArgs+40(FP), R12; \ + CALL ·spillArgs(SB); \ + MOVLQZX stackArgsSize+24(FP), CX; \ + MOVLQZX stackRetOffset+28(FP), BX; \ + MOVQ stackArgs+16(FP), DI; \ + MOVQ stackArgsType+0(FP), DX; \ + MOVQ SP, SI; \ + ADDQ BX, DI; \ + ADDQ BX, SI; \ + SUBQ BX, CX; \ + CALL callRet<>(SB); \ + RET + +// callRet copies return values back at the end of call*. This is a +// separate function so it can allocate stack space for the arguments +// to reflectcallmove. It does not follow the Go ABI; it expects its +// arguments in registers. +TEXT callRet<>(SB), NOSPLIT, $40-0 + NO_LOCAL_POINTERS + MOVQ DX, 0(SP) + MOVQ DI, 8(SP) + MOVQ SI, 16(SP) + MOVQ CX, 24(SP) + MOVQ R12, 32(SP) + CALL runtime·reflectcallmove(SB) + RET + +CALLFN(·call16, 16) +CALLFN(·call32, 32) +CALLFN(·call64, 64) +CALLFN(·call128, 128) +CALLFN(·call256, 256) +CALLFN(·call512, 512) +CALLFN(·call1024, 1024) +CALLFN(·call2048, 2048) +CALLFN(·call4096, 4096) +CALLFN(·call8192, 8192) +CALLFN(·call16384, 16384) +CALLFN(·call32768, 32768) +CALLFN(·call65536, 65536) +CALLFN(·call131072, 131072) +CALLFN(·call262144, 262144) +CALLFN(·call524288, 524288) +CALLFN(·call1048576, 1048576) +CALLFN(·call2097152, 2097152) +CALLFN(·call4194304, 4194304) +CALLFN(·call8388608, 8388608) +CALLFN(·call16777216, 16777216) +CALLFN(·call33554432, 33554432) +CALLFN(·call67108864, 67108864) +CALLFN(·call134217728, 134217728) +CALLFN(·call268435456, 268435456) +CALLFN(·call536870912, 536870912) +CALLFN(·call1073741824, 1073741824) + +TEXT runtime·procyield(SB),NOSPLIT,$0-0 + MOVL cycles+0(FP), AX +again: + PAUSE + SUBL $1, AX + JNZ again + RET + + +TEXT ·publicationBarrier(SB),NOSPLIT,$0-0 + // Stores are already ordered on x86, so this is just a + // compile barrier. + RET + +// Save state of caller into g->sched, +// but using fake PC from systemstack_switch. 
+// Must only be called from functions with frame pointer +// and without locals ($0) or else unwinding from +// systemstack_switch is incorrect. +// Smashes R9. +TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 + // Take systemstack_switch PC and add 8 bytes to skip + // the prologue. The final location does not matter + // as long as we are between the prologue and the epilogue. + MOVQ $runtime·systemstack_switch+8(SB), R9 + MOVQ R9, (g_sched+gobuf_pc)(R14) + LEAQ 8(SP), R9 + MOVQ R9, (g_sched+gobuf_sp)(R14) + MOVQ $0, (g_sched+gobuf_ret)(R14) + MOVQ BP, (g_sched+gobuf_bp)(R14) + // Assert ctxt is zero. See func save. + MOVQ (g_sched+gobuf_ctxt)(R14), R9 + TESTQ R9, R9 + JZ 2(PC) + CALL runtime·abort(SB) + RET + +// func asmcgocall_no_g(fn, arg unsafe.Pointer) +// Call fn(arg) aligned appropriately for the gcc ABI. +// Called on a system stack, and there may be no g yet (during needm). +TEXT ·asmcgocall_no_g(SB),NOSPLIT,$32-16 + MOVQ fn+0(FP), AX + MOVQ arg+8(FP), BX + MOVQ SP, DX + ANDQ $~15, SP // alignment + MOVQ DX, 8(SP) + MOVQ BX, DI // DI = first argument in AMD64 ABI + MOVQ BX, CX // CX = first argument in Win64 + CALL AX + MOVQ 8(SP), DX + MOVQ DX, SP + RET + +// asmcgocall_landingpad calls AX with BX as argument. +// Must be called on the system stack. +TEXT ·asmcgocall_landingpad(SB),NOSPLIT,$0-0 +#ifdef GOOS_windows + // Make sure we have enough room for 4 stack-backed fast-call + // registers as per Windows amd64 calling convention. + ADJSP $32 + // On Windows, asmcgocall_landingpad acts as landing pad for exceptions + // thrown in the cgo call. Exceptions that reach this function will be + // handled by runtime.sehtramp thanks to the SEH metadata added + // by the compiler. + // Note that runtime.sehtramp can't be attached directly to asmcgocall + // because its initial stack pointer can be outside the system stack bounds, + // and Windows stops the stack unwinding without calling the exception handler + // when it reaches that point. + MOVQ BX, CX // CX = first argument in Win64 + CALL AX + // The exception handler is not called if the next instruction is part of + // the epilogue, which includes the RET instruction, so we need to add a NOP here. + BYTE $0x90 + ADJSP $-32 + RET +#endif + // Tail call AX on non-Windows, as the extra stack frame is not needed. + MOVQ BX, DI // DI = first argument in AMD64 ABI + JMP AX + +// func asmcgocall(fn, arg unsafe.Pointer) int32 +// Call fn(arg) on the scheduler stack, +// aligned appropriately for the gcc ABI. +// See cgocall.go for more details. +TEXT ·asmcgocall(SB),NOSPLIT,$0-20 + MOVQ fn+0(FP), AX + MOVQ arg+8(FP), BX + + MOVQ SP, DX + + // Figure out if we need to switch to m->g0 stack. + // We get called to create new OS threads too, and those + // come in on the m->g0 stack already. Or we might already + // be on the m->gsignal stack. + get_tls(CX) + MOVQ g(CX), DI + CMPQ DI, $0 + JEQ nosave + MOVQ g_m(DI), R8 + MOVQ m_gsignal(R8), SI + CMPQ DI, SI + JEQ nosave + MOVQ m_g0(R8), SI + CMPQ DI, SI + JEQ nosave + + // Switch to system stack. + // The original frame pointer is stored in BP, + // which is useful for stack unwinding. + CALL gosave_systemstack_switch<>(SB) + MOVQ SI, g(CX) + MOVQ (g_sched+gobuf_sp)(SI), SP + + // Now on a scheduling stack (a pthread-created stack). 
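+ // The code below saves the stack depth rather than the raw SP, because the
+ // goroutine stack can be copied if the C code calls back into Go; in Go
+ // terms:
+ //	depth := g.stack.hi - sp // stored at 0(SP)
+ //	... C call ...
+ //	sp = g.stack.hi - depth // still correct if the stack moved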
+ SUBQ $16, SP + ANDQ $~15, SP // alignment for gcc ABI + MOVQ DI, 8(SP) // save g + MOVQ (g_stack+stack_hi)(DI), DI + SUBQ DX, DI + MOVQ DI, 0(SP) // save depth in stack (can't just save SP, as stack might be copied during a callback) + CALL runtime·asmcgocall_landingpad(SB) + + // Restore registers, g, stack pointer. + get_tls(CX) + MOVQ 8(SP), DI + MOVQ (g_stack+stack_hi)(DI), SI + SUBQ 0(SP), SI + MOVQ DI, g(CX) + MOVQ SI, SP + + MOVL AX, ret+16(FP) + RET + +nosave: + // Running on a system stack, perhaps even without a g. + // Having no g can happen during thread creation or thread teardown + // (see needm/dropm on Solaris, for example). + // This code is like the above sequence but without saving/restoring g + // and without worrying about the stack moving out from under us + // (because we're on a system stack, not a goroutine stack). + // The above code could be used directly if already on a system stack, + // but then the only path through this code would be a rare case on Solaris. + // Using this code for all "already on system stack" calls exercises it more, + // which should help keep it correct. + SUBQ $16, SP + ANDQ $~15, SP + MOVQ $0, 8(SP) // where above code stores g, in case someone looks during debugging + MOVQ DX, 0(SP) // save original stack pointer + CALL runtime·asmcgocall_landingpad(SB) + MOVQ 0(SP), SI // restore original stack pointer + MOVQ SI, SP + MOVL AX, ret+16(FP) + RET + +#ifdef GOOS_windows +// Dummy TLS that's used on Windows so that we don't crash trying +// to restore the G register in needm. needm and its callees are +// very careful never to actually use the G, the TLS just can't be +// unset since we're in Go code. +GLOBL zeroTLS<>(SB),RODATA,$const_tlsSize +#endif + +// func cgocallback(fn, frame unsafe.Pointer, ctxt uintptr) +// See cgocall.go for more details. +TEXT ·cgocallback(SB),NOSPLIT,$24-24 + NO_LOCAL_POINTERS + + // Skip cgocallbackg, just dropm when fn is nil, and frame is the saved g. + // It is used to dropm while thread is exiting. + MOVQ fn+0(FP), AX + CMPQ AX, $0 + JNE loadg + // Restore the g from frame. + get_tls(CX) + MOVQ frame+8(FP), BX + MOVQ BX, g(CX) + JMP dropm + +loadg: + // If g is nil, Go did not create the current thread, + // or if this thread never called into Go on pthread platforms. + // Call needm to obtain one m for temporary use. + // In this case, we're running on the thread stack, so there's + // lots of space, but the linker doesn't know. Hide the call from + // the linker analysis by using an indirect call through AX. + get_tls(CX) +#ifdef GOOS_windows + MOVL $0, BX + CMPQ CX, $0 + JEQ 2(PC) +#endif + MOVQ g(CX), BX + CMPQ BX, $0 + JEQ needm + MOVQ g_m(BX), BX + MOVQ BX, savedm-8(SP) // saved copy of oldm + JMP havem +needm: +#ifdef GOOS_windows + // Set up a dummy TLS value. needm is careful not to use it, + // but it needs to be there to prevent autogenerated code from + // crashing when it loads from it. + // We don't need to clear it or anything later because needm + // will set up TLS properly. + MOVQ $zeroTLS<>(SB), DI + CALL runtime·settls(SB) +#endif + // On some platforms (Windows) we cannot call needm through + // an ABI wrapper because there's no TLS set up, and the ABI + // wrapper will try to restore the G register (R14) from TLS. + // Clear X15 because Go expects it and we're not calling + // through a wrapper, but otherwise avoid setting the G + // register in the wrapper and call needm directly. It + // takes no arguments and doesn't return any values so + // there's no need to handle that. 
Clear R14 so that there's + // a bad value in there, in case needm tries to use it. + XORPS X15, X15 + XORQ R14, R14 + MOVQ $runtime·needAndBindM(SB), AX + CALL AX + MOVQ $0, savedm-8(SP) + get_tls(CX) + MOVQ g(CX), BX + MOVQ g_m(BX), BX + + // Set m->sched.sp = SP, so that if a panic happens + // during the function we are about to execute, it will + // have a valid SP to run on the g0 stack. + // The next few lines (after the havem label) + // will save this SP onto the stack and then write + // the same SP back to m->sched.sp. That seems redundant, + // but if an unrecovered panic happens, unwindm will + // restore the g->sched.sp from the stack location + // and then systemstack will try to use it. If we don't set it here, + // that restored SP will be uninitialized (typically 0) and + // will not be usable. + MOVQ m_g0(BX), SI + MOVQ SP, (g_sched+gobuf_sp)(SI) + +havem: + // Now there's a valid m, and we're running on its m->g0. + // Save current m->g0->sched.sp on stack and then set it to SP. + // Save current sp in m->g0->sched.sp in preparation for + // switch back to m->curg stack. + // NOTE: unwindm knows that the saved g->sched.sp is at 0(SP). + MOVQ m_g0(BX), SI + MOVQ (g_sched+gobuf_sp)(SI), AX + MOVQ AX, 0(SP) + MOVQ SP, (g_sched+gobuf_sp)(SI) + + // Switch to m->curg stack and call runtime.cgocallbackg. + // Because we are taking over the execution of m->curg + // but *not* resuming what had been running, we need to + // save that information (m->curg->sched) so we can restore it. + // We can restore m->curg->sched.sp easily, because calling + // runtime.cgocallbackg leaves SP unchanged upon return. + // To save m->curg->sched.pc, we push it onto the curg stack and + // open a frame the same size as cgocallback's g0 frame. + // Once we switch to the curg stack, the pushed PC will appear + // to be the return PC of cgocallback, so that the traceback + // will seamlessly trace back into the earlier calls. + MOVQ m_curg(BX), SI + MOVQ SI, g(CX) + MOVQ (g_sched+gobuf_sp)(SI), DI // prepare stack as DI + MOVQ (g_sched+gobuf_pc)(SI), BX + MOVQ BX, -8(DI) // "push" return PC on the g stack + // Gather our arguments into registers. + MOVQ fn+0(FP), BX + MOVQ frame+8(FP), CX + MOVQ ctxt+16(FP), DX + // Compute the size of the frame, including return PC and, if + // GOEXPERIMENT=framepointer, the saved base pointer + LEAQ fn+0(FP), AX + SUBQ SP, AX // AX is our actual frame size + SUBQ AX, DI // Allocate the same frame size on the g stack + MOVQ DI, SP + + MOVQ BX, 0(SP) + MOVQ CX, 8(SP) + MOVQ DX, 16(SP) + MOVQ $runtime·cgocallbackg(SB), AX + CALL AX // indirect call to bypass nosplit check. We're on a different stack now. + + // Compute the size of the frame again. FP and SP have + // completely different values here than they did above, + // but only their difference matters. + LEAQ fn+0(FP), AX + SUBQ SP, AX + + // Restore g->sched (== m->curg->sched) from saved values. + get_tls(CX) + MOVQ g(CX), SI + MOVQ SP, DI + ADDQ AX, DI + MOVQ -8(DI), BX + MOVQ BX, (g_sched+gobuf_pc)(SI) + MOVQ DI, (g_sched+gobuf_sp)(SI) + + // Switch back to m->g0's stack and restore m->g0->sched.sp. + // (Unlike m->curg, the g0 goroutine never uses sched.pc, + // so we do not have to restore it.) + MOVQ g(CX), BX + MOVQ g_m(BX), BX + MOVQ m_g0(BX), SI + MOVQ SI, g(CX) + MOVQ (g_sched+gobuf_sp)(SI), SP + MOVQ 0(SP), AX + MOVQ AX, (g_sched+gobuf_sp)(SI) + + // If the m on entry was nil, we called needm above to borrow an m, + // 1. for the duration of the call on non-pthread platforms, + // 2. 
or the duration of the C thread alive on pthread platforms. + // If the m on entry wasn't nil, + // 1. the thread might be a Go thread, + // 2. or it wasn't the first call from a C thread on pthread platforms, + // since then we skip dropm to reuse the m in the first call. + MOVQ savedm-8(SP), BX + CMPQ BX, $0 + JNE done + + // Skip dropm to reuse it in the next call, when a pthread key has been created. + MOVQ _cgo_pthread_key_created(SB), AX + // It means cgo is disabled when _cgo_pthread_key_created is a nil pointer, need dropm. + CMPQ AX, $0 + JEQ dropm + CMPQ (AX), $0 + JNE done + +dropm: + MOVQ $runtime·dropm(SB), AX + CALL AX +#ifdef GOOS_windows + // We need to clear the TLS pointer in case the next + // thread that comes into Go tries to reuse that space + // but uses the same M. + XORQ DI, DI + CALL runtime·settls(SB) +#endif +done: + + // Done! + RET + +// func setg(gg *g) +// set g. for use by needm. +TEXT runtime·setg(SB), NOSPLIT, $0-8 + MOVQ gg+0(FP), BX + get_tls(CX) + MOVQ BX, g(CX) + RET + +// void setg_gcc(G*); set g called from gcc. +TEXT setg_gcc<>(SB),NOSPLIT,$0 + get_tls(AX) + MOVQ DI, g(AX) + MOVQ DI, R14 // set the g register + RET + +TEXT runtime·abort(SB),NOSPLIT,$0-0 + INT $3 +loop: + JMP loop + +// check that SP is in range [g->stack.lo, g->stack.hi) +TEXT runtime·stackcheck(SB), NOSPLIT|NOFRAME, $0-0 + get_tls(CX) + MOVQ g(CX), AX + CMPQ (g_stack+stack_hi)(AX), SP + JHI 2(PC) + CALL runtime·abort(SB) + CMPQ SP, (g_stack+stack_lo)(AX) + JHI 2(PC) + CALL runtime·abort(SB) + RET + +// func cputicks() int64 +TEXT runtime·cputicks(SB),NOSPLIT,$0-0 + CMPB internal∕cpu·X86+const_offsetX86HasRDTSCP(SB), $1 + JNE fences + // Instruction stream serializing RDTSCP is supported. + // RDTSCP is supported by Intel Nehalem (2008) and + // AMD K8 Rev. F (2006) and newer. + RDTSCP +done: + SHLQ $32, DX + ADDQ DX, AX + MOVQ AX, ret+0(FP) + RET +fences: + // MFENCE is instruction stream serializing and flushes the + // store buffers on AMD. The serialization semantics of LFENCE on AMD + // are dependent on MSR C001_1029 and CPU generation. + // LFENCE on Intel does wait for all previous instructions to have executed. + // Intel recommends MFENCE;LFENCE in its manuals before RDTSC to have all + // previous instructions executed and all previous loads and stores to globally visible. + // Using MFENCE;LFENCE here aligns the serializing properties without + // runtime detection of CPU manufacturer. + MFENCE + LFENCE + RDTSC + JMP done + +// func memhash(p unsafe.Pointer, h, s uintptr) uintptr +// hash function using AES hardware instructions +TEXT runtime·memhash(SB),NOSPLIT,$0-32 + // AX = ptr to data + // BX = seed + // CX = size + CMPB runtime·useAeshash(SB), $0 + JEQ noaes + JMP aeshashbody<>(SB) +noaes: + JMP runtime·memhashFallback(SB) + +// func strhash(p unsafe.Pointer, h uintptr) uintptr +TEXT runtime·strhash(SB),NOSPLIT,$0-24 + // AX = ptr to string struct + // BX = seed + CMPB runtime·useAeshash(SB), $0 + JEQ noaes + MOVQ 8(AX), CX // length of string + MOVQ (AX), AX // string data + JMP aeshashbody<>(SB) +noaes: + JMP runtime·strhashFallback(SB) + +// AX: data +// BX: hash seed +// CX: length +// At return: AX = return value +TEXT aeshashbody<>(SB),NOSPLIT,$0-0 + // Fill an SSE register with our seeds. 
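+ // After the next three instructions the 128-bit lane is laid out as
+ // (sketch, low bytes first):
+ //	X0 = seed[0..7] | len16 len16 len16 len16
+ // so two keys that differ only in length are hashed with different
+ // scrambled seeds before any data is mixed in.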
+ MOVQ BX, X0 // 64 bits of per-table hash seed + PINSRW $4, CX, X0 // 16 bits of length + PSHUFHW $0, X0, X0 // repeat length 4 times total + MOVO X0, X1 // save unscrambled seed + PXOR runtime·aeskeysched(SB), X0 // xor in per-process seed + AESENC X0, X0 // scramble seed + + CMPQ CX, $16 + JB aes0to15 + JE aes16 + CMPQ CX, $32 + JBE aes17to32 + CMPQ CX, $64 + JBE aes33to64 + CMPQ CX, $128 + JBE aes65to128 + JMP aes129plus + +aes0to15: + TESTQ CX, CX + JE aes0 + + ADDQ $16, AX + TESTW $0xff0, AX + JE endofpage + + // 16 bytes loaded at this address won't cross + // a page boundary, so we can load it directly. + MOVOU -16(AX), X1 + ADDQ CX, CX + MOVQ $masks<>(SB), AX + PAND (AX)(CX*8), X1 +final1: + PXOR X0, X1 // xor data with seed + AESENC X1, X1 // scramble combo 3 times + AESENC X1, X1 + AESENC X1, X1 + MOVQ X1, AX // return X1 + RET + +endofpage: + // address ends in 1111xxxx. Might be up against + // a page boundary, so load ending at last byte. + // Then shift bytes down using pshufb. + MOVOU -32(AX)(CX*1), X1 + ADDQ CX, CX + MOVQ $shifts<>(SB), AX + PSHUFB (AX)(CX*8), X1 + JMP final1 + +aes0: + // Return scrambled input seed + AESENC X0, X0 + MOVQ X0, AX // return X0 + RET + +aes16: + MOVOU (AX), X1 + JMP final1 + +aes17to32: + // make second starting seed + PXOR runtime·aeskeysched+16(SB), X1 + AESENC X1, X1 + + // load data to be hashed + MOVOU (AX), X2 + MOVOU -16(AX)(CX*1), X3 + + // xor with seed + PXOR X0, X2 + PXOR X1, X3 + + // scramble 3 times + AESENC X2, X2 + AESENC X3, X3 + AESENC X2, X2 + AESENC X3, X3 + AESENC X2, X2 + AESENC X3, X3 + + // combine results + PXOR X3, X2 + MOVQ X2, AX // return X2 + RET + +aes33to64: + // make 3 more starting seeds + MOVO X1, X2 + MOVO X1, X3 + PXOR runtime·aeskeysched+16(SB), X1 + PXOR runtime·aeskeysched+32(SB), X2 + PXOR runtime·aeskeysched+48(SB), X3 + AESENC X1, X1 + AESENC X2, X2 + AESENC X3, X3 + + MOVOU (AX), X4 + MOVOU 16(AX), X5 + MOVOU -32(AX)(CX*1), X6 + MOVOU -16(AX)(CX*1), X7 + + PXOR X0, X4 + PXOR X1, X5 + PXOR X2, X6 + PXOR X3, X7 + + AESENC X4, X4 + AESENC X5, X5 + AESENC X6, X6 + AESENC X7, X7 + + AESENC X4, X4 + AESENC X5, X5 + AESENC X6, X6 + AESENC X7, X7 + + AESENC X4, X4 + AESENC X5, X5 + AESENC X6, X6 + AESENC X7, X7 + + PXOR X6, X4 + PXOR X7, X5 + PXOR X5, X4 + MOVQ X4, AX // return X4 + RET + +aes65to128: + // make 7 more starting seeds + MOVO X1, X2 + MOVO X1, X3 + MOVO X1, X4 + MOVO X1, X5 + MOVO X1, X6 + MOVO X1, X7 + PXOR runtime·aeskeysched+16(SB), X1 + PXOR runtime·aeskeysched+32(SB), X2 + PXOR runtime·aeskeysched+48(SB), X3 + PXOR runtime·aeskeysched+64(SB), X4 + PXOR runtime·aeskeysched+80(SB), X5 + PXOR runtime·aeskeysched+96(SB), X6 + PXOR runtime·aeskeysched+112(SB), X7 + AESENC X1, X1 + AESENC X2, X2 + AESENC X3, X3 + AESENC X4, X4 + AESENC X5, X5 + AESENC X6, X6 + AESENC X7, X7 + + // load data + MOVOU (AX), X8 + MOVOU 16(AX), X9 + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + MOVOU -64(AX)(CX*1), X12 + MOVOU -48(AX)(CX*1), X13 + MOVOU -32(AX)(CX*1), X14 + MOVOU -16(AX)(CX*1), X15 + + // xor with seed + PXOR X0, X8 + PXOR X1, X9 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X13 + PXOR X6, X14 + PXOR X7, X15 + + // scramble 3 times + AESENC X8, X8 + AESENC X9, X9 + AESENC X10, X10 + AESENC X11, X11 + AESENC X12, X12 + AESENC X13, X13 + AESENC X14, X14 + AESENC X15, X15 + + AESENC X8, X8 + AESENC X9, X9 + AESENC X10, X10 + AESENC X11, X11 + AESENC X12, X12 + AESENC X13, X13 + AESENC X14, X14 + AESENC X15, X15 + + AESENC X8, X8 + AESENC X9, X9 + AESENC X10, X10 + AESENC X11, X11 + AESENC X12, X12 
+ AESENC X13, X13 + AESENC X14, X14 + AESENC X15, X15 + + // combine results + PXOR X12, X8 + PXOR X13, X9 + PXOR X14, X10 + PXOR X15, X11 + PXOR X10, X8 + PXOR X11, X9 + PXOR X9, X8 + // X15 must be zero on return + PXOR X15, X15 + MOVQ X8, AX // return X8 + RET + +aes129plus: + // make 7 more starting seeds + MOVO X1, X2 + MOVO X1, X3 + MOVO X1, X4 + MOVO X1, X5 + MOVO X1, X6 + MOVO X1, X7 + PXOR runtime·aeskeysched+16(SB), X1 + PXOR runtime·aeskeysched+32(SB), X2 + PXOR runtime·aeskeysched+48(SB), X3 + PXOR runtime·aeskeysched+64(SB), X4 + PXOR runtime·aeskeysched+80(SB), X5 + PXOR runtime·aeskeysched+96(SB), X6 + PXOR runtime·aeskeysched+112(SB), X7 + AESENC X1, X1 + AESENC X2, X2 + AESENC X3, X3 + AESENC X4, X4 + AESENC X5, X5 + AESENC X6, X6 + AESENC X7, X7 + + // start with last (possibly overlapping) block + MOVOU -128(AX)(CX*1), X8 + MOVOU -112(AX)(CX*1), X9 + MOVOU -96(AX)(CX*1), X10 + MOVOU -80(AX)(CX*1), X11 + MOVOU -64(AX)(CX*1), X12 + MOVOU -48(AX)(CX*1), X13 + MOVOU -32(AX)(CX*1), X14 + MOVOU -16(AX)(CX*1), X15 + + // xor in seed + PXOR X0, X8 + PXOR X1, X9 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X13 + PXOR X6, X14 + PXOR X7, X15 + + // compute number of remaining 128-byte blocks + DECQ CX + SHRQ $7, CX + + PCALIGN $16 +aesloop: + // scramble state + AESENC X8, X8 + AESENC X9, X9 + AESENC X10, X10 + AESENC X11, X11 + AESENC X12, X12 + AESENC X13, X13 + AESENC X14, X14 + AESENC X15, X15 + + // scramble state, xor in a block + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + AESENC X0, X8 + AESENC X1, X9 + AESENC X2, X10 + AESENC X3, X11 + MOVOU 64(AX), X4 + MOVOU 80(AX), X5 + MOVOU 96(AX), X6 + MOVOU 112(AX), X7 + AESENC X4, X12 + AESENC X5, X13 + AESENC X6, X14 + AESENC X7, X15 + + ADDQ $128, AX + DECQ CX + JNE aesloop + + // 3 more scrambles to finish + AESENC X8, X8 + AESENC X9, X9 + AESENC X10, X10 + AESENC X11, X11 + AESENC X12, X12 + AESENC X13, X13 + AESENC X14, X14 + AESENC X15, X15 + AESENC X8, X8 + AESENC X9, X9 + AESENC X10, X10 + AESENC X11, X11 + AESENC X12, X12 + AESENC X13, X13 + AESENC X14, X14 + AESENC X15, X15 + AESENC X8, X8 + AESENC X9, X9 + AESENC X10, X10 + AESENC X11, X11 + AESENC X12, X12 + AESENC X13, X13 + AESENC X14, X14 + AESENC X15, X15 + + PXOR X12, X8 + PXOR X13, X9 + PXOR X14, X10 + PXOR X15, X11 + PXOR X10, X8 + PXOR X11, X9 + PXOR X9, X8 + // X15 must be zero on return + PXOR X15, X15 + MOVQ X8, AX // return X8 + RET + +// func memhash32(p unsafe.Pointer, h uintptr) uintptr +// ABIInternal for performance. +TEXT runtime·memhash32(SB),NOSPLIT,$0-24 + // AX = ptr to data + // BX = seed + CMPB runtime·useAeshash(SB), $0 + JEQ noaes + MOVQ BX, X0 // X0 = seed + PINSRD $2, (AX), X0 // data + AESENC runtime·aeskeysched+0(SB), X0 + AESENC runtime·aeskeysched+16(SB), X0 + AESENC runtime·aeskeysched+32(SB), X0 + MOVQ X0, AX // return X0 + RET +noaes: + JMP runtime·memhash32Fallback(SB) + +// func memhash64(p unsafe.Pointer, h uintptr) uintptr +// ABIInternal for performance. +TEXT runtime·memhash64(SB),NOSPLIT,$0-24 + // AX = ptr to data + // BX = seed + CMPB runtime·useAeshash(SB), $0 + JEQ noaes + MOVQ BX, X0 // X0 = seed + PINSRQ $1, (AX), X0 // data + AESENC runtime·aeskeysched+0(SB), X0 + AESENC runtime·aeskeysched+16(SB), X0 + AESENC runtime·aeskeysched+32(SB), X0 + MOVQ X0, AX // return X0 + RET +noaes: + JMP runtime·memhash64Fallback(SB) + +// simple mask to get rid of data in the high part of the register. 
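+// The 0-to-15-byte path above doubles the length in CX and indexes this
+// table with "PAND (AX)(CX*8)", so each entry is 16 bytes and entry n keeps
+// only the low n bytes of the loaded word. An added worked example: n=3
+// selects masks<>+0x30 below, whose two quadwords are $0x0000000000ffffff
+// and $0, masking the 16-byte load down to exactly 3 bytes.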
+DATA masks<>+0x00(SB)/8, $0x0000000000000000 +DATA masks<>+0x08(SB)/8, $0x0000000000000000 +DATA masks<>+0x10(SB)/8, $0x00000000000000ff +DATA masks<>+0x18(SB)/8, $0x0000000000000000 +DATA masks<>+0x20(SB)/8, $0x000000000000ffff +DATA masks<>+0x28(SB)/8, $0x0000000000000000 +DATA masks<>+0x30(SB)/8, $0x0000000000ffffff +DATA masks<>+0x38(SB)/8, $0x0000000000000000 +DATA masks<>+0x40(SB)/8, $0x00000000ffffffff +DATA masks<>+0x48(SB)/8, $0x0000000000000000 +DATA masks<>+0x50(SB)/8, $0x000000ffffffffff +DATA masks<>+0x58(SB)/8, $0x0000000000000000 +DATA masks<>+0x60(SB)/8, $0x0000ffffffffffff +DATA masks<>+0x68(SB)/8, $0x0000000000000000 +DATA masks<>+0x70(SB)/8, $0x00ffffffffffffff +DATA masks<>+0x78(SB)/8, $0x0000000000000000 +DATA masks<>+0x80(SB)/8, $0xffffffffffffffff +DATA masks<>+0x88(SB)/8, $0x0000000000000000 +DATA masks<>+0x90(SB)/8, $0xffffffffffffffff +DATA masks<>+0x98(SB)/8, $0x00000000000000ff +DATA masks<>+0xa0(SB)/8, $0xffffffffffffffff +DATA masks<>+0xa8(SB)/8, $0x000000000000ffff +DATA masks<>+0xb0(SB)/8, $0xffffffffffffffff +DATA masks<>+0xb8(SB)/8, $0x0000000000ffffff +DATA masks<>+0xc0(SB)/8, $0xffffffffffffffff +DATA masks<>+0xc8(SB)/8, $0x00000000ffffffff +DATA masks<>+0xd0(SB)/8, $0xffffffffffffffff +DATA masks<>+0xd8(SB)/8, $0x000000ffffffffff +DATA masks<>+0xe0(SB)/8, $0xffffffffffffffff +DATA masks<>+0xe8(SB)/8, $0x0000ffffffffffff +DATA masks<>+0xf0(SB)/8, $0xffffffffffffffff +DATA masks<>+0xf8(SB)/8, $0x00ffffffffffffff +GLOBL masks<>(SB),RODATA,$256 + +// func checkASM() bool +TEXT ·checkASM(SB),NOSPLIT,$0-1 + // check that masks<>(SB) and shifts<>(SB) are aligned to 16-byte + MOVQ $masks<>(SB), AX + MOVQ $shifts<>(SB), BX + ORQ BX, AX + TESTQ $15, AX + SETEQ ret+0(FP) + RET + +// these are arguments to pshufb. They move data down from +// the high bytes of the register to the low bytes of the register. +// index is how many bytes to move. 
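+// An added worked example (not in the original comments): for n=3 the
+// endofpage path selects shifts<>+0x30 below; its low index bytes
+// 0x0d,0x0e,0x0f route the top three bytes of the register down to bytes
+// 0-2, and the 0xff entries, having their high bit set, make PSHUFB write
+// zero bytes there.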
+DATA shifts<>+0x00(SB)/8, $0x0000000000000000 +DATA shifts<>+0x08(SB)/8, $0x0000000000000000 +DATA shifts<>+0x10(SB)/8, $0xffffffffffffff0f +DATA shifts<>+0x18(SB)/8, $0xffffffffffffffff +DATA shifts<>+0x20(SB)/8, $0xffffffffffff0f0e +DATA shifts<>+0x28(SB)/8, $0xffffffffffffffff +DATA shifts<>+0x30(SB)/8, $0xffffffffff0f0e0d +DATA shifts<>+0x38(SB)/8, $0xffffffffffffffff +DATA shifts<>+0x40(SB)/8, $0xffffffff0f0e0d0c +DATA shifts<>+0x48(SB)/8, $0xffffffffffffffff +DATA shifts<>+0x50(SB)/8, $0xffffff0f0e0d0c0b +DATA shifts<>+0x58(SB)/8, $0xffffffffffffffff +DATA shifts<>+0x60(SB)/8, $0xffff0f0e0d0c0b0a +DATA shifts<>+0x68(SB)/8, $0xffffffffffffffff +DATA shifts<>+0x70(SB)/8, $0xff0f0e0d0c0b0a09 +DATA shifts<>+0x78(SB)/8, $0xffffffffffffffff +DATA shifts<>+0x80(SB)/8, $0x0f0e0d0c0b0a0908 +DATA shifts<>+0x88(SB)/8, $0xffffffffffffffff +DATA shifts<>+0x90(SB)/8, $0x0e0d0c0b0a090807 +DATA shifts<>+0x98(SB)/8, $0xffffffffffffff0f +DATA shifts<>+0xa0(SB)/8, $0x0d0c0b0a09080706 +DATA shifts<>+0xa8(SB)/8, $0xffffffffffff0f0e +DATA shifts<>+0xb0(SB)/8, $0x0c0b0a0908070605 +DATA shifts<>+0xb8(SB)/8, $0xffffffffff0f0e0d +DATA shifts<>+0xc0(SB)/8, $0x0b0a090807060504 +DATA shifts<>+0xc8(SB)/8, $0xffffffff0f0e0d0c +DATA shifts<>+0xd0(SB)/8, $0x0a09080706050403 +DATA shifts<>+0xd8(SB)/8, $0xffffff0f0e0d0c0b +DATA shifts<>+0xe0(SB)/8, $0x0908070605040302 +DATA shifts<>+0xe8(SB)/8, $0xffff0f0e0d0c0b0a +DATA shifts<>+0xf0(SB)/8, $0x0807060504030201 +DATA shifts<>+0xf8(SB)/8, $0xff0f0e0d0c0b0a09 +GLOBL shifts<>(SB),RODATA,$256 + +TEXT runtime·return0(SB), NOSPLIT, $0 + MOVL $0, AX + RET + + +// Called from cgo wrappers, this function returns g->m->curg.stack.hi. +// Must obey the gcc calling convention. +TEXT _cgo_topofstack(SB),NOSPLIT,$0 + get_tls(CX) + MOVQ g(CX), AX + MOVQ g_m(AX), AX + MOVQ m_curg(AX), AX + MOVQ (g_stack+stack_hi)(AX), AX + RET + +// The top-most function running on a goroutine +// returns to goexit+PCQuantum. +TEXT runtime·goexit(SB),NOSPLIT|TOPFRAME|NOFRAME,$0-0 + BYTE $0x90 // NOP + CALL runtime·goexit1(SB) // does not return + // traceback from goexit1 must hit code range of goexit + BYTE $0x90 // NOP + +// This is called from .init_array and follows the platform, not Go, ABI. +TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0 + PUSHQ R15 // The access to global variables below implicitly uses R15, which is callee-save + MOVQ runtime·lastmoduledatap(SB), AX + MOVQ DI, moduledata_next(AX) + MOVQ DI, runtime·lastmoduledatap(SB) + POPQ R15 + RET + +// Initialize special registers then jump to sigpanic. +// This function is injected from the signal handler for panicking +// signals. It is quite painful to set X15 in the signal context, +// so we do it here. +TEXT ·sigpanic0(SB),NOSPLIT,$0-0 + get_tls(R14) + MOVQ g(R14), R14 +#ifndef GOOS_plan9 + XORPS X15, X15 +#endif + JMP ·sigpanic(SB) + +// gcWriteBarrier informs the GC about heap pointer writes. +// +// gcWriteBarrier returns space in a write barrier buffer which +// should be filled in by the caller. +// gcWriteBarrier does NOT follow the Go ABI. It accepts the +// number of bytes of buffer needed in R11, and returns a pointer +// to the buffer space in R11. +// It clobbers FLAGS. It does not clobber any general-purpose registers, +// but may clobber others (e.g., SSE registers). 
+// Typical use would be, when doing *(CX+88) = AX +// CMPL $0, runtime.writeBarrier(SB) +// JEQ dowrite +// CALL runtime.gcBatchBarrier2(SB) +// MOVQ AX, (R11) +// MOVQ 88(CX), DX +// MOVQ DX, 8(R11) +// dowrite: +// MOVQ AX, 88(CX) +TEXT gcWriteBarrier<>(SB),NOSPLIT,$112 + // Save the registers clobbered by the fast path. This is slightly + // faster than having the caller spill these. + MOVQ R12, 96(SP) + MOVQ R13, 104(SP) +retry: + // TODO: Consider passing g.m.p in as an argument so they can be shared + // across a sequence of write barriers. + MOVQ g_m(R14), R13 + MOVQ m_p(R13), R13 + // Get current buffer write position. + MOVQ (p_wbBuf+wbBuf_next)(R13), R12 // original next position + ADDQ R11, R12 // new next position + // Is the buffer full? + CMPQ R12, (p_wbBuf+wbBuf_end)(R13) + JA flush + // Commit to the larger buffer. + MOVQ R12, (p_wbBuf+wbBuf_next)(R13) + // Make return value (the original next position) + SUBQ R11, R12 + MOVQ R12, R11 + // Restore registers. + MOVQ 96(SP), R12 + MOVQ 104(SP), R13 + RET + +flush: + // Save all general purpose registers since these could be + // clobbered by wbBufFlush and were not saved by the caller. + // It is possible for wbBufFlush to clobber other registers + // (e.g., SSE registers), but the compiler takes care of saving + // those in the caller if necessary. This strikes a balance + // with registers that are likely to be used. + // + // We don't have type information for these, but all code under + // here is NOSPLIT, so nothing will observe these. + // + // TODO: We could strike a different balance; e.g., saving X0 + // and not saving GP registers that are less likely to be used. + MOVQ DI, 0(SP) + MOVQ AX, 8(SP) + MOVQ BX, 16(SP) + MOVQ CX, 24(SP) + MOVQ DX, 32(SP) + // DI already saved + MOVQ SI, 40(SP) + MOVQ BP, 48(SP) + MOVQ R8, 56(SP) + MOVQ R9, 64(SP) + MOVQ R10, 72(SP) + MOVQ R11, 80(SP) + // R12 already saved + // R13 already saved + // R14 is g + MOVQ R15, 88(SP) + + CALL runtime·wbBufFlush(SB) + + MOVQ 0(SP), DI + MOVQ 8(SP), AX + MOVQ 16(SP), BX + MOVQ 24(SP), CX + MOVQ 32(SP), DX + MOVQ 40(SP), SI + MOVQ 48(SP), BP + MOVQ 56(SP), R8 + MOVQ 64(SP), R9 + MOVQ 72(SP), R10 + MOVQ 80(SP), R11 + MOVQ 88(SP), R15 + JMP retry + +TEXT runtime·gcWriteBarrier1(SB),NOSPLIT|NOFRAME,$0 + MOVL $8, R11 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier2(SB),NOSPLIT|NOFRAME,$0 + MOVL $16, R11 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier3(SB),NOSPLIT|NOFRAME,$0 + MOVL $24, R11 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier4(SB),NOSPLIT|NOFRAME,$0 + MOVL $32, R11 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier5(SB),NOSPLIT|NOFRAME,$0 + MOVL $40, R11 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier6(SB),NOSPLIT|NOFRAME,$0 + MOVL $48, R11 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier7(SB),NOSPLIT|NOFRAME,$0 + MOVL $56, R11 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier8(SB),NOSPLIT|NOFRAME,$0 + MOVL $64, R11 + JMP gcWriteBarrier<>(SB) + +DATA debugCallFrameTooLarge<>+0x00(SB)/20, $"call frame too large" +GLOBL debugCallFrameTooLarge<>(SB), RODATA, $20 // Size duplicated below + +// debugCallV2 is the entry point for debugger-injected function +// calls on running goroutines. It informs the runtime that a +// debug call has been injected and creates a call frame for the +// debugger to fill in. +// +// To inject a function call, a debugger should: +// 1. Check that the goroutine is in state _Grunning and that +// there are at least 256 bytes free on the stack. +// 2. 
Push the current PC on the stack (updating SP). +// 3. Write the desired argument frame size at SP-16 (using the SP +// after step 2). +// 4. Save all machine registers (including flags and XMM registers) +// so they can be restored later by the debugger. +// 5. Set the PC to debugCallV2 and resume execution. +// +// If the goroutine is in state _Grunnable, then it's not generally +// safe to inject a call because it may return out via other runtime +// operations. Instead, the debugger should unwind the stack to find +// the return to non-runtime code, add a temporary breakpoint there, +// and inject the call once that breakpoint is hit. +// +// If the goroutine is in any other state, it's not safe to inject a call. +// +// This function communicates back to the debugger by setting R12 and +// invoking INT3 to raise a breakpoint signal. See the comments in the +// implementation for the protocol the debugger is expected to +// follow. InjectDebugCall in the runtime tests demonstrates this protocol. +// +// The debugger must ensure that any pointers passed to the function +// obey escape analysis requirements. Specifically, it must not pass +// a stack pointer to an escaping argument. debugCallV2 cannot check +// this invariant. +// +// This is ABIInternal because Go code injects its PC directly into new +// goroutine stacks. +TEXT runtime·debugCallV2(SB),NOSPLIT,$152-0 + // Save all registers that may contain pointers so they can be + // conservatively scanned. + // + // We can't do anything that might clobber any of these + // registers before this. + MOVQ R15, r15-(14*8+8)(SP) + MOVQ R14, r14-(13*8+8)(SP) + MOVQ R13, r13-(12*8+8)(SP) + MOVQ R12, r12-(11*8+8)(SP) + MOVQ R11, r11-(10*8+8)(SP) + MOVQ R10, r10-(9*8+8)(SP) + MOVQ R9, r9-(8*8+8)(SP) + MOVQ R8, r8-(7*8+8)(SP) + MOVQ DI, di-(6*8+8)(SP) + MOVQ SI, si-(5*8+8)(SP) + MOVQ BP, bp-(4*8+8)(SP) + MOVQ BX, bx-(3*8+8)(SP) + MOVQ DX, dx-(2*8+8)(SP) + // Save the frame size before we clobber it. Either of the last + // saves could clobber this depending on whether there's a saved BP. + MOVQ frameSize-24(FP), DX // aka -16(RSP) before prologue + MOVQ CX, cx-(1*8+8)(SP) + MOVQ AX, ax-(0*8+8)(SP) + + // Save the argument frame size. + MOVQ DX, frameSize-128(SP) + + // Perform a safe-point check. + MOVQ retpc-8(FP), AX // Caller's PC + MOVQ AX, 0(SP) + CALL runtime·debugCallCheck(SB) + MOVQ 8(SP), AX + TESTQ AX, AX + JZ good + // The safety check failed. Put the reason string at the top + // of the stack. + MOVQ AX, 0(SP) + MOVQ 16(SP), AX + MOVQ AX, 8(SP) + // Set R12 to 8 and invoke INT3. The debugger should get the + // reason a call can't be injected from the top of the stack + // and resume execution. + MOVQ $8, R12 + BYTE $0xcc + JMP restore + +good: + // Registers are saved and it's safe to make a call. + // Open up a call frame, moving the stack if necessary. + // + // Once the frame is allocated, this will set R12 to 0 and + // invoke INT3. The debugger should write the argument + // frame for the call at SP, set up argument registers, push + // the trapping PC on the stack, set the PC to the function to + // call, set RDX to point to the closure (if a closure call), + // and resume execution. + // + // If the function returns, this will set R12 to 1 and invoke + // INT3. The debugger can then inspect any return value saved + // on the stack at SP and in registers and resume execution again. + // + // If the function panics, this will set R12 to 2 and invoke INT3. + // The interface{} value of the panic will be at SP. 
The debugger + // can inspect the panic value and resume execution again. +#define DEBUG_CALL_DISPATCH(NAME,MAXSIZE) \ + CMPQ AX, $MAXSIZE; \ + JA 5(PC); \ + MOVQ $NAME(SB), AX; \ + MOVQ AX, 0(SP); \ + CALL runtime·debugCallWrap(SB); \ + JMP restore + + MOVQ frameSize-128(SP), AX + DEBUG_CALL_DISPATCH(debugCall32<>, 32) + DEBUG_CALL_DISPATCH(debugCall64<>, 64) + DEBUG_CALL_DISPATCH(debugCall128<>, 128) + DEBUG_CALL_DISPATCH(debugCall256<>, 256) + DEBUG_CALL_DISPATCH(debugCall512<>, 512) + DEBUG_CALL_DISPATCH(debugCall1024<>, 1024) + DEBUG_CALL_DISPATCH(debugCall2048<>, 2048) + DEBUG_CALL_DISPATCH(debugCall4096<>, 4096) + DEBUG_CALL_DISPATCH(debugCall8192<>, 8192) + DEBUG_CALL_DISPATCH(debugCall16384<>, 16384) + DEBUG_CALL_DISPATCH(debugCall32768<>, 32768) + DEBUG_CALL_DISPATCH(debugCall65536<>, 65536) + // The frame size is too large. Report the error. + MOVQ $debugCallFrameTooLarge<>(SB), AX + MOVQ AX, 0(SP) + MOVQ $20, 8(SP) // length of debugCallFrameTooLarge string + MOVQ $8, R12 + BYTE $0xcc + JMP restore + +restore: + // Calls and failures resume here. + // + // Set R12 to 16 and invoke INT3. The debugger should restore + // all registers except RIP and RSP and resume execution. + MOVQ $16, R12 + BYTE $0xcc + // We must not modify flags after this point. + + // Restore pointer-containing registers, which may have been + // modified from the debugger's copy by stack copying. + MOVQ ax-(0*8+8)(SP), AX + MOVQ cx-(1*8+8)(SP), CX + MOVQ dx-(2*8+8)(SP), DX + MOVQ bx-(3*8+8)(SP), BX + MOVQ bp-(4*8+8)(SP), BP + MOVQ si-(5*8+8)(SP), SI + MOVQ di-(6*8+8)(SP), DI + MOVQ r8-(7*8+8)(SP), R8 + MOVQ r9-(8*8+8)(SP), R9 + MOVQ r10-(9*8+8)(SP), R10 + MOVQ r11-(10*8+8)(SP), R11 + MOVQ r12-(11*8+8)(SP), R12 + MOVQ r13-(12*8+8)(SP), R13 + MOVQ r14-(13*8+8)(SP), R14 + MOVQ r15-(14*8+8)(SP), R15 + + RET + +// runtime.debugCallCheck assumes that functions defined with the +// DEBUG_CALL_FN macro are safe points to inject calls. +#define DEBUG_CALL_FN(NAME,MAXSIZE) \ +TEXT NAME(SB),WRAPPER,$MAXSIZE-0; \ + NO_LOCAL_POINTERS; \ + MOVQ $0, R12; \ + BYTE $0xcc; \ + MOVQ $1, R12; \ + BYTE $0xcc; \ + RET +DEBUG_CALL_FN(debugCall32<>, 32) +DEBUG_CALL_FN(debugCall64<>, 64) +DEBUG_CALL_FN(debugCall128<>, 128) +DEBUG_CALL_FN(debugCall256<>, 256) +DEBUG_CALL_FN(debugCall512<>, 512) +DEBUG_CALL_FN(debugCall1024<>, 1024) +DEBUG_CALL_FN(debugCall2048<>, 2048) +DEBUG_CALL_FN(debugCall4096<>, 4096) +DEBUG_CALL_FN(debugCall8192<>, 8192) +DEBUG_CALL_FN(debugCall16384<>, 16384) +DEBUG_CALL_FN(debugCall32768<>, 32768) +DEBUG_CALL_FN(debugCall65536<>, 65536) + +// func debugCallPanicked(val interface{}) +TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16 + // Copy the panic value to the top of stack. + MOVQ val_type+0(FP), AX + MOVQ AX, 0(SP) + MOVQ val_data+8(FP), AX + MOVQ AX, 8(SP) + MOVQ $2, R12 + BYTE $0xcc + RET + +// Note: these functions use a special calling convention to save generated code space. +// Arguments are passed in registers, but the space for those arguments are allocated +// in the caller's stack frame. These stubs write the args into that stack space and +// then tail call to the corresponding runtime handler. +// The tail call makes these stubs disappear in backtraces. +// Defined as ABIInternal since they do not use the stack-based Go ABI. 
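+//
+// For illustration only (an added example, not part of the original file):
+// a failing bounds check in ordinary Go code such as
+//
+//	func get(s []int, i int) int { return s[i] }
+//
+// ends in a conditional jump to one of these stubs with the offending index
+// and the length already in registers; the stubs below merely shuffle them
+// into the argument registers the goPanic* handlers expect.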
+TEXT runtime·panicIndex(SB),NOSPLIT,$0-16 + MOVQ CX, BX + JMP runtime·goPanicIndex(SB) +TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16 + MOVQ CX, BX + JMP runtime·goPanicIndexU(SB) +TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16 + MOVQ CX, AX + MOVQ DX, BX + JMP runtime·goPanicSliceAlen(SB) +TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16 + MOVQ CX, AX + MOVQ DX, BX + JMP runtime·goPanicSliceAlenU(SB) +TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16 + MOVQ CX, AX + MOVQ DX, BX + JMP runtime·goPanicSliceAcap(SB) +TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16 + MOVQ CX, AX + MOVQ DX, BX + JMP runtime·goPanicSliceAcapU(SB) +TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16 + MOVQ CX, BX + JMP runtime·goPanicSliceB(SB) +TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16 + MOVQ CX, BX + JMP runtime·goPanicSliceBU(SB) +TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16 + MOVQ DX, AX + JMP runtime·goPanicSlice3Alen(SB) +TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16 + MOVQ DX, AX + JMP runtime·goPanicSlice3AlenU(SB) +TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16 + MOVQ DX, AX + JMP runtime·goPanicSlice3Acap(SB) +TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16 + MOVQ DX, AX + JMP runtime·goPanicSlice3AcapU(SB) +TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16 + MOVQ CX, AX + MOVQ DX, BX + JMP runtime·goPanicSlice3B(SB) +TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16 + MOVQ CX, AX + MOVQ DX, BX + JMP runtime·goPanicSlice3BU(SB) +TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16 + MOVQ CX, BX + JMP runtime·goPanicSlice3C(SB) +TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16 + MOVQ CX, BX + JMP runtime·goPanicSlice3CU(SB) +TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16 + MOVQ DX, AX + JMP runtime·goPanicSliceConvert(SB) + +#ifdef GOOS_android +// Use the free TLS_SLOT_APP slot #2 on Android Q. +// Earlier androids are set up in gcc_android.c. +DATA runtime·tls_g+0(SB)/8, $16 +GLOBL runtime·tls_g+0(SB), NOPTR, $8 +#endif +#ifdef GOOS_windows +GLOBL runtime·tls_g+0(SB), NOPTR, $8 +#endif + +// The compiler and assembler's -spectre=ret mode rewrites +// all indirect CALL AX / JMP AX instructions to be +// CALL retpolineAX / JMP retpolineAX. +// See https://support.google.com/faqs/answer/7625886. 
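+//
+// The hand-encoded macro below is the standard retpoline thunk: CALL to the
+// setup label, a PAUSE/JMP speculation trap at the call's fall-through, then
+// an overwrite of the saved return address with the target register followed
+// by RET, so the indirect branch resolves through the return-stack predictor
+// rather than the (trainable) indirect branch predictor.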
+#define RETPOLINE(reg) \ + /* CALL setup */ BYTE $0xE8; BYTE $(2+2); BYTE $0; BYTE $0; BYTE $0; \ + /* nospec: */ \ + /* PAUSE */ BYTE $0xF3; BYTE $0x90; \ + /* JMP nospec */ BYTE $0xEB; BYTE $-(2+2); \ + /* setup: */ \ + /* MOVQ AX, 0(SP) */ BYTE $0x48|((reg&8)>>1); BYTE $0x89; \ + BYTE $0x04|((reg&7)<<3); BYTE $0x24; \ + /* RET */ BYTE $0xC3 + +TEXT runtime·retpolineAX(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(0) +TEXT runtime·retpolineCX(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(1) +TEXT runtime·retpolineDX(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(2) +TEXT runtime·retpolineBX(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(3) +/* SP is 4, can't happen / magic encodings */ +TEXT runtime·retpolineBP(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(5) +TEXT runtime·retpolineSI(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(6) +TEXT runtime·retpolineDI(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(7) +TEXT runtime·retpolineR8(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(8) +TEXT runtime·retpolineR9(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(9) +TEXT runtime·retpolineR10(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(10) +TEXT runtime·retpolineR11(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(11) +TEXT runtime·retpolineR12(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(12) +TEXT runtime·retpolineR13(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(13) +TEXT runtime·retpolineR14(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(14) +TEXT runtime·retpolineR15(SB),NOSPLIT|NOFRAME,$0; RETPOLINE(15) + +TEXT ·getfp(SB),NOSPLIT|NOFRAME,$0 + MOVQ BP, AX + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/asm_arm.s b/platform/dbops/binaries/go/go/src/runtime/asm_arm.s new file mode 100644 index 0000000000000000000000000000000000000000..31a0584fb5d8ca9806fb88a543629a43fea946f3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asm_arm.s @@ -0,0 +1,1130 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "go_tls.h" +#include "funcdata.h" +#include "textflag.h" + +// _rt0_arm is common startup code for most ARM systems when using +// internal linking. This is the entry point for the program from the +// kernel for an ordinary -buildmode=exe program. The stack holds the +// number of arguments and the C-style argv. +TEXT _rt0_arm(SB),NOSPLIT|NOFRAME,$0 + MOVW (R13), R0 // argc + MOVW $4(R13), R1 // argv + B runtime·rt0_go(SB) + +// main is common startup code for most ARM systems when using +// external linking. The C startup code will call the symbol "main" +// passing argc and argv in the usual C ABI registers R0 and R1. +TEXT main(SB),NOSPLIT|NOFRAME,$0 + B runtime·rt0_go(SB) + +// _rt0_arm_lib is common startup code for most ARM systems when +// using -buildmode=c-archive or -buildmode=c-shared. The linker will +// arrange to invoke this function as a global constructor (for +// c-archive) or when the shared library is loaded (for c-shared). +// We expect argc and argv to be passed in the usual C ABI registers +// R0 and R1. +TEXT _rt0_arm_lib(SB),NOSPLIT,$104 + // Preserve callee-save registers. Raspberry Pi's dlopen(), for example, + // actually cares that R11 is preserved. + MOVW R4, 12(R13) + MOVW R5, 16(R13) + MOVW R6, 20(R13) + MOVW R7, 24(R13) + MOVW R8, 28(R13) + MOVW g, 32(R13) + MOVW R11, 36(R13) + + // Skip floating point registers on goarmsoftfp != 0. 
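+	// (With goarmsoftfp != 0 the target may lack a VFP unit entirely,
+	// so the FP saves below must be skipped, not merely left unused.)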
+ MOVB runtime·goarmsoftfp(SB), R11 + CMP $0, R11 + BNE skipfpsave + MOVD F8, (40+8*0)(R13) + MOVD F9, (40+8*1)(R13) + MOVD F10, (40+8*2)(R13) + MOVD F11, (40+8*3)(R13) + MOVD F12, (40+8*4)(R13) + MOVD F13, (40+8*5)(R13) + MOVD F14, (40+8*6)(R13) + MOVD F15, (40+8*7)(R13) +skipfpsave: + // Save argc/argv. + MOVW R0, _rt0_arm_lib_argc<>(SB) + MOVW R1, _rt0_arm_lib_argv<>(SB) + + MOVW $0, g // Initialize g. + + // Synchronous initialization. + CALL runtime·libpreinit(SB) + + // Create a new thread to do the runtime initialization. + MOVW _cgo_sys_thread_create(SB), R2 + CMP $0, R2 + BEQ nocgo + MOVW $_rt0_arm_lib_go<>(SB), R0 + MOVW $0, R1 + BL (R2) + B rr +nocgo: + MOVW $0x800000, R0 // stacksize = 8192KB + MOVW $_rt0_arm_lib_go<>(SB), R1 // fn + MOVW R0, 4(R13) + MOVW R1, 8(R13) + BL runtime·newosproc0(SB) +rr: + // Restore callee-save registers and return. + MOVB runtime·goarmsoftfp(SB), R11 + CMP $0, R11 + BNE skipfprest + MOVD (40+8*0)(R13), F8 + MOVD (40+8*1)(R13), F9 + MOVD (40+8*2)(R13), F10 + MOVD (40+8*3)(R13), F11 + MOVD (40+8*4)(R13), F12 + MOVD (40+8*5)(R13), F13 + MOVD (40+8*6)(R13), F14 + MOVD (40+8*7)(R13), F15 +skipfprest: + MOVW 12(R13), R4 + MOVW 16(R13), R5 + MOVW 20(R13), R6 + MOVW 24(R13), R7 + MOVW 28(R13), R8 + MOVW 32(R13), g + MOVW 36(R13), R11 + RET + +// _rt0_arm_lib_go initializes the Go runtime. +// This is started in a separate thread by _rt0_arm_lib. +TEXT _rt0_arm_lib_go<>(SB),NOSPLIT,$8 + MOVW _rt0_arm_lib_argc<>(SB), R0 + MOVW _rt0_arm_lib_argv<>(SB), R1 + B runtime·rt0_go(SB) + +DATA _rt0_arm_lib_argc<>(SB)/4,$0 +GLOBL _rt0_arm_lib_argc<>(SB),NOPTR,$4 +DATA _rt0_arm_lib_argv<>(SB)/4,$0 +GLOBL _rt0_arm_lib_argv<>(SB),NOPTR,$4 + +// using NOFRAME means do not save LR on stack. +// argc is in R0, argv is in R1. +TEXT runtime·rt0_go(SB),NOSPLIT|NOFRAME|TOPFRAME,$0 + MOVW $0xcafebabe, R12 + + // copy arguments forward on an even stack + // use R13 instead of SP to avoid linker rewriting the offsets + SUB $64, R13 // plenty of scratch + AND $~7, R13 + MOVW R0, 60(R13) // save argc, argv away + MOVW R1, 64(R13) + + // set up g register + // g is R10 + MOVW $runtime·g0(SB), g + MOVW $runtime·m0(SB), R8 + + // save m->g0 = g0 + MOVW g, m_g0(R8) + // save g->m = m0 + MOVW R8, g_m(g) + + // create istack out of the OS stack + // (1MB of system stack is available on iOS and Android) + MOVW $(-64*1024+104)(R13), R0 + MOVW R0, g_stackguard0(g) + MOVW R0, g_stackguard1(g) + MOVW R0, (g_stack+stack_lo)(g) + MOVW R13, (g_stack+stack_hi)(g) + + BL runtime·emptyfunc(SB) // fault if stack check is wrong + +#ifdef GOOS_openbsd + // Save g to TLS so that it is available from signal trampoline. 
+ BL runtime·save_g(SB) +#endif + + BL runtime·_initcgo(SB) // will clobber R0-R3 + + // update stackguard after _cgo_init + MOVW (g_stack+stack_lo)(g), R0 + ADD $const_stackGuard, R0 + MOVW R0, g_stackguard0(g) + MOVW R0, g_stackguard1(g) + + BL runtime·check(SB) + + // saved argc, argv + MOVW 60(R13), R0 + MOVW R0, 4(R13) + MOVW 64(R13), R1 + MOVW R1, 8(R13) + BL runtime·args(SB) + BL runtime·checkgoarm(SB) + BL runtime·osinit(SB) + BL runtime·schedinit(SB) + + // create a new goroutine to start program + SUB $8, R13 + MOVW $runtime·mainPC(SB), R0 + MOVW R0, 4(R13) // arg 1: fn + MOVW $0, R0 + MOVW R0, 0(R13) // dummy LR + BL runtime·newproc(SB) + ADD $8, R13 // pop args and LR + + // start this M + BL runtime·mstart(SB) + + MOVW $1234, R0 + MOVW $1000, R1 + MOVW R0, (R1) // fail hard + +DATA runtime·mainPC+0(SB)/4,$runtime·main(SB) +GLOBL runtime·mainPC(SB),RODATA,$4 + +TEXT runtime·breakpoint(SB),NOSPLIT,$0-0 + // gdb won't skip this breakpoint instruction automatically, + // so you must manually "set $pc+=4" to skip it and continue. +#ifdef GOOS_plan9 + WORD $0xD1200070 // undefined instruction used as armv5 breakpoint in Plan 9 +#else + WORD $0xe7f001f0 // undefined instruction that gdb understands is a software breakpoint +#endif + RET + +TEXT runtime·asminit(SB),NOSPLIT,$0-0 + // disable runfast (flush-to-zero) mode of vfp if runtime.goarmsoftfp == 0 + MOVB runtime·goarmsoftfp(SB), R11 + CMP $0, R11 + BNE 4(PC) + WORD $0xeef1ba10 // vmrs r11, fpscr + BIC $(1<<24), R11 + WORD $0xeee1ba10 // vmsr fpscr, r11 + RET + +TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0 + BL runtime·mstart0(SB) + RET // not reached + +/* + * go-routine + */ + +// void gogo(Gobuf*) +// restore state from Gobuf; longjmp +TEXT runtime·gogo(SB),NOSPLIT|NOFRAME,$0-4 + MOVW buf+0(FP), R1 + MOVW gobuf_g(R1), R0 + MOVW 0(R0), R2 // make sure g != nil + B gogo<>(SB) + +TEXT gogo<>(SB),NOSPLIT|NOFRAME,$0 + BL setg<>(SB) + MOVW gobuf_sp(R1), R13 // restore SP==R13 + MOVW gobuf_lr(R1), LR + MOVW gobuf_ret(R1), R0 + MOVW gobuf_ctxt(R1), R7 + MOVW $0, R11 + MOVW R11, gobuf_sp(R1) // clear to help garbage collector + MOVW R11, gobuf_ret(R1) + MOVW R11, gobuf_lr(R1) + MOVW R11, gobuf_ctxt(R1) + MOVW gobuf_pc(R1), R11 + CMP R11, R11 // set condition codes for == test, needed by stack split + B (R11) + +// func mcall(fn func(*g)) +// Switch to m->g0's stack, call fn(g). +// Fn must never return. It should gogo(&g->sched) +// to keep running g. +TEXT runtime·mcall(SB),NOSPLIT|NOFRAME,$0-4 + // Save caller state in g->sched. + MOVW R13, (g_sched+gobuf_sp)(g) + MOVW LR, (g_sched+gobuf_pc)(g) + MOVW $0, R11 + MOVW R11, (g_sched+gobuf_lr)(g) + + // Switch to m->g0 & its stack, call fn. + MOVW g, R1 + MOVW g_m(g), R8 + MOVW m_g0(R8), R0 + BL setg<>(SB) + CMP g, R1 + B.NE 2(PC) + B runtime·badmcall(SB) + MOVW fn+0(FP), R0 + MOVW (g_sched+gobuf_sp)(g), R13 + SUB $8, R13 + MOVW R1, 4(R13) + MOVW R0, R7 + MOVW 0(R0), R0 + BL (R0) + B runtime·badmcall2(SB) + RET + +// systemstack_switch is a dummy routine that systemstack leaves at the bottom +// of the G stack. We need to distinguish the routine that +// lives at the bottom of the G stack from the one that lives +// at the top of the system stack because the one at the top of +// the system stack terminates the stack walk (see topofstack()). 
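+//
+// The body below is never meant to execute: gosave_systemstack_switch<>
+// (later in this file) records systemstack_switch+4, i.e. past the
+// push {lr}, as the saved PC, so the symbol serves purely as a marker.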
+TEXT runtime·systemstack_switch(SB),NOSPLIT,$0-0 + MOVW $0, R0 + BL (R0) // clobber lr to ensure push {lr} is kept + RET + +// func systemstack(fn func()) +TEXT runtime·systemstack(SB),NOSPLIT,$0-4 + MOVW fn+0(FP), R0 // R0 = fn + MOVW g_m(g), R1 // R1 = m + + MOVW m_gsignal(R1), R2 // R2 = gsignal + CMP g, R2 + B.EQ noswitch + + MOVW m_g0(R1), R2 // R2 = g0 + CMP g, R2 + B.EQ noswitch + + MOVW m_curg(R1), R3 + CMP g, R3 + B.EQ switch + + // Bad: g is not gsignal, not g0, not curg. What is it? + // Hide call from linker nosplit analysis. + MOVW $runtime·badsystemstack(SB), R0 + BL (R0) + B runtime·abort(SB) + +switch: + // save our state in g->sched. Pretend to + // be systemstack_switch if the G stack is scanned. + BL gosave_systemstack_switch<>(SB) + + // switch to g0 + MOVW R0, R5 + MOVW R2, R0 + BL setg<>(SB) + MOVW R5, R0 + MOVW (g_sched+gobuf_sp)(R2), R13 + + // call target function + MOVW R0, R7 + MOVW 0(R0), R0 + BL (R0) + + // switch back to g + MOVW g_m(g), R1 + MOVW m_curg(R1), R0 + BL setg<>(SB) + MOVW (g_sched+gobuf_sp)(g), R13 + MOVW $0, R3 + MOVW R3, (g_sched+gobuf_sp)(g) + RET + +noswitch: + // Using a tail call here cleans up tracebacks since we won't stop + // at an intermediate systemstack. + MOVW R0, R7 + MOVW 0(R0), R0 + MOVW.P 4(R13), R14 // restore LR + B (R0) + +/* + * support for morestack + */ + +// Called during function prolog when more stack is needed. +// R3 prolog's LR +// using NOFRAME means do not save LR on stack. +// +// The traceback routines see morestack on a g0 as being +// the top of a stack (for example, morestack calling newstack +// calling the scheduler calling newm calling gc), so we must +// record an argument size. For that purpose, it has no arguments. +TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 + // Cannot grow scheduler stack (m->g0). + MOVW g_m(g), R8 + MOVW m_g0(R8), R4 + CMP g, R4 + BNE 3(PC) + BL runtime·badmorestackg0(SB) + B runtime·abort(SB) + + // Cannot grow signal stack (m->gsignal). + MOVW m_gsignal(R8), R4 + CMP g, R4 + BNE 3(PC) + BL runtime·badmorestackgsignal(SB) + B runtime·abort(SB) + + // Called from f. + // Set g->sched to context in f. + MOVW R13, (g_sched+gobuf_sp)(g) + MOVW LR, (g_sched+gobuf_pc)(g) + MOVW R3, (g_sched+gobuf_lr)(g) + MOVW R7, (g_sched+gobuf_ctxt)(g) + + // Called from f. + // Set m->morebuf to f's caller. + MOVW R3, (m_morebuf+gobuf_pc)(R8) // f's caller's PC + MOVW R13, (m_morebuf+gobuf_sp)(R8) // f's caller's SP + MOVW g, (m_morebuf+gobuf_g)(R8) + + // Call newstack on m->g0's stack. + MOVW m_g0(R8), R0 + BL setg<>(SB) + MOVW (g_sched+gobuf_sp)(g), R13 + MOVW $0, R0 + MOVW.W R0, -4(R13) // create a call frame on g0 (saved LR) + BL runtime·newstack(SB) + + // Not reached, but make sure the return PC from the call to newstack + // is still in this function, and not the beginning of the next. + RET + +TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 + // Force SPWRITE. This function doesn't actually write SP, + // but it is called with a special calling convention where + // the caller doesn't save LR on stack but passes it as a + // register (R3), and the unwinder currently doesn't understand. + // Make it SPWRITE to stop unwinding. (See issue 54332) + MOVW R13, R13 + + MOVW $0, R7 + B runtime·morestack(SB) + +// reflectcall: call a function with the given argument list +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). 
+// we don't have variable-sized frames, so we use a small number +// of constant-sized-frame functions to encode a few bits of size in the pc. +// Caution: ugly multiline assembly macros in your future! + +#define DISPATCH(NAME,MAXSIZE) \ + CMP $MAXSIZE, R0; \ + B.HI 3(PC); \ + MOVW $NAME(SB), R1; \ + B (R1) + +TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-28 + MOVW frameSize+20(FP), R0 + DISPATCH(runtime·call16, 16) + DISPATCH(runtime·call32, 32) + DISPATCH(runtime·call64, 64) + DISPATCH(runtime·call128, 128) + DISPATCH(runtime·call256, 256) + DISPATCH(runtime·call512, 512) + DISPATCH(runtime·call1024, 1024) + DISPATCH(runtime·call2048, 2048) + DISPATCH(runtime·call4096, 4096) + DISPATCH(runtime·call8192, 8192) + DISPATCH(runtime·call16384, 16384) + DISPATCH(runtime·call32768, 32768) + DISPATCH(runtime·call65536, 65536) + DISPATCH(runtime·call131072, 131072) + DISPATCH(runtime·call262144, 262144) + DISPATCH(runtime·call524288, 524288) + DISPATCH(runtime·call1048576, 1048576) + DISPATCH(runtime·call2097152, 2097152) + DISPATCH(runtime·call4194304, 4194304) + DISPATCH(runtime·call8388608, 8388608) + DISPATCH(runtime·call16777216, 16777216) + DISPATCH(runtime·call33554432, 33554432) + DISPATCH(runtime·call67108864, 67108864) + DISPATCH(runtime·call134217728, 134217728) + DISPATCH(runtime·call268435456, 268435456) + DISPATCH(runtime·call536870912, 536870912) + DISPATCH(runtime·call1073741824, 1073741824) + MOVW $runtime·badreflectcall(SB), R1 + B (R1) + +#define CALLFN(NAME,MAXSIZE) \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-28; \ + NO_LOCAL_POINTERS; \ + /* copy arguments to stack */ \ + MOVW stackArgs+8(FP), R0; \ + MOVW stackArgsSize+12(FP), R2; \ + ADD $4, R13, R1; \ + CMP $0, R2; \ + B.EQ 5(PC); \ + MOVBU.P 1(R0), R5; \ + MOVBU.P R5, 1(R1); \ + SUB $1, R2, R2; \ + B -5(PC); \ + /* call function */ \ + MOVW f+4(FP), R7; \ + MOVW (R7), R0; \ + PCDATA $PCDATA_StackMapIndex, $0; \ + BL (R0); \ + /* copy return values back */ \ + MOVW stackArgsType+0(FP), R4; \ + MOVW stackArgs+8(FP), R0; \ + MOVW stackArgsSize+12(FP), R2; \ + MOVW stackArgsRetOffset+16(FP), R3; \ + ADD $4, R13, R1; \ + ADD R3, R1; \ + ADD R3, R0; \ + SUB R3, R2; \ + BL callRet<>(SB); \ + RET + +// callRet copies return values back at the end of call*. This is a +// separate function so it can allocate stack space for the arguments +// to reflectcallmove. It does not follow the Go ABI; it expects its +// arguments in registers. +TEXT callRet<>(SB), NOSPLIT, $20-0 + MOVW R4, 4(R13) + MOVW R0, 8(R13) + MOVW R1, 12(R13) + MOVW R2, 16(R13) + MOVW $0, R7 + MOVW R7, 20(R13) + BL runtime·reflectcallmove(SB) + RET + +CALLFN(·call16, 16) +CALLFN(·call32, 32) +CALLFN(·call64, 64) +CALLFN(·call128, 128) +CALLFN(·call256, 256) +CALLFN(·call512, 512) +CALLFN(·call1024, 1024) +CALLFN(·call2048, 2048) +CALLFN(·call4096, 4096) +CALLFN(·call8192, 8192) +CALLFN(·call16384, 16384) +CALLFN(·call32768, 32768) +CALLFN(·call65536, 65536) +CALLFN(·call131072, 131072) +CALLFN(·call262144, 262144) +CALLFN(·call524288, 524288) +CALLFN(·call1048576, 1048576) +CALLFN(·call2097152, 2097152) +CALLFN(·call4194304, 4194304) +CALLFN(·call8388608, 8388608) +CALLFN(·call16777216, 16777216) +CALLFN(·call33554432, 33554432) +CALLFN(·call67108864, 67108864) +CALLFN(·call134217728, 134217728) +CALLFN(·call268435456, 268435456) +CALLFN(·call536870912, 536870912) +CALLFN(·call1073741824, 1073741824) + +// Save state of caller into g->sched, +// but using fake PC from systemstack_switch. 
+// Must only be called from functions with no locals ($0) +// or else unwinding from systemstack_switch is incorrect. +// Smashes R11. +TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 + MOVW $runtime·systemstack_switch(SB), R11 + ADD $4, R11 // get past push {lr} + MOVW R11, (g_sched+gobuf_pc)(g) + MOVW R13, (g_sched+gobuf_sp)(g) + MOVW $0, R11 + MOVW R11, (g_sched+gobuf_lr)(g) + MOVW R11, (g_sched+gobuf_ret)(g) + // Assert ctxt is zero. See func save. + MOVW (g_sched+gobuf_ctxt)(g), R11 + TST R11, R11 + B.EQ 2(PC) + BL runtime·abort(SB) + RET + +// func asmcgocall_no_g(fn, arg unsafe.Pointer) +// Call fn(arg) aligned appropriately for the gcc ABI. +// Called on a system stack, and there may be no g yet (during needm). +TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-8 + MOVW fn+0(FP), R1 + MOVW arg+4(FP), R0 + MOVW R13, R2 + SUB $32, R13 + BIC $0x7, R13 // alignment for gcc ABI + MOVW R2, 8(R13) + BL (R1) + MOVW 8(R13), R2 + MOVW R2, R13 + RET + +// func asmcgocall(fn, arg unsafe.Pointer) int32 +// Call fn(arg) on the scheduler stack, +// aligned appropriately for the gcc ABI. +// See cgocall.go for more details. +TEXT ·asmcgocall(SB),NOSPLIT,$0-12 + MOVW fn+0(FP), R1 + MOVW arg+4(FP), R0 + + MOVW R13, R2 + CMP $0, g + BEQ nosave + MOVW g, R4 + + // Figure out if we need to switch to m->g0 stack. + // We get called to create new OS threads too, and those + // come in on the m->g0 stack already. Or we might already + // be on the m->gsignal stack. + MOVW g_m(g), R8 + MOVW m_gsignal(R8), R3 + CMP R3, g + BEQ nosave + MOVW m_g0(R8), R3 + CMP R3, g + BEQ nosave + BL gosave_systemstack_switch<>(SB) + MOVW R0, R5 + MOVW R3, R0 + BL setg<>(SB) + MOVW R5, R0 + MOVW (g_sched+gobuf_sp)(g), R13 + + // Now on a scheduling stack (a pthread-created stack). + SUB $24, R13 + BIC $0x7, R13 // alignment for gcc ABI + MOVW R4, 20(R13) // save old g + MOVW (g_stack+stack_hi)(R4), R4 + SUB R2, R4 + MOVW R4, 16(R13) // save depth in stack (can't just save SP, as stack might be copied during a callback) + BL (R1) + + // Restore registers, g, stack pointer. + MOVW R0, R5 + MOVW 20(R13), R0 + BL setg<>(SB) + MOVW (g_stack+stack_hi)(g), R1 + MOVW 16(R13), R2 + SUB R2, R1 + MOVW R5, R0 + MOVW R1, R13 + + MOVW R0, ret+8(FP) + RET + +nosave: + // Running on a system stack, perhaps even without a g. + // Having no g can happen during thread creation or thread teardown + // (see needm/dropm on Solaris, for example). + // This code is like the above sequence but without saving/restoring g + // and without worrying about the stack moving out from under us + // (because we're on a system stack, not a goroutine stack). + // The above code could be used directly if already on a system stack, + // but then the only path through this code would be a rare case on Solaris. + // Using this code for all "already on system stack" calls exercises it more, + // which should help keep it correct. + SUB $24, R13 + BIC $0x7, R13 // alignment for gcc ABI + // save null g in case someone looks during debugging. + MOVW $0, R4 + MOVW R4, 20(R13) + MOVW R2, 16(R13) // Save old stack pointer. + BL (R1) + // Restore stack pointer. + MOVW 16(R13), R2 + MOVW R2, R13 + MOVW R0, ret+8(FP) + RET + +// cgocallback(fn, frame unsafe.Pointer, ctxt uintptr) +// See cgocall.go for more details. +TEXT ·cgocallback(SB),NOSPLIT,$12-12 + NO_LOCAL_POINTERS + + // Skip cgocallbackg, just dropm when fn is nil, and frame is the saved g. + // It is used to dropm while thread is exiting. + MOVW fn+0(FP), R1 + CMP $0, R1 + B.NE loadg + // Restore the g from frame. 
+ MOVW frame+4(FP), g + B dropm + +loadg: + // Load m and g from thread-local storage. +#ifdef GOOS_openbsd + BL runtime·load_g(SB) +#else + MOVB runtime·iscgo(SB), R0 + CMP $0, R0 + BL.NE runtime·load_g(SB) +#endif + + // If g is nil, Go did not create the current thread, + // or if this thread never called into Go on pthread platforms. + // Call needm to obtain one for temporary use. + // In this case, we're running on the thread stack, so there's + // lots of space, but the linker doesn't know. Hide the call from + // the linker analysis by using an indirect call. + CMP $0, g + B.EQ needm + + MOVW g_m(g), R8 + MOVW R8, savedm-4(SP) + B havem + +needm: + MOVW g, savedm-4(SP) // g is zero, so is m. + MOVW $runtime·needAndBindM(SB), R0 + BL (R0) + + // Set m->g0->sched.sp = SP, so that if a panic happens + // during the function we are about to execute, it will + // have a valid SP to run on the g0 stack. + // The next few lines (after the havem label) + // will save this SP onto the stack and then write + // the same SP back to m->sched.sp. That seems redundant, + // but if an unrecovered panic happens, unwindm will + // restore the g->sched.sp from the stack location + // and then systemstack will try to use it. If we don't set it here, + // that restored SP will be uninitialized (typically 0) and + // will not be usable. + MOVW g_m(g), R8 + MOVW m_g0(R8), R3 + MOVW R13, (g_sched+gobuf_sp)(R3) + +havem: + // Now there's a valid m, and we're running on its m->g0. + // Save current m->g0->sched.sp on stack and then set it to SP. + // Save current sp in m->g0->sched.sp in preparation for + // switch back to m->curg stack. + // NOTE: unwindm knows that the saved g->sched.sp is at 4(R13) aka savedsp-12(SP). + MOVW m_g0(R8), R3 + MOVW (g_sched+gobuf_sp)(R3), R4 + MOVW R4, savedsp-12(SP) // must match frame size + MOVW R13, (g_sched+gobuf_sp)(R3) + + // Switch to m->curg stack and call runtime.cgocallbackg. + // Because we are taking over the execution of m->curg + // but *not* resuming what had been running, we need to + // save that information (m->curg->sched) so we can restore it. + // We can restore m->curg->sched.sp easily, because calling + // runtime.cgocallbackg leaves SP unchanged upon return. + // To save m->curg->sched.pc, we push it onto the curg stack and + // open a frame the same size as cgocallback's g0 frame. + // Once we switch to the curg stack, the pushed PC will appear + // to be the return PC of cgocallback, so that the traceback + // will seamlessly trace back into the earlier calls. + MOVW m_curg(R8), R0 + BL setg<>(SB) + MOVW (g_sched+gobuf_sp)(g), R4 // prepare stack as R4 + MOVW (g_sched+gobuf_pc)(g), R5 + MOVW R5, -(12+4)(R4) // "saved LR"; must match frame size + // Gather our arguments into registers. + MOVW fn+0(FP), R1 + MOVW frame+4(FP), R2 + MOVW ctxt+8(FP), R3 + MOVW $-(12+4)(R4), R13 // switch stack; must match frame size + MOVW R1, 4(R13) + MOVW R2, 8(R13) + MOVW R3, 12(R13) + BL runtime·cgocallbackg(SB) + + // Restore g->sched (== m->curg->sched) from saved values. + MOVW 0(R13), R5 + MOVW R5, (g_sched+gobuf_pc)(g) + MOVW $(12+4)(R13), R4 // must match frame size + MOVW R4, (g_sched+gobuf_sp)(g) + + // Switch back to m->g0's stack and restore m->g0->sched.sp. + // (Unlike m->curg, the g0 goroutine never uses sched.pc, + // so we do not have to restore it.) 
+ MOVW g_m(g), R8 + MOVW m_g0(R8), R0 + BL setg<>(SB) + MOVW (g_sched+gobuf_sp)(g), R13 + MOVW savedsp-12(SP), R4 // must match frame size + MOVW R4, (g_sched+gobuf_sp)(g) + + // If the m on entry was nil, we called needm above to borrow an m, + // 1. for the duration of the call on non-pthread platforms, + // 2. or the duration of the C thread alive on pthread platforms. + // If the m on entry wasn't nil, + // 1. the thread might be a Go thread, + // 2. or it wasn't the first call from a C thread on pthread platforms, + // since then we skip dropm to reuse the m in the first call. + MOVW savedm-4(SP), R6 + CMP $0, R6 + B.NE done + + // Skip dropm to reuse it in the next call, when a pthread key has been created. + MOVW _cgo_pthread_key_created(SB), R6 + // It means cgo is disabled when _cgo_pthread_key_created is a nil pointer, need dropm. + CMP $0, R6 + B.EQ dropm + MOVW (R6), R6 + CMP $0, R6 + B.NE done + +dropm: + MOVW $runtime·dropm(SB), R0 + BL (R0) + +done: + // Done! + RET + +// void setg(G*); set g. for use by needm. +TEXT runtime·setg(SB),NOSPLIT|NOFRAME,$0-4 + MOVW gg+0(FP), R0 + B setg<>(SB) + +TEXT setg<>(SB),NOSPLIT|NOFRAME,$0-0 + MOVW R0, g + + // Save g to thread-local storage. +#ifdef GOOS_windows + B runtime·save_g(SB) +#else +#ifdef GOOS_openbsd + B runtime·save_g(SB) +#else + MOVB runtime·iscgo(SB), R0 + CMP $0, R0 + B.EQ 2(PC) + B runtime·save_g(SB) + + MOVW g, R0 + RET +#endif +#endif + +TEXT runtime·emptyfunc(SB),0,$0-0 + RET + +TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0 + MOVW $0, R0 + MOVW (R0), R1 + +// armPublicationBarrier is a native store/store barrier for ARMv7+. +// On earlier ARM revisions, armPublicationBarrier is a no-op. +// This will not work on SMP ARMv6 machines, if any are in use. +// To implement publicationBarrier in sys_$GOOS_arm.s using the native +// instructions, use: +// +// TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0 +// B runtime·armPublicationBarrier(SB) +// +TEXT runtime·armPublicationBarrier(SB),NOSPLIT|NOFRAME,$0-0 + MOVB runtime·goarm(SB), R11 + CMP $7, R11 + BLT 2(PC) + DMB MB_ST + RET + +// AES hashing not implemented for ARM +TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-16 + JMP runtime·memhashFallback(SB) +TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-12 + JMP runtime·strhashFallback(SB) +TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-12 + JMP runtime·memhash32Fallback(SB) +TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-12 + JMP runtime·memhash64Fallback(SB) + +TEXT runtime·return0(SB),NOSPLIT,$0 + MOVW $0, R0 + RET + +TEXT runtime·procyield(SB),NOSPLIT|NOFRAME,$0 + MOVW cycles+0(FP), R1 + MOVW $0, R0 +yieldloop: + WORD $0xe320f001 // YIELD (NOP pre-ARMv6K) + CMP R0, R1 + B.NE 2(PC) + RET + SUB $1, R1 + B yieldloop + +// Called from cgo wrappers, this function returns g->m->curg.stack.hi. +// Must obey the gcc calling convention. +TEXT _cgo_topofstack(SB),NOSPLIT,$8 + // R11 and g register are clobbered by load_g. They are + // callee-save in the gcc calling convention, so save them here. + MOVW R11, saveR11-4(SP) + MOVW g, saveG-8(SP) + + BL runtime·load_g(SB) + MOVW g_m(g), R0 + MOVW m_curg(R0), R0 + MOVW (g_stack+stack_hi)(R0), R0 + + MOVW saveG-8(SP), g + MOVW saveR11-4(SP), R11 + RET + +// The top-most function running on a goroutine +// returns to goexit+PCQuantum. 
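+// (The NOPs around the call below keep goexit+PCQuantum and the BL's return
+// address inside goexit's code range, which the traceback code relies on.)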
+TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0 + MOVW R0, R0 // NOP + BL runtime·goexit1(SB) // does not return + // traceback from goexit1 must hit code range of goexit + MOVW R0, R0 // NOP + +// x -> x/1000000, x%1000000, called from Go with args, results on stack. +TEXT runtime·usplit(SB),NOSPLIT,$0-12 + MOVW x+0(FP), R0 + CALL runtime·usplitR0(SB) + MOVW R0, q+4(FP) + MOVW R1, r+8(FP) + RET + +// R0, R1 = R0/1000000, R0%1000000 +TEXT runtime·usplitR0(SB),NOSPLIT,$0 + // magic multiply to avoid software divide without available m. + // see output of go tool compile -S for x/1000000. + MOVW R0, R3 + MOVW $1125899907, R1 + MULLU R1, R0, (R0, R1) + MOVW R0>>18, R0 + MOVW $1000000, R1 + MULU R0, R1 + SUB R1, R3, R1 + RET + +// This is called from .init_array and follows the platform, not Go, ABI. +TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0 + MOVW R9, saver9-4(SP) // The access to global variables below implicitly uses R9, which is callee-save + MOVW R11, saver11-8(SP) // Likewise, R11 is the temp register, but callee-save in C ABI + MOVW runtime·lastmoduledatap(SB), R1 + MOVW R0, moduledata_next(R1) + MOVW R0, runtime·lastmoduledatap(SB) + MOVW saver11-8(SP), R11 + MOVW saver9-4(SP), R9 + RET + +TEXT ·checkASM(SB),NOSPLIT,$0-1 + MOVW $1, R3 + MOVB R3, ret+0(FP) + RET + +// gcWriteBarrier informs the GC about heap pointer writes. +// +// gcWriteBarrier does NOT follow the Go ABI. It accepts the +// number of bytes of buffer needed in R8, and returns a pointer +// to the buffer space in R8. +// It clobbers condition codes. +// It does not clobber any other general-purpose registers, +// but may clobber others (e.g., floating point registers). +// The act of CALLing gcWriteBarrier will clobber R14 (LR). +TEXT gcWriteBarrier<>(SB),NOSPLIT|NOFRAME,$0 + // Save the registers clobbered by the fast path. + MOVM.DB.W [R0,R1], (R13) +retry: + MOVW g_m(g), R0 + MOVW m_p(R0), R0 + MOVW (p_wbBuf+wbBuf_next)(R0), R1 + MOVW (p_wbBuf+wbBuf_end)(R0), R11 + // Increment wbBuf.next position. + ADD R8, R1 + // Is the buffer full? + CMP R11, R1 + BHI flush + // Commit to the larger buffer. + MOVW R1, (p_wbBuf+wbBuf_next)(R0) + // Make return value (the original next position) + SUB R8, R1, R8 + // Restore registers. + MOVM.IA.W (R13), [R0,R1] + RET + +flush: + // Save all general purpose registers since these could be + // clobbered by wbBufFlush and were not saved by the caller. + // + // R0 and R1 were saved at entry. + // R10 is g, so preserved. + // R11 is linker temp, so no need to save. + // R13 is stack pointer. + // R15 is PC. + MOVM.DB.W [R2-R9,R12], (R13) + // Save R14 (LR) because the fast path above doesn't save it, + // but needs it to RET. 
+ MOVM.DB.W [R14], (R13) + + CALL runtime·wbBufFlush(SB) + + MOVM.IA.W (R13), [R14] + MOVM.IA.W (R13), [R2-R9,R12] + JMP retry + +TEXT runtime·gcWriteBarrier1(SB),NOSPLIT,$0 + MOVW $4, R8 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier2(SB),NOSPLIT,$0 + MOVW $8, R8 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier3(SB),NOSPLIT,$0 + MOVW $12, R8 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier4(SB),NOSPLIT,$0 + MOVW $16, R8 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier5(SB),NOSPLIT,$0 + MOVW $20, R8 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier6(SB),NOSPLIT,$0 + MOVW $24, R8 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier7(SB),NOSPLIT,$0 + MOVW $28, R8 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0 + MOVW $32, R8 + JMP gcWriteBarrier<>(SB) + +// Note: these functions use a special calling convention to save generated code space. +// Arguments are passed in registers, but the space for those arguments are allocated +// in the caller's stack frame. These stubs write the args into that stack space and +// then tail call to the corresponding runtime handler. +// The tail call makes these stubs disappear in backtraces. +TEXT runtime·panicIndex(SB),NOSPLIT,$0-8 + MOVW R0, x+0(FP) + MOVW R1, y+4(FP) + JMP runtime·goPanicIndex(SB) +TEXT runtime·panicIndexU(SB),NOSPLIT,$0-8 + MOVW R0, x+0(FP) + MOVW R1, y+4(FP) + JMP runtime·goPanicIndexU(SB) +TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-8 + MOVW R1, x+0(FP) + MOVW R2, y+4(FP) + JMP runtime·goPanicSliceAlen(SB) +TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-8 + MOVW R1, x+0(FP) + MOVW R2, y+4(FP) + JMP runtime·goPanicSliceAlenU(SB) +TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-8 + MOVW R1, x+0(FP) + MOVW R2, y+4(FP) + JMP runtime·goPanicSliceAcap(SB) +TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-8 + MOVW R1, x+0(FP) + MOVW R2, y+4(FP) + JMP runtime·goPanicSliceAcapU(SB) +TEXT runtime·panicSliceB(SB),NOSPLIT,$0-8 + MOVW R0, x+0(FP) + MOVW R1, y+4(FP) + JMP runtime·goPanicSliceB(SB) +TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-8 + MOVW R0, x+0(FP) + MOVW R1, y+4(FP) + JMP runtime·goPanicSliceBU(SB) +TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-8 + MOVW R2, x+0(FP) + MOVW R3, y+4(FP) + JMP runtime·goPanicSlice3Alen(SB) +TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-8 + MOVW R2, x+0(FP) + MOVW R3, y+4(FP) + JMP runtime·goPanicSlice3AlenU(SB) +TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-8 + MOVW R2, x+0(FP) + MOVW R3, y+4(FP) + JMP runtime·goPanicSlice3Acap(SB) +TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-8 + MOVW R2, x+0(FP) + MOVW R3, y+4(FP) + JMP runtime·goPanicSlice3AcapU(SB) +TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-8 + MOVW R1, x+0(FP) + MOVW R2, y+4(FP) + JMP runtime·goPanicSlice3B(SB) +TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-8 + MOVW R1, x+0(FP) + MOVW R2, y+4(FP) + JMP runtime·goPanicSlice3BU(SB) +TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-8 + MOVW R0, x+0(FP) + MOVW R1, y+4(FP) + JMP runtime·goPanicSlice3C(SB) +TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-8 + MOVW R0, x+0(FP) + MOVW R1, y+4(FP) + JMP runtime·goPanicSlice3CU(SB) +TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-8 + MOVW R2, x+0(FP) + MOVW R3, y+4(FP) + JMP runtime·goPanicSliceConvert(SB) + +// Extended versions for 64-bit indexes. 
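+//
+// On 32-bit ARM a 64-bit index is split across two registers: every stub
+// below takes the high word in R4 and the low word in the register its
+// 32-bit counterpart uses, matching the (hi, lo, y) frame it writes.
+// For illustration only, a hypothetical trigger:
+//
+//	var i int64 = 1 << 40
+//	_ = s[i] // jumps to panicExtendIndex with i's halves in R4 and R0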
+TEXT runtime·panicExtendIndex(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R0, lo+4(FP) + MOVW R1, y+8(FP) + JMP runtime·goPanicExtendIndex(SB) +TEXT runtime·panicExtendIndexU(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R0, lo+4(FP) + MOVW R1, y+8(FP) + JMP runtime·goPanicExtendIndexU(SB) +TEXT runtime·panicExtendSliceAlen(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R1, lo+4(FP) + MOVW R2, y+8(FP) + JMP runtime·goPanicExtendSliceAlen(SB) +TEXT runtime·panicExtendSliceAlenU(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R1, lo+4(FP) + MOVW R2, y+8(FP) + JMP runtime·goPanicExtendSliceAlenU(SB) +TEXT runtime·panicExtendSliceAcap(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R1, lo+4(FP) + MOVW R2, y+8(FP) + JMP runtime·goPanicExtendSliceAcap(SB) +TEXT runtime·panicExtendSliceAcapU(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R1, lo+4(FP) + MOVW R2, y+8(FP) + JMP runtime·goPanicExtendSliceAcapU(SB) +TEXT runtime·panicExtendSliceB(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R0, lo+4(FP) + MOVW R1, y+8(FP) + JMP runtime·goPanicExtendSliceB(SB) +TEXT runtime·panicExtendSliceBU(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R0, lo+4(FP) + MOVW R1, y+8(FP) + JMP runtime·goPanicExtendSliceBU(SB) +TEXT runtime·panicExtendSlice3Alen(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R2, lo+4(FP) + MOVW R3, y+8(FP) + JMP runtime·goPanicExtendSlice3Alen(SB) +TEXT runtime·panicExtendSlice3AlenU(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R2, lo+4(FP) + MOVW R3, y+8(FP) + JMP runtime·goPanicExtendSlice3AlenU(SB) +TEXT runtime·panicExtendSlice3Acap(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R2, lo+4(FP) + MOVW R3, y+8(FP) + JMP runtime·goPanicExtendSlice3Acap(SB) +TEXT runtime·panicExtendSlice3AcapU(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R2, lo+4(FP) + MOVW R3, y+8(FP) + JMP runtime·goPanicExtendSlice3AcapU(SB) +TEXT runtime·panicExtendSlice3B(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R1, lo+4(FP) + MOVW R2, y+8(FP) + JMP runtime·goPanicExtendSlice3B(SB) +TEXT runtime·panicExtendSlice3BU(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R1, lo+4(FP) + MOVW R2, y+8(FP) + JMP runtime·goPanicExtendSlice3BU(SB) +TEXT runtime·panicExtendSlice3C(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R0, lo+4(FP) + MOVW R1, y+8(FP) + JMP runtime·goPanicExtendSlice3C(SB) +TEXT runtime·panicExtendSlice3CU(SB),NOSPLIT,$0-12 + MOVW R4, hi+0(FP) + MOVW R0, lo+4(FP) + MOVW R1, y+8(FP) + JMP runtime·goPanicExtendSlice3CU(SB) diff --git a/platform/dbops/binaries/go/go/src/runtime/asm_arm64.s b/platform/dbops/binaries/go/go/src/runtime/asm_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..6d77b08a1b90d6db55b6876ff29862c67514c662 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asm_arm64.s @@ -0,0 +1,1598 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "go_tls.h" +#include "tls_arm64.h" +#include "funcdata.h" +#include "textflag.h" + +TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0 + // SP = stack; R0 = argc; R1 = argv + + SUB $32, RSP + MOVW R0, 8(RSP) // argc + MOVD R1, 16(RSP) // argv + +#ifdef TLS_darwin + // Initialize TLS. + MOVD ZR, g // clear g, make sure it's not junk. + SUB $32, RSP + MRS_TPIDR_R0 + AND $~7, R0 + MOVD R0, 16(RSP) // arg2: TLS base + MOVD $runtime·tls_g(SB), R2 + MOVD R2, 8(RSP) // arg1: &tlsg + BL ·tlsinit(SB) + ADD $32, RSP +#endif + + // create istack out of the given (operating system) stack. 
+ // _cgo_init may update stackguard. + MOVD $runtime·g0(SB), g + MOVD RSP, R7 + MOVD $(-64*1024)(R7), R0 + MOVD R0, g_stackguard0(g) + MOVD R0, g_stackguard1(g) + MOVD R0, (g_stack+stack_lo)(g) + MOVD R7, (g_stack+stack_hi)(g) + + // if there is a _cgo_init, call it using the gcc ABI. + MOVD _cgo_init(SB), R12 + CBZ R12, nocgo + +#ifdef GOOS_android + MRS_TPIDR_R0 // load TLS base pointer + MOVD R0, R3 // arg 3: TLS base pointer + MOVD $runtime·tls_g(SB), R2 // arg 2: &tls_g +#else + MOVD $0, R2 // arg 2: not used when using platform's TLS +#endif + MOVD $setg_gcc<>(SB), R1 // arg 1: setg + MOVD g, R0 // arg 0: G + SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved. + BL (R12) + ADD $16, RSP + +nocgo: + BL runtime·save_g(SB) + // update stackguard after _cgo_init + MOVD (g_stack+stack_lo)(g), R0 + ADD $const_stackGuard, R0 + MOVD R0, g_stackguard0(g) + MOVD R0, g_stackguard1(g) + + // set the per-goroutine and per-mach "registers" + MOVD $runtime·m0(SB), R0 + + // save m->g0 = g0 + MOVD g, m_g0(R0) + // save m0 to g0->m + MOVD R0, g_m(g) + + BL runtime·check(SB) + +#ifdef GOOS_windows + BL runtime·wintls(SB) +#endif + + MOVW 8(RSP), R0 // copy argc + MOVW R0, -8(RSP) + MOVD 16(RSP), R0 // copy argv + MOVD R0, 0(RSP) + BL runtime·args(SB) + BL runtime·osinit(SB) + BL runtime·schedinit(SB) + + // create a new goroutine to start program + MOVD $runtime·mainPC(SB), R0 // entry + SUB $16, RSP + MOVD R0, 8(RSP) // arg + MOVD $0, 0(RSP) // dummy LR + BL runtime·newproc(SB) + ADD $16, RSP + + // start this M + BL runtime·mstart(SB) + + // Prevent dead-code elimination of debugCallV2, which is + // intended to be called by debuggers. + MOVD $runtime·debugCallV2(SB), R0 + + MOVD $0, R0 + MOVD R0, (R0) // boom + UNDEF + +DATA runtime·mainPC+0(SB)/8,$runtime·main(SB) +GLOBL runtime·mainPC(SB),RODATA,$8 + +// Windows ARM64 needs an immediate 0xf000 argument. +// See go.dev/issues/53837. +#define BREAK \ +#ifdef GOOS_windows \ + BRK $0xf000 \ +#else \ + BRK \ +#endif \ + + +TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0 + BREAK + RET + +TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 + RET + +TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0 + BL runtime·mstart0(SB) + RET // not reached + +/* + * go-routine + */ + +// void gogo(Gobuf*) +// restore state from Gobuf; longjmp +TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8 + MOVD buf+0(FP), R5 + MOVD gobuf_g(R5), R6 + MOVD 0(R6), R4 // make sure g != nil + B gogo<>(SB) + +TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0 + MOVD R6, g + BL runtime·save_g(SB) + + MOVD gobuf_sp(R5), R0 + MOVD R0, RSP + MOVD gobuf_bp(R5), R29 + MOVD gobuf_lr(R5), LR + MOVD gobuf_ret(R5), R0 + MOVD gobuf_ctxt(R5), R26 + MOVD $0, gobuf_sp(R5) + MOVD $0, gobuf_bp(R5) + MOVD $0, gobuf_ret(R5) + MOVD $0, gobuf_lr(R5) + MOVD $0, gobuf_ctxt(R5) + CMP ZR, ZR // set condition codes for == test, needed by stack split + MOVD gobuf_pc(R5), R6 + B (R6) + +// void mcall(fn func(*g)) +// Switch to m->g0's stack, call fn(g). +// Fn must never return. It should gogo(&g->sched) +// to keep running g. +TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 + MOVD R0, R26 // context + + // Save caller state in g->sched + MOVD RSP, R0 + MOVD R0, (g_sched+gobuf_sp)(g) + MOVD R29, (g_sched+gobuf_bp)(g) + MOVD LR, (g_sched+gobuf_pc)(g) + MOVD $0, (g_sched+gobuf_lr)(g) + + // Switch to m->g0 & its stack, call fn. 
+ MOVD g, R3 + MOVD g_m(g), R8 + MOVD m_g0(R8), g + BL runtime·save_g(SB) + CMP g, R3 + BNE 2(PC) + B runtime·badmcall(SB) + + MOVD (g_sched+gobuf_sp)(g), R0 + MOVD R0, RSP // sp = m->g0->sched.sp + MOVD (g_sched+gobuf_bp)(g), R29 + MOVD R3, R0 // arg = g + MOVD $0, -16(RSP) // dummy LR + SUB $16, RSP + MOVD 0(R26), R4 // code pointer + BL (R4) + B runtime·badmcall2(SB) + +// systemstack_switch is a dummy routine that systemstack leaves at the bottom +// of the G stack. We need to distinguish the routine that +// lives at the bottom of the G stack from the one that lives +// at the top of the system stack because the one at the top of +// the system stack terminates the stack walk (see topofstack()). +TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0 + UNDEF + BL (LR) // make sure this function is not leaf + RET + +// func systemstack(fn func()) +TEXT runtime·systemstack(SB), NOSPLIT, $0-8 + MOVD fn+0(FP), R3 // R3 = fn + MOVD R3, R26 // context + MOVD g_m(g), R4 // R4 = m + + MOVD m_gsignal(R4), R5 // R5 = gsignal + CMP g, R5 + BEQ noswitch + + MOVD m_g0(R4), R5 // R5 = g0 + CMP g, R5 + BEQ noswitch + + MOVD m_curg(R4), R6 + CMP g, R6 + BEQ switch + + // Bad: g is not gsignal, not g0, not curg. What is it? + // Hide call from linker nosplit analysis. + MOVD $runtime·badsystemstack(SB), R3 + BL (R3) + B runtime·abort(SB) + +switch: + // save our state in g->sched. Pretend to + // be systemstack_switch if the G stack is scanned. + BL gosave_systemstack_switch<>(SB) + + // switch to g0 + MOVD R5, g + BL runtime·save_g(SB) + MOVD (g_sched+gobuf_sp)(g), R3 + MOVD R3, RSP + MOVD (g_sched+gobuf_bp)(g), R29 + + // call target function + MOVD 0(R26), R3 // code pointer + BL (R3) + + // switch back to g + MOVD g_m(g), R3 + MOVD m_curg(R3), g + BL runtime·save_g(SB) + MOVD (g_sched+gobuf_sp)(g), R0 + MOVD R0, RSP + MOVD (g_sched+gobuf_bp)(g), R29 + MOVD $0, (g_sched+gobuf_sp)(g) + MOVD $0, (g_sched+gobuf_bp)(g) + RET + +noswitch: + // already on m stack, just call directly + // Using a tail call here cleans up tracebacks since we won't stop + // at an intermediate systemstack. + MOVD 0(R26), R3 // code pointer + MOVD.P 16(RSP), R30 // restore LR + SUB $8, RSP, R29 // restore FP + B (R3) + +// func switchToCrashStack0(fn func()) +TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-8 + MOVD R0, R26 // context register + MOVD g_m(g), R1 // curm + + // set g to gcrash + MOVD $runtime·gcrash(SB), g // g = &gcrash + BL runtime·save_g(SB) // clobbers R0 + MOVD R1, g_m(g) // g.m = curm + MOVD g, m_g0(R1) // curm.g0 = g + + // switch to crashstack + MOVD (g_stack+stack_hi)(g), R1 + SUB $(4*8), R1 + MOVD R1, RSP + + // call target function + MOVD 0(R26), R0 + CALL (R0) + + // should never return + CALL runtime·abort(SB) + UNDEF + +/* + * support for morestack + */ + +// Called during function prolog when more stack is needed. +// Caller has already loaded: +// R3 prolog's LR (R30) +// +// The traceback routines see morestack on a g0 as being +// the top of a stack (for example, morestack calling newstack +// calling the scheduler calling newm calling gc), so we must +// record an argument size. For that purpose, it has no arguments. +TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 + // Cannot grow scheduler stack (m->g0). + MOVD g_m(g), R8 + MOVD m_g0(R8), R4 + + // Called from f. 
+ // Set g->sched to context in f
+ MOVD RSP, R0
+ MOVD R0, (g_sched+gobuf_sp)(g)
+ MOVD R29, (g_sched+gobuf_bp)(g)
+ MOVD LR, (g_sched+gobuf_pc)(g)
+ MOVD R3, (g_sched+gobuf_lr)(g)
+ MOVD R26, (g_sched+gobuf_ctxt)(g)
+
+ CMP g, R4
+ BNE 3(PC)
+ BL runtime·badmorestackg0(SB)
+ B runtime·abort(SB)
+
+ // Cannot grow signal stack (m->gsignal).
+ MOVD m_gsignal(R8), R4
+ CMP g, R4
+ BNE 3(PC)
+ BL runtime·badmorestackgsignal(SB)
+ B runtime·abort(SB)
+
+ // Called from f.
+ // Set m->morebuf to f's caller.
+ MOVD R3, (m_morebuf+gobuf_pc)(R8) // f's caller's PC
+ MOVD RSP, R0
+ MOVD R0, (m_morebuf+gobuf_sp)(R8) // f's caller's RSP
+ MOVD g, (m_morebuf+gobuf_g)(R8)
+
+ // Call newstack on m->g0's stack.
+ MOVD m_g0(R8), g
+ BL runtime·save_g(SB)
+ MOVD (g_sched+gobuf_sp)(g), R0
+ MOVD R0, RSP
+ MOVD (g_sched+gobuf_bp)(g), R29
+ MOVD.W $0, -16(RSP) // create a call frame on g0 (saved LR; keep 16-aligned)
+ BL runtime·newstack(SB)
+
+ // Not reached, but make sure the return PC from the call to newstack
+ // is still in this function, and not the beginning of the next.
+ UNDEF
+
+TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
+ // Force SPWRITE. This function doesn't actually write SP,
+ // but it is called with a special calling convention where
+ // the caller doesn't save LR on stack but passes it as a
+ // register (R3), and the unwinder currently doesn't understand.
+ // Make it SPWRITE to stop unwinding. (See issue 54332)
+ MOVD RSP, RSP
+
+ MOVW $0, R26
+ B runtime·morestack(SB)
+
+// spillArgs stores return values from registers to a *internal/abi.RegArgs in R20.
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
+ STP (R0, R1), (0*8)(R20)
+ STP (R2, R3), (2*8)(R20)
+ STP (R4, R5), (4*8)(R20)
+ STP (R6, R7), (6*8)(R20)
+ STP (R8, R9), (8*8)(R20)
+ STP (R10, R11), (10*8)(R20)
+ STP (R12, R13), (12*8)(R20)
+ STP (R14, R15), (14*8)(R20)
+ FSTPD (F0, F1), (16*8)(R20)
+ FSTPD (F2, F3), (18*8)(R20)
+ FSTPD (F4, F5), (20*8)(R20)
+ FSTPD (F6, F7), (22*8)(R20)
+ FSTPD (F8, F9), (24*8)(R20)
+ FSTPD (F10, F11), (26*8)(R20)
+ FSTPD (F12, F13), (28*8)(R20)
+ FSTPD (F14, F15), (30*8)(R20)
+ RET
+
+// unspillArgs loads args into registers from a *internal/abi.RegArgs in R20.
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
+ LDP (0*8)(R20), (R0, R1)
+ LDP (2*8)(R20), (R2, R3)
+ LDP (4*8)(R20), (R4, R5)
+ LDP (6*8)(R20), (R6, R7)
+ LDP (8*8)(R20), (R8, R9)
+ LDP (10*8)(R20), (R10, R11)
+ LDP (12*8)(R20), (R12, R13)
+ LDP (14*8)(R20), (R14, R15)
+ FLDPD (16*8)(R20), (F0, F1)
+ FLDPD (18*8)(R20), (F2, F3)
+ FLDPD (20*8)(R20), (F4, F5)
+ FLDPD (22*8)(R20), (F6, F7)
+ FLDPD (24*8)(R20), (F8, F9)
+ FLDPD (26*8)(R20), (F10, F11)
+ FLDPD (28*8)(R20), (F12, F13)
+ FLDPD (30*8)(R20), (F14, F15)
+ RET
+
+// reflectcall: call a function with the given argument list
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
+// we don't have variable-sized frames, so we use a small number
+// of constant-sized-frame functions to encode a few bits of size in the pc.
+// Caution: ugly multiline assembly macros in your future!
+
+#define DISPATCH(NAME,MAXSIZE) \
+ MOVD $MAXSIZE, R27; \
+ CMP R27, R16; \
+ BGT 3(PC); \
+ MOVD $NAME(SB), R27; \
+ B (R27)
+// Note: can't just "B NAME(SB)" - bad inlining results.
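+//
+// Roughly, the DISPATCH chain in reflectcall below picks the smallest
+// frame class call<N> with N >= frameSize and tail-jumps to it. As a
+// Go-style sketch (illustrative only; the real transfer is the B (R27)
+// in the macro above):
+//
+//	for n := uintptr(16); n <= 1<<30; n *= 2 {
+//		if frameSize <= n {
+//			jump runtime·call<n> // tail call; never returns
+//		}
+//	}
+//	jump runtime·badreflectcall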
+
+TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
+ MOVWU frameSize+32(FP), R16
+ DISPATCH(runtime·call16, 16)
+ DISPATCH(runtime·call32, 32)
+ DISPATCH(runtime·call64, 64)
+ DISPATCH(runtime·call128, 128)
+ DISPATCH(runtime·call256, 256)
+ DISPATCH(runtime·call512, 512)
+ DISPATCH(runtime·call1024, 1024)
+ DISPATCH(runtime·call2048, 2048)
+ DISPATCH(runtime·call4096, 4096)
+ DISPATCH(runtime·call8192, 8192)
+ DISPATCH(runtime·call16384, 16384)
+ DISPATCH(runtime·call32768, 32768)
+ DISPATCH(runtime·call65536, 65536)
+ DISPATCH(runtime·call131072, 131072)
+ DISPATCH(runtime·call262144, 262144)
+ DISPATCH(runtime·call524288, 524288)
+ DISPATCH(runtime·call1048576, 1048576)
+ DISPATCH(runtime·call2097152, 2097152)
+ DISPATCH(runtime·call4194304, 4194304)
+ DISPATCH(runtime·call8388608, 8388608)
+ DISPATCH(runtime·call16777216, 16777216)
+ DISPATCH(runtime·call33554432, 33554432)
+ DISPATCH(runtime·call67108864, 67108864)
+ DISPATCH(runtime·call134217728, 134217728)
+ DISPATCH(runtime·call268435456, 268435456)
+ DISPATCH(runtime·call536870912, 536870912)
+ DISPATCH(runtime·call1073741824, 1073741824)
+ MOVD $runtime·badreflectcall(SB), R0
+ B (R0)
+
+#define CALLFN(NAME,MAXSIZE) \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
+ NO_LOCAL_POINTERS; \
+ /* copy arguments to stack */ \
+ MOVD stackArgs+16(FP), R3; \
+ MOVWU stackArgsSize+24(FP), R4; \
+ ADD $8, RSP, R5; \
+ BIC $0xf, R4, R6; \
+ CBZ R6, 6(PC); \
+ /* if R6=(argsize&~15) != 0 */ \
+ ADD R6, R5, R6; \
+ /* copy 16 bytes at a time */ \
+ LDP.P 16(R3), (R7, R8); \
+ STP.P (R7, R8), 16(R5); \
+ CMP R5, R6; \
+ BNE -3(PC); \
+ AND $0xf, R4, R6; \
+ CBZ R6, 6(PC); \
+ /* if R6=(argsize&15) != 0 */ \
+ ADD R6, R5, R6; \
+ /* copy 1 byte at a time for the rest */ \
+ MOVBU.P 1(R3), R7; \
+ MOVBU.P R7, 1(R5); \
+ CMP R5, R6; \
+ BNE -3(PC); \
+ /* set up argument registers */ \
+ MOVD regArgs+40(FP), R20; \
+ CALL ·unspillArgs(SB); \
+ /* call function */ \
+ MOVD f+8(FP), R26; \
+ MOVD (R26), R20; \
+ PCDATA $PCDATA_StackMapIndex, $0; \
+ BL (R20); \
+ /* copy return values back */ \
+ MOVD regArgs+40(FP), R20; \
+ CALL ·spillArgs(SB); \
+ MOVD stackArgsType+0(FP), R7; \
+ MOVD stackArgs+16(FP), R3; \
+ MOVWU stackArgsSize+24(FP), R4; \
+ MOVWU stackRetOffset+28(FP), R6; \
+ ADD $8, RSP, R5; \
+ ADD R6, R5; \
+ ADD R6, R3; \
+ SUB R6, R4; \
+ BL callRet<>(SB); \
+ RET
+
+// callRet copies return values back at the end of call*. This is a
+// separate function so it can allocate stack space for the arguments
+// to reflectcallmove. It does not follow the Go ABI; it expects its
+// arguments in registers.
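+// Concretely, the CALLFN epilogue loads R7 = stackArgsType,
+// R3 = stackArgs+stackRetOffset (the destination), R5 = the return-value
+// area within the local call frame (the source), R4 = stackArgsSize minus
+// stackRetOffset (bytes to copy), and R20 = regArgs; callRet stores these
+// as the stack arguments of reflectcallmove.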
+TEXT callRet<>(SB), NOSPLIT, $48-0 + NO_LOCAL_POINTERS + STP (R7, R3), 8(RSP) + STP (R5, R4), 24(RSP) + MOVD R20, 40(RSP) + BL runtime·reflectcallmove(SB) + RET + +CALLFN(·call16, 16) +CALLFN(·call32, 32) +CALLFN(·call64, 64) +CALLFN(·call128, 128) +CALLFN(·call256, 256) +CALLFN(·call512, 512) +CALLFN(·call1024, 1024) +CALLFN(·call2048, 2048) +CALLFN(·call4096, 4096) +CALLFN(·call8192, 8192) +CALLFN(·call16384, 16384) +CALLFN(·call32768, 32768) +CALLFN(·call65536, 65536) +CALLFN(·call131072, 131072) +CALLFN(·call262144, 262144) +CALLFN(·call524288, 524288) +CALLFN(·call1048576, 1048576) +CALLFN(·call2097152, 2097152) +CALLFN(·call4194304, 4194304) +CALLFN(·call8388608, 8388608) +CALLFN(·call16777216, 16777216) +CALLFN(·call33554432, 33554432) +CALLFN(·call67108864, 67108864) +CALLFN(·call134217728, 134217728) +CALLFN(·call268435456, 268435456) +CALLFN(·call536870912, 536870912) +CALLFN(·call1073741824, 1073741824) + +// func memhash32(p unsafe.Pointer, h uintptr) uintptr +TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24 + MOVB runtime·useAeshash(SB), R10 + CBZ R10, noaes + MOVD $runtime·aeskeysched+0(SB), R3 + + VEOR V0.B16, V0.B16, V0.B16 + VLD1 (R3), [V2.B16] + VLD1 (R0), V0.S[1] + VMOV R1, V0.S[0] + + AESE V2.B16, V0.B16 + AESMC V0.B16, V0.B16 + AESE V2.B16, V0.B16 + AESMC V0.B16, V0.B16 + AESE V2.B16, V0.B16 + + VMOV V0.D[0], R0 + RET +noaes: + B runtime·memhash32Fallback(SB) + +// func memhash64(p unsafe.Pointer, h uintptr) uintptr +TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24 + MOVB runtime·useAeshash(SB), R10 + CBZ R10, noaes + MOVD $runtime·aeskeysched+0(SB), R3 + + VEOR V0.B16, V0.B16, V0.B16 + VLD1 (R3), [V2.B16] + VLD1 (R0), V0.D[1] + VMOV R1, V0.D[0] + + AESE V2.B16, V0.B16 + AESMC V0.B16, V0.B16 + AESE V2.B16, V0.B16 + AESMC V0.B16, V0.B16 + AESE V2.B16, V0.B16 + + VMOV V0.D[0], R0 + RET +noaes: + B runtime·memhash64Fallback(SB) + +// func memhash(p unsafe.Pointer, h, size uintptr) uintptr +TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32 + MOVB runtime·useAeshash(SB), R10 + CBZ R10, noaes + B aeshashbody<>(SB) +noaes: + B runtime·memhashFallback(SB) + +// func strhash(p unsafe.Pointer, h uintptr) uintptr +TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24 + MOVB runtime·useAeshash(SB), R10 + CBZ R10, noaes + LDP (R0), (R0, R2) // string data / length + B aeshashbody<>(SB) +noaes: + B runtime·strhashFallback(SB) + +// R0: data +// R1: seed data +// R2: length +// At return, R0 = return value +TEXT aeshashbody<>(SB),NOSPLIT|NOFRAME,$0 + VEOR V30.B16, V30.B16, V30.B16 + VMOV R1, V30.D[0] + VMOV R2, V30.D[1] // load length into seed + + MOVD $runtime·aeskeysched+0(SB), R4 + VLD1.P 16(R4), [V0.B16] + AESE V30.B16, V0.B16 + AESMC V0.B16, V0.B16 + CMP $16, R2 + BLO aes0to15 + BEQ aes16 + CMP $32, R2 + BLS aes17to32 + CMP $64, R2 + BLS aes33to64 + CMP $128, R2 + BLS aes65to128 + B aes129plus + +aes0to15: + CBZ R2, aes0 + VEOR V2.B16, V2.B16, V2.B16 + TBZ $3, R2, less_than_8 + VLD1.P 8(R0), V2.D[0] + +less_than_8: + TBZ $2, R2, less_than_4 + VLD1.P 4(R0), V2.S[2] + +less_than_4: + TBZ $1, R2, less_than_2 + VLD1.P 2(R0), V2.H[6] + +less_than_2: + TBZ $0, R2, done + VLD1 (R0), V2.B[14] +done: + AESE V0.B16, V2.B16 + AESMC V2.B16, V2.B16 + AESE V0.B16, V2.B16 + AESMC V2.B16, V2.B16 + AESE V0.B16, V2.B16 + AESMC V2.B16, V2.B16 + + VMOV V2.D[0], R0 + RET + +aes0: + VMOV V0.D[0], R0 + RET + +aes16: + VLD1 (R0), [V2.B16] + B done + +aes17to32: + // make second seed + VLD1 (R4), [V1.B16] + AESE V30.B16, V1.B16 + AESMC V1.B16, V1.B16 + SUB $16, R2, R10 + VLD1.P (R0)(R10), [V2.B16] + VLD1 (R0), 
[V3.B16] + + AESE V0.B16, V2.B16 + AESMC V2.B16, V2.B16 + AESE V1.B16, V3.B16 + AESMC V3.B16, V3.B16 + + AESE V0.B16, V2.B16 + AESMC V2.B16, V2.B16 + AESE V1.B16, V3.B16 + AESMC V3.B16, V3.B16 + + AESE V0.B16, V2.B16 + AESE V1.B16, V3.B16 + + VEOR V3.B16, V2.B16, V2.B16 + + VMOV V2.D[0], R0 + RET + +aes33to64: + VLD1 (R4), [V1.B16, V2.B16, V3.B16] + AESE V30.B16, V1.B16 + AESMC V1.B16, V1.B16 + AESE V30.B16, V2.B16 + AESMC V2.B16, V2.B16 + AESE V30.B16, V3.B16 + AESMC V3.B16, V3.B16 + SUB $32, R2, R10 + + VLD1.P (R0)(R10), [V4.B16, V5.B16] + VLD1 (R0), [V6.B16, V7.B16] + + AESE V0.B16, V4.B16 + AESMC V4.B16, V4.B16 + AESE V1.B16, V5.B16 + AESMC V5.B16, V5.B16 + AESE V2.B16, V6.B16 + AESMC V6.B16, V6.B16 + AESE V3.B16, V7.B16 + AESMC V7.B16, V7.B16 + + AESE V0.B16, V4.B16 + AESMC V4.B16, V4.B16 + AESE V1.B16, V5.B16 + AESMC V5.B16, V5.B16 + AESE V2.B16, V6.B16 + AESMC V6.B16, V6.B16 + AESE V3.B16, V7.B16 + AESMC V7.B16, V7.B16 + + AESE V0.B16, V4.B16 + AESE V1.B16, V5.B16 + AESE V2.B16, V6.B16 + AESE V3.B16, V7.B16 + + VEOR V6.B16, V4.B16, V4.B16 + VEOR V7.B16, V5.B16, V5.B16 + VEOR V5.B16, V4.B16, V4.B16 + + VMOV V4.D[0], R0 + RET + +aes65to128: + VLD1.P 64(R4), [V1.B16, V2.B16, V3.B16, V4.B16] + VLD1 (R4), [V5.B16, V6.B16, V7.B16] + AESE V30.B16, V1.B16 + AESMC V1.B16, V1.B16 + AESE V30.B16, V2.B16 + AESMC V2.B16, V2.B16 + AESE V30.B16, V3.B16 + AESMC V3.B16, V3.B16 + AESE V30.B16, V4.B16 + AESMC V4.B16, V4.B16 + AESE V30.B16, V5.B16 + AESMC V5.B16, V5.B16 + AESE V30.B16, V6.B16 + AESMC V6.B16, V6.B16 + AESE V30.B16, V7.B16 + AESMC V7.B16, V7.B16 + + SUB $64, R2, R10 + VLD1.P (R0)(R10), [V8.B16, V9.B16, V10.B16, V11.B16] + VLD1 (R0), [V12.B16, V13.B16, V14.B16, V15.B16] + AESE V0.B16, V8.B16 + AESMC V8.B16, V8.B16 + AESE V1.B16, V9.B16 + AESMC V9.B16, V9.B16 + AESE V2.B16, V10.B16 + AESMC V10.B16, V10.B16 + AESE V3.B16, V11.B16 + AESMC V11.B16, V11.B16 + AESE V4.B16, V12.B16 + AESMC V12.B16, V12.B16 + AESE V5.B16, V13.B16 + AESMC V13.B16, V13.B16 + AESE V6.B16, V14.B16 + AESMC V14.B16, V14.B16 + AESE V7.B16, V15.B16 + AESMC V15.B16, V15.B16 + + AESE V0.B16, V8.B16 + AESMC V8.B16, V8.B16 + AESE V1.B16, V9.B16 + AESMC V9.B16, V9.B16 + AESE V2.B16, V10.B16 + AESMC V10.B16, V10.B16 + AESE V3.B16, V11.B16 + AESMC V11.B16, V11.B16 + AESE V4.B16, V12.B16 + AESMC V12.B16, V12.B16 + AESE V5.B16, V13.B16 + AESMC V13.B16, V13.B16 + AESE V6.B16, V14.B16 + AESMC V14.B16, V14.B16 + AESE V7.B16, V15.B16 + AESMC V15.B16, V15.B16 + + AESE V0.B16, V8.B16 + AESE V1.B16, V9.B16 + AESE V2.B16, V10.B16 + AESE V3.B16, V11.B16 + AESE V4.B16, V12.B16 + AESE V5.B16, V13.B16 + AESE V6.B16, V14.B16 + AESE V7.B16, V15.B16 + + VEOR V12.B16, V8.B16, V8.B16 + VEOR V13.B16, V9.B16, V9.B16 + VEOR V14.B16, V10.B16, V10.B16 + VEOR V15.B16, V11.B16, V11.B16 + VEOR V10.B16, V8.B16, V8.B16 + VEOR V11.B16, V9.B16, V9.B16 + VEOR V9.B16, V8.B16, V8.B16 + + VMOV V8.D[0], R0 + RET + +aes129plus: + PRFM (R0), PLDL1KEEP + VLD1.P 64(R4), [V1.B16, V2.B16, V3.B16, V4.B16] + VLD1 (R4), [V5.B16, V6.B16, V7.B16] + AESE V30.B16, V1.B16 + AESMC V1.B16, V1.B16 + AESE V30.B16, V2.B16 + AESMC V2.B16, V2.B16 + AESE V30.B16, V3.B16 + AESMC V3.B16, V3.B16 + AESE V30.B16, V4.B16 + AESMC V4.B16, V4.B16 + AESE V30.B16, V5.B16 + AESMC V5.B16, V5.B16 + AESE V30.B16, V6.B16 + AESMC V6.B16, V6.B16 + AESE V30.B16, V7.B16 + AESMC V7.B16, V7.B16 + ADD R0, R2, R10 + SUB $128, R10, R10 + VLD1.P 64(R10), [V8.B16, V9.B16, V10.B16, V11.B16] + VLD1 (R10), [V12.B16, V13.B16, V14.B16, V15.B16] + SUB $1, R2, R2 + LSR $7, R2, R2 + +aesloop: + AESE V8.B16, V0.B16 + 
AESMC V0.B16, V0.B16 + AESE V9.B16, V1.B16 + AESMC V1.B16, V1.B16 + AESE V10.B16, V2.B16 + AESMC V2.B16, V2.B16 + AESE V11.B16, V3.B16 + AESMC V3.B16, V3.B16 + AESE V12.B16, V4.B16 + AESMC V4.B16, V4.B16 + AESE V13.B16, V5.B16 + AESMC V5.B16, V5.B16 + AESE V14.B16, V6.B16 + AESMC V6.B16, V6.B16 + AESE V15.B16, V7.B16 + AESMC V7.B16, V7.B16 + + VLD1.P 64(R0), [V8.B16, V9.B16, V10.B16, V11.B16] + AESE V8.B16, V0.B16 + AESMC V0.B16, V0.B16 + AESE V9.B16, V1.B16 + AESMC V1.B16, V1.B16 + AESE V10.B16, V2.B16 + AESMC V2.B16, V2.B16 + AESE V11.B16, V3.B16 + AESMC V3.B16, V3.B16 + + VLD1.P 64(R0), [V12.B16, V13.B16, V14.B16, V15.B16] + AESE V12.B16, V4.B16 + AESMC V4.B16, V4.B16 + AESE V13.B16, V5.B16 + AESMC V5.B16, V5.B16 + AESE V14.B16, V6.B16 + AESMC V6.B16, V6.B16 + AESE V15.B16, V7.B16 + AESMC V7.B16, V7.B16 + SUB $1, R2, R2 + CBNZ R2, aesloop + + AESE V8.B16, V0.B16 + AESMC V0.B16, V0.B16 + AESE V9.B16, V1.B16 + AESMC V1.B16, V1.B16 + AESE V10.B16, V2.B16 + AESMC V2.B16, V2.B16 + AESE V11.B16, V3.B16 + AESMC V3.B16, V3.B16 + AESE V12.B16, V4.B16 + AESMC V4.B16, V4.B16 + AESE V13.B16, V5.B16 + AESMC V5.B16, V5.B16 + AESE V14.B16, V6.B16 + AESMC V6.B16, V6.B16 + AESE V15.B16, V7.B16 + AESMC V7.B16, V7.B16 + + AESE V8.B16, V0.B16 + AESMC V0.B16, V0.B16 + AESE V9.B16, V1.B16 + AESMC V1.B16, V1.B16 + AESE V10.B16, V2.B16 + AESMC V2.B16, V2.B16 + AESE V11.B16, V3.B16 + AESMC V3.B16, V3.B16 + AESE V12.B16, V4.B16 + AESMC V4.B16, V4.B16 + AESE V13.B16, V5.B16 + AESMC V5.B16, V5.B16 + AESE V14.B16, V6.B16 + AESMC V6.B16, V6.B16 + AESE V15.B16, V7.B16 + AESMC V7.B16, V7.B16 + + AESE V8.B16, V0.B16 + AESE V9.B16, V1.B16 + AESE V10.B16, V2.B16 + AESE V11.B16, V3.B16 + AESE V12.B16, V4.B16 + AESE V13.B16, V5.B16 + AESE V14.B16, V6.B16 + AESE V15.B16, V7.B16 + + VEOR V0.B16, V1.B16, V0.B16 + VEOR V2.B16, V3.B16, V2.B16 + VEOR V4.B16, V5.B16, V4.B16 + VEOR V6.B16, V7.B16, V6.B16 + VEOR V0.B16, V2.B16, V0.B16 + VEOR V4.B16, V6.B16, V4.B16 + VEOR V4.B16, V0.B16, V0.B16 + + VMOV V0.D[0], R0 + RET + +TEXT runtime·procyield(SB),NOSPLIT,$0-0 + MOVWU cycles+0(FP), R0 +again: + YIELD + SUBW $1, R0 + CBNZ R0, again + RET + +// Save state of caller into g->sched, +// but using fake PC from systemstack_switch. +// Must only be called from functions with no locals ($0) +// or else unwinding from systemstack_switch is incorrect. +// Smashes R0. +TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 + MOVD $runtime·systemstack_switch(SB), R0 + ADD $8, R0 // get past prologue + MOVD R0, (g_sched+gobuf_pc)(g) + MOVD RSP, R0 + MOVD R0, (g_sched+gobuf_sp)(g) + MOVD R29, (g_sched+gobuf_bp)(g) + MOVD $0, (g_sched+gobuf_lr)(g) + MOVD $0, (g_sched+gobuf_ret)(g) + // Assert ctxt is zero. See func save. + MOVD (g_sched+gobuf_ctxt)(g), R0 + CBZ R0, 2(PC) + CALL runtime·abort(SB) + RET + +// func asmcgocall_no_g(fn, arg unsafe.Pointer) +// Call fn(arg) aligned appropriately for the gcc ABI. +// Called on a system stack, and there may be no g yet (during needm). +TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16 + MOVD fn+0(FP), R1 + MOVD arg+8(FP), R0 + SUB $16, RSP // skip over saved frame pointer below RSP + BL (R1) + ADD $16, RSP // skip over saved frame pointer below RSP + RET + +// func asmcgocall(fn, arg unsafe.Pointer) int32 +// Call fn(arg) on the scheduler stack, +// aligned appropriately for the gcc ABI. +// See cgocall.go for more details. 
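+// In outline: if we are already on g0 or gsignal (or have no g at all),
+// fn is called directly on the current stack; otherwise the state is
+// saved via gosave_systemstack_switch<>, execution switches to g0's
+// stack, fn(arg) is called with the C ABI, and the stacks are switched
+// back. The depth into the old g stack is saved rather than the SP
+// itself, since the goroutine stack may be copied while a callback runs.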
+TEXT ·asmcgocall(SB),NOSPLIT,$0-20
+ MOVD fn+0(FP), R1
+ MOVD arg+8(FP), R0
+
+ MOVD RSP, R2 // save original stack pointer
+ CBZ g, nosave
+ MOVD g, R4
+
+ // Figure out if we need to switch to m->g0 stack.
+ // We get called to create new OS threads too, and those
+ // come in on the m->g0 stack already. Or we might already
+ // be on the m->gsignal stack.
+ MOVD g_m(g), R8
+ MOVD m_gsignal(R8), R3
+ CMP R3, g
+ BEQ nosave
+ MOVD m_g0(R8), R3
+ CMP R3, g
+ BEQ nosave
+
+ // Switch to system stack.
+ MOVD R0, R9 // gosave_systemstack_switch<> and save_g might clobber R0
+ BL gosave_systemstack_switch<>(SB)
+ MOVD R3, g
+ BL runtime·save_g(SB)
+ MOVD (g_sched+gobuf_sp)(g), R0
+ MOVD R0, RSP
+ MOVD (g_sched+gobuf_bp)(g), R29
+ MOVD R9, R0
+
+ // Now on a scheduling stack (a pthread-created stack).
+ // Save room for two of our pointers.
+ MOVD RSP, R13
+ SUB $16, R13
+ MOVD R13, RSP
+ MOVD R4, 0(RSP) // save old g on stack
+ MOVD (g_stack+stack_hi)(R4), R4
+ SUB R2, R4
+ MOVD R4, 8(RSP) // save depth in old g stack (can't just save SP, as stack might be copied during a callback)
+ BL (R1)
+ MOVD R0, R9
+
+ // Restore g, stack pointer. R0 is errno, so don't touch it.
+ MOVD 0(RSP), g
+ BL runtime·save_g(SB)
+ MOVD (g_stack+stack_hi)(g), R5
+ MOVD 8(RSP), R6
+ SUB R6, R5
+ MOVD R9, R0
+ MOVD R5, RSP
+
+ MOVW R0, ret+16(FP)
+ RET
+
+nosave:
+ // Running on a system stack, perhaps even without a g.
+ // Having no g can happen during thread creation or thread teardown
+ // (see needm/dropm on Solaris, for example).
+ // This code is like the above sequence but without saving/restoring g
+ // and without worrying about the stack moving out from under us
+ // (because we're on a system stack, not a goroutine stack).
+ // The above code could be used directly if already on a system stack,
+ // but then the only path through this code would be a rare case on Solaris.
+ // Using this code for all "already on system stack" calls exercises it more,
+ // which should help keep it correct.
+ MOVD RSP, R13
+ SUB $16, R13
+ MOVD R13, RSP
+ MOVD $0, R4
+ MOVD R4, 0(RSP) // Where above code stores g, in case someone looks during debugging.
+ MOVD R2, 8(RSP) // Save original stack pointer.
+ BL (R1)
+ // Restore stack pointer.
+ MOVD 8(RSP), R2
+ MOVD R2, RSP
+ MOVD R0, ret+16(FP)
+ RET
+
+// cgocallback(fn, frame unsafe.Pointer, ctxt uintptr)
+// See cgocall.go for more details.
+TEXT ·cgocallback(SB),NOSPLIT,$24-24
+ NO_LOCAL_POINTERS
+
+ // Skip cgocallbackg, just dropm when fn is nil, and frame is the saved g.
+ // It is used to dropm while thread is exiting.
+ MOVD fn+0(FP), R1
+ CBNZ R1, loadg
+ // Restore the g from frame.
+ MOVD frame+8(FP), g
+ B dropm
+
+loadg:
+ // Load g from thread-local storage.
+ BL runtime·load_g(SB)
+
+ // If g is nil, Go did not create the current thread,
+ // or this thread never called into Go on pthread platforms.
+ // Call needm to obtain one for temporary use.
+ // In this case, we're running on the thread stack, so there's
+ // lots of space, but the linker doesn't know. Hide the call from
+ // the linker analysis by using an indirect call.
+ CBZ g, needm
+
+ MOVD g_m(g), R8
+ MOVD R8, savedm-8(SP)
+ B havem
+
+needm:
+ MOVD g, savedm-8(SP) // g is zero, so is m.
+ MOVD $runtime·needAndBindM(SB), R0
+ BL (R0)
+
+ // Set m->g0->sched.sp = SP, so that if a panic happens
+ // during the function we are about to execute, it will
+ // have a valid SP to run on the g0 stack.
+ // The next few lines (after the havem label)
+ // will save this SP onto the stack and then write
+ // the same SP back to m->sched.sp. That seems redundant,
+ // but if an unrecovered panic happens, unwindm will
+ // restore the g->sched.sp from the stack location
+ // and then systemstack will try to use it. If we don't set it here,
+ // that restored SP will be uninitialized (typically 0) and
+ // will not be usable.
+ MOVD g_m(g), R8
+ MOVD m_g0(R8), R3
+ MOVD RSP, R0
+ MOVD R0, (g_sched+gobuf_sp)(R3)
+ MOVD R29, (g_sched+gobuf_bp)(R3)
+
+havem:
+ // Now there's a valid m, and we're running on its m->g0.
+ // Save current m->g0->sched.sp on stack and then set it to SP.
+ // Save current sp in m->g0->sched.sp in preparation for
+ // switch back to m->curg stack.
+ // NOTE: unwindm knows that the saved g->sched.sp is at 16(RSP) aka savedsp-16(SP).
+ // Beware that the frame size is actually 32+16.
+ MOVD m_g0(R8), R3
+ MOVD (g_sched+gobuf_sp)(R3), R4
+ MOVD R4, savedsp-16(SP)
+ MOVD RSP, R0
+ MOVD R0, (g_sched+gobuf_sp)(R3)
+
+ // Switch to m->curg stack and call runtime.cgocallbackg.
+ // Because we are taking over the execution of m->curg
+ // but *not* resuming what had been running, we need to
+ // save that information (m->curg->sched) so we can restore it.
+ // We can restore m->curg->sched.sp easily, because calling
+ // runtime.cgocallbackg leaves SP unchanged upon return.
+ // To save m->curg->sched.pc, we push it onto the curg stack and
+ // open a frame the same size as cgocallback's g0 frame.
+ // Once we switch to the curg stack, the pushed PC will appear
+ // to be the return PC of cgocallback, so that the traceback
+ // will seamlessly trace back into the earlier calls.
+ MOVD m_curg(R8), g
+ BL runtime·save_g(SB)
+ MOVD (g_sched+gobuf_sp)(g), R4 // prepare stack as R4
+ MOVD (g_sched+gobuf_pc)(g), R5
+ MOVD R5, -48(R4)
+ MOVD (g_sched+gobuf_bp)(g), R5
+ MOVD R5, -56(R4)
+ // Gather our arguments into registers.
+ MOVD fn+0(FP), R1
+ MOVD frame+8(FP), R2
+ MOVD ctxt+16(FP), R3
+ MOVD $-48(R4), R0 // maintain 16-byte SP alignment
+ MOVD R0, RSP // switch stack
+ MOVD R1, 8(RSP)
+ MOVD R2, 16(RSP)
+ MOVD R3, 24(RSP)
+ MOVD $runtime·cgocallbackg(SB), R0
+ CALL (R0) // indirect call to bypass nosplit check. We're on a different stack now.
+
+ // Restore g->sched (== m->curg->sched) from saved values.
+ MOVD 0(RSP), R5
+ MOVD R5, (g_sched+gobuf_pc)(g)
+ MOVD RSP, R4
+ ADD $48, R4, R4
+ MOVD R4, (g_sched+gobuf_sp)(g)
+
+ // Switch back to m->g0's stack and restore m->g0->sched.sp.
+ // (Unlike m->curg, the g0 goroutine never uses sched.pc,
+ // so we do not have to restore it.)
+ MOVD g_m(g), R8
+ MOVD m_g0(R8), g
+ BL runtime·save_g(SB)
+ MOVD (g_sched+gobuf_sp)(g), R0
+ MOVD R0, RSP
+ MOVD savedsp-16(SP), R4
+ MOVD R4, (g_sched+gobuf_sp)(g)
+
+ // If the m on entry was nil, we called needm above to borrow an m,
+ // 1. for the duration of the call on non-pthread platforms,
+ // 2. or for the lifetime of the C thread on pthread platforms.
+ // If the m on entry wasn't nil,
+ // 1. the thread might be a Go thread,
+ // 2. or it wasn't the first call from a C thread on pthread platforms,
+ // since then we skip dropm to reuse the m in the first call.
+ MOVD savedm-8(SP), R6
+ CBNZ R6, droppedm
+
+ // Skip dropm to reuse it in the next call, when a pthread key has been created.
+ MOVD _cgo_pthread_key_created(SB), R6
+ // A nil _cgo_pthread_key_created means cgo is disabled, so dropm is needed.
+ CBZ R6, dropm
+ MOVD (R6), R6
+ CBNZ R6, droppedm
+
+dropm:
+ MOVD $runtime·dropm(SB), R0
+ BL (R0)
+droppedm:
+
+ // Done!
+ RET
+
+// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
+// Must obey the gcc calling convention.
+TEXT _cgo_topofstack(SB),NOSPLIT,$24
+ // g (R28) and REGTMP (R27) might be clobbered by load_g. They
+ // are callee-save in the gcc calling convention, so save them.
+ MOVD R27, savedR27-8(SP)
+ MOVD g, saveG-16(SP)
+
+ BL runtime·load_g(SB)
+ MOVD g_m(g), R0
+ MOVD m_curg(R0), R0
+ MOVD (g_stack+stack_hi)(R0), R0
+
+ MOVD saveG-16(SP), g
+ MOVD savedR27-8(SP), R27
+ RET
+
+// void setg(G*); set g. for use by needm.
+TEXT runtime·setg(SB), NOSPLIT, $0-8
+ MOVD gg+0(FP), g
+ // This only happens if iscgo, so jump straight to save_g
+ BL runtime·save_g(SB)
+ RET
+
+// void setg_gcc(G*); set g called from gcc
+TEXT setg_gcc<>(SB),NOSPLIT,$8
+ MOVD R0, g
+ MOVD R27, savedR27-8(SP)
+ BL runtime·save_g(SB)
+ MOVD savedR27-8(SP), R27
+ RET
+
+TEXT runtime·emptyfunc(SB),0,$0-0
+ RET
+
+TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0
+ MOVD ZR, R0
+ MOVD (R0), R0
+ UNDEF
+
+TEXT runtime·return0(SB), NOSPLIT, $0
+ MOVW $0, R0
+ RET
+
+// The top-most function running on a goroutine
+// returns to goexit+PCQuantum.
+TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0
+ MOVD R0, R0 // NOP
+ BL runtime·goexit1(SB) // does not return
+
+// This is called from .init_array and follows the platform, not Go, ABI.
+TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0
+ SUB $0x10, RSP
+ MOVD R27, 8(RSP) // The access to global variables below implicitly uses R27, which is callee-save
+ MOVD runtime·lastmoduledatap(SB), R1
+ MOVD R0, moduledata_next(R1)
+ MOVD R0, runtime·lastmoduledatap(SB)
+ MOVD 8(RSP), R27
+ ADD $0x10, RSP
+ RET
+
+TEXT ·checkASM(SB),NOSPLIT,$0-1
+ MOVW $1, R3
+ MOVB R3, ret+0(FP)
+ RET
+
+// gcWriteBarrier informs the GC about heap pointer writes.
+//
+// gcWriteBarrier does NOT follow the Go ABI. It accepts the
+// number of bytes of buffer needed in R25, and returns a pointer
+// to the buffer space in R25.
+// It clobbers condition codes.
+// It does not clobber any general-purpose registers except R27,
+// but may clobber others (e.g., floating point registers).
+// The act of CALLing gcWriteBarrier will clobber R30 (LR).
+TEXT gcWriteBarrier<>(SB),NOSPLIT,$200
+ // Save the registers clobbered by the fast path.
+ STP (R0, R1), 184(RSP)
+retry:
+ MOVD g_m(g), R0
+ MOVD m_p(R0), R0
+ MOVD (p_wbBuf+wbBuf_next)(R0), R1
+ MOVD (p_wbBuf+wbBuf_end)(R0), R27
+ // Increment wbBuf.next position.
+ ADD R25, R1
+ // Is the buffer full?
+ CMP R27, R1
+ BHI flush
+ // Commit to the larger buffer.
+ MOVD R1, (p_wbBuf+wbBuf_next)(R0)
+ // Make return value (the original next position)
+ SUB R25, R1, R25
+ // Restore registers.
+ LDP 184(RSP), (R0, R1)
+ RET
+
+flush:
+ // Save all general purpose registers since these could be
+ // clobbered by wbBufFlush and were not saved by the caller.
+ // R0 and R1 already saved
+ STP (R2, R3), 1*8(RSP)
+ STP (R4, R5), 3*8(RSP)
+ STP (R6, R7), 5*8(RSP)
+ STP (R8, R9), 7*8(RSP)
+ STP (R10, R11), 9*8(RSP)
+ STP (R12, R13), 11*8(RSP)
+ STP (R14, R15), 13*8(RSP)
+ // R16, R17 may be clobbered by linker trampoline
+ // R18 is unused.
+ STP (R19, R20), 15*8(RSP)
+ STP (R21, R22), 17*8(RSP)
+ STP (R23, R24), 19*8(RSP)
+ STP (R25, R26), 21*8(RSP)
+ // R27 is temp register.
+ // R28 is g.
+ // R29 is frame pointer (unused).
+ // R30 is LR, which was saved by the prologue.
+ // R31 is SP.
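+ // wbBufFlush takes no register arguments in this buffered-write
+ // scheme; it drains the P's write barrier buffer and resets
+ // wbBuf.next, after which we reload the saved registers and
+ // retry the reservation from the top.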
+ + CALL runtime·wbBufFlush(SB) + LDP 1*8(RSP), (R2, R3) + LDP 3*8(RSP), (R4, R5) + LDP 5*8(RSP), (R6, R7) + LDP 7*8(RSP), (R8, R9) + LDP 9*8(RSP), (R10, R11) + LDP 11*8(RSP), (R12, R13) + LDP 13*8(RSP), (R14, R15) + LDP 15*8(RSP), (R19, R20) + LDP 17*8(RSP), (R21, R22) + LDP 19*8(RSP), (R23, R24) + LDP 21*8(RSP), (R25, R26) + JMP retry + +TEXT runtime·gcWriteBarrier1(SB),NOSPLIT,$0 + MOVD $8, R25 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier2(SB),NOSPLIT,$0 + MOVD $16, R25 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier3(SB),NOSPLIT,$0 + MOVD $24, R25 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier4(SB),NOSPLIT,$0 + MOVD $32, R25 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier5(SB),NOSPLIT,$0 + MOVD $40, R25 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier6(SB),NOSPLIT,$0 + MOVD $48, R25 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier7(SB),NOSPLIT,$0 + MOVD $56, R25 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0 + MOVD $64, R25 + JMP gcWriteBarrier<>(SB) + +DATA debugCallFrameTooLarge<>+0x00(SB)/20, $"call frame too large" +GLOBL debugCallFrameTooLarge<>(SB), RODATA, $20 // Size duplicated below + +// debugCallV2 is the entry point for debugger-injected function +// calls on running goroutines. It informs the runtime that a +// debug call has been injected and creates a call frame for the +// debugger to fill in. +// +// To inject a function call, a debugger should: +// 1. Check that the goroutine is in state _Grunning and that +// there are at least 288 bytes free on the stack. +// 2. Set SP as SP-16. +// 3. Store the current LR in (SP) (using the SP after step 2). +// 4. Store the current PC in the LR register. +// 5. Write the desired argument frame size at SP-16 +// 6. Save all machine registers (including flags and fpsimd registers) +// so they can be restored later by the debugger. +// 7. Set the PC to debugCallV2 and resume execution. +// +// If the goroutine is in state _Grunnable, then it's not generally +// safe to inject a call because it may return out via other runtime +// operations. Instead, the debugger should unwind the stack to find +// the return to non-runtime code, add a temporary breakpoint there, +// and inject the call once that breakpoint is hit. +// +// If the goroutine is in any other state, it's not safe to inject a call. +// +// This function communicates back to the debugger by setting R20 and +// invoking BRK to raise a breakpoint signal. Note that the signal PC of +// the signal triggered by the BRK instruction is the PC where the signal +// is trapped, not the next PC, so to resume execution, the debugger needs +// to set the signal PC to PC+4. See the comments in the implementation for +// the protocol the debugger is expected to follow. InjectDebugCall in the +// runtime tests demonstrates this protocol. +// +// The debugger must ensure that any pointers passed to the function +// obey escape analysis requirements. Specifically, it must not pass +// a stack pointer to an escaping argument. debugCallV2 cannot check +// this invariant. +// +// This is ABIInternal because Go code injects its PC directly into new +// goroutine stacks. +TEXT runtime·debugCallV2(SB),NOSPLIT|NOFRAME,$0-0 + STP (R29, R30), -280(RSP) + SUB $272, RSP, RSP + SUB $8, RSP, R29 + // Save all registers that may contain pointers so they can be + // conservatively scanned. + // + // We can't do anything that might clobber any of these + // registers before this. 
+ STP (R27, g), (30*8)(RSP) + STP (R25, R26), (28*8)(RSP) + STP (R23, R24), (26*8)(RSP) + STP (R21, R22), (24*8)(RSP) + STP (R19, R20), (22*8)(RSP) + STP (R16, R17), (20*8)(RSP) + STP (R14, R15), (18*8)(RSP) + STP (R12, R13), (16*8)(RSP) + STP (R10, R11), (14*8)(RSP) + STP (R8, R9), (12*8)(RSP) + STP (R6, R7), (10*8)(RSP) + STP (R4, R5), (8*8)(RSP) + STP (R2, R3), (6*8)(RSP) + STP (R0, R1), (4*8)(RSP) + + // Perform a safe-point check. + MOVD R30, 8(RSP) // Caller's PC + CALL runtime·debugCallCheck(SB) + MOVD 16(RSP), R0 + CBZ R0, good + + // The safety check failed. Put the reason string at the top + // of the stack. + MOVD R0, 8(RSP) + MOVD 24(RSP), R0 + MOVD R0, 16(RSP) + + // Set R20 to 8 and invoke BRK. The debugger should get the + // reason a call can't be injected from SP+8 and resume execution. + MOVD $8, R20 + BREAK + JMP restore + +good: + // Registers are saved and it's safe to make a call. + // Open up a call frame, moving the stack if necessary. + // + // Once the frame is allocated, this will set R20 to 0 and + // invoke BRK. The debugger should write the argument + // frame for the call at SP+8, set up argument registers, + // set the LR as the signal PC + 4, set the PC to the function + // to call, set R26 to point to the closure (if a closure call), + // and resume execution. + // + // If the function returns, this will set R20 to 1 and invoke + // BRK. The debugger can then inspect any return value saved + // on the stack at SP+8 and in registers. To resume execution, + // the debugger should restore the LR from (SP). + // + // If the function panics, this will set R20 to 2 and invoke BRK. + // The interface{} value of the panic will be at SP+8. The debugger + // can inspect the panic value and resume execution again. +#define DEBUG_CALL_DISPATCH(NAME,MAXSIZE) \ + CMP $MAXSIZE, R0; \ + BGT 5(PC); \ + MOVD $NAME(SB), R0; \ + MOVD R0, 8(RSP); \ + CALL runtime·debugCallWrap(SB); \ + JMP restore + + MOVD 256(RSP), R0 // the argument frame size + DEBUG_CALL_DISPATCH(debugCall32<>, 32) + DEBUG_CALL_DISPATCH(debugCall64<>, 64) + DEBUG_CALL_DISPATCH(debugCall128<>, 128) + DEBUG_CALL_DISPATCH(debugCall256<>, 256) + DEBUG_CALL_DISPATCH(debugCall512<>, 512) + DEBUG_CALL_DISPATCH(debugCall1024<>, 1024) + DEBUG_CALL_DISPATCH(debugCall2048<>, 2048) + DEBUG_CALL_DISPATCH(debugCall4096<>, 4096) + DEBUG_CALL_DISPATCH(debugCall8192<>, 8192) + DEBUG_CALL_DISPATCH(debugCall16384<>, 16384) + DEBUG_CALL_DISPATCH(debugCall32768<>, 32768) + DEBUG_CALL_DISPATCH(debugCall65536<>, 65536) + // The frame size is too large. Report the error. + MOVD $debugCallFrameTooLarge<>(SB), R0 + MOVD R0, 8(RSP) + MOVD $20, R0 + MOVD R0, 16(RSP) // length of debugCallFrameTooLarge string + MOVD $8, R20 + BREAK + JMP restore + +restore: + // Calls and failures resume here. + // + // Set R20 to 16 and invoke BRK. The debugger should restore + // all registers except for PC and RSP and resume execution. + MOVD $16, R20 + BREAK + // We must not modify flags after this point. + + // Restore pointer-containing registers, which may have been + // modified from the debugger's copy by stack copying. 
+ LDP (30*8)(RSP), (R27, g)
+ LDP (28*8)(RSP), (R25, R26)
+ LDP (26*8)(RSP), (R23, R24)
+ LDP (24*8)(RSP), (R21, R22)
+ LDP (22*8)(RSP), (R19, R20)
+ LDP (20*8)(RSP), (R16, R17)
+ LDP (18*8)(RSP), (R14, R15)
+ LDP (16*8)(RSP), (R12, R13)
+ LDP (14*8)(RSP), (R10, R11)
+ LDP (12*8)(RSP), (R8, R9)
+ LDP (10*8)(RSP), (R6, R7)
+ LDP (8*8)(RSP), (R4, R5)
+ LDP (6*8)(RSP), (R2, R3)
+ LDP (4*8)(RSP), (R0, R1)
+
+ LDP -8(RSP), (R29, R27)
+ ADD $288, RSP, RSP // Add 16 more bytes, see saveSigContext
+ MOVD -16(RSP), R30 // restore old lr
+ JMP (R27)
+
+// runtime.debugCallCheck assumes that functions defined with the
+// DEBUG_CALL_FN macro are safe points to inject calls.
+#define DEBUG_CALL_FN(NAME,MAXSIZE) \
+TEXT NAME(SB),WRAPPER,$MAXSIZE-0; \
+ NO_LOCAL_POINTERS; \
+ MOVD $0, R20; \
+ BREAK; \
+ MOVD $1, R20; \
+ BREAK; \
+ RET
+DEBUG_CALL_FN(debugCall32<>, 32)
+DEBUG_CALL_FN(debugCall64<>, 64)
+DEBUG_CALL_FN(debugCall128<>, 128)
+DEBUG_CALL_FN(debugCall256<>, 256)
+DEBUG_CALL_FN(debugCall512<>, 512)
+DEBUG_CALL_FN(debugCall1024<>, 1024)
+DEBUG_CALL_FN(debugCall2048<>, 2048)
+DEBUG_CALL_FN(debugCall4096<>, 4096)
+DEBUG_CALL_FN(debugCall8192<>, 8192)
+DEBUG_CALL_FN(debugCall16384<>, 16384)
+DEBUG_CALL_FN(debugCall32768<>, 32768)
+DEBUG_CALL_FN(debugCall65536<>, 65536)
+
+// func debugCallPanicked(val interface{})
+TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16
+ // Copy the panic value to the top of stack at SP+8.
+ MOVD val_type+0(FP), R0
+ MOVD R0, 8(RSP)
+ MOVD val_data+8(FP), R0
+ MOVD R0, 16(RSP)
+ MOVD $2, R20
+ BREAK
+ RET
+
+// Note: these functions use a special calling convention to save generated code space.
+// Arguments are passed in registers, but the space for those arguments is allocated
+// in the caller's stack frame. These stubs write the args into that stack space and
+// then tail call to the corresponding runtime handler.
+// The tail call makes these stubs disappear in backtraces.
+//
+// Defined as ABIInternal since the compiler generates ABIInternal
+// calls to them directly and they do not use the stack-based Go ABI.
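+//
+// For example, panicSliceAlen receives its operands in R1 and R2 and moves
+// them into R0 and R1, the first two ABIInternal argument registers of
+// goPanicSliceAlen, while stubs whose operands already sit in R0/R1 tail
+// call directly.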
+TEXT runtime·panicIndex(SB),NOSPLIT,$0-16 + JMP runtime·goPanicIndex(SB) +TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16 + JMP runtime·goPanicIndexU(SB) +TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16 + MOVD R1, R0 + MOVD R2, R1 + JMP runtime·goPanicSliceAlen(SB) +TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16 + MOVD R1, R0 + MOVD R2, R1 + JMP runtime·goPanicSliceAlenU(SB) +TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16 + MOVD R1, R0 + MOVD R2, R1 + JMP runtime·goPanicSliceAcap(SB) +TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16 + MOVD R1, R0 + MOVD R2, R1 + JMP runtime·goPanicSliceAcapU(SB) +TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16 + JMP runtime·goPanicSliceB(SB) +TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16 + JMP runtime·goPanicSliceBU(SB) +TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16 + MOVD R2, R0 + MOVD R3, R1 + JMP runtime·goPanicSlice3Alen(SB) +TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16 + MOVD R2, R0 + MOVD R3, R1 + JMP runtime·goPanicSlice3AlenU(SB) +TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16 + MOVD R2, R0 + MOVD R3, R1 + JMP runtime·goPanicSlice3Acap(SB) +TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16 + MOVD R2, R0 + MOVD R3, R1 + JMP runtime·goPanicSlice3AcapU(SB) +TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16 + MOVD R1, R0 + MOVD R2, R1 + JMP runtime·goPanicSlice3B(SB) +TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16 + MOVD R1, R0 + MOVD R2, R1 + JMP runtime·goPanicSlice3BU(SB) +TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16 + JMP runtime·goPanicSlice3C(SB) +TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16 + JMP runtime·goPanicSlice3CU(SB) +TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16 + MOVD R2, R0 + MOVD R3, R1 + JMP runtime·goPanicSliceConvert(SB) + +TEXT ·getfp(SB),NOSPLIT|NOFRAME,$0 + MOVD R29, R0 + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/asm_loong64.s b/platform/dbops/binaries/go/go/src/runtime/asm_loong64.s new file mode 100644 index 0000000000000000000000000000000000000000..586bd23ed4781e7cad03a557cf9d144f81631b2f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asm_loong64.s @@ -0,0 +1,1032 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "go_tls.h" +#include "funcdata.h" +#include "textflag.h" + +#define REGCTXT R29 + +TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0 + // R3 = stack; R4 = argc; R5 = argv + + ADDV $-24, R3 + MOVW R4, 8(R3) // argc + MOVV R5, 16(R3) // argv + + // create istack out of the given (operating system) stack. + // _cgo_init may update stackguard. + MOVV $runtime·g0(SB), g + MOVV $(-64*1024), R30 + ADDV R30, R3, R19 + MOVV R19, g_stackguard0(g) + MOVV R19, g_stackguard1(g) + MOVV R19, (g_stack+stack_lo)(g) + MOVV R3, (g_stack+stack_hi)(g) + + // if there is a _cgo_init, call it using the gcc ABI. 
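+ // (The registers below follow the LoongArch C calling convention:
+ // R4-R7 are the first four integer argument registers, i.e. a0-a3.)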
+ MOVV _cgo_init(SB), R25 + BEQ R25, nocgo + + MOVV R0, R7 // arg 3: not used + MOVV R0, R6 // arg 2: not used + MOVV $setg_gcc<>(SB), R5 // arg 1: setg + MOVV g, R4 // arg 0: G + JAL (R25) + +nocgo: + // update stackguard after _cgo_init + MOVV (g_stack+stack_lo)(g), R19 + ADDV $const_stackGuard, R19 + MOVV R19, g_stackguard0(g) + MOVV R19, g_stackguard1(g) + + // set the per-goroutine and per-mach "registers" + MOVV $runtime·m0(SB), R19 + + // save m->g0 = g0 + MOVV g, m_g0(R19) + // save m0 to g0->m + MOVV R19, g_m(g) + + JAL runtime·check(SB) + + // args are already prepared + JAL runtime·args(SB) + JAL runtime·osinit(SB) + JAL runtime·schedinit(SB) + + // create a new goroutine to start program + MOVV $runtime·mainPC(SB), R19 // entry + ADDV $-16, R3 + MOVV R19, 8(R3) + MOVV R0, 0(R3) + JAL runtime·newproc(SB) + ADDV $16, R3 + + // start this M + JAL runtime·mstart(SB) + + MOVV R0, 1(R0) + RET + +DATA runtime·mainPC+0(SB)/8,$runtime·main(SB) +GLOBL runtime·mainPC(SB),RODATA,$8 + +TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0 + BREAK + RET + +TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 + RET + +TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0 + JAL runtime·mstart0(SB) + RET // not reached + +// func cputicks() int64 +TEXT runtime·cputicks(SB),NOSPLIT,$0-8 + RDTIMED R0, R4 + MOVV R4, ret+0(FP) + RET + +/* + * go-routine + */ + +// void gogo(Gobuf*) +// restore state from Gobuf; longjmp +TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8 + MOVV buf+0(FP), R4 + MOVV gobuf_g(R4), R5 + MOVV 0(R5), R0 // make sure g != nil + JMP gogo<>(SB) + +TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0 + MOVV R5, g + JAL runtime·save_g(SB) + + MOVV gobuf_sp(R4), R3 + MOVV gobuf_lr(R4), R1 + MOVV gobuf_ret(R4), R19 + MOVV gobuf_ctxt(R4), REGCTXT + MOVV R0, gobuf_sp(R4) + MOVV R0, gobuf_ret(R4) + MOVV R0, gobuf_lr(R4) + MOVV R0, gobuf_ctxt(R4) + MOVV gobuf_pc(R4), R6 + JMP (R6) + +// void mcall(fn func(*g)) +// Switch to m->g0's stack, call fn(g). +// Fn must never return. It should gogo(&g->sched) +// to keep running g. +TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 +#ifdef GOEXPERIMENT_regabiargs + MOVV R4, REGCTXT +#else + MOVV fn+0(FP), REGCTXT +#endif + + // Save caller state in g->sched + MOVV R3, (g_sched+gobuf_sp)(g) + MOVV R1, (g_sched+gobuf_pc)(g) + MOVV R0, (g_sched+gobuf_lr)(g) + + // Switch to m->g0 & its stack, call fn. + MOVV g, R4 // arg = g + MOVV g_m(g), R20 + MOVV m_g0(R20), g + JAL runtime·save_g(SB) + BNE g, R4, 2(PC) + JMP runtime·badmcall(SB) + MOVV 0(REGCTXT), R20 // code pointer + MOVV (g_sched+gobuf_sp)(g), R3 // sp = m->g0->sched.sp + ADDV $-16, R3 + MOVV R4, 8(R3) + MOVV R0, 0(R3) + JAL (R20) + JMP runtime·badmcall2(SB) + +// systemstack_switch is a dummy routine that systemstack leaves at the bottom +// of the G stack. We need to distinguish the routine that +// lives at the bottom of the G stack from the one that lives +// at the top of the system stack because the one at the top of +// the system stack terminates the stack walk (see topofstack()). +TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0 + UNDEF + JAL (R1) // make sure this function is not leaf + RET + +// func systemstack(fn func()) +TEXT runtime·systemstack(SB), NOSPLIT, $0-8 + MOVV fn+0(FP), R19 // R19 = fn + MOVV R19, REGCTXT // context + MOVV g_m(g), R4 // R4 = m + + MOVV m_gsignal(R4), R5 // R5 = gsignal + BEQ g, R5, noswitch + + MOVV m_g0(R4), R5 // R5 = g0 + BEQ g, R5, noswitch + + MOVV m_curg(R4), R6 + BEQ g, R6, switch + + // Bad: g is not gsignal, not g0, not curg. What is it? + // Hide call from linker nosplit analysis. 
+ MOVV $runtime·badsystemstack(SB), R7 + JAL (R7) + JAL runtime·abort(SB) + +switch: + // save our state in g->sched. Pretend to + // be systemstack_switch if the G stack is scanned. + JAL gosave_systemstack_switch<>(SB) + + // switch to g0 + MOVV R5, g + JAL runtime·save_g(SB) + MOVV (g_sched+gobuf_sp)(g), R19 + MOVV R19, R3 + + // call target function + MOVV 0(REGCTXT), R6 // code pointer + JAL (R6) + + // switch back to g + MOVV g_m(g), R4 + MOVV m_curg(R4), g + JAL runtime·save_g(SB) + MOVV (g_sched+gobuf_sp)(g), R3 + MOVV R0, (g_sched+gobuf_sp)(g) + RET + +noswitch: + // already on m stack, just call directly + // Using a tail call here cleans up tracebacks since we won't stop + // at an intermediate systemstack. + MOVV 0(REGCTXT), R4 // code pointer + MOVV 0(R3), R1 // restore LR + ADDV $8, R3 + JMP (R4) + +/* + * support for morestack + */ + +// Called during function prolog when more stack is needed. +// Caller has already loaded: +// loong64: R31: LR +// +// The traceback routines see morestack on a g0 as being +// the top of a stack (for example, morestack calling newstack +// calling the scheduler calling newm calling gc), so we must +// record an argument size. For that purpose, it has no arguments. +TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 + // Cannot grow scheduler stack (m->g0). + MOVV g_m(g), R7 + MOVV m_g0(R7), R8 + BNE g, R8, 3(PC) + JAL runtime·badmorestackg0(SB) + JAL runtime·abort(SB) + + // Cannot grow signal stack (m->gsignal). + MOVV m_gsignal(R7), R8 + BNE g, R8, 3(PC) + JAL runtime·badmorestackgsignal(SB) + JAL runtime·abort(SB) + + // Called from f. + // Set g->sched to context in f. + MOVV R3, (g_sched+gobuf_sp)(g) + MOVV R1, (g_sched+gobuf_pc)(g) + MOVV R31, (g_sched+gobuf_lr)(g) + MOVV REGCTXT, (g_sched+gobuf_ctxt)(g) + + // Called from f. + // Set m->morebuf to f's caller. + MOVV R31, (m_morebuf+gobuf_pc)(R7) // f's caller's PC + MOVV R3, (m_morebuf+gobuf_sp)(R7) // f's caller's SP + MOVV g, (m_morebuf+gobuf_g)(R7) + + // Call newstack on m->g0's stack. + MOVV m_g0(R7), g + JAL runtime·save_g(SB) + MOVV (g_sched+gobuf_sp)(g), R3 + // Create a stack frame on g0 to call newstack. + MOVV R0, -8(R3) // Zero saved LR in frame + ADDV $-8, R3 + JAL runtime·newstack(SB) + + // Not reached, but make sure the return PC from the call to newstack + // is still in this function, and not the beginning of the next. + UNDEF + +TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 + // Force SPWRITE. This function doesn't actually write SP, + // but it is called with a special calling convention where + // the caller doesn't save LR on stack but passes it as a + // register (R5), and the unwinder currently doesn't understand. + // Make it SPWRITE to stop unwinding. (See issue 54332) + MOVV R3, R3 + + MOVV R0, REGCTXT + JMP runtime·morestack(SB) + +// reflectcall: call a function with the given argument list +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). +// we don't have variable-sized frames, so we use a small number +// of constant-sized-frame functions to encode a few bits of size in the pc. +// Caution: ugly multiline assembly macros in your future! + +#define DISPATCH(NAME,MAXSIZE) \ + MOVV $MAXSIZE, R30; \ + SGTU R19, R30, R30; \ + BNE R30, 3(PC); \ + MOVV $NAME(SB), R4; \ + JMP (R4) +// Note: can't just "BR NAME(SB)" - bad inlining results. 
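+//
+// As on the other ports, the DISPATCH chain below picks the smallest
+// call<N> with N >= frameSize: SGTU sets R30 only when frameSize exceeds
+// MAXSIZE, in which case the BNE skips the jump and control falls
+// through to the next size class.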
+ +TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48 + MOVWU frameSize+32(FP), R19 + DISPATCH(runtime·call32, 32) + DISPATCH(runtime·call64, 64) + DISPATCH(runtime·call128, 128) + DISPATCH(runtime·call256, 256) + DISPATCH(runtime·call512, 512) + DISPATCH(runtime·call1024, 1024) + DISPATCH(runtime·call2048, 2048) + DISPATCH(runtime·call4096, 4096) + DISPATCH(runtime·call8192, 8192) + DISPATCH(runtime·call16384, 16384) + DISPATCH(runtime·call32768, 32768) + DISPATCH(runtime·call65536, 65536) + DISPATCH(runtime·call131072, 131072) + DISPATCH(runtime·call262144, 262144) + DISPATCH(runtime·call524288, 524288) + DISPATCH(runtime·call1048576, 1048576) + DISPATCH(runtime·call2097152, 2097152) + DISPATCH(runtime·call4194304, 4194304) + DISPATCH(runtime·call8388608, 8388608) + DISPATCH(runtime·call16777216, 16777216) + DISPATCH(runtime·call33554432, 33554432) + DISPATCH(runtime·call67108864, 67108864) + DISPATCH(runtime·call134217728, 134217728) + DISPATCH(runtime·call268435456, 268435456) + DISPATCH(runtime·call536870912, 536870912) + DISPATCH(runtime·call1073741824, 1073741824) + MOVV $runtime·badreflectcall(SB), R4 + JMP (R4) + +#define CALLFN(NAME,MAXSIZE) \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ + NO_LOCAL_POINTERS; \ + /* copy arguments to stack */ \ + MOVV arg+16(FP), R4; \ + MOVWU argsize+24(FP), R5; \ + MOVV R3, R12; \ + ADDV $8, R12; \ + ADDV R12, R5; \ + BEQ R12, R5, 6(PC); \ + MOVBU (R4), R6; \ + ADDV $1, R4; \ + MOVBU R6, (R12); \ + ADDV $1, R12; \ + JMP -5(PC); \ + /* set up argument registers */ \ + MOVV regArgs+40(FP), R25; \ + JAL ·unspillArgs(SB); \ + /* call function */ \ + MOVV f+8(FP), REGCTXT; \ + MOVV (REGCTXT), R25; \ + PCDATA $PCDATA_StackMapIndex, $0; \ + JAL (R25); \ + /* copy return values back */ \ + MOVV regArgs+40(FP), R25; \ + JAL ·spillArgs(SB); \ + MOVV argtype+0(FP), R7; \ + MOVV arg+16(FP), R4; \ + MOVWU n+24(FP), R5; \ + MOVWU retoffset+28(FP), R6; \ + ADDV $8, R3, R12; \ + ADDV R6, R12; \ + ADDV R6, R4; \ + SUBVU R6, R5; \ + JAL callRet<>(SB); \ + RET + +// callRet copies return values back at the end of call*. This is a +// separate function so it can allocate stack space for the arguments +// to reflectcallmove. It does not follow the Go ABI; it expects its +// arguments in registers. +TEXT callRet<>(SB), NOSPLIT, $40-0 + NO_LOCAL_POINTERS + MOVV R7, 8(R3) + MOVV R4, 16(R3) + MOVV R12, 24(R3) + MOVV R5, 32(R3) + MOVV R25, 40(R3) + JAL runtime·reflectcallmove(SB) + RET + +CALLFN(·call16, 16) +CALLFN(·call32, 32) +CALLFN(·call64, 64) +CALLFN(·call128, 128) +CALLFN(·call256, 256) +CALLFN(·call512, 512) +CALLFN(·call1024, 1024) +CALLFN(·call2048, 2048) +CALLFN(·call4096, 4096) +CALLFN(·call8192, 8192) +CALLFN(·call16384, 16384) +CALLFN(·call32768, 32768) +CALLFN(·call65536, 65536) +CALLFN(·call131072, 131072) +CALLFN(·call262144, 262144) +CALLFN(·call524288, 524288) +CALLFN(·call1048576, 1048576) +CALLFN(·call2097152, 2097152) +CALLFN(·call4194304, 4194304) +CALLFN(·call8388608, 8388608) +CALLFN(·call16777216, 16777216) +CALLFN(·call33554432, 33554432) +CALLFN(·call67108864, 67108864) +CALLFN(·call134217728, 134217728) +CALLFN(·call268435456, 268435456) +CALLFN(·call536870912, 536870912) +CALLFN(·call1073741824, 1073741824) + +TEXT runtime·procyield(SB),NOSPLIT,$0-0 + RET + +// Save state of caller into g->sched. +// but using fake PC from systemstack_switch. +// Must only be called from functions with no locals ($0) +// or else unwinding from systemstack_switch is incorrect. +// Smashes R19. 
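+// The fake PC saved here is systemstack_switch+8, i.e. just past that
+// function's prologue (compare the arm64 version's "get past prologue"
+// above), so a traceback of the parked G stops inside systemstack_switch.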
+TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 + MOVV $runtime·systemstack_switch(SB), R19 + ADDV $8, R19 + MOVV R19, (g_sched+gobuf_pc)(g) + MOVV R3, (g_sched+gobuf_sp)(g) + MOVV R0, (g_sched+gobuf_lr)(g) + MOVV R0, (g_sched+gobuf_ret)(g) + // Assert ctxt is zero. See func save. + MOVV (g_sched+gobuf_ctxt)(g), R19 + BEQ R19, 2(PC) + JAL runtime·abort(SB) + RET + +// func asmcgocall(fn, arg unsafe.Pointer) int32 +// Call fn(arg) on the scheduler stack, +// aligned appropriately for the gcc ABI. +// See cgocall.go for more details. +TEXT ·asmcgocall(SB),NOSPLIT,$0-20 + MOVV fn+0(FP), R25 + MOVV arg+8(FP), R4 + + MOVV R3, R12 // save original stack pointer + MOVV g, R13 + + // Figure out if we need to switch to m->g0 stack. + // We get called to create new OS threads too, and those + // come in on the m->g0 stack already. + MOVV g_m(g), R5 + MOVV m_gsignal(R5), R6 + BEQ R6, g, g0 + MOVV m_g0(R5), R6 + BEQ R6, g, g0 + + JAL gosave_systemstack_switch<>(SB) + MOVV R6, g + JAL runtime·save_g(SB) + MOVV (g_sched+gobuf_sp)(g), R3 + + // Now on a scheduling stack (a pthread-created stack). +g0: + // Save room for two of our pointers. + ADDV $-16, R3 + MOVV R13, 0(R3) // save old g on stack + MOVV (g_stack+stack_hi)(R13), R13 + SUBVU R12, R13 + MOVV R13, 8(R3) // save depth in old g stack (can't just save SP, as stack might be copied during a callback) + JAL (R25) + + // Restore g, stack pointer. R4 is return value. + MOVV 0(R3), g + JAL runtime·save_g(SB) + MOVV (g_stack+stack_hi)(g), R5 + MOVV 8(R3), R6 + SUBVU R6, R5 + MOVV R5, R3 + + MOVW R4, ret+16(FP) + RET + +// func cgocallback(fn, frame unsafe.Pointer, ctxt uintptr) +// See cgocall.go for more details. +TEXT ·cgocallback(SB),NOSPLIT,$24-24 + NO_LOCAL_POINTERS + + // Skip cgocallbackg, just dropm when fn is nil, and frame is the saved g. + // It is used to dropm while thread is exiting. + MOVV fn+0(FP), R5 + BNE R5, loadg + // Restore the g from frame. + MOVV frame+8(FP), g + JMP dropm + +loadg: + // Load m and g from thread-local storage. + MOVB runtime·iscgo(SB), R19 + BEQ R19, nocgo + JAL runtime·load_g(SB) +nocgo: + + // If g is nil, Go did not create the current thread, + // or if this thread never called into Go on pthread platforms. + // Call needm to obtain one for temporary use. + // In this case, we're running on the thread stack, so there's + // lots of space, but the linker doesn't know. Hide the call from + // the linker analysis by using an indirect call. + BEQ g, needm + + MOVV g_m(g), R12 + MOVV R12, savedm-8(SP) + JMP havem + +needm: + MOVV g, savedm-8(SP) // g is zero, so is m. + MOVV $runtime·needAndBindM(SB), R4 + JAL (R4) + + // Set m->sched.sp = SP, so that if a panic happens + // during the function we are about to execute, it will + // have a valid SP to run on the g0 stack. + // The next few lines (after the havem label) + // will save this SP onto the stack and then write + // the same SP back to m->sched.sp. That seems redundant, + // but if an unrecovered panic happens, unwindm will + // restore the g->sched.sp from the stack location + // and then systemstack will try to use it. If we don't set it here, + // that restored SP will be uninitialized (typically 0) and + // will not be usable. + MOVV g_m(g), R12 + MOVV m_g0(R12), R19 + MOVV R3, (g_sched+gobuf_sp)(R19) + +havem: + // Now there's a valid m, and we're running on its m->g0. + // Save current m->g0->sched.sp on stack and then set it to SP. + // Save current sp in m->g0->sched.sp in preparation for + // switch back to m->curg stack. 
+	// NOTE: unwindm knows that the saved g->sched.sp is at 8(R3) aka savedsp-24(SP).
+	MOVV	m_g0(R12), R19
+	MOVV	(g_sched+gobuf_sp)(R19), R13
+	MOVV	R13, savedsp-24(SP) // must match frame size
+	MOVV	R3, (g_sched+gobuf_sp)(R19)
+
+	// Switch to m->curg stack and call runtime.cgocallbackg.
+	// Because we are taking over the execution of m->curg
+	// but *not* resuming what had been running, we need to
+	// save that information (m->curg->sched) so we can restore it.
+	// We can restore m->curg->sched.sp easily, because calling
+	// runtime.cgocallbackg leaves SP unchanged upon return.
+	// To save m->curg->sched.pc, we push it onto the curg stack and
+	// open a frame the same size as cgocallback's g0 frame.
+	// Once we switch to the curg stack, the pushed PC will appear
+	// to be the return PC of cgocallback, so that the traceback
+	// will seamlessly trace back into the earlier calls.
+	MOVV	m_curg(R12), g
+	JAL	runtime·save_g(SB)
+	MOVV	(g_sched+gobuf_sp)(g), R13 // prepare stack as R13
+	MOVV	(g_sched+gobuf_pc)(g), R4
+	MOVV	R4, -(24+8)(R13) // "saved LR"; must match frame size
+	MOVV	fn+0(FP), R5
+	MOVV	frame+8(FP), R6
+	MOVV	ctxt+16(FP), R7
+	MOVV	$-(24+8)(R13), R3
+	MOVV	R5, 8(R3)
+	MOVV	R6, 16(R3)
+	MOVV	R7, 24(R3)
+	JAL	runtime·cgocallbackg(SB)
+
+	// Restore g->sched (== m->curg->sched) from saved values.
+	MOVV	0(R3), R4
+	MOVV	R4, (g_sched+gobuf_pc)(g)
+	MOVV	$(24+8)(R3), R13 // must match frame size
+	MOVV	R13, (g_sched+gobuf_sp)(g)
+
+	// Switch back to m->g0's stack and restore m->g0->sched.sp.
+	// (Unlike m->curg, the g0 goroutine never uses sched.pc,
+	// so we do not have to restore it.)
+	MOVV	g_m(g), R12
+	MOVV	m_g0(R12), g
+	JAL	runtime·save_g(SB)
+	MOVV	(g_sched+gobuf_sp)(g), R3
+	MOVV	savedsp-24(SP), R13 // must match frame size
+	MOVV	R13, (g_sched+gobuf_sp)(g)
+
+	// If the m on entry was nil, we called needm above to borrow an m,
+	// 1. for the duration of the call on non-pthread platforms,
+	// 2. or the duration of the C thread alive on pthread platforms.
+	// If the m on entry wasn't nil,
+	// 1. the thread might be a Go thread,
+	// 2. or it wasn't the first call from a C thread on pthread platforms,
+	// since then we skip dropm to reuse the m in the first call.
+	MOVV	savedm-8(SP), R12
+	BNE	R12, droppedm
+
+	// Skip dropm to reuse it in the next call, when a pthread key has been created.
+	MOVV	_cgo_pthread_key_created(SB), R12
+	// It means cgo is disabled when _cgo_pthread_key_created is a nil pointer, need dropm.
+	BEQ	R12, dropm
+	MOVV	(R12), R12
+	BNE	R12, droppedm
+
+dropm:
+	MOVV	$runtime·dropm(SB), R4
+	JAL	(R4)
+droppedm:
+
+	// Done!
+	RET
+
+// void setg(G*); set g. for use by needm.
+TEXT runtime·setg(SB), NOSPLIT, $0-8 + MOVV gg+0(FP), g + // This only happens if iscgo, so jump straight to save_g + JAL runtime·save_g(SB) + RET + +// void setg_gcc(G*); set g called from gcc with g in R19 +TEXT setg_gcc<>(SB),NOSPLIT,$0-0 + MOVV R19, g + JAL runtime·save_g(SB) + RET + +TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0 + MOVW (R0), R0 + UNDEF + +// AES hashing not implemented for loong64 +TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32 + JMP runtime·memhashFallback(SB) +TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·strhashFallback(SB) +TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·memhash32Fallback(SB) +TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·memhash64Fallback(SB) + +TEXT runtime·return0(SB), NOSPLIT, $0 + MOVW $0, R19 + RET + +// Called from cgo wrappers, this function returns g->m->curg.stack.hi. +// Must obey the gcc calling convention. +TEXT _cgo_topofstack(SB),NOSPLIT,$16 + // g (R22) and REGTMP (R30) might be clobbered by load_g. They + // are callee-save in the gcc calling convention, so save them. + MOVV R30, savedREGTMP-16(SP) + MOVV g, savedG-8(SP) + + JAL runtime·load_g(SB) + MOVV g_m(g), R19 + MOVV m_curg(R19), R19 + MOVV (g_stack+stack_hi)(R19), R4 // return value in R4 + + MOVV savedG-8(SP), g + MOVV savedREGTMP-16(SP), R30 + RET + +// The top-most function running on a goroutine +// returns to goexit+PCQuantum. +TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0 + NOOP + JAL runtime·goexit1(SB) // does not return + // traceback from goexit1 must hit code range of goexit + NOOP + +// This is called from .init_array and follows the platform, not Go, ABI. +TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0 + ADDV $-0x10, R3 + MOVV R30, 8(R3) // The access to global variables below implicitly uses R30, which is callee-save + MOVV runtime·lastmoduledatap(SB), R12 + MOVV R4, moduledata_next(R12) + MOVV R4, runtime·lastmoduledatap(SB) + MOVV 8(R3), R30 + ADDV $0x10, R3 + RET + +TEXT ·checkASM(SB),NOSPLIT,$0-1 + MOVW $1, R19 + MOVB R19, ret+0(FP) + RET + +#ifdef GOEXPERIMENT_regabiargs +// spillArgs stores return values from registers to a *internal/abi.RegArgs in R25. +TEXT ·spillArgs(SB),NOSPLIT,$0-0 + MOVV R4, (0*8)(R25) + MOVV R5, (1*8)(R25) + MOVV R6, (2*8)(R25) + MOVV R7, (3*8)(R25) + MOVV R8, (4*8)(R25) + MOVV R9, (5*8)(R25) + MOVV R10, (6*8)(R25) + MOVV R11, (7*8)(R25) + MOVV R12, (8*8)(R25) + MOVV R13, (9*8)(R25) + MOVV R14, (10*8)(R25) + MOVV R15, (11*8)(R25) + MOVV R16, (12*8)(R25) + MOVV R17, (13*8)(R25) + MOVV R18, (14*8)(R25) + MOVV R19, (15*8)(R25) + MOVD F0, (16*8)(R25) + MOVD F1, (17*8)(R25) + MOVD F2, (18*8)(R25) + MOVD F3, (19*8)(R25) + MOVD F4, (20*8)(R25) + MOVD F5, (21*8)(R25) + MOVD F6, (22*8)(R25) + MOVD F7, (23*8)(R25) + MOVD F8, (24*8)(R25) + MOVD F9, (25*8)(R25) + MOVD F10, (26*8)(R25) + MOVD F11, (27*8)(R25) + MOVD F12, (28*8)(R25) + MOVD F13, (29*8)(R25) + MOVD F14, (30*8)(R25) + MOVD F15, (31*8)(R25) + RET + +// unspillArgs loads args into registers from a *internal/abi.RegArgs in R25. 
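+// Together with spillArgs above, this shuttles the sixteen integer
+// argument registers (R4-R19) and sixteen float argument registers
+// (F0-F15) through the RegArgs struct, which is how reflectcall
+// materializes register-passed arguments and results on this port.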
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0 + MOVV (0*8)(R25), R4 + MOVV (1*8)(R25), R5 + MOVV (2*8)(R25), R6 + MOVV (3*8)(R25), R7 + MOVV (4*8)(R25), R8 + MOVV (5*8)(R25), R9 + MOVV (6*8)(R25), R10 + MOVV (7*8)(R25), R11 + MOVV (8*8)(R25), R12 + MOVV (9*8)(R25), R13 + MOVV (10*8)(R25), R14 + MOVV (11*8)(R25), R15 + MOVV (12*8)(R25), R16 + MOVV (13*8)(R25), R17 + MOVV (14*8)(R25), R18 + MOVV (15*8)(R25), R19 + MOVD (16*8)(R25), F0 + MOVD (17*8)(R25), F1 + MOVD (18*8)(R25), F2 + MOVD (19*8)(R25), F3 + MOVD (20*8)(R25), F4 + MOVD (21*8)(R25), F5 + MOVD (22*8)(R25), F6 + MOVD (23*8)(R25), F7 + MOVD (24*8)(R25), F8 + MOVD (25*8)(R25), F9 + MOVD (26*8)(R25), F10 + MOVD (27*8)(R25), F11 + MOVD (28*8)(R25), F12 + MOVD (29*8)(R25), F13 + MOVD (30*8)(R25), F14 + MOVD (31*8)(R25), F15 + RET +#else +TEXT ·spillArgs(SB),NOSPLIT,$0-0 + RET + +TEXT ·unspillArgs(SB),NOSPLIT,$0-0 + RET +#endif + +// gcWriteBarrier informs the GC about heap pointer writes. +// +// gcWriteBarrier does NOT follow the Go ABI. It accepts the +// number of bytes of buffer needed in R29, and returns a pointer +// to the buffer space in R29. +// It clobbers R30 (the linker temp register). +// The act of CALLing gcWriteBarrier will clobber R1 (LR). +// It does not clobber any other general-purpose registers, +// but may clobber others (e.g., floating point registers). +TEXT gcWriteBarrier<>(SB),NOSPLIT,$216 + // Save the registers clobbered by the fast path. + MOVV R19, 208(R3) + MOVV R13, 216(R3) +retry: + MOVV g_m(g), R19 + MOVV m_p(R19), R19 + MOVV (p_wbBuf+wbBuf_next)(R19), R13 + MOVV (p_wbBuf+wbBuf_end)(R19), R30 // R30 is linker temp register + // Increment wbBuf.next position. + ADDV R29, R13 + // Is the buffer full? + BLTU R30, R13, flush + // Commit to the larger buffer. + MOVV R13, (p_wbBuf+wbBuf_next)(R19) + // Make return value (the original next position) + SUBV R29, R13, R29 + // Restore registers. + MOVV 208(R3), R19 + MOVV 216(R3), R13 + RET + +flush: + // Save all general purpose registers since these could be + // clobbered by wbBufFlush and were not saved by the caller. + MOVV R27, 8(R3) + MOVV R28, 16(R3) + // R1 is LR, which was saved by the prologue. + MOVV R2, 24(R3) + // R3 is SP. + MOVV R4, 32(R3) + MOVV R5, 40(R3) + MOVV R6, 48(R3) + MOVV R7, 56(R3) + MOVV R8, 64(R3) + MOVV R9, 72(R3) + MOVV R10, 80(R3) + MOVV R11, 88(R3) + MOVV R12, 96(R3) + // R13 already saved + MOVV R14, 104(R3) + MOVV R15, 112(R3) + MOVV R16, 120(R3) + MOVV R17, 128(R3) + MOVV R18, 136(R3) + // R19 already saved + MOVV R20, 144(R3) + MOVV R21, 152(R3) + // R22 is g. + MOVV R23, 160(R3) + MOVV R24, 168(R3) + MOVV R25, 176(R3) + MOVV R26, 184(R3) + // R27 already saved + // R28 already saved. + MOVV R29, 192(R3) + // R30 is tmp register. 
+	MOVV	R31, 200(R3)
+
+	CALL	runtime·wbBufFlush(SB)
+
+	MOVV	8(R3), R27
+	MOVV	16(R3), R28
+	MOVV	24(R3), R2
+	MOVV	32(R3), R4
+	MOVV	40(R3), R5
+	MOVV	48(R3), R6
+	MOVV	56(R3), R7
+	MOVV	64(R3), R8
+	MOVV	72(R3), R9
+	MOVV	80(R3), R10
+	MOVV	88(R3), R11
+	MOVV	96(R3), R12
+	MOVV	104(R3), R14
+	MOVV	112(R3), R15
+	MOVV	120(R3), R16
+	MOVV	128(R3), R17
+	MOVV	136(R3), R18
+	MOVV	144(R3), R20
+	MOVV	152(R3), R21
+	MOVV	160(R3), R23
+	MOVV	168(R3), R24
+	MOVV	176(R3), R25
+	MOVV	184(R3), R26
+	MOVV	192(R3), R29
+	MOVV	200(R3), R31
+	JMP	retry
+
+TEXT runtime·gcWriteBarrier1(SB),NOSPLIT,$0
+	MOVV	$8, R29
+	JMP	gcWriteBarrier<>(SB)
+TEXT runtime·gcWriteBarrier2(SB),NOSPLIT,$0
+	MOVV	$16, R29
+	JMP	gcWriteBarrier<>(SB)
+TEXT runtime·gcWriteBarrier3(SB),NOSPLIT,$0
+	MOVV	$24, R29
+	JMP	gcWriteBarrier<>(SB)
+TEXT runtime·gcWriteBarrier4(SB),NOSPLIT,$0
+	MOVV	$32, R29
+	JMP	gcWriteBarrier<>(SB)
+TEXT runtime·gcWriteBarrier5(SB),NOSPLIT,$0
+	MOVV	$40, R29
+	JMP	gcWriteBarrier<>(SB)
+TEXT runtime·gcWriteBarrier6(SB),NOSPLIT,$0
+	MOVV	$48, R29
+	JMP	gcWriteBarrier<>(SB)
+TEXT runtime·gcWriteBarrier7(SB),NOSPLIT,$0
+	MOVV	$56, R29
+	JMP	gcWriteBarrier<>(SB)
+TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0
+	MOVV	$64, R29
+	JMP	gcWriteBarrier<>(SB)
+
+// Note: these functions use a special calling convention to save generated code space.
+// Arguments are passed in registers, but the space for those arguments is allocated
+// in the caller's stack frame. These stubs write the args into that stack space and
+// then tail call to the corresponding runtime handler.
+// The tail call makes these stubs disappear in backtraces.
+TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R20, R4
+	MOVV	R21, R5
+#else
+	MOVV	R20, x+0(FP)
+	MOVV	R21, y+8(FP)
+#endif
+	JMP	runtime·goPanicIndex(SB)
+TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R20, R4
+	MOVV	R21, R5
+#else
+	MOVV	R20, x+0(FP)
+	MOVV	R21, y+8(FP)
+#endif
+	JMP	runtime·goPanicIndexU(SB)
+TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R21, R4
+	MOVV	R23, R5
+#else
+	MOVV	R21, x+0(FP)
+	MOVV	R23, y+8(FP)
+#endif
+	JMP	runtime·goPanicSliceAlen(SB)
+TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R21, R4
+	MOVV	R23, R5
+#else
+	MOVV	R21, x+0(FP)
+	MOVV	R23, y+8(FP)
+#endif
+	JMP	runtime·goPanicSliceAlenU(SB)
+TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R21, R4
+	MOVV	R23, R5
+#else
+	MOVV	R21, x+0(FP)
+	MOVV	R23, y+8(FP)
+#endif
+	JMP	runtime·goPanicSliceAcap(SB)
+TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R21, R4
+	MOVV	R23, R5
+#else
+	MOVV	R21, x+0(FP)
+	MOVV	R23, y+8(FP)
+#endif
+	JMP	runtime·goPanicSliceAcapU(SB)
+TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R20, R4
+	MOVV	R21, R5
+#else
+	MOVV	R20, x+0(FP)
+	MOVV	R21, y+8(FP)
+#endif
+	JMP	runtime·goPanicSliceB(SB)
+TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R20, R4
+	MOVV	R21, R5
+#else
+	MOVV	R20, x+0(FP)
+	MOVV	R21, y+8(FP)
+#endif
+	JMP	runtime·goPanicSliceBU(SB)
+TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R23, R4
+	MOVV	R24, R5
+#else
+	MOVV	R23, x+0(FP)
+	MOVV	R24, y+8(FP)
+#endif
+	JMP	runtime·goPanicSlice3Alen(SB)
+TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R23, R4
+	MOVV	R24, R5
+#else
+	MOVV	R23, x+0(FP)
+	MOVV
R24, y+8(FP) +#endif + JMP runtime·goPanicSlice3AlenU(SB) +TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R23, R4 + MOVV R24, R5 +#else + MOVV R23, x+0(FP) + MOVV R24, y+8(FP) +#endif + JMP runtime·goPanicSlice3Acap(SB) +TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R23, R4 + MOVV R24, R5 +#else + MOVV R23, x+0(FP) + MOVV R24, y+8(FP) +#endif + JMP runtime·goPanicSlice3AcapU(SB) +TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R21, R4 + MOVV R23, R5 +#else + MOVV R21, x+0(FP) + MOVV R23, y+8(FP) +#endif + JMP runtime·goPanicSlice3B(SB) +TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R21, R4 + MOVV R23, R5 +#else + MOVV R21, x+0(FP) + MOVV R23, y+8(FP) +#endif + JMP runtime·goPanicSlice3BU(SB) +TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R20, R4 + MOVV R21, R5 +#else + MOVV R20, x+0(FP) + MOVV R21, y+8(FP) +#endif + JMP runtime·goPanicSlice3C(SB) +TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R20, R4 + MOVV R21, R5 +#else + MOVV R20, x+0(FP) + MOVV R21, y+8(FP) +#endif + JMP runtime·goPanicSlice3CU(SB) +TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R23, R4 + MOVV R24, R5 +#else + MOVV R23, x+0(FP) + MOVV R24, y+8(FP) +#endif + JMP runtime·goPanicSliceConvert(SB) diff --git a/platform/dbops/binaries/go/go/src/runtime/asm_mips64x.s b/platform/dbops/binaries/go/go/src/runtime/asm_mips64x.s new file mode 100644 index 0000000000000000000000000000000000000000..80cd87c4af335f6760165e7c9edf9a895df2bf06 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asm_mips64x.s @@ -0,0 +1,873 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le + +#include "go_asm.h" +#include "go_tls.h" +#include "funcdata.h" +#include "textflag.h" + +#define REGCTXT R22 + +TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0 + // R29 = stack; R4 = argc; R5 = argv + + ADDV $-24, R29 + MOVW R4, 8(R29) // argc + MOVV R5, 16(R29) // argv + + // create istack out of the given (operating system) stack. + // _cgo_init may update stackguard. + MOVV $runtime·g0(SB), g + MOVV $(-64*1024), R23 + ADDV R23, R29, R1 + MOVV R1, g_stackguard0(g) + MOVV R1, g_stackguard1(g) + MOVV R1, (g_stack+stack_lo)(g) + MOVV R29, (g_stack+stack_hi)(g) + + // if there is a _cgo_init, call it using the gcc ABI. 
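+	// (_cgo_init is non-nil only when the binary links in cgo; the
+	// call hands the C runtime the initial g and the setg_gcc helper
+	// before any Go code runs.)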
+ MOVV _cgo_init(SB), R25 + BEQ R25, nocgo + + MOVV R0, R7 // arg 3: not used + MOVV R0, R6 // arg 2: not used + MOVV $setg_gcc<>(SB), R5 // arg 1: setg + MOVV g, R4 // arg 0: G + JAL (R25) + +nocgo: + // update stackguard after _cgo_init + MOVV (g_stack+stack_lo)(g), R1 + ADDV $const_stackGuard, R1 + MOVV R1, g_stackguard0(g) + MOVV R1, g_stackguard1(g) + + // set the per-goroutine and per-mach "registers" + MOVV $runtime·m0(SB), R1 + + // save m->g0 = g0 + MOVV g, m_g0(R1) + // save m0 to g0->m + MOVV R1, g_m(g) + + JAL runtime·check(SB) + + // args are already prepared + JAL runtime·args(SB) + JAL runtime·osinit(SB) + JAL runtime·schedinit(SB) + + // create a new goroutine to start program + MOVV $runtime·mainPC(SB), R1 // entry + ADDV $-16, R29 + MOVV R1, 8(R29) + MOVV R0, 0(R29) + JAL runtime·newproc(SB) + ADDV $16, R29 + + // start this M + JAL runtime·mstart(SB) + + MOVV R0, 1(R0) + RET + +DATA runtime·mainPC+0(SB)/8,$runtime·main(SB) +GLOBL runtime·mainPC(SB),RODATA,$8 + +TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0 + MOVV R0, 2(R0) // TODO: TD + RET + +TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 + RET + +TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0 + JAL runtime·mstart0(SB) + RET // not reached + +/* + * go-routine + */ + +// void gogo(Gobuf*) +// restore state from Gobuf; longjmp +TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8 + MOVV buf+0(FP), R3 + MOVV gobuf_g(R3), R4 + MOVV 0(R4), R0 // make sure g != nil + JMP gogo<>(SB) + +TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0 + MOVV R4, g + JAL runtime·save_g(SB) + + MOVV 0(g), R2 + MOVV gobuf_sp(R3), R29 + MOVV gobuf_lr(R3), R31 + MOVV gobuf_ret(R3), R1 + MOVV gobuf_ctxt(R3), REGCTXT + MOVV R0, gobuf_sp(R3) + MOVV R0, gobuf_ret(R3) + MOVV R0, gobuf_lr(R3) + MOVV R0, gobuf_ctxt(R3) + MOVV gobuf_pc(R3), R4 + JMP (R4) + +// void mcall(fn func(*g)) +// Switch to m->g0's stack, call fn(g). +// Fn must never return. It should gogo(&g->sched) +// to keep running g. +TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 + // Save caller state in g->sched + MOVV R29, (g_sched+gobuf_sp)(g) + MOVV R31, (g_sched+gobuf_pc)(g) + MOVV R0, (g_sched+gobuf_lr)(g) + + // Switch to m->g0 & its stack, call fn. + MOVV g, R1 + MOVV g_m(g), R3 + MOVV m_g0(R3), g + JAL runtime·save_g(SB) + BNE g, R1, 2(PC) + JMP runtime·badmcall(SB) + MOVV fn+0(FP), REGCTXT // context + MOVV 0(REGCTXT), R4 // code pointer + MOVV (g_sched+gobuf_sp)(g), R29 // sp = m->g0->sched.sp + ADDV $-16, R29 + MOVV R1, 8(R29) + MOVV R0, 0(R29) + JAL (R4) + JMP runtime·badmcall2(SB) + +// systemstack_switch is a dummy routine that systemstack leaves at the bottom +// of the G stack. We need to distinguish the routine that +// lives at the bottom of the G stack from the one that lives +// at the top of the system stack because the one at the top of +// the system stack terminates the stack walk (see topofstack()). +TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0 + UNDEF + JAL (R31) // make sure this function is not leaf + RET + +// func systemstack(fn func()) +TEXT runtime·systemstack(SB), NOSPLIT, $0-8 + MOVV fn+0(FP), R1 // R1 = fn + MOVV R1, REGCTXT // context + MOVV g_m(g), R2 // R2 = m + + MOVV m_gsignal(R2), R3 // R3 = gsignal + BEQ g, R3, noswitch + + MOVV m_g0(R2), R3 // R3 = g0 + BEQ g, R3, noswitch + + MOVV m_curg(R2), R4 + BEQ g, R4, switch + + // Bad: g is not gsignal, not g0, not curg. What is it? + // Hide call from linker nosplit analysis. + MOVV $runtime·badsystemstack(SB), R4 + JAL (R4) + JAL runtime·abort(SB) + +switch: + // save our state in g->sched. 
Pretend to + // be systemstack_switch if the G stack is scanned. + JAL gosave_systemstack_switch<>(SB) + + // switch to g0 + MOVV R3, g + JAL runtime·save_g(SB) + MOVV (g_sched+gobuf_sp)(g), R1 + MOVV R1, R29 + + // call target function + MOVV 0(REGCTXT), R4 // code pointer + JAL (R4) + + // switch back to g + MOVV g_m(g), R1 + MOVV m_curg(R1), g + JAL runtime·save_g(SB) + MOVV (g_sched+gobuf_sp)(g), R29 + MOVV R0, (g_sched+gobuf_sp)(g) + RET + +noswitch: + // already on m stack, just call directly + // Using a tail call here cleans up tracebacks since we won't stop + // at an intermediate systemstack. + MOVV 0(REGCTXT), R4 // code pointer + MOVV 0(R29), R31 // restore LR + ADDV $8, R29 + JMP (R4) + +// func switchToCrashStack0(fn func()) +TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-8 + MOVV fn+0(FP), REGCTXT // context register + MOVV g_m(g), R2 // curm + + // set g to gcrash + MOVV $runtime·gcrash(SB), g // g = &gcrash + CALL runtime·save_g(SB) + MOVV R2, g_m(g) // g.m = curm + MOVV g, m_g0(R2) // curm.g0 = g + + // switch to crashstack + MOVV (g_stack+stack_hi)(g), R2 + ADDV $(-4*8), R2, R29 + + // call target function + MOVV 0(REGCTXT), R25 + JAL (R25) + + // should never return + CALL runtime·abort(SB) + UNDEF + +/* + * support for morestack + */ + +// Called during function prolog when more stack is needed. +// Caller has already loaded: +// R1: framesize, R2: argsize, R3: LR +// +// The traceback routines see morestack on a g0 as being +// the top of a stack (for example, morestack calling newstack +// calling the scheduler calling newm calling gc), so we must +// record an argument size. For that purpose, it has no arguments. +TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 + // Called from f. + // Set g->sched to context in f. + MOVV R29, (g_sched+gobuf_sp)(g) + MOVV R31, (g_sched+gobuf_pc)(g) + MOVV R3, (g_sched+gobuf_lr)(g) + MOVV REGCTXT, (g_sched+gobuf_ctxt)(g) + + // Cannot grow scheduler stack (m->g0). + MOVV g_m(g), R7 + MOVV m_g0(R7), R8 + BNE g, R8, 3(PC) + JAL runtime·badmorestackg0(SB) + JAL runtime·abort(SB) + + // Cannot grow signal stack (m->gsignal). + MOVV m_gsignal(R7), R8 + BNE g, R8, 3(PC) + JAL runtime·badmorestackgsignal(SB) + JAL runtime·abort(SB) + + // Called from f. + // Set m->morebuf to f's caller. + MOVV R3, (m_morebuf+gobuf_pc)(R7) // f's caller's PC + MOVV R29, (m_morebuf+gobuf_sp)(R7) // f's caller's SP + MOVV g, (m_morebuf+gobuf_g)(R7) + + // Call newstack on m->g0's stack. + MOVV m_g0(R7), g + JAL runtime·save_g(SB) + MOVV (g_sched+gobuf_sp)(g), R29 + // Create a stack frame on g0 to call newstack. + MOVV R0, -8(R29) // Zero saved LR in frame + ADDV $-8, R29 + JAL runtime·newstack(SB) + + // Not reached, but make sure the return PC from the call to newstack + // is still in this function, and not the beginning of the next. + UNDEF + +TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 + // Force SPWRITE. This function doesn't actually write SP, + // but it is called with a special calling convention where + // the caller doesn't save LR on stack but passes it as a + // register (R3), and the unwinder currently doesn't understand. + // Make it SPWRITE to stop unwinding. (See issue 54332) + MOVV R29, R29 + + MOVV R0, REGCTXT + JMP runtime·morestack(SB) + +// reflectcall: call a function with the given argument list +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). 
+// we don't have variable-sized frames, so we use a small number +// of constant-sized-frame functions to encode a few bits of size in the pc. +// Caution: ugly multiline assembly macros in your future! + +#define DISPATCH(NAME,MAXSIZE) \ + MOVV $MAXSIZE, R23; \ + SGTU R1, R23, R23; \ + BNE R23, 3(PC); \ + MOVV $NAME(SB), R4; \ + JMP (R4) +// Note: can't just "BR NAME(SB)" - bad inlining results. + +TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48 + MOVWU frameSize+32(FP), R1 + DISPATCH(runtime·call16, 16) + DISPATCH(runtime·call32, 32) + DISPATCH(runtime·call64, 64) + DISPATCH(runtime·call128, 128) + DISPATCH(runtime·call256, 256) + DISPATCH(runtime·call512, 512) + DISPATCH(runtime·call1024, 1024) + DISPATCH(runtime·call2048, 2048) + DISPATCH(runtime·call4096, 4096) + DISPATCH(runtime·call8192, 8192) + DISPATCH(runtime·call16384, 16384) + DISPATCH(runtime·call32768, 32768) + DISPATCH(runtime·call65536, 65536) + DISPATCH(runtime·call131072, 131072) + DISPATCH(runtime·call262144, 262144) + DISPATCH(runtime·call524288, 524288) + DISPATCH(runtime·call1048576, 1048576) + DISPATCH(runtime·call2097152, 2097152) + DISPATCH(runtime·call4194304, 4194304) + DISPATCH(runtime·call8388608, 8388608) + DISPATCH(runtime·call16777216, 16777216) + DISPATCH(runtime·call33554432, 33554432) + DISPATCH(runtime·call67108864, 67108864) + DISPATCH(runtime·call134217728, 134217728) + DISPATCH(runtime·call268435456, 268435456) + DISPATCH(runtime·call536870912, 536870912) + DISPATCH(runtime·call1073741824, 1073741824) + MOVV $runtime·badreflectcall(SB), R4 + JMP (R4) + +#define CALLFN(NAME,MAXSIZE) \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ + NO_LOCAL_POINTERS; \ + /* copy arguments to stack */ \ + MOVV stackArgs+16(FP), R1; \ + MOVWU stackArgsSize+24(FP), R2; \ + MOVV R29, R3; \ + ADDV $8, R3; \ + ADDV R3, R2; \ + BEQ R3, R2, 6(PC); \ + MOVBU (R1), R4; \ + ADDV $1, R1; \ + MOVBU R4, (R3); \ + ADDV $1, R3; \ + JMP -5(PC); \ + /* call function */ \ + MOVV f+8(FP), REGCTXT; \ + MOVV (REGCTXT), R4; \ + PCDATA $PCDATA_StackMapIndex, $0; \ + JAL (R4); \ + /* copy return values back */ \ + MOVV stackArgsType+0(FP), R5; \ + MOVV stackArgs+16(FP), R1; \ + MOVWU stackArgsSize+24(FP), R2; \ + MOVWU stackRetOffset+28(FP), R4; \ + ADDV $8, R29, R3; \ + ADDV R4, R3; \ + ADDV R4, R1; \ + SUBVU R4, R2; \ + JAL callRet<>(SB); \ + RET + +// callRet copies return values back at the end of call*. This is a +// separate function so it can allocate stack space for the arguments +// to reflectcallmove. It does not follow the Go ABI; it expects its +// arguments in registers. 
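+// The five stores below lay out the arguments of
+// reflectcallmove(typ, dst, src, size, regArgs): the argument type in
+// R5, the caller's argument frame in R1, our on-stack copy in R3, the
+// byte count past stackRetOffset in R2, and a zero for the final slot
+// (a nil *RegArgs), since this port passes no arguments in registers.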
+TEXT callRet<>(SB), NOSPLIT, $40-0 + MOVV R5, 8(R29) + MOVV R1, 16(R29) + MOVV R3, 24(R29) + MOVV R2, 32(R29) + MOVV $0, 40(R29) + JAL runtime·reflectcallmove(SB) + RET + +CALLFN(·call16, 16) +CALLFN(·call32, 32) +CALLFN(·call64, 64) +CALLFN(·call128, 128) +CALLFN(·call256, 256) +CALLFN(·call512, 512) +CALLFN(·call1024, 1024) +CALLFN(·call2048, 2048) +CALLFN(·call4096, 4096) +CALLFN(·call8192, 8192) +CALLFN(·call16384, 16384) +CALLFN(·call32768, 32768) +CALLFN(·call65536, 65536) +CALLFN(·call131072, 131072) +CALLFN(·call262144, 262144) +CALLFN(·call524288, 524288) +CALLFN(·call1048576, 1048576) +CALLFN(·call2097152, 2097152) +CALLFN(·call4194304, 4194304) +CALLFN(·call8388608, 8388608) +CALLFN(·call16777216, 16777216) +CALLFN(·call33554432, 33554432) +CALLFN(·call67108864, 67108864) +CALLFN(·call134217728, 134217728) +CALLFN(·call268435456, 268435456) +CALLFN(·call536870912, 536870912) +CALLFN(·call1073741824, 1073741824) + +TEXT runtime·procyield(SB),NOSPLIT,$0-0 + RET + +// Save state of caller into g->sched, +// but using fake PC from systemstack_switch. +// Must only be called from functions with no locals ($0) +// or else unwinding from systemstack_switch is incorrect. +// Smashes R1. +TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 + MOVV $runtime·systemstack_switch(SB), R1 + ADDV $8, R1 // get past prologue + MOVV R1, (g_sched+gobuf_pc)(g) + MOVV R29, (g_sched+gobuf_sp)(g) + MOVV R0, (g_sched+gobuf_lr)(g) + MOVV R0, (g_sched+gobuf_ret)(g) + // Assert ctxt is zero. See func save. + MOVV (g_sched+gobuf_ctxt)(g), R1 + BEQ R1, 2(PC) + JAL runtime·abort(SB) + RET + +// func asmcgocall_no_g(fn, arg unsafe.Pointer) +// Call fn(arg) aligned appropriately for the gcc ABI. +// Called on a system stack, and there may be no g yet (during needm). +TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16 + MOVV fn+0(FP), R25 + MOVV arg+8(FP), R4 + JAL (R25) + RET + +// func asmcgocall(fn, arg unsafe.Pointer) int32 +// Call fn(arg) on the scheduler stack, +// aligned appropriately for the gcc ABI. +// See cgocall.go for more details. +TEXT ·asmcgocall(SB),NOSPLIT,$0-20 + MOVV fn+0(FP), R25 + MOVV arg+8(FP), R4 + + MOVV R29, R3 // save original stack pointer + MOVV g, R2 + + // Figure out if we need to switch to m->g0 stack. + // We get called to create new OS threads too, and those + // come in on the m->g0 stack already. Or we might already + // be on the m->gsignal stack. + MOVV g_m(g), R5 + MOVV m_gsignal(R5), R6 + BEQ R6, g, g0 + MOVV m_g0(R5), R6 + BEQ R6, g, g0 + + JAL gosave_systemstack_switch<>(SB) + MOVV R6, g + JAL runtime·save_g(SB) + MOVV (g_sched+gobuf_sp)(g), R29 + + // Now on a scheduling stack (a pthread-created stack). +g0: + // Save room for two of our pointers. + ADDV $-16, R29 + MOVV R2, 0(R29) // save old g on stack + MOVV (g_stack+stack_hi)(R2), R2 + SUBVU R3, R2 + MOVV R2, 8(R29) // save depth in old g stack (can't just save SP, as stack might be copied during a callback) + JAL (R25) + + // Restore g, stack pointer. R2 is return value. + MOVV 0(R29), g + JAL runtime·save_g(SB) + MOVV (g_stack+stack_hi)(g), R5 + MOVV 8(R29), R6 + SUBVU R6, R5 + MOVV R5, R29 + + MOVW R2, ret+16(FP) + RET + +// func cgocallback(fn, frame unsafe.Pointer, ctxt uintptr) +// See cgocall.go for more details. +TEXT ·cgocallback(SB),NOSPLIT,$24-24 + NO_LOCAL_POINTERS + + // Skip cgocallbackg, just dropm when fn is nil, and frame is the saved g. + // It is used to dropm while thread is exiting. + MOVV fn+0(FP), R5 + BNE R5, loadg + // Restore the g from frame. 
+	MOVV	frame+8(FP), g
+	JMP	dropm
+
+loadg:
+	// Load m and g from thread-local storage.
+	MOVB	runtime·iscgo(SB), R1
+	BEQ	R1, nocgo
+	JAL	runtime·load_g(SB)
+nocgo:
+
+	// If g is nil, Go did not create the current thread,
+	// or if this thread never called into Go on pthread platforms.
+	// Call needm to obtain one for temporary use.
+	// In this case, we're running on the thread stack, so there's
+	// lots of space, but the linker doesn't know. Hide the call from
+	// the linker analysis by using an indirect call.
+	BEQ	g, needm
+
+	MOVV	g_m(g), R3
+	MOVV	R3, savedm-8(SP)
+	JMP	havem
+
+needm:
+	MOVV	g, savedm-8(SP) // g is zero, so is m.
+	MOVV	$runtime·needAndBindM(SB), R4
+	JAL	(R4)
+
+	// Set m->sched.sp = SP, so that if a panic happens
+	// during the function we are about to execute, it will
+	// have a valid SP to run on the g0 stack.
+	// The next few lines (after the havem label)
+	// will save this SP onto the stack and then write
+	// the same SP back to m->sched.sp. That seems redundant,
+	// but if an unrecovered panic happens, unwindm will
+	// restore the g->sched.sp from the stack location
+	// and then systemstack will try to use it. If we don't set it here,
+	// that restored SP will be uninitialized (typically 0) and
+	// will not be usable.
+	MOVV	g_m(g), R3
+	MOVV	m_g0(R3), R1
+	MOVV	R29, (g_sched+gobuf_sp)(R1)
+
+havem:
+	// Now there's a valid m, and we're running on its m->g0.
+	// Save current m->g0->sched.sp on stack and then set it to SP.
+	// Save current sp in m->g0->sched.sp in preparation for
+	// switch back to m->curg stack.
+	// NOTE: unwindm knows that the saved g->sched.sp is at 8(R29) aka savedsp-24(SP).
+	MOVV	m_g0(R3), R1
+	MOVV	(g_sched+gobuf_sp)(R1), R2
+	MOVV	R2, savedsp-24(SP) // must match frame size
+	MOVV	R29, (g_sched+gobuf_sp)(R1)
+
+	// Switch to m->curg stack and call runtime.cgocallbackg.
+	// Because we are taking over the execution of m->curg
+	// but *not* resuming what had been running, we need to
+	// save that information (m->curg->sched) so we can restore it.
+	// We can restore m->curg->sched.sp easily, because calling
+	// runtime.cgocallbackg leaves SP unchanged upon return.
+	// To save m->curg->sched.pc, we push it onto the curg stack and
+	// open a frame the same size as cgocallback's g0 frame.
+	// Once we switch to the curg stack, the pushed PC will appear
+	// to be the return PC of cgocallback, so that the traceback
+	// will seamlessly trace back into the earlier calls.
+	MOVV	m_curg(R3), g
+	JAL	runtime·save_g(SB)
+	MOVV	(g_sched+gobuf_sp)(g), R2 // prepare stack as R2
+	MOVV	(g_sched+gobuf_pc)(g), R4
+	MOVV	R4, -(24+8)(R2) // "saved LR"; must match frame size
+	// Gather our arguments into registers.
+	MOVV	fn+0(FP), R5
+	MOVV	frame+8(FP), R6
+	MOVV	ctxt+16(FP), R7
+	MOVV	$-(24+8)(R2), R29 // switch stack; must match frame size
+	MOVV	R5, 8(R29)
+	MOVV	R6, 16(R29)
+	MOVV	R7, 24(R29)
+	JAL	runtime·cgocallbackg(SB)
+
+	// Restore g->sched (== m->curg->sched) from saved values.
+	MOVV	0(R29), R4
+	MOVV	R4, (g_sched+gobuf_pc)(g)
+	MOVV	$(24+8)(R29), R2 // must match frame size
+	MOVV	R2, (g_sched+gobuf_sp)(g)
+
+	// Switch back to m->g0's stack and restore m->g0->sched.sp.
+	// (Unlike m->curg, the g0 goroutine never uses sched.pc,
+	// so we do not have to restore it.)
+ MOVV g_m(g), R3 + MOVV m_g0(R3), g + JAL runtime·save_g(SB) + MOVV (g_sched+gobuf_sp)(g), R29 + MOVV savedsp-24(SP), R2 // must match frame size + MOVV R2, (g_sched+gobuf_sp)(g) + + // If the m on entry was nil, we called needm above to borrow an m, + // 1. for the duration of the call on non-pthread platforms, + // 2. or the duration of the C thread alive on pthread platforms. + // If the m on entry wasn't nil, + // 1. the thread might be a Go thread, + // 2. or it wasn't the first call from a C thread on pthread platforms, + // since then we skip dropm to reuse the m in the first call. + MOVV savedm-8(SP), R3 + BNE R3, droppedm + + // Skip dropm to reuse it in the next call, when a pthread key has been created. + MOVV _cgo_pthread_key_created(SB), R3 + // It means cgo is disabled when _cgo_pthread_key_created is a nil pointer, need dropm. + BEQ R3, dropm + MOVV (R3), R3 + BNE R3, droppedm + +dropm: + MOVV $runtime·dropm(SB), R4 + JAL (R4) +droppedm: + + // Done! + RET + +// void setg(G*); set g. for use by needm. +TEXT runtime·setg(SB), NOSPLIT, $0-8 + MOVV gg+0(FP), g + // This only happens if iscgo, so jump straight to save_g + JAL runtime·save_g(SB) + RET + +// void setg_gcc(G*); set g called from gcc with g in R1 +TEXT setg_gcc<>(SB),NOSPLIT,$0-0 + MOVV R1, g + JAL runtime·save_g(SB) + RET + +TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0 + MOVW (R0), R0 + UNDEF + +// AES hashing not implemented for mips64 +TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32 + JMP runtime·memhashFallback(SB) +TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·strhashFallback(SB) +TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·memhash32Fallback(SB) +TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·memhash64Fallback(SB) + +TEXT runtime·return0(SB), NOSPLIT, $0 + MOVW $0, R1 + RET + +// Called from cgo wrappers, this function returns g->m->curg.stack.hi. +// Must obey the gcc calling convention. +TEXT _cgo_topofstack(SB),NOSPLIT,$16 + // g (R30) and REGTMP (R23) might be clobbered by load_g. They + // are callee-save in the gcc calling convention, so save them. + MOVV R23, savedR23-16(SP) + MOVV g, savedG-8(SP) + + JAL runtime·load_g(SB) + MOVV g_m(g), R1 + MOVV m_curg(R1), R1 + MOVV (g_stack+stack_hi)(R1), R2 // return value in R2 + + MOVV savedG-8(SP), g + MOVV savedR23-16(SP), R23 + RET + +// The top-most function running on a goroutine +// returns to goexit+PCQuantum. +TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0 + NOR R0, R0 // NOP + JAL runtime·goexit1(SB) // does not return + // traceback from goexit1 must hit code range of goexit + NOR R0, R0 // NOP + +TEXT ·checkASM(SB),NOSPLIT,$0-1 + MOVW $1, R1 + MOVB R1, ret+0(FP) + RET + +// gcWriteBarrier informs the GC about heap pointer writes. +// +// gcWriteBarrier does NOT follow the Go ABI. It accepts the +// number of bytes of buffer needed in R25, and returns a pointer +// to the buffer space in R25. +// It clobbers R23 (the linker temp register). +// The act of CALLing gcWriteBarrier will clobber R31 (LR). +// It does not clobber any other general-purpose registers, +// but may clobber others (e.g., floating point registers). +TEXT gcWriteBarrier<>(SB),NOSPLIT,$192 + // Save the registers clobbered by the fast path. + MOVV R1, 184(R29) + MOVV R2, 192(R29) +retry: + MOVV g_m(g), R1 + MOVV m_p(R1), R1 + MOVV (p_wbBuf+wbBuf_next)(R1), R2 + MOVV (p_wbBuf+wbBuf_end)(R1), R23 // R23 is linker temp register + // Increment wbBuf.next position. + ADDV R25, R2 + // Is the buffer full? 
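+	// (SGTU leaves 1 in R23 exactly when the advanced next pointer
+	// in R2 has passed wbBuf.end, i.e. the new entry would not fit.)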
+	SGTU	R2, R23, R23
+	BNE	R23, flush
+	// Commit to the larger buffer.
+	MOVV	R2, (p_wbBuf+wbBuf_next)(R1)
+	// Make return value (the original next position)
+	SUBV	R25, R2, R25
+	// Restore registers.
+	MOVV	184(R29), R1
+	MOVV	192(R29), R2
+	RET
+
+flush:
+	// Save all general purpose registers since these could be
+	// clobbered by wbBufFlush and were not saved by the caller.
+	MOVV	R20, 8(R29)
+	MOVV	R21, 16(R29)
+	// R1 already saved
+	// R2 already saved
+	MOVV	R3, 24(R29)
+	MOVV	R4, 32(R29)
+	MOVV	R5, 40(R29)
+	MOVV	R6, 48(R29)
+	MOVV	R7, 56(R29)
+	MOVV	R8, 64(R29)
+	MOVV	R9, 72(R29)
+	MOVV	R10, 80(R29)
+	MOVV	R11, 88(R29)
+	MOVV	R12, 96(R29)
+	MOVV	R13, 104(R29)
+	MOVV	R14, 112(R29)
+	MOVV	R15, 120(R29)
+	MOVV	R16, 128(R29)
+	MOVV	R17, 136(R29)
+	MOVV	R18, 144(R29)
+	MOVV	R19, 152(R29)
+	// R20 already saved
+	// R21 already saved.
+	MOVV	R22, 160(R29)
+	// R23 is tmp register.
+	MOVV	R24, 168(R29)
+	MOVV	R25, 176(R29)
+	// R26 is reserved by kernel.
+	// R27 is reserved by kernel.
+	// R28 is REGSB (not modified by Go code).
+	// R29 is SP.
+	// R30 is g.
+	// R31 is LR, which was saved by the prologue.
+
+	CALL	runtime·wbBufFlush(SB)
+
+	MOVV	8(R29), R20
+	MOVV	16(R29), R21
+	MOVV	24(R29), R3
+	MOVV	32(R29), R4
+	MOVV	40(R29), R5
+	MOVV	48(R29), R6
+	MOVV	56(R29), R7
+	MOVV	64(R29), R8
+	MOVV	72(R29), R9
+	MOVV	80(R29), R10
+	MOVV	88(R29), R11
+	MOVV	96(R29), R12
+	MOVV	104(R29), R13
+	MOVV	112(R29), R14
+	MOVV	120(R29), R15
+	MOVV	128(R29), R16
+	MOVV	136(R29), R17
+	MOVV	144(R29), R18
+	MOVV	152(R29), R19
+	MOVV	160(R29), R22
+	MOVV	168(R29), R24
+	MOVV	176(R29), R25
+	JMP	retry
+
+TEXT runtime·gcWriteBarrier1(SB),NOSPLIT,$0
+	MOVV	$8, R25
+	JMP	gcWriteBarrier<>(SB)
+TEXT runtime·gcWriteBarrier2(SB),NOSPLIT,$0
+	MOVV	$16, R25
+	JMP	gcWriteBarrier<>(SB)
+TEXT runtime·gcWriteBarrier3(SB),NOSPLIT,$0
+	MOVV	$24, R25
+	JMP	gcWriteBarrier<>(SB)
+TEXT runtime·gcWriteBarrier4(SB),NOSPLIT,$0
+	MOVV	$32, R25
+	JMP	gcWriteBarrier<>(SB)
+TEXT runtime·gcWriteBarrier5(SB),NOSPLIT,$0
+	MOVV	$40, R25
+	JMP	gcWriteBarrier<>(SB)
+TEXT runtime·gcWriteBarrier6(SB),NOSPLIT,$0
+	MOVV	$48, R25
+	JMP	gcWriteBarrier<>(SB)
+TEXT runtime·gcWriteBarrier7(SB),NOSPLIT,$0
+	MOVV	$56, R25
+	JMP	gcWriteBarrier<>(SB)
+TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0
+	MOVV	$64, R25
+	JMP	gcWriteBarrier<>(SB)
+
+// Note: these functions use a special calling convention to save generated code space.
+// Arguments are passed in registers, but the space for those arguments is allocated
+// in the caller's stack frame. These stubs write the args into that stack space and
+// then tail call to the corresponding runtime handler.
+// The tail call makes these stubs disappear in backtraces.
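+// For example, when a bounds check fails on mips64 the compiler emits
+// a call to panicIndex with the offending index in R1 and the length
+// in R2; the stub below parks them in the x/y slots and tail calls
+// goPanicIndex, which raises the bounds-check panic.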
+TEXT runtime·panicIndex(SB),NOSPLIT,$0-16 + MOVV R1, x+0(FP) + MOVV R2, y+8(FP) + JMP runtime·goPanicIndex(SB) +TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16 + MOVV R1, x+0(FP) + MOVV R2, y+8(FP) + JMP runtime·goPanicIndexU(SB) +TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16 + MOVV R2, x+0(FP) + MOVV R3, y+8(FP) + JMP runtime·goPanicSliceAlen(SB) +TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16 + MOVV R2, x+0(FP) + MOVV R3, y+8(FP) + JMP runtime·goPanicSliceAlenU(SB) +TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16 + MOVV R2, x+0(FP) + MOVV R3, y+8(FP) + JMP runtime·goPanicSliceAcap(SB) +TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16 + MOVV R2, x+0(FP) + MOVV R3, y+8(FP) + JMP runtime·goPanicSliceAcapU(SB) +TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16 + MOVV R1, x+0(FP) + MOVV R2, y+8(FP) + JMP runtime·goPanicSliceB(SB) +TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16 + MOVV R1, x+0(FP) + MOVV R2, y+8(FP) + JMP runtime·goPanicSliceBU(SB) +TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16 + MOVV R3, x+0(FP) + MOVV R4, y+8(FP) + JMP runtime·goPanicSlice3Alen(SB) +TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16 + MOVV R3, x+0(FP) + MOVV R4, y+8(FP) + JMP runtime·goPanicSlice3AlenU(SB) +TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16 + MOVV R3, x+0(FP) + MOVV R4, y+8(FP) + JMP runtime·goPanicSlice3Acap(SB) +TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16 + MOVV R3, x+0(FP) + MOVV R4, y+8(FP) + JMP runtime·goPanicSlice3AcapU(SB) +TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16 + MOVV R2, x+0(FP) + MOVV R3, y+8(FP) + JMP runtime·goPanicSlice3B(SB) +TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16 + MOVV R2, x+0(FP) + MOVV R3, y+8(FP) + JMP runtime·goPanicSlice3BU(SB) +TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16 + MOVV R1, x+0(FP) + MOVV R2, y+8(FP) + JMP runtime·goPanicSlice3C(SB) +TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16 + MOVV R1, x+0(FP) + MOVV R2, y+8(FP) + JMP runtime·goPanicSlice3CU(SB) +TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16 + MOVV R3, x+0(FP) + MOVV R4, y+8(FP) + JMP runtime·goPanicSliceConvert(SB) diff --git a/platform/dbops/binaries/go/go/src/runtime/asm_mipsx.s b/platform/dbops/binaries/go/go/src/runtime/asm_mipsx.s new file mode 100644 index 0000000000000000000000000000000000000000..eed4a05b38b91a4b3416b9386babdacc746feae2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asm_mipsx.s @@ -0,0 +1,928 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips || mipsle + +#include "go_asm.h" +#include "go_tls.h" +#include "funcdata.h" +#include "textflag.h" + +#define REGCTXT R22 + +TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0 + // R29 = stack; R4 = argc; R5 = argv + + ADDU $-12, R29 + MOVW R4, 4(R29) // argc + MOVW R5, 8(R29) // argv + + // create istack out of the given (operating system) stack. + // _cgo_init may update stackguard. + MOVW $runtime·g0(SB), g + MOVW $(-64*1024), R23 + ADD R23, R29, R1 + MOVW R1, g_stackguard0(g) + MOVW R1, g_stackguard1(g) + MOVW R1, (g_stack+stack_lo)(g) + MOVW R29, (g_stack+stack_hi)(g) + + // if there is a _cgo_init, call it using the gcc ABI. 
+ MOVW _cgo_init(SB), R25 + BEQ R25, nocgo + ADDU $-16, R29 + MOVW R0, R7 // arg 3: not used + MOVW R0, R6 // arg 2: not used + MOVW $setg_gcc<>(SB), R5 // arg 1: setg + MOVW g, R4 // arg 0: G + JAL (R25) + ADDU $16, R29 + +nocgo: + // update stackguard after _cgo_init + MOVW (g_stack+stack_lo)(g), R1 + ADD $const_stackGuard, R1 + MOVW R1, g_stackguard0(g) + MOVW R1, g_stackguard1(g) + + // set the per-goroutine and per-mach "registers" + MOVW $runtime·m0(SB), R1 + + // save m->g0 = g0 + MOVW g, m_g0(R1) + // save m0 to g0->m + MOVW R1, g_m(g) + + JAL runtime·check(SB) + + // args are already prepared + JAL runtime·args(SB) + JAL runtime·osinit(SB) + JAL runtime·schedinit(SB) + + // create a new goroutine to start program + MOVW $runtime·mainPC(SB), R1 // entry + ADDU $-8, R29 + MOVW R1, 4(R29) + MOVW R0, 0(R29) + JAL runtime·newproc(SB) + ADDU $8, R29 + + // start this M + JAL runtime·mstart(SB) + + UNDEF + RET + +DATA runtime·mainPC+0(SB)/4,$runtime·main(SB) +GLOBL runtime·mainPC(SB),RODATA,$4 + +TEXT runtime·breakpoint(SB),NOSPLIT,$0-0 + BREAK + RET + +TEXT runtime·asminit(SB),NOSPLIT,$0-0 + RET + +TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0 + JAL runtime·mstart0(SB) + RET // not reached + +/* + * go-routine + */ + +// void gogo(Gobuf*) +// restore state from Gobuf; longjmp +TEXT runtime·gogo(SB),NOSPLIT|NOFRAME,$0-4 + MOVW buf+0(FP), R3 + MOVW gobuf_g(R3), R4 + MOVW 0(R4), R5 // make sure g != nil + JMP gogo<>(SB) + +TEXT gogo<>(SB),NOSPLIT|NOFRAME,$0 + MOVW R4, g + JAL runtime·save_g(SB) + MOVW gobuf_sp(R3), R29 + MOVW gobuf_lr(R3), R31 + MOVW gobuf_ret(R3), R1 + MOVW gobuf_ctxt(R3), REGCTXT + MOVW R0, gobuf_sp(R3) + MOVW R0, gobuf_ret(R3) + MOVW R0, gobuf_lr(R3) + MOVW R0, gobuf_ctxt(R3) + MOVW gobuf_pc(R3), R4 + JMP (R4) + +// void mcall(fn func(*g)) +// Switch to m->g0's stack, call fn(g). +// Fn must never return. It should gogo(&g->sched) +// to keep running g. +TEXT runtime·mcall(SB),NOSPLIT|NOFRAME,$0-4 + // Save caller state in g->sched + MOVW R29, (g_sched+gobuf_sp)(g) + MOVW R31, (g_sched+gobuf_pc)(g) + MOVW R0, (g_sched+gobuf_lr)(g) + + // Switch to m->g0 & its stack, call fn. + MOVW g, R1 + MOVW g_m(g), R3 + MOVW m_g0(R3), g + JAL runtime·save_g(SB) + BNE g, R1, 2(PC) + JMP runtime·badmcall(SB) + MOVW fn+0(FP), REGCTXT // context + MOVW 0(REGCTXT), R4 // code pointer + MOVW (g_sched+gobuf_sp)(g), R29 // sp = m->g0->sched.sp + ADDU $-8, R29 // make room for 1 arg and fake LR + MOVW R1, 4(R29) + MOVW R0, 0(R29) + JAL (R4) + JMP runtime·badmcall2(SB) + +// systemstack_switch is a dummy routine that systemstack leaves at the bottom +// of the G stack. We need to distinguish the routine that +// lives at the bottom of the G stack from the one that lives +// at the top of the system stack because the one at the top of +// the system stack terminates the stack walk (see topofstack()). +TEXT runtime·systemstack_switch(SB),NOSPLIT,$0-0 + UNDEF + JAL (R31) // make sure this function is not leaf + RET + +// func systemstack(fn func()) +TEXT runtime·systemstack(SB),NOSPLIT,$0-4 + MOVW fn+0(FP), R1 // R1 = fn + MOVW R1, REGCTXT // context + MOVW g_m(g), R2 // R2 = m + + MOVW m_gsignal(R2), R3 // R3 = gsignal + BEQ g, R3, noswitch + + MOVW m_g0(R2), R3 // R3 = g0 + BEQ g, R3, noswitch + + MOVW m_curg(R2), R4 + BEQ g, R4, switch + + // Bad: g is not gsignal, not g0, not curg. What is it? + // Hide call from linker nosplit analysis. + MOVW $runtime·badsystemstack(SB), R4 + JAL (R4) + JAL runtime·abort(SB) + +switch: + // save our state in g->sched. 
Pretend to + // be systemstack_switch if the G stack is scanned. + JAL gosave_systemstack_switch<>(SB) + + // switch to g0 + MOVW R3, g + JAL runtime·save_g(SB) + MOVW (g_sched+gobuf_sp)(g), R1 + MOVW R1, R29 + + // call target function + MOVW 0(REGCTXT), R4 // code pointer + JAL (R4) + + // switch back to g + MOVW g_m(g), R1 + MOVW m_curg(R1), g + JAL runtime·save_g(SB) + MOVW (g_sched+gobuf_sp)(g), R29 + MOVW R0, (g_sched+gobuf_sp)(g) + RET + +noswitch: + // already on m stack, just call directly + // Using a tail call here cleans up tracebacks since we won't stop + // at an intermediate systemstack. + MOVW 0(REGCTXT), R4 // code pointer + MOVW 0(R29), R31 // restore LR + ADD $4, R29 + JMP (R4) + +/* + * support for morestack + */ + +// Called during function prolog when more stack is needed. +// Caller has already loaded: +// R1: framesize, R2: argsize, R3: LR +// +// The traceback routines see morestack on a g0 as being +// the top of a stack (for example, morestack calling newstack +// calling the scheduler calling newm calling gc), so we must +// record an argument size. For that purpose, it has no arguments. +TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 + // Cannot grow scheduler stack (m->g0). + MOVW g_m(g), R7 + MOVW m_g0(R7), R8 + BNE g, R8, 3(PC) + JAL runtime·badmorestackg0(SB) + JAL runtime·abort(SB) + + // Cannot grow signal stack (m->gsignal). + MOVW m_gsignal(R7), R8 + BNE g, R8, 3(PC) + JAL runtime·badmorestackgsignal(SB) + JAL runtime·abort(SB) + + // Called from f. + // Set g->sched to context in f. + MOVW R29, (g_sched+gobuf_sp)(g) + MOVW R31, (g_sched+gobuf_pc)(g) + MOVW R3, (g_sched+gobuf_lr)(g) + MOVW REGCTXT, (g_sched+gobuf_ctxt)(g) + + // Called from f. + // Set m->morebuf to f's caller. + MOVW R3, (m_morebuf+gobuf_pc)(R7) // f's caller's PC + MOVW R29, (m_morebuf+gobuf_sp)(R7) // f's caller's SP + MOVW g, (m_morebuf+gobuf_g)(R7) + + // Call newstack on m->g0's stack. + MOVW m_g0(R7), g + JAL runtime·save_g(SB) + MOVW (g_sched+gobuf_sp)(g), R29 + // Create a stack frame on g0 to call newstack. + MOVW R0, -4(R29) // Zero saved LR in frame + ADDU $-4, R29 + JAL runtime·newstack(SB) + + // Not reached, but make sure the return PC from the call to newstack + // is still in this function, and not the beginning of the next. + UNDEF + +TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0 + // Force SPWRITE. This function doesn't actually write SP, + // but it is called with a special calling convention where + // the caller doesn't save LR on stack but passes it as a + // register (R3), and the unwinder currently doesn't understand. + // Make it SPWRITE to stop unwinding. (See issue 54332) + MOVW R29, R29 + + MOVW R0, REGCTXT + JMP runtime·morestack(SB) + +// reflectcall: call a function with the given argument list +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). +// we don't have variable-sized frames, so we use a small number +// of constant-sized-frame functions to encode a few bits of size in the pc. 
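+// Each DISPATCH below falls through when frameSize exceeds its
+// constant, so control reaches the first call* whose frame is at
+// least as large as the request; a 24-byte frame, say, skips call16
+// and runs on call32's frame.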
+ +#define DISPATCH(NAME,MAXSIZE) \ + MOVW $MAXSIZE, R23; \ + SGTU R1, R23, R23; \ + BNE R23, 3(PC); \ + MOVW $NAME(SB), R4; \ + JMP (R4) + +TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-28 + MOVW frameSize+20(FP), R1 + + DISPATCH(runtime·call16, 16) + DISPATCH(runtime·call32, 32) + DISPATCH(runtime·call64, 64) + DISPATCH(runtime·call128, 128) + DISPATCH(runtime·call256, 256) + DISPATCH(runtime·call512, 512) + DISPATCH(runtime·call1024, 1024) + DISPATCH(runtime·call2048, 2048) + DISPATCH(runtime·call4096, 4096) + DISPATCH(runtime·call8192, 8192) + DISPATCH(runtime·call16384, 16384) + DISPATCH(runtime·call32768, 32768) + DISPATCH(runtime·call65536, 65536) + DISPATCH(runtime·call131072, 131072) + DISPATCH(runtime·call262144, 262144) + DISPATCH(runtime·call524288, 524288) + DISPATCH(runtime·call1048576, 1048576) + DISPATCH(runtime·call2097152, 2097152) + DISPATCH(runtime·call4194304, 4194304) + DISPATCH(runtime·call8388608, 8388608) + DISPATCH(runtime·call16777216, 16777216) + DISPATCH(runtime·call33554432, 33554432) + DISPATCH(runtime·call67108864, 67108864) + DISPATCH(runtime·call134217728, 134217728) + DISPATCH(runtime·call268435456, 268435456) + DISPATCH(runtime·call536870912, 536870912) + DISPATCH(runtime·call1073741824, 1073741824) + MOVW $runtime·badreflectcall(SB), R4 + JMP (R4) + +#define CALLFN(NAME,MAXSIZE) \ +TEXT NAME(SB),WRAPPER,$MAXSIZE-28; \ + NO_LOCAL_POINTERS; \ + /* copy arguments to stack */ \ + MOVW stackArgs+8(FP), R1; \ + MOVW stackArgsSize+12(FP), R2; \ + MOVW R29, R3; \ + ADDU $4, R3; \ + ADDU R3, R2; \ + BEQ R3, R2, 6(PC); \ + MOVBU (R1), R4; \ + ADDU $1, R1; \ + MOVBU R4, (R3); \ + ADDU $1, R3; \ + JMP -5(PC); \ + /* call function */ \ + MOVW f+4(FP), REGCTXT; \ + MOVW (REGCTXT), R4; \ + PCDATA $PCDATA_StackMapIndex, $0; \ + JAL (R4); \ + /* copy return values back */ \ + MOVW stackArgsType+0(FP), R5; \ + MOVW stackArgs+8(FP), R1; \ + MOVW stackArgsSize+12(FP), R2; \ + MOVW stackRetOffset+16(FP), R4; \ + ADDU $4, R29, R3; \ + ADDU R4, R3; \ + ADDU R4, R1; \ + SUBU R4, R2; \ + JAL callRet<>(SB); \ + RET + +// callRet copies return values back at the end of call*. This is a +// separate function so it can allocate stack space for the arguments +// to reflectcallmove. It does not follow the Go ABI; it expects its +// arguments in registers. +TEXT callRet<>(SB), NOSPLIT, $20-0 + MOVW R5, 4(R29) + MOVW R1, 8(R29) + MOVW R3, 12(R29) + MOVW R2, 16(R29) + MOVW $0, 20(R29) + JAL runtime·reflectcallmove(SB) + RET + +CALLFN(·call16, 16) +CALLFN(·call32, 32) +CALLFN(·call64, 64) +CALLFN(·call128, 128) +CALLFN(·call256, 256) +CALLFN(·call512, 512) +CALLFN(·call1024, 1024) +CALLFN(·call2048, 2048) +CALLFN(·call4096, 4096) +CALLFN(·call8192, 8192) +CALLFN(·call16384, 16384) +CALLFN(·call32768, 32768) +CALLFN(·call65536, 65536) +CALLFN(·call131072, 131072) +CALLFN(·call262144, 262144) +CALLFN(·call524288, 524288) +CALLFN(·call1048576, 1048576) +CALLFN(·call2097152, 2097152) +CALLFN(·call4194304, 4194304) +CALLFN(·call8388608, 8388608) +CALLFN(·call16777216, 16777216) +CALLFN(·call33554432, 33554432) +CALLFN(·call67108864, 67108864) +CALLFN(·call134217728, 134217728) +CALLFN(·call268435456, 268435456) +CALLFN(·call536870912, 536870912) +CALLFN(·call1073741824, 1073741824) + +TEXT runtime·procyield(SB),NOSPLIT,$0-4 + RET + +// Save state of caller into g->sched, +// but using fake PC from systemstack_switch. +// Must only be called from functions with no locals ($0) +// or else unwinding from systemstack_switch is incorrect. +// Smashes R1. 
+TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 + MOVW $runtime·systemstack_switch(SB), R1 + ADDU $8, R1 // get past prologue + MOVW R1, (g_sched+gobuf_pc)(g) + MOVW R29, (g_sched+gobuf_sp)(g) + MOVW R0, (g_sched+gobuf_lr)(g) + MOVW R0, (g_sched+gobuf_ret)(g) + // Assert ctxt is zero. See func save. + MOVW (g_sched+gobuf_ctxt)(g), R1 + BEQ R1, 2(PC) + JAL runtime·abort(SB) + RET + +// func asmcgocall(fn, arg unsafe.Pointer) int32 +// Call fn(arg) on the scheduler stack, +// aligned appropriately for the gcc ABI. +// See cgocall.go for more details. +TEXT ·asmcgocall(SB),NOSPLIT,$0-12 + MOVW fn+0(FP), R25 + MOVW arg+4(FP), R4 + + MOVW R29, R3 // save original stack pointer + MOVW g, R2 + + // Figure out if we need to switch to m->g0 stack. + // We get called to create new OS threads too, and those + // come in on the m->g0 stack already. Or we might already + // be on the m->gsignal stack. + MOVW g_m(g), R5 + MOVW m_gsignal(R5), R6 + BEQ R6, g, g0 + MOVW m_g0(R5), R6 + BEQ R6, g, g0 + + JAL gosave_systemstack_switch<>(SB) + MOVW R6, g + JAL runtime·save_g(SB) + MOVW (g_sched+gobuf_sp)(g), R29 + + // Now on a scheduling stack (a pthread-created stack). +g0: + // Save room for two of our pointers and O32 frame. + ADDU $-24, R29 + AND $~7, R29 // O32 ABI expects 8-byte aligned stack on function entry + MOVW R2, 16(R29) // save old g on stack + MOVW (g_stack+stack_hi)(R2), R2 + SUBU R3, R2 + MOVW R2, 20(R29) // save depth in old g stack (can't just save SP, as stack might be copied during a callback) + JAL (R25) + + // Restore g, stack pointer. R2 is return value. + MOVW 16(R29), g + JAL runtime·save_g(SB) + MOVW (g_stack+stack_hi)(g), R5 + MOVW 20(R29), R6 + SUBU R6, R5 + MOVW R5, R29 + + MOVW R2, ret+8(FP) + RET + +// cgocallback(fn, frame unsafe.Pointer, ctxt uintptr) +// See cgocall.go for more details. +TEXT ·cgocallback(SB),NOSPLIT,$12-12 + NO_LOCAL_POINTERS + + // Skip cgocallbackg, just dropm when fn is nil, and frame is the saved g. + // It is used to dropm while thread is exiting. + MOVW fn+0(FP), R5 + BNE R5, loadg + // Restore the g from frame. + MOVW frame+4(FP), g + JMP dropm + +loadg: + // Load m and g from thread-local storage. + MOVB runtime·iscgo(SB), R1 + BEQ R1, nocgo + JAL runtime·load_g(SB) +nocgo: + + // If g is nil, Go did not create the current thread, + // or if this thread never called into Go on pthread platforms. + // Call needm to obtain one for temporary use. + // In this case, we're running on the thread stack, so there's + // lots of space, but the linker doesn't know. Hide the call from + // the linker analysis by using an indirect call. + BEQ g, needm + + MOVW g_m(g), R3 + MOVW R3, savedm-4(SP) + JMP havem + +needm: + MOVW g, savedm-4(SP) // g is zero, so is m. + MOVW $runtime·needAndBindM(SB), R4 + JAL (R4) + + // Set m->sched.sp = SP, so that if a panic happens + // during the function we are about to execute, it will + // have a valid SP to run on the g0 stack. + // The next few lines (after the havem label) + // will save this SP onto the stack and then write + // the same SP back to m->sched.sp. That seems redundant, + // but if an unrecovered panic happens, unwindm will + // restore the g->sched.sp from the stack location + // and then systemstack will try to use it. If we don't set it here, + // that restored SP will be uninitialized (typically 0) and + // will not be usable. + MOVW g_m(g), R3 + MOVW m_g0(R3), R1 + MOVW R29, (g_sched+gobuf_sp)(R1) + +havem: + // Now there's a valid m, and we're running on its m->g0. 
+	// Save current m->g0->sched.sp on stack and then set it to SP.
+	// Save current sp in m->g0->sched.sp in preparation for
+	// switch back to m->curg stack.
+	// NOTE: unwindm knows that the saved g->sched.sp is at 4(R29) aka savedsp-12(SP).
+	MOVW	m_g0(R3), R1
+	MOVW	(g_sched+gobuf_sp)(R1), R2
+	MOVW	R2, savedsp-12(SP)	// must match frame size
+	MOVW	R29, (g_sched+gobuf_sp)(R1)
+
+	// Switch to m->curg stack and call runtime.cgocallbackg.
+	// Because we are taking over the execution of m->curg
+	// but *not* resuming what had been running, we need to
+	// save that information (m->curg->sched) so we can restore it.
+	// We can restore m->curg->sched.sp easily, because calling
+	// runtime.cgocallbackg leaves SP unchanged upon return.
+	// To save m->curg->sched.pc, we push it onto the curg stack and
+	// open a frame the same size as cgocallback's g0 frame.
+	// Once we switch to the curg stack, the pushed PC will appear
+	// to be the return PC of cgocallback, so that the traceback
+	// will seamlessly trace back into the earlier calls.
+	MOVW	m_curg(R3), g
+	JAL	runtime·save_g(SB)
+	MOVW	(g_sched+gobuf_sp)(g), R2 // prepare stack as R2
+	MOVW	(g_sched+gobuf_pc)(g), R4
+	MOVW	R4, -(12+4)(R2) // "saved LR"; must match frame size
+	// Gather our arguments into registers.
+	MOVW	fn+0(FP), R5
+	MOVW	frame+4(FP), R6
+	MOVW	ctxt+8(FP), R7
+	MOVW	$-(12+4)(R2), R29 // switch stack; must match frame size
+	MOVW	R5, 4(R29)
+	MOVW	R6, 8(R29)
+	MOVW	R7, 12(R29)
+	JAL	runtime·cgocallbackg(SB)
+
+	// Restore g->sched (== m->curg->sched) from saved values.
+	MOVW	0(R29), R4
+	MOVW	R4, (g_sched+gobuf_pc)(g)
+	MOVW	$(12+4)(R29), R2 // must match frame size
+	MOVW	R2, (g_sched+gobuf_sp)(g)
+
+	// Switch back to m->g0's stack and restore m->g0->sched.sp.
+	// (Unlike m->curg, the g0 goroutine never uses sched.pc,
+	// so we do not have to restore it.)
+	MOVW	g_m(g), R3
+	MOVW	m_g0(R3), g
+	JAL	runtime·save_g(SB)
+	MOVW	(g_sched+gobuf_sp)(g), R29
+	MOVW	savedsp-12(SP), R2	// must match frame size
+	MOVW	R2, (g_sched+gobuf_sp)(g)
+
+	// If the m on entry was nil, we called needm above to borrow an m,
+	// 1. for the duration of the call on non-pthread platforms,
+	// 2. or the duration of the C thread alive on pthread platforms.
+	// If the m on entry wasn't nil,
+	// 1. the thread might be a Go thread,
+	// 2. or it wasn't the first call from a C thread on pthread platforms,
+	// since then we skip dropm to reuse the m in the first call.
+	MOVW	savedm-4(SP), R3
+	BNE	R3, droppedm
+
+	// Skip dropm to reuse it in the next call, when a pthread key has been created.
+	MOVW	_cgo_pthread_key_created(SB), R3
+	// It means cgo is disabled when _cgo_pthread_key_created is a nil pointer, need dropm.
+	BEQ	R3, dropm
+	MOVW	(R3), R3
+	BNE	R3, droppedm
+
+dropm:
+	MOVW	$runtime·dropm(SB), R4
+	JAL	(R4)
+droppedm:
+
+	// Done!
+	RET
+
+// void setg(G*); set g. for use by needm.
+// This only happens if iscgo, so jump straight to save_g
+TEXT runtime·setg(SB),NOSPLIT,$0-4
+	MOVW	gg+0(FP), g
+	JAL	runtime·save_g(SB)
+	RET
+
+// void setg_gcc(G*); set g in C TLS.
+// Must obey the gcc calling convention.
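+// Under the o32 calling convention the first (and only) argument
+// arrives in R4, which the body below copies straight into the g
+// register.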
+TEXT setg_gcc<>(SB),NOSPLIT,$0
+ MOVW R4, g
+ JAL runtime·save_g(SB)
+ RET
+
+TEXT runtime·abort(SB),NOSPLIT,$0-0
+ UNDEF
+
+// AES hashing not implemented for mips
+TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-16
+ JMP runtime·memhashFallback(SB)
+TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-12
+ JMP runtime·strhashFallback(SB)
+TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-12
+ JMP runtime·memhash32Fallback(SB)
+TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-12
+ JMP runtime·memhash64Fallback(SB)
+
+TEXT runtime·return0(SB),NOSPLIT,$0
+ MOVW $0, R1
+ RET
+
+// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
+// Must obey the gcc calling convention.
+TEXT _cgo_topofstack(SB),NOSPLIT|NOFRAME,$0
+ // g (R30), R3 and REGTMP (R23) might be clobbered by load_g. R30 and R23
+ // are callee-save in the gcc calling convention, so save them.
+ MOVW R23, R8
+ MOVW g, R9
+ MOVW R31, R10 // this call frame does not save LR
+
+ JAL runtime·load_g(SB)
+ MOVW g_m(g), R1
+ MOVW m_curg(R1), R1
+ MOVW (g_stack+stack_hi)(R1), R2 // return value in R2
+
+ MOVW R8, R23
+ MOVW R9, g
+ MOVW R10, R31
+
+ RET
+
+// The top-most function running on a goroutine
+// returns to goexit+PCQuantum.
+TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0
+ NOR R0, R0 // NOP
+ JAL runtime·goexit1(SB) // does not return
+ // traceback from goexit1 must hit code range of goexit
+ NOR R0, R0 // NOP
+
+TEXT ·checkASM(SB),NOSPLIT,$0-1
+ MOVW $1, R1
+ MOVB R1, ret+0(FP)
+ RET
+
+// gcWriteBarrier informs the GC about heap pointer writes.
+//
+// gcWriteBarrier does NOT follow the Go ABI. It accepts the
+// number of bytes of buffer needed in R25, and returns a pointer
+// to the buffer space in R25.
+// It clobbers R23 (the linker temp register).
+// The act of CALLing gcWriteBarrier will clobber R31 (LR).
+// It does not clobber any other general-purpose registers,
+// but may clobber others (e.g., floating point registers).
+TEXT gcWriteBarrier<>(SB),NOSPLIT,$104
+ // Save the registers clobbered by the fast path.
+ MOVW R1, 100(R29)
+ MOVW R2, 104(R29)
+retry:
+ MOVW g_m(g), R1
+ MOVW m_p(R1), R1
+ MOVW (p_wbBuf+wbBuf_next)(R1), R2
+ MOVW (p_wbBuf+wbBuf_end)(R1), R23 // R23 is linker temp register
+ // Increment wbBuf.next position.
+ ADD R25, R2
+ // Is the buffer full?
+ SGTU R2, R23, R23
+ BNE R23, flush
+ // Commit to the larger buffer.
+ MOVW R2, (p_wbBuf+wbBuf_next)(R1)
+ // Make return value (the original next position)
+ SUB R25, R2, R25
+ // Restore registers.
+ MOVW 100(R29), R1
+ MOVW 104(R29), R2
+ RET
+
+flush:
+ // Save all general purpose registers since these could be
+ // clobbered by wbBufFlush and were not saved by the caller.
+ MOVW R20, 4(R29)
+ MOVW R21, 8(R29)
+ // R1 already saved
+ // R2 already saved
+ MOVW R3, 12(R29)
+ MOVW R4, 16(R29)
+ MOVW R5, 20(R29)
+ MOVW R6, 24(R29)
+ MOVW R7, 28(R29)
+ MOVW R8, 32(R29)
+ MOVW R9, 36(R29)
+ MOVW R10, 40(R29)
+ MOVW R11, 44(R29)
+ MOVW R12, 48(R29)
+ MOVW R13, 52(R29)
+ MOVW R14, 56(R29)
+ MOVW R15, 60(R29)
+ MOVW R16, 64(R29)
+ MOVW R17, 68(R29)
+ MOVW R18, 72(R29)
+ MOVW R19, 76(R29)
+ MOVW R20, 80(R29) // redundant; R20 was already saved at 4(R29)
+ // R21 already saved
+ MOVW R22, 84(R29)
+ // R23 is tmp register.
+ MOVW R24, 88(R29)
+ MOVW R25, 92(R29)
+ // R26 is reserved by kernel.
+ // R27 is reserved by kernel.
+ MOVW R28, 96(R29)
+ // R29 is SP.
+ // R30 is g.
+ // R31 is LR, which was saved by the prologue.
+ + CALL runtime·wbBufFlush(SB) + + MOVW 4(R29), R20 + MOVW 8(R29), R21 + MOVW 12(R29), R3 + MOVW 16(R29), R4 + MOVW 20(R29), R5 + MOVW 24(R29), R6 + MOVW 28(R29), R7 + MOVW 32(R29), R8 + MOVW 36(R29), R9 + MOVW 40(R29), R10 + MOVW 44(R29), R11 + MOVW 48(R29), R12 + MOVW 52(R29), R13 + MOVW 56(R29), R14 + MOVW 60(R29), R15 + MOVW 64(R29), R16 + MOVW 68(R29), R17 + MOVW 72(R29), R18 + MOVW 76(R29), R19 + MOVW 80(R29), R20 + MOVW 84(R29), R22 + MOVW 88(R29), R24 + MOVW 92(R29), R25 + MOVW 96(R29), R28 + JMP retry + +TEXT runtime·gcWriteBarrier1(SB),NOSPLIT,$0 + MOVW $4, R25 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier2(SB),NOSPLIT,$0 + MOVW $8, R25 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier3(SB),NOSPLIT,$0 + MOVW $12, R25 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier4(SB),NOSPLIT,$0 + MOVW $16, R25 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier5(SB),NOSPLIT,$0 + MOVW $20, R25 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier6(SB),NOSPLIT,$0 + MOVW $24, R25 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier7(SB),NOSPLIT,$0 + MOVW $28, R25 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0 + MOVW $32, R25 + JMP gcWriteBarrier<>(SB) + +// Note: these functions use a special calling convention to save generated code space. +// Arguments are passed in registers, but the space for those arguments are allocated +// in the caller's stack frame. These stubs write the args into that stack space and +// then tail call to the corresponding runtime handler. +// The tail call makes these stubs disappear in backtraces. +TEXT runtime·panicIndex(SB),NOSPLIT,$0-8 + MOVW R1, x+0(FP) + MOVW R2, y+4(FP) + JMP runtime·goPanicIndex(SB) +TEXT runtime·panicIndexU(SB),NOSPLIT,$0-8 + MOVW R1, x+0(FP) + MOVW R2, y+4(FP) + JMP runtime·goPanicIndexU(SB) +TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-8 + MOVW R2, x+0(FP) + MOVW R3, y+4(FP) + JMP runtime·goPanicSliceAlen(SB) +TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-8 + MOVW R2, x+0(FP) + MOVW R3, y+4(FP) + JMP runtime·goPanicSliceAlenU(SB) +TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-8 + MOVW R2, x+0(FP) + MOVW R3, y+4(FP) + JMP runtime·goPanicSliceAcap(SB) +TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-8 + MOVW R2, x+0(FP) + MOVW R3, y+4(FP) + JMP runtime·goPanicSliceAcapU(SB) +TEXT runtime·panicSliceB(SB),NOSPLIT,$0-8 + MOVW R1, x+0(FP) + MOVW R2, y+4(FP) + JMP runtime·goPanicSliceB(SB) +TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-8 + MOVW R1, x+0(FP) + MOVW R2, y+4(FP) + JMP runtime·goPanicSliceBU(SB) +TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-8 + MOVW R3, x+0(FP) + MOVW R4, y+4(FP) + JMP runtime·goPanicSlice3Alen(SB) +TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-8 + MOVW R3, x+0(FP) + MOVW R4, y+4(FP) + JMP runtime·goPanicSlice3AlenU(SB) +TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-8 + MOVW R3, x+0(FP) + MOVW R4, y+4(FP) + JMP runtime·goPanicSlice3Acap(SB) +TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-8 + MOVW R3, x+0(FP) + MOVW R4, y+4(FP) + JMP runtime·goPanicSlice3AcapU(SB) +TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-8 + MOVW R2, x+0(FP) + MOVW R3, y+4(FP) + JMP runtime·goPanicSlice3B(SB) +TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-8 + MOVW R2, x+0(FP) + MOVW R3, y+4(FP) + JMP runtime·goPanicSlice3BU(SB) +TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-8 + MOVW R1, x+0(FP) + MOVW R2, y+4(FP) + JMP runtime·goPanicSlice3C(SB) +TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-8 + MOVW R1, x+0(FP) + MOVW R2, y+4(FP) + JMP runtime·goPanicSlice3CU(SB) +TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-8 + MOVW R3, x+0(FP) 
+ MOVW R4, y+4(FP)
+ JMP runtime·goPanicSliceConvert(SB)
+
+// Extended versions for 64-bit indexes.
+TEXT runtime·panicExtendIndex(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R1, lo+4(FP)
+ MOVW R2, y+8(FP)
+ JMP runtime·goPanicExtendIndex(SB)
+TEXT runtime·panicExtendIndexU(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R1, lo+4(FP)
+ MOVW R2, y+8(FP)
+ JMP runtime·goPanicExtendIndexU(SB)
+TEXT runtime·panicExtendSliceAlen(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R2, lo+4(FP)
+ MOVW R3, y+8(FP)
+ JMP runtime·goPanicExtendSliceAlen(SB)
+TEXT runtime·panicExtendSliceAlenU(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R2, lo+4(FP)
+ MOVW R3, y+8(FP)
+ JMP runtime·goPanicExtendSliceAlenU(SB)
+TEXT runtime·panicExtendSliceAcap(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R2, lo+4(FP)
+ MOVW R3, y+8(FP)
+ JMP runtime·goPanicExtendSliceAcap(SB)
+TEXT runtime·panicExtendSliceAcapU(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R2, lo+4(FP)
+ MOVW R3, y+8(FP)
+ JMP runtime·goPanicExtendSliceAcapU(SB)
+TEXT runtime·panicExtendSliceB(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R1, lo+4(FP)
+ MOVW R2, y+8(FP)
+ JMP runtime·goPanicExtendSliceB(SB)
+TEXT runtime·panicExtendSliceBU(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R1, lo+4(FP)
+ MOVW R2, y+8(FP)
+ JMP runtime·goPanicExtendSliceBU(SB)
+TEXT runtime·panicExtendSlice3Alen(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R3, lo+4(FP)
+ MOVW R4, y+8(FP)
+ JMP runtime·goPanicExtendSlice3Alen(SB)
+TEXT runtime·panicExtendSlice3AlenU(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R3, lo+4(FP)
+ MOVW R4, y+8(FP)
+ JMP runtime·goPanicExtendSlice3AlenU(SB)
+TEXT runtime·panicExtendSlice3Acap(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R3, lo+4(FP)
+ MOVW R4, y+8(FP)
+ JMP runtime·goPanicExtendSlice3Acap(SB)
+TEXT runtime·panicExtendSlice3AcapU(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R3, lo+4(FP)
+ MOVW R4, y+8(FP)
+ JMP runtime·goPanicExtendSlice3AcapU(SB)
+TEXT runtime·panicExtendSlice3B(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R2, lo+4(FP)
+ MOVW R3, y+8(FP)
+ JMP runtime·goPanicExtendSlice3B(SB)
+TEXT runtime·panicExtendSlice3BU(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R2, lo+4(FP)
+ MOVW R3, y+8(FP)
+ JMP runtime·goPanicExtendSlice3BU(SB)
+TEXT runtime·panicExtendSlice3C(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R1, lo+4(FP)
+ MOVW R2, y+8(FP)
+ JMP runtime·goPanicExtendSlice3C(SB)
+TEXT runtime·panicExtendSlice3CU(SB),NOSPLIT,$0-12
+ MOVW R5, hi+0(FP)
+ MOVW R1, lo+4(FP)
+ MOVW R2, y+8(FP)
+ JMP runtime·goPanicExtendSlice3CU(SB)
diff --git a/platform/dbops/binaries/go/go/src/runtime/asm_ppc64x.h b/platform/dbops/binaries/go/go/src/runtime/asm_ppc64x.h
new file mode 100644
index 0000000000000000000000000000000000000000..65870fe020fa3d40a7a8799aae567c79b629d464
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/asm_ppc64x.h
@@ -0,0 +1,55 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// FIXED_FRAME defines the size of the fixed part of a stack frame. A stack
+// frame looks like this:
+//
+// +---------------------+
+// | local variable area |
+// +---------------------+
+// | argument area |
+// +---------------------+ <- R1+FIXED_FRAME
+// | fixed area |
+// +---------------------+ <- R1
+//
+// So a function that sets up a stack frame at all uses at least FIXED_FRAME
+// bytes of stack.
This mostly affects assembly that calls other functions +// with arguments (the arguments should be stored at FIXED_FRAME+0(R1), +// FIXED_FRAME+8(R1) etc) and some other low-level places. +// +// The reason for using a constant is to make supporting PIC easier (although +// we only support PIC on ppc64le which has a minimum 32 bytes of stack frame, +// and currently always use that much, PIC on ppc64 would need to use 48). + +#define FIXED_FRAME 32 + +// aix/ppc64 uses XCOFF which uses function descriptors. +// AIX cannot perform the TOC relocation in a text section. +// Therefore, these descriptors must live in a data section. +#ifdef GOOS_aix +#ifdef GOARCH_ppc64 +#define GO_PPC64X_HAS_FUNCDESC +#define DEFINE_PPC64X_FUNCDESC(funcname, localfuncname) \ + DATA funcname+0(SB)/8, $localfuncname(SB) \ + DATA funcname+8(SB)/8, $TOC(SB) \ + DATA funcname+16(SB)/8, $0 \ + GLOBL funcname(SB), NOPTR, $24 +#endif +#endif + +// linux/ppc64 uses ELFv1 which uses function descriptors. +// These must also look like ABI0 functions on linux/ppc64 +// to work with abi.FuncPCABI0(sigtramp) in os_linux.go. +// Only static codegen is supported on linux/ppc64, so TOC +// is not needed. +#ifdef GOOS_linux +#ifdef GOARCH_ppc64 +#define GO_PPC64X_HAS_FUNCDESC +#define DEFINE_PPC64X_FUNCDESC(funcname, localfuncname) \ + TEXT funcname(SB),NOSPLIT|NOFRAME,$0 \ + DWORD $localfuncname(SB) \ + DWORD $0 \ + DWORD $0 +#endif +#endif diff --git a/platform/dbops/binaries/go/go/src/runtime/asm_ppc64x.s b/platform/dbops/binaries/go/go/src/runtime/asm_ppc64x.s new file mode 100644 index 0000000000000000000000000000000000000000..ff9b736430cc2fa3930b868d4574bcec8192b7ec --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asm_ppc64x.s @@ -0,0 +1,1618 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +#include "go_asm.h" +#include "go_tls.h" +#include "funcdata.h" +#include "textflag.h" +#include "asm_ppc64x.h" + +#ifdef GOOS_aix +#define cgoCalleeStackSize 48 +#else +#define cgoCalleeStackSize 32 +#endif + +TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0 + // R1 = stack; R3 = argc; R4 = argv; R13 = C TLS base pointer + + // initialize essential registers + BL runtime·reginit(SB) + + SUB $(FIXED_FRAME+16), R1 + MOVD R2, 24(R1) // stash the TOC pointer away again now we've created a new frame + MOVW R3, FIXED_FRAME+0(R1) // argc + MOVD R4, FIXED_FRAME+8(R1) // argv + + // create istack out of the given (operating system) stack. + // _cgo_init may update stackguard. + MOVD $runtime·g0(SB), g + BL runtime·save_g(SB) + MOVD $(-64*1024), R31 + ADD R31, R1, R3 + MOVD R3, g_stackguard0(g) + MOVD R3, g_stackguard1(g) + MOVD R3, (g_stack+stack_lo)(g) + MOVD R1, (g_stack+stack_hi)(g) + + // If there is a _cgo_init, call it using the gcc ABI. + MOVD _cgo_init(SB), R12 + CMP R0, R12 + BEQ nocgo + +#ifdef GO_PPC64X_HAS_FUNCDESC + // Load the real entry address from the first slot of the function descriptor. 
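+ // (An ELFv1/XCOFF function descriptor is three doublewords: entry
+ // address, TOC pointer, environment. Slot 0 feeds R12, slot 1 feeds R2.)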
+ MOVD 8(R12), R2 + MOVD (R12), R12 +#endif + MOVD R12, CTR // r12 = "global function entry point" + MOVD R13, R5 // arg 2: TLS base pointer + MOVD $setg_gcc<>(SB), R4 // arg 1: setg + MOVD g, R3 // arg 0: G + // C functions expect 32 (48 for AIX) bytes of space on caller + // stack frame and a 16-byte aligned R1 + MOVD R1, R14 // save current stack + SUB $cgoCalleeStackSize, R1 // reserve the callee area + RLDCR $0, R1, $~15, R1 // 16-byte align + BL (CTR) // may clobber R0, R3-R12 + MOVD R14, R1 // restore stack +#ifndef GOOS_aix + MOVD 24(R1), R2 +#endif + XOR R0, R0 // fix R0 + +nocgo: + // update stackguard after _cgo_init + MOVD (g_stack+stack_lo)(g), R3 + ADD $const_stackGuard, R3 + MOVD R3, g_stackguard0(g) + MOVD R3, g_stackguard1(g) + + // set the per-goroutine and per-mach "registers" + MOVD $runtime·m0(SB), R3 + + // save m->g0 = g0 + MOVD g, m_g0(R3) + // save m0 to g0->m + MOVD R3, g_m(g) + + BL runtime·check(SB) + + // args are already prepared + BL runtime·args(SB) + BL runtime·osinit(SB) + BL runtime·schedinit(SB) + + // create a new goroutine to start program + MOVD $runtime·mainPC(SB), R3 // entry + MOVDU R3, -8(R1) + MOVDU R0, -8(R1) + MOVDU R0, -8(R1) + MOVDU R0, -8(R1) + MOVDU R0, -8(R1) + BL runtime·newproc(SB) + ADD $(8+FIXED_FRAME), R1 + + // start this M + BL runtime·mstart(SB) + // Prevent dead-code elimination of debugCallV2, which is + // intended to be called by debuggers. +#ifdef GOARCH_ppc64le + MOVD $runtime·debugCallV2(SB), R31 +#endif + MOVD R0, 0(R0) + RET + +DATA runtime·mainPC+0(SB)/8,$runtime·main(SB) +GLOBL runtime·mainPC(SB),RODATA,$8 + +TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0 + TW $31, R0, R0 + RET + +TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 + RET + +// Any changes must be reflected to runtime/cgo/gcc_aix_ppc64.S:.crosscall_ppc64 +TEXT _cgo_reginit(SB),NOSPLIT|NOFRAME,$0-0 + // crosscall_ppc64 and crosscall2 need to reginit, but can't + // get at the 'runtime.reginit' symbol. + BR runtime·reginit(SB) + +TEXT runtime·reginit(SB),NOSPLIT|NOFRAME,$0-0 + // set R0 to zero, it's expected by the toolchain + XOR R0, R0 + RET + +TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0 + BL runtime·mstart0(SB) + RET // not reached + +/* + * go-routine + */ + +// void gogo(Gobuf*) +// restore state from Gobuf; longjmp +TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8 + MOVD buf+0(FP), R5 + MOVD gobuf_g(R5), R6 + MOVD 0(R6), R4 // make sure g != nil + BR gogo<>(SB) + +TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0 + MOVD R6, g + BL runtime·save_g(SB) + + MOVD gobuf_sp(R5), R1 + MOVD gobuf_lr(R5), R31 +#ifndef GOOS_aix + MOVD 24(R1), R2 // restore R2 +#endif + MOVD R31, LR + MOVD gobuf_ret(R5), R3 + MOVD gobuf_ctxt(R5), R11 + MOVD R0, gobuf_sp(R5) + MOVD R0, gobuf_ret(R5) + MOVD R0, gobuf_lr(R5) + MOVD R0, gobuf_ctxt(R5) + CMP R0, R0 // set condition codes for == test, needed by stack split + MOVD gobuf_pc(R5), R12 + MOVD R12, CTR + BR (CTR) + +// void mcall(fn func(*g)) +// Switch to m->g0's stack, call fn(g). +// Fn must never return. It should gogo(&g->sched) +// to keep running g. +TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 + // Save caller state in g->sched + // R11 should be safe across save_g?? + MOVD R3, R11 + MOVD R1, (g_sched+gobuf_sp)(g) + MOVD LR, R31 + MOVD R31, (g_sched+gobuf_pc)(g) + MOVD R0, (g_sched+gobuf_lr)(g) + + // Switch to m->g0 & its stack, call fn. 
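+ // (fn was stashed in R11 above; the old g travels in R3 as fn's argument.)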
+ MOVD g, R3 + MOVD g_m(g), R8 + MOVD m_g0(R8), g + BL runtime·save_g(SB) + CMP g, R3 + BNE 2(PC) + BR runtime·badmcall(SB) + MOVD 0(R11), R12 // code pointer + MOVD R12, CTR + MOVD (g_sched+gobuf_sp)(g), R1 // sp = m->g0->sched.sp + // Don't need to do anything special for regabiargs here + // R3 is g; stack is set anyway + MOVDU R3, -8(R1) + MOVDU R0, -8(R1) + MOVDU R0, -8(R1) + MOVDU R0, -8(R1) + MOVDU R0, -8(R1) + BL (CTR) + MOVD 24(R1), R2 + BR runtime·badmcall2(SB) + +// systemstack_switch is a dummy routine that systemstack leaves at the bottom +// of the G stack. We need to distinguish the routine that +// lives at the bottom of the G stack from the one that lives +// at the top of the system stack because the one at the top of +// the system stack terminates the stack walk (see topofstack()). +TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0 + // We have several undefs here so that 16 bytes past + // $runtime·systemstack_switch lies within them whether or not the + // instructions that derive r2 from r12 are there. + UNDEF + UNDEF + UNDEF + BL (LR) // make sure this function is not leaf + RET + +// func systemstack(fn func()) +TEXT runtime·systemstack(SB), NOSPLIT, $0-8 + MOVD fn+0(FP), R3 // R3 = fn + MOVD R3, R11 // context + MOVD g_m(g), R4 // R4 = m + + MOVD m_gsignal(R4), R5 // R5 = gsignal + CMP g, R5 + BEQ noswitch + + MOVD m_g0(R4), R5 // R5 = g0 + CMP g, R5 + BEQ noswitch + + MOVD m_curg(R4), R6 + CMP g, R6 + BEQ switch + + // Bad: g is not gsignal, not g0, not curg. What is it? + // Hide call from linker nosplit analysis. + MOVD $runtime·badsystemstack(SB), R12 + MOVD R12, CTR + BL (CTR) + BL runtime·abort(SB) + +switch: + // save our state in g->sched. Pretend to + // be systemstack_switch if the G stack is scanned. + BL gosave_systemstack_switch<>(SB) + + // switch to g0 + MOVD R5, g + BL runtime·save_g(SB) + MOVD (g_sched+gobuf_sp)(g), R1 + + // call target function + MOVD 0(R11), R12 // code pointer + MOVD R12, CTR + BL (CTR) + + // restore TOC pointer. It seems unlikely that we will use systemstack + // to call a function defined in another module, but the results of + // doing so would be so confusing that it's worth doing this. + MOVD g_m(g), R3 + MOVD m_curg(R3), g + MOVD (g_sched+gobuf_sp)(g), R3 +#ifndef GOOS_aix + MOVD 24(R3), R2 +#endif + // switch back to g + MOVD g_m(g), R3 + MOVD m_curg(R3), g + BL runtime·save_g(SB) + MOVD (g_sched+gobuf_sp)(g), R1 + MOVD R0, (g_sched+gobuf_sp)(g) + RET + +noswitch: + // already on m stack, just call directly + // On other arches we do a tail call here, but it appears to be + // impossible to tail call a function pointer in shared mode on + // ppc64 because the caller is responsible for restoring the TOC. + MOVD 0(R11), R12 // code pointer + MOVD R12, CTR + BL (CTR) +#ifndef GOOS_aix + MOVD 24(R1), R2 +#endif + RET + +// func switchToCrashStack0(fn func()) +TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-8 + MOVD R3, R11 // context register + MOVD g_m(g), R3 // curm + + // set g to gcrash + MOVD $runtime·gcrash(SB), g // g = &gcrash + CALL runtime·save_g(SB) // clobbers R31 + MOVD R3, g_m(g) // g.m = curm + MOVD g, m_g0(R3) // curm.g0 = g + + // switch to crashstack + MOVD (g_stack+stack_hi)(g), R3 + SUB $(4*8), R3 + MOVD R3, R1 + + // call target function + MOVD 0(R11), R12 // code pointer + MOVD R12, CTR + BL (CTR) + + // should never return + CALL runtime·abort(SB) + UNDEF + +/* + * support for morestack + */ + +// Called during function prolog when more stack is needed. 
+// Caller has already loaded:
+// R3: framesize, R4: argsize, R5: LR
+//
+// The traceback routines see morestack on a g0 as being
+// the top of a stack (for example, morestack calling newstack
+// calling the scheduler calling newm calling gc), so we must
+// record an argument size. For that purpose, it has no arguments.
+TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
+ // Called from f.
+ // Set g->sched to context in f.
+ MOVD R1, (g_sched+gobuf_sp)(g)
+ MOVD LR, R8
+ MOVD R8, (g_sched+gobuf_pc)(g)
+ MOVD R5, (g_sched+gobuf_lr)(g)
+ MOVD R11, (g_sched+gobuf_ctxt)(g)
+
+ // Cannot grow scheduler stack (m->g0).
+ MOVD g_m(g), R7
+ MOVD m_g0(R7), R8
+ CMP g, R8
+ BNE 3(PC)
+ BL runtime·badmorestackg0(SB)
+ BL runtime·abort(SB)
+
+ // Cannot grow signal stack (m->gsignal).
+ MOVD m_gsignal(R7), R8
+ CMP g, R8
+ BNE 3(PC)
+ BL runtime·badmorestackgsignal(SB)
+ BL runtime·abort(SB)
+
+ // Called from f.
+ // Set m->morebuf to f's caller.
+ MOVD R5, (m_morebuf+gobuf_pc)(R7) // f's caller's PC
+ MOVD R1, (m_morebuf+gobuf_sp)(R7) // f's caller's SP
+ MOVD g, (m_morebuf+gobuf_g)(R7)
+
+ // Call newstack on m->g0's stack.
+ MOVD m_g0(R7), g
+ BL runtime·save_g(SB)
+ MOVD (g_sched+gobuf_sp)(g), R1
+ MOVDU R0, -(FIXED_FRAME+0)(R1) // create a call frame on g0
+ BL runtime·newstack(SB)
+
+ // Not reached, but make sure the return PC from the call to newstack
+ // is still in this function, and not the beginning of the next.
+ UNDEF
+
+TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
+ // Force SPWRITE. This function doesn't actually write SP,
+ // but it is called with a special calling convention where
+ // the caller doesn't save LR on stack but passes it as a
+ // register (R5), and the unwinder currently doesn't understand that.
+ // Make it SPWRITE to stop unwinding. (See issue 54332)
+ // Use OR R0, R1 instead of MOVD R1, R1 as the MOVD instruction
+ // has a special effect on Power8,9,10 by lowering the thread
+ // priority and causing a slowdown in execution time.
+
+ OR R0, R1
+ MOVD R0, R11
+ BR runtime·morestack(SB)
+
+// reflectcall: call a function with the given argument list
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
+// we don't have variable-sized frames, so we use a small number
+// of constant-sized-frame functions to encode a few bits of size in the pc.
+// Caution: ugly multiline assembly macros in your future!
+
+#define DISPATCH(NAME,MAXSIZE) \
+ MOVD $MAXSIZE, R31; \
+ CMP R3, R31; \
+ BGT 4(PC); \
+ MOVD $NAME(SB), R12; \
+ MOVD R12, CTR; \
+ BR (CTR)
+// Note: can't just "BR NAME(SB)" - bad inlining results.
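+// For reference, DISPATCH(runtime·call32, 32) expands to:
+//   MOVD $32, R31
+//   CMP R3, R31
+//   BGT 4(PC)                      // frame too large, try the next size
+//   MOVD $runtime·call32(SB), R12
+//   MOVD R12, CTR
+//   BR (CTR)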
+ +TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48 + MOVWZ frameSize+32(FP), R3 + DISPATCH(runtime·call16, 16) + DISPATCH(runtime·call32, 32) + DISPATCH(runtime·call64, 64) + DISPATCH(runtime·call128, 128) + DISPATCH(runtime·call256, 256) + DISPATCH(runtime·call512, 512) + DISPATCH(runtime·call1024, 1024) + DISPATCH(runtime·call2048, 2048) + DISPATCH(runtime·call4096, 4096) + DISPATCH(runtime·call8192, 8192) + DISPATCH(runtime·call16384, 16384) + DISPATCH(runtime·call32768, 32768) + DISPATCH(runtime·call65536, 65536) + DISPATCH(runtime·call131072, 131072) + DISPATCH(runtime·call262144, 262144) + DISPATCH(runtime·call524288, 524288) + DISPATCH(runtime·call1048576, 1048576) + DISPATCH(runtime·call2097152, 2097152) + DISPATCH(runtime·call4194304, 4194304) + DISPATCH(runtime·call8388608, 8388608) + DISPATCH(runtime·call16777216, 16777216) + DISPATCH(runtime·call33554432, 33554432) + DISPATCH(runtime·call67108864, 67108864) + DISPATCH(runtime·call134217728, 134217728) + DISPATCH(runtime·call268435456, 268435456) + DISPATCH(runtime·call536870912, 536870912) + DISPATCH(runtime·call1073741824, 1073741824) + MOVD $runtime·badreflectcall(SB), R12 + MOVD R12, CTR + BR (CTR) + +#define CALLFN(NAME,MAXSIZE) \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ + NO_LOCAL_POINTERS; \ + /* copy arguments to stack */ \ + MOVD stackArgs+16(FP), R3; \ + MOVWZ stackArgsSize+24(FP), R4; \ + MOVD R1, R5; \ + CMP R4, $8; \ + BLT tailsetup; \ + /* copy 8 at a time if possible */ \ + ADD $(FIXED_FRAME-8), R5; \ + SUB $8, R3; \ +top: \ + MOVDU 8(R3), R7; \ + MOVDU R7, 8(R5); \ + SUB $8, R4; \ + CMP R4, $8; \ + BGE top; \ + /* handle remaining bytes */ \ + CMP $0, R4; \ + BEQ callfn; \ + ADD $7, R3; \ + ADD $7, R5; \ + BR tail; \ +tailsetup: \ + CMP $0, R4; \ + BEQ callfn; \ + ADD $(FIXED_FRAME-1), R5; \ + SUB $1, R3; \ +tail: \ + MOVBU 1(R3), R6; \ + MOVBU R6, 1(R5); \ + SUB $1, R4; \ + CMP $0, R4; \ + BGT tail; \ +callfn: \ + /* call function */ \ + MOVD f+8(FP), R11; \ +#ifdef GOOS_aix \ + /* AIX won't trigger a SIGSEGV if R11 = nil */ \ + /* So it manually triggers it */ \ + CMP R0, R11 \ + BNE 2(PC) \ + MOVD R0, 0(R0) \ +#endif \ + MOVD regArgs+40(FP), R20; \ + BL runtime·unspillArgs(SB); \ + MOVD (R11), R12; \ + MOVD R12, CTR; \ + PCDATA $PCDATA_StackMapIndex, $0; \ + BL (CTR); \ +#ifndef GOOS_aix \ + MOVD 24(R1), R2; \ +#endif \ + /* copy return values back */ \ + MOVD regArgs+40(FP), R20; \ + BL runtime·spillArgs(SB); \ + MOVD stackArgsType+0(FP), R7; \ + MOVD stackArgs+16(FP), R3; \ + MOVWZ stackArgsSize+24(FP), R4; \ + MOVWZ stackRetOffset+28(FP), R6; \ + ADD $FIXED_FRAME, R1, R5; \ + ADD R6, R5; \ + ADD R6, R3; \ + SUB R6, R4; \ + BL callRet<>(SB); \ + RET + +// callRet copies return values back at the end of call*. This is a +// separate function so it can allocate stack space for the arguments +// to reflectcallmove. It does not follow the Go ABI; it expects its +// arguments in registers. 
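+// (Here R7 = type, R3 = dst, R5 = src, R4 = size and R20 = regArgs,
+// matching the argument order of reflectcallmove.)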
+TEXT callRet<>(SB), NOSPLIT, $40-0 + NO_LOCAL_POINTERS + MOVD R7, FIXED_FRAME+0(R1) + MOVD R3, FIXED_FRAME+8(R1) + MOVD R5, FIXED_FRAME+16(R1) + MOVD R4, FIXED_FRAME+24(R1) + MOVD R20, FIXED_FRAME+32(R1) + BL runtime·reflectcallmove(SB) + RET + +CALLFN(·call16, 16) +CALLFN(·call32, 32) +CALLFN(·call64, 64) +CALLFN(·call128, 128) +CALLFN(·call256, 256) +CALLFN(·call512, 512) +CALLFN(·call1024, 1024) +CALLFN(·call2048, 2048) +CALLFN(·call4096, 4096) +CALLFN(·call8192, 8192) +CALLFN(·call16384, 16384) +CALLFN(·call32768, 32768) +CALLFN(·call65536, 65536) +CALLFN(·call131072, 131072) +CALLFN(·call262144, 262144) +CALLFN(·call524288, 524288) +CALLFN(·call1048576, 1048576) +CALLFN(·call2097152, 2097152) +CALLFN(·call4194304, 4194304) +CALLFN(·call8388608, 8388608) +CALLFN(·call16777216, 16777216) +CALLFN(·call33554432, 33554432) +CALLFN(·call67108864, 67108864) +CALLFN(·call134217728, 134217728) +CALLFN(·call268435456, 268435456) +CALLFN(·call536870912, 536870912) +CALLFN(·call1073741824, 1073741824) + +TEXT runtime·procyield(SB),NOSPLIT|NOFRAME,$0-4 + MOVW cycles+0(FP), R7 + // POWER does not have a pause/yield instruction equivalent. + // Instead, we can lower the program priority by setting the + // Program Priority Register prior to the wait loop and set it + // back to default afterwards. On Linux, the default priority is + // medium-low. For details, see page 837 of the ISA 3.0. + OR R1, R1, R1 // Set PPR priority to low +again: + SUB $1, R7 + CMP $0, R7 + BNE again + OR R6, R6, R6 // Set PPR priority back to medium-low + RET + +// Save state of caller into g->sched, +// but using fake PC from systemstack_switch. +// Must only be called from functions with no locals ($0) +// or else unwinding from systemstack_switch is incorrect. +// Smashes R31. +TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 + MOVD $runtime·systemstack_switch(SB), R31 + ADD $16, R31 // get past prologue (including r2-setting instructions when they're there) + MOVD R31, (g_sched+gobuf_pc)(g) + MOVD R1, (g_sched+gobuf_sp)(g) + MOVD R0, (g_sched+gobuf_lr)(g) + MOVD R0, (g_sched+gobuf_ret)(g) + // Assert ctxt is zero. See func save. + MOVD (g_sched+gobuf_ctxt)(g), R31 + CMP R0, R31 + BEQ 2(PC) + BL runtime·abort(SB) + RET + +#ifdef GOOS_aix +#define asmcgocallSaveOffset cgoCalleeStackSize + 8 +#else +#define asmcgocallSaveOffset cgoCalleeStackSize +#endif + +// func asmcgocall_no_g(fn, arg unsafe.Pointer) +// Call fn(arg) aligned appropriately for the gcc ABI. +// Called on a system stack, and there may be no g yet (during needm). +TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16 + MOVD fn+0(FP), R3 + MOVD arg+8(FP), R4 + + MOVD R1, R15 + SUB $(asmcgocallSaveOffset+8), R1 + RLDCR $0, R1, $~15, R1 // 16-byte alignment for gcc ABI + MOVD R15, asmcgocallSaveOffset(R1) + + MOVD R0, 0(R1) // clear back chain pointer (TODO can we give it real back trace information?) + + // This is a "global call", so put the global entry point in r12 + MOVD R3, R12 + +#ifdef GO_PPC64X_HAS_FUNCDESC + // Load the real entry address from the first slot of the function descriptor. + MOVD 8(R12), R2 + MOVD (R12), R12 +#endif + MOVD R12, CTR + MOVD R4, R3 // arg in r3 + BL (CTR) + + // C code can clobber R0, so set it back to 0. F27-F31 are + // callee save, so we don't need to recover those. + XOR R0, R0 + + MOVD asmcgocallSaveOffset(R1), R1 // Restore stack pointer. 
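+ // (24(R1) conventionally holds the caller's TOC pointer on non-AIX
+ // targets; reload R2 now that R1 is restored.)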
+#ifndef GOOS_aix + MOVD 24(R1), R2 +#endif + + RET + +// func asmcgocall(fn, arg unsafe.Pointer) int32 +// Call fn(arg) on the scheduler stack, +// aligned appropriately for the gcc ABI. +// See cgocall.go for more details. +TEXT ·asmcgocall(SB),NOSPLIT,$0-20 + MOVD fn+0(FP), R3 + MOVD arg+8(FP), R4 + + MOVD R1, R7 // save original stack pointer + CMP $0, g + BEQ nosave + MOVD g, R5 + + // Figure out if we need to switch to m->g0 stack. + // We get called to create new OS threads too, and those + // come in on the m->g0 stack already. Or we might already + // be on the m->gsignal stack. + MOVD g_m(g), R8 + MOVD m_gsignal(R8), R6 + CMP R6, g + BEQ nosave + MOVD m_g0(R8), R6 + CMP R6, g + BEQ nosave + + BL gosave_systemstack_switch<>(SB) + MOVD R6, g + BL runtime·save_g(SB) + MOVD (g_sched+gobuf_sp)(g), R1 + + // Now on a scheduling stack (a pthread-created stack). +#ifdef GOOS_aix + // Create a fake LR to improve backtrace. + MOVD $runtime·asmcgocall(SB), R6 + MOVD R6, 16(R1) + // AIX also saves one argument on the stack. + SUB $8, R1 +#endif + // Save room for two of our pointers, plus the callee + // save area that lives on the caller stack. + SUB $(asmcgocallSaveOffset+16), R1 + RLDCR $0, R1, $~15, R1 // 16-byte alignment for gcc ABI + MOVD R5, (asmcgocallSaveOffset+8)(R1) // save old g on stack + MOVD (g_stack+stack_hi)(R5), R5 + SUB R7, R5 + MOVD R5, asmcgocallSaveOffset(R1) // save depth in old g stack (can't just save SP, as stack might be copied during a callback) +#ifdef GOOS_aix + MOVD R7, 0(R1) // Save frame pointer to allow manual backtrace with gdb +#else + MOVD R0, 0(R1) // clear back chain pointer (TODO can we give it real back trace information?) +#endif + // This is a "global call", so put the global entry point in r12 + MOVD R3, R12 + +#ifdef GO_PPC64X_HAS_FUNCDESC + // Load the real entry address from the first slot of the function descriptor. + MOVD 8(R12), R2 + MOVD (R12), R12 +#endif + MOVD R12, CTR + MOVD R4, R3 // arg in r3 + BL (CTR) + + // Reinitialise zero value register. + XOR R0, R0 + + // Restore g, stack pointer, toc pointer. + // R3 is errno, so don't touch it + MOVD (asmcgocallSaveOffset+8)(R1), g + MOVD (g_stack+stack_hi)(g), R5 + MOVD asmcgocallSaveOffset(R1), R6 + SUB R6, R5 +#ifndef GOOS_aix + MOVD 24(R5), R2 +#endif + MOVD R5, R1 + BL runtime·save_g(SB) + + MOVW R3, ret+16(FP) + RET + +nosave: + // Running on a system stack, perhaps even without a g. + // Having no g can happen during thread creation or thread teardown. + // This code is like the above sequence but without saving/restoring g + // and without worrying about the stack moving out from under us + // (because we're on a system stack, not a goroutine stack). + // The above code could be used directly if already on a system stack, + // but then the only path through this code would be a rare case. + // Using this code for all "already on system stack" calls exercises it more, + // which should help keep it correct. + + SUB $(asmcgocallSaveOffset+8), R1 + RLDCR $0, R1, $~15, R1 // 16-byte alignment for gcc ABI + MOVD R7, asmcgocallSaveOffset(R1) // Save original stack pointer. + + MOVD R3, R12 // fn +#ifdef GO_PPC64X_HAS_FUNCDESC + // Load the real entry address from the first slot of the function descriptor. + MOVD 8(R12), R2 + MOVD (R12), R12 +#endif + MOVD R12, CTR + MOVD R4, R3 // arg + BL (CTR) + + // Reinitialise zero value register. + XOR R0, R0 + + MOVD asmcgocallSaveOffset(R1), R1 // Restore stack pointer. 
+#ifndef GOOS_aix + MOVD 24(R1), R2 +#endif + MOVW R3, ret+16(FP) + RET + +// func cgocallback(fn, frame unsafe.Pointer, ctxt uintptr) +// See cgocall.go for more details. +TEXT ·cgocallback(SB),NOSPLIT,$24-24 + NO_LOCAL_POINTERS + + // Skip cgocallbackg, just dropm when fn is nil, and frame is the saved g. + // It is used to dropm while thread is exiting. + MOVD fn+0(FP), R5 + CMP R5, $0 + BNE loadg + // Restore the g from frame. + MOVD frame+8(FP), g + BR dropm + +loadg: + // Load m and g from thread-local storage. +#ifndef GOOS_openbsd + MOVBZ runtime·iscgo(SB), R3 + CMP R3, $0 + BEQ nocgo +#endif + BL runtime·load_g(SB) +nocgo: + + // If g is nil, Go did not create the current thread, + // or if this thread never called into Go on pthread platforms. + // Call needm to obtain one for temporary use. + // In this case, we're running on the thread stack, so there's + // lots of space, but the linker doesn't know. Hide the call from + // the linker analysis by using an indirect call. + CMP g, $0 + BEQ needm + + MOVD g_m(g), R8 + MOVD R8, savedm-8(SP) + BR havem + +needm: + MOVD g, savedm-8(SP) // g is zero, so is m. + MOVD $runtime·needAndBindM(SB), R12 + MOVD R12, CTR + BL (CTR) + + // Set m->sched.sp = SP, so that if a panic happens + // during the function we are about to execute, it will + // have a valid SP to run on the g0 stack. + // The next few lines (after the havem label) + // will save this SP onto the stack and then write + // the same SP back to m->sched.sp. That seems redundant, + // but if an unrecovered panic happens, unwindm will + // restore the g->sched.sp from the stack location + // and then systemstack will try to use it. If we don't set it here, + // that restored SP will be uninitialized (typically 0) and + // will not be usable. + MOVD g_m(g), R8 + MOVD m_g0(R8), R3 + MOVD R1, (g_sched+gobuf_sp)(R3) + +havem: + // Now there's a valid m, and we're running on its m->g0. + // Save current m->g0->sched.sp on stack and then set it to SP. + // Save current sp in m->g0->sched.sp in preparation for + // switch back to m->curg stack. + // NOTE: unwindm knows that the saved g->sched.sp is at 8(R1) aka savedsp-16(SP). + MOVD m_g0(R8), R3 + MOVD (g_sched+gobuf_sp)(R3), R4 + MOVD R4, savedsp-24(SP) // must match frame size + MOVD R1, (g_sched+gobuf_sp)(R3) + + // Switch to m->curg stack and call runtime.cgocallbackg. + // Because we are taking over the execution of m->curg + // but *not* resuming what had been running, we need to + // save that information (m->curg->sched) so we can restore it. + // We can restore m->curg->sched.sp easily, because calling + // runtime.cgocallbackg leaves SP unchanged upon return. + // To save m->curg->sched.pc, we push it onto the curg stack and + // open a frame the same size as cgocallback's g0 frame. + // Once we switch to the curg stack, the pushed PC will appear + // to be the return PC of cgocallback, so that the traceback + // will seamlessly trace back into the earlier calls. + MOVD m_curg(R8), g + BL runtime·save_g(SB) + MOVD (g_sched+gobuf_sp)(g), R4 // prepare stack as R4 + MOVD (g_sched+gobuf_pc)(g), R5 + MOVD R5, -(24+FIXED_FRAME)(R4) // "saved LR"; must match frame size + // Gather our arguments into registers. 
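+ // (FP still addresses cgocallback's g0 frame, so fn, frame and ctxt are
+ // read before R1 switches to the curg stack below.)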
+ MOVD fn+0(FP), R5 + MOVD frame+8(FP), R6 + MOVD ctxt+16(FP), R7 + MOVD $-(24+FIXED_FRAME)(R4), R1 // switch stack; must match frame size + MOVD R5, FIXED_FRAME+0(R1) + MOVD R6, FIXED_FRAME+8(R1) + MOVD R7, FIXED_FRAME+16(R1) + + MOVD $runtime·cgocallbackg(SB), R12 + MOVD R12, CTR + CALL (CTR) // indirect call to bypass nosplit check. We're on a different stack now. + + // Restore g->sched (== m->curg->sched) from saved values. + MOVD 0(R1), R5 + MOVD R5, (g_sched+gobuf_pc)(g) + MOVD $(24+FIXED_FRAME)(R1), R4 // must match frame size + MOVD R4, (g_sched+gobuf_sp)(g) + + // Switch back to m->g0's stack and restore m->g0->sched.sp. + // (Unlike m->curg, the g0 goroutine never uses sched.pc, + // so we do not have to restore it.) + MOVD g_m(g), R8 + MOVD m_g0(R8), g + BL runtime·save_g(SB) + MOVD (g_sched+gobuf_sp)(g), R1 + MOVD savedsp-24(SP), R4 // must match frame size + MOVD R4, (g_sched+gobuf_sp)(g) + + // If the m on entry was nil, we called needm above to borrow an m, + // 1. for the duration of the call on non-pthread platforms, + // 2. or the duration of the C thread alive on pthread platforms. + // If the m on entry wasn't nil, + // 1. the thread might be a Go thread, + // 2. or it wasn't the first call from a C thread on pthread platforms, + // since then we skip dropm to reuse the m in the first call. + MOVD savedm-8(SP), R6 + CMP R6, $0 + BNE droppedm + + // Skip dropm to reuse it in the next call, when a pthread key has been created. + MOVD _cgo_pthread_key_created(SB), R6 + // It means cgo is disabled when _cgo_pthread_key_created is a nil pointer, need dropm. + CMP R6, $0 + BEQ dropm + MOVD (R6), R6 + CMP R6, $0 + BNE droppedm + +dropm: + MOVD $runtime·dropm(SB), R12 + MOVD R12, CTR + BL (CTR) +droppedm: + + // Done! + RET + +// void setg(G*); set g. for use by needm. +TEXT runtime·setg(SB), NOSPLIT, $0-8 + MOVD gg+0(FP), g + // This only happens if iscgo, so jump straight to save_g + BL runtime·save_g(SB) + RET + +#ifdef GO_PPC64X_HAS_FUNCDESC +DEFINE_PPC64X_FUNCDESC(setg_gcc<>, _setg_gcc<>) +TEXT _setg_gcc<>(SB),NOSPLIT|NOFRAME,$0-0 +#else +TEXT setg_gcc<>(SB),NOSPLIT|NOFRAME,$0-0 +#endif + // The standard prologue clobbers R31, which is callee-save in + // the C ABI, so we have to use $-8-0 and save LR ourselves. + MOVD LR, R4 + // Also save g and R31, since they're callee-save in C ABI + MOVD R31, R5 + MOVD g, R6 + + MOVD R3, g + BL runtime·save_g(SB) + + MOVD R6, g + MOVD R5, R31 + MOVD R4, LR + RET + +TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0 + MOVW (R0), R0 + UNDEF + +#define TBR 268 + +// int64 runtime·cputicks(void) +TEXT runtime·cputicks(SB),NOSPLIT,$0-8 + MOVD SPR(TBR), R3 + MOVD R3, ret+0(FP) + RET + +// spillArgs stores return values from registers to a *internal/abi.RegArgs in R20. +TEXT runtime·spillArgs(SB),NOSPLIT,$0-0 + MOVD R3, 0(R20) + MOVD R4, 8(R20) + MOVD R5, 16(R20) + MOVD R6, 24(R20) + MOVD R7, 32(R20) + MOVD R8, 40(R20) + MOVD R9, 48(R20) + MOVD R10, 56(R20) + MOVD R14, 64(R20) + MOVD R15, 72(R20) + MOVD R16, 80(R20) + MOVD R17, 88(R20) + FMOVD F1, 96(R20) + FMOVD F2, 104(R20) + FMOVD F3, 112(R20) + FMOVD F4, 120(R20) + FMOVD F5, 128(R20) + FMOVD F6, 136(R20) + FMOVD F7, 144(R20) + FMOVD F8, 152(R20) + FMOVD F9, 160(R20) + FMOVD F10, 168(R20) + FMOVD F11, 176(R20) + FMOVD F12, 184(R20) + RET + +// unspillArgs loads args into registers from a *internal/abi.RegArgs in R20. 
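+// (The register set mirrors spillArgs above: integer arguments in R3-R10
+// and R14-R17, floating point arguments in F1-F12.)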
+TEXT runtime·unspillArgs(SB),NOSPLIT,$0-0
+ MOVD 0(R20), R3
+ MOVD 8(R20), R4
+ MOVD 16(R20), R5
+ MOVD 24(R20), R6
+ MOVD 32(R20), R7
+ MOVD 40(R20), R8
+ MOVD 48(R20), R9
+ MOVD 56(R20), R10
+ MOVD 64(R20), R14
+ MOVD 72(R20), R15
+ MOVD 80(R20), R16
+ MOVD 88(R20), R17
+ FMOVD 96(R20), F1
+ FMOVD 104(R20), F2
+ FMOVD 112(R20), F3
+ FMOVD 120(R20), F4
+ FMOVD 128(R20), F5
+ FMOVD 136(R20), F6
+ FMOVD 144(R20), F7
+ FMOVD 152(R20), F8
+ FMOVD 160(R20), F9
+ FMOVD 168(R20), F10
+ FMOVD 176(R20), F11
+ FMOVD 184(R20), F12
+ RET
+
+// AES hashing not implemented for ppc64
+TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32
+ JMP runtime·memhashFallback(SB)
+TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24
+ JMP runtime·strhashFallback(SB)
+TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24
+ JMP runtime·memhash32Fallback(SB)
+TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
+ JMP runtime·memhash64Fallback(SB)
+
+TEXT runtime·return0(SB), NOSPLIT, $0
+ MOVW $0, R3
+ RET
+
+// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
+// Must obey the gcc calling convention.
+#ifdef GOOS_aix
+// On AIX, _cgo_topofstack is defined in runtime/cgo, because it must
+// be a longcall in order to prevent trampolines from ld.
+TEXT __cgo_topofstack(SB),NOSPLIT|NOFRAME,$0
+#else
+TEXT _cgo_topofstack(SB),NOSPLIT|NOFRAME,$0
+#endif
+ // g (R30) and R31 are callee-save in the C ABI, so save them
+ MOVD g, R4
+ MOVD R31, R5
+ MOVD LR, R6
+
+ BL runtime·load_g(SB) // clobbers g (R30), R31
+ MOVD g_m(g), R3
+ MOVD m_curg(R3), R3
+ MOVD (g_stack+stack_hi)(R3), R3
+
+ MOVD R4, g
+ MOVD R5, R31
+ MOVD R6, LR
+ RET
+
+// The top-most function running on a goroutine
+// returns to goexit+PCQuantum.
+//
+// When dynamically linking Go, it can be returned to from a function
+// implemented in a different module and so needs to reload the TOC pointer
+// from the stack (although this function declares that it does not set up a
+// frame, newproc1 does in fact allocate one for goexit and saves the TOC
+// pointer in the correct place).
+// goexit+_PCQuantum is halfway through the usual global entry point prologue
+// that derives r2 from r12 which is a bit silly, but not harmful.
+TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0
+ MOVD 24(R1), R2
+ BL runtime·goexit1(SB) // does not return
+ // traceback from goexit1 must hit code range of goexit
+ MOVD R0, R0 // NOP
+
+// prepGoExitFrame saves the current TOC pointer (i.e. the TOC pointer for the
+// module containing runtime) to the frame that goexit will execute in when
+// the goroutine exits. It's implemented in assembly mainly because that's the
+// easiest way to get access to R2.
+TEXT runtime·prepGoExitFrame(SB),NOSPLIT,$0-8
+ MOVD sp+0(FP), R3
+ MOVD R2, 24(R3)
+ RET
+
+TEXT runtime·addmoduledata(SB),NOSPLIT|NOFRAME,$0-0
+ ADD $-8, R1
+ MOVD R31, 0(R1)
+ MOVD runtime·lastmoduledatap(SB), R4
+ MOVD R3, moduledata_next(R4)
+ MOVD R3, runtime·lastmoduledatap(SB)
+ MOVD 0(R1), R31
+ ADD $8, R1
+ RET
+
+TEXT ·checkASM(SB),NOSPLIT,$0-1
+ MOVW $1, R3
+ MOVB R3, ret+0(FP)
+ RET
+
+// gcWriteBarrier informs the GC about heap pointer writes.
+//
+// gcWriteBarrier does NOT follow the Go ABI. It accepts the
+// number of bytes of buffer needed in R29, and returns a pointer
+// to the buffer space in R29.
+// It clobbers condition codes.
+// It does not clobber R0 through R17 (except special registers),
+// but may clobber any other register, *including* R31.
+TEXT gcWriteBarrier<>(SB),NOSPLIT,$120
+ // The standard prologue clobbers R31.
+ // We use R18, R19, and R31 as scratch registers. +retry: + MOVD g_m(g), R18 + MOVD m_p(R18), R18 + MOVD (p_wbBuf+wbBuf_next)(R18), R19 + MOVD (p_wbBuf+wbBuf_end)(R18), R31 + // Increment wbBuf.next position. + ADD R29, R19 + // Is the buffer full? + CMPU R31, R19 + BLT flush + // Commit to the larger buffer. + MOVD R19, (p_wbBuf+wbBuf_next)(R18) + // Make return value (the original next position) + SUB R29, R19, R29 + RET + +flush: + // Save registers R0 through R15 since these were not saved by the caller. + // We don't save all registers on ppc64 because it takes too much space. + MOVD R20, (FIXED_FRAME+0)(R1) + MOVD R21, (FIXED_FRAME+8)(R1) + // R0 is always 0, so no need to spill. + // R1 is SP. + // R2 is SB. + MOVD R3, (FIXED_FRAME+16)(R1) + MOVD R4, (FIXED_FRAME+24)(R1) + MOVD R5, (FIXED_FRAME+32)(R1) + MOVD R6, (FIXED_FRAME+40)(R1) + MOVD R7, (FIXED_FRAME+48)(R1) + MOVD R8, (FIXED_FRAME+56)(R1) + MOVD R9, (FIXED_FRAME+64)(R1) + MOVD R10, (FIXED_FRAME+72)(R1) + // R11, R12 may be clobbered by external-linker-inserted trampoline + // R13 is REGTLS + MOVD R14, (FIXED_FRAME+80)(R1) + MOVD R15, (FIXED_FRAME+88)(R1) + MOVD R16, (FIXED_FRAME+96)(R1) + MOVD R17, (FIXED_FRAME+104)(R1) + MOVD R29, (FIXED_FRAME+112)(R1) + + CALL runtime·wbBufFlush(SB) + + MOVD (FIXED_FRAME+0)(R1), R20 + MOVD (FIXED_FRAME+8)(R1), R21 + MOVD (FIXED_FRAME+16)(R1), R3 + MOVD (FIXED_FRAME+24)(R1), R4 + MOVD (FIXED_FRAME+32)(R1), R5 + MOVD (FIXED_FRAME+40)(R1), R6 + MOVD (FIXED_FRAME+48)(R1), R7 + MOVD (FIXED_FRAME+56)(R1), R8 + MOVD (FIXED_FRAME+64)(R1), R9 + MOVD (FIXED_FRAME+72)(R1), R10 + MOVD (FIXED_FRAME+80)(R1), R14 + MOVD (FIXED_FRAME+88)(R1), R15 + MOVD (FIXED_FRAME+96)(R1), R16 + MOVD (FIXED_FRAME+104)(R1), R17 + MOVD (FIXED_FRAME+112)(R1), R29 + JMP retry + +TEXT runtime·gcWriteBarrier1(SB),NOSPLIT,$0 + MOVD $8, R29 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier2(SB),NOSPLIT,$0 + MOVD $16, R29 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier3(SB),NOSPLIT,$0 + MOVD $24, R29 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier4(SB),NOSPLIT,$0 + MOVD $32, R29 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier5(SB),NOSPLIT,$0 + MOVD $40, R29 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier6(SB),NOSPLIT,$0 + MOVD $48, R29 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier7(SB),NOSPLIT,$0 + MOVD $56, R29 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0 + MOVD $64, R29 + JMP gcWriteBarrier<>(SB) + +DATA debugCallFrameTooLarge<>+0x00(SB)/20, $"call frame too large" +GLOBL debugCallFrameTooLarge<>(SB), RODATA, $20 // Size duplicated below + +// debugCallV2 is the entry point for debugger-injected function +// calls on running goroutines. It informs the runtime that a +// debug call has been injected and creates a call frame for the +// debugger to fill in. +// +// To inject a function call, a debugger should: +// 1. Check that the goroutine is in state _Grunning and that +// there are at least 320 bytes free on the stack. +// 2. Set SP as SP-32. +// 3. Store the current LR in (SP) (using the SP after step 2). +// 4. Store the current PC in the LR register. +// 5. Write the desired argument frame size at SP-32 +// 6. Save all machine registers (including flags and floating point registers) +// so they can be restored later by the debugger. +// 7. Set the PC to debugCallV2 and resume execution. +// +// If the goroutine is in state _Grunnable, then it's not generally +// safe to inject a call because it may return out via other runtime +// operations. 
Instead, the debugger should unwind the stack to find +// the return to non-runtime code, add a temporary breakpoint there, +// and inject the call once that breakpoint is hit. +// +// If the goroutine is in any other state, it's not safe to inject a call. +// +// This function communicates back to the debugger by setting R20 and +// invoking TW to raise a breakpoint signal. Note that the signal PC of +// the signal triggered by the TW instruction is the PC where the signal +// is trapped, not the next PC, so to resume execution, the debugger needs +// to set the signal PC to PC+4. See the comments in the implementation for +// the protocol the debugger is expected to follow. InjectDebugCall in the +// runtime tests demonstrates this protocol. +// The debugger must ensure that any pointers passed to the function +// obey escape analysis requirements. Specifically, it must not pass +// a stack pointer to an escaping argument. debugCallV2 cannot check +// this invariant. +// +// This is ABIInternal because Go code injects its PC directly into new +// goroutine stacks. +#ifdef GOARCH_ppc64le +TEXT runtime·debugCallV2(SB), NOSPLIT|NOFRAME, $0-0 + // save scratch register R31 first + MOVD R31, -184(R1) + MOVD 0(R1), R31 + // save caller LR + MOVD R31, -304(R1) + MOVD -32(R1), R31 + // save argument frame size + MOVD R31, -192(R1) + MOVD LR, R31 + MOVD R31, -320(R1) + ADD $-320, R1 + // save all registers that can contain pointers + // and the CR register + MOVW CR, R31 + MOVD R31, 8(R1) + MOVD R2, 24(R1) + MOVD R3, 56(R1) + MOVD R4, 64(R1) + MOVD R5, 72(R1) + MOVD R6, 80(R1) + MOVD R7, 88(R1) + MOVD R8, 96(R1) + MOVD R9, 104(R1) + MOVD R10, 112(R1) + MOVD R11, 120(R1) + MOVD R12, 144(R1) + MOVD R13, 152(R1) + MOVD R14, 160(R1) + MOVD R15, 168(R1) + MOVD R16, 176(R1) + MOVD R17, 184(R1) + MOVD R18, 192(R1) + MOVD R19, 200(R1) + MOVD R20, 208(R1) + MOVD R21, 216(R1) + MOVD R22, 224(R1) + MOVD R23, 232(R1) + MOVD R24, 240(R1) + MOVD R25, 248(R1) + MOVD R26, 256(R1) + MOVD R27, 264(R1) + MOVD R28, 272(R1) + MOVD R29, 280(R1) + MOVD g, 288(R1) + MOVD LR, R31 + MOVD R31, 32(R1) + CALL runtime·debugCallCheck(SB) + MOVD 40(R1), R22 + XOR R0, R0 + CMP R22, R0 + BEQ good + MOVD 48(R1), R22 + MOVD $8, R20 + TW $31, R0, R0 + + BR restore + +good: +#define DEBUG_CALL_DISPATCH(NAME,MAXSIZE) \ + MOVD $MAXSIZE, R23; \ + CMP R26, R23; \ + BGT 5(PC); \ + MOVD $NAME(SB), R26; \ + MOVD R26, 32(R1); \ + CALL runtime·debugCallWrap(SB); \ + BR restore + + // the argument frame size + MOVD 128(R1), R26 + + DEBUG_CALL_DISPATCH(debugCall32<>, 32) + DEBUG_CALL_DISPATCH(debugCall64<>, 64) + DEBUG_CALL_DISPATCH(debugCall128<>, 128) + DEBUG_CALL_DISPATCH(debugCall256<>, 256) + DEBUG_CALL_DISPATCH(debugCall512<>, 512) + DEBUG_CALL_DISPATCH(debugCall1024<>, 1024) + DEBUG_CALL_DISPATCH(debugCall2048<>, 2048) + DEBUG_CALL_DISPATCH(debugCall4096<>, 4096) + DEBUG_CALL_DISPATCH(debugCall8192<>, 8192) + DEBUG_CALL_DISPATCH(debugCall16384<>, 16384) + DEBUG_CALL_DISPATCH(debugCall32768<>, 32768) + DEBUG_CALL_DISPATCH(debugCall65536<>, 65536) + // The frame size is too large. Report the error. 
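+ // (Protocol sketch: R20 = 8 signals an error to the debugger, with the
+ // message pointer at 32(R1) and its length at 40(R1).)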
+ MOVD $debugCallFrameTooLarge<>(SB), R22 + MOVD R22, 32(R1) + MOVD $20, R22 + // length of debugCallFrameTooLarge string + MOVD R22, 40(R1) + MOVD $8, R20 + TW $31, R0, R0 + BR restore +restore: + MOVD $16, R20 + TW $31, R0, R0 + // restore all registers that can contain + // pointers including CR + MOVD 8(R1), R31 + MOVW R31, CR + MOVD 24(R1), R2 + MOVD 56(R1), R3 + MOVD 64(R1), R4 + MOVD 72(R1), R5 + MOVD 80(R1), R6 + MOVD 88(R1), R7 + MOVD 96(R1), R8 + MOVD 104(R1), R9 + MOVD 112(R1), R10 + MOVD 120(R1), R11 + MOVD 144(R1), R12 + MOVD 152(R1), R13 + MOVD 160(R1), R14 + MOVD 168(R1), R15 + MOVD 176(R1), R16 + MOVD 184(R1), R17 + MOVD 192(R1), R18 + MOVD 200(R1), R19 + MOVD 208(R1), R20 + MOVD 216(R1), R21 + MOVD 224(R1), R22 + MOVD 232(R1), R23 + MOVD 240(R1), R24 + MOVD 248(R1), R25 + MOVD 256(R1), R26 + MOVD 264(R1), R27 + MOVD 272(R1), R28 + MOVD 280(R1), R29 + MOVD 288(R1), g + MOVD 16(R1), R31 + // restore old LR + MOVD R31, LR + // restore caller PC + MOVD 0(R1), CTR + MOVD 136(R1), R31 + // Add 32 bytes more to compensate for SP change in saveSigContext + ADD $352, R1 + JMP (CTR) +#endif +#define DEBUG_CALL_FN(NAME,MAXSIZE) \ +TEXT NAME(SB),WRAPPER,$MAXSIZE-0; \ + NO_LOCAL_POINTERS; \ + MOVD $0, R20; \ + TW $31, R0, R0 \ + MOVD $1, R20; \ + TW $31, R0, R0 \ + RET +DEBUG_CALL_FN(debugCall32<>, 32) +DEBUG_CALL_FN(debugCall64<>, 64) +DEBUG_CALL_FN(debugCall128<>, 128) +DEBUG_CALL_FN(debugCall256<>, 256) +DEBUG_CALL_FN(debugCall512<>, 512) +DEBUG_CALL_FN(debugCall1024<>, 1024) +DEBUG_CALL_FN(debugCall2048<>, 2048) +DEBUG_CALL_FN(debugCall4096<>, 4096) +DEBUG_CALL_FN(debugCall8192<>, 8192) +DEBUG_CALL_FN(debugCall16384<>, 16384) +DEBUG_CALL_FN(debugCall32768<>, 32768) +DEBUG_CALL_FN(debugCall65536<>, 65536) + +#ifdef GOARCH_ppc64le +// func debugCallPanicked(val interface{}) +TEXT runtime·debugCallPanicked(SB),NOSPLIT,$32-16 + // Copy the panic value to the top of stack at SP+32. + MOVD val_type+0(FP), R31 + MOVD R31, 32(R1) + MOVD val_data+8(FP), R31 + MOVD R31, 40(R1) + MOVD $2, R20 + TW $31, R0, R0 + RET +#endif +// Note: these functions use a special calling convention to save generated code space. +// Arguments are passed in registers, but the space for those arguments are allocated +// in the caller's stack frame. These stubs write the args into that stack space and +// then tail call to the corresponding runtime handler. +// The tail call makes these stubs disappear in backtraces. 
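+// (For example, panicSliceAlen receives x and y in R4 and R5 and shuffles
+// them into R3 and R4, the argument registers of goPanicSliceAlen; stubs
+// whose values already sit in R3/R4, such as panicIndex, tail call directly.)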
+TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicIndex(SB)
+TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicIndexU(SB)
+TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
+ MOVD R4, R3
+ MOVD R5, R4
+ JMP runtime·goPanicSliceAlen(SB)
+TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
+ MOVD R4, R3
+ MOVD R5, R4
+ JMP runtime·goPanicSliceAlenU(SB)
+TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
+ MOVD R4, R3
+ MOVD R5, R4
+ JMP runtime·goPanicSliceAcap(SB)
+TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
+ MOVD R4, R3
+ MOVD R5, R4
+ JMP runtime·goPanicSliceAcapU(SB)
+TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSliceB(SB)
+TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSliceBU(SB)
+TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
+ MOVD R5, R3
+ MOVD R6, R4
+ JMP runtime·goPanicSlice3Alen(SB)
+TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
+ MOVD R5, R3
+ MOVD R6, R4
+ JMP runtime·goPanicSlice3AlenU(SB)
+TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
+ MOVD R5, R3
+ MOVD R6, R4
+ JMP runtime·goPanicSlice3Acap(SB)
+TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
+ MOVD R5, R3
+ MOVD R6, R4
+ JMP runtime·goPanicSlice3AcapU(SB)
+TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
+ MOVD R4, R3
+ MOVD R5, R4
+ JMP runtime·goPanicSlice3B(SB)
+TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
+ MOVD R4, R3
+ MOVD R5, R4
+ JMP runtime·goPanicSlice3BU(SB)
+TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSlice3C(SB)
+TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSlice3CU(SB)
+TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
+ MOVD R5, R3
+ MOVD R6, R4
+ JMP runtime·goPanicSliceConvert(SB)
+
+// These functions are used when internally linking cgo with external
+// objects compiled with -Os on gcc. They reduce prologue/epilogue
+// size by deferring preservation of callee-save registers to a shared
+// function. These are defined in PPC64 ELFv2 2.3.3 (but also present
+// in ELFv1).
+//
+// These appear unused, but the linker will redirect calls to functions
+// like _savegpr0_14 or _restgpr1_14 to runtime.elf_savegpr0 or
+// runtime.elf_restgpr1 with an appropriate offset based on the number
+// of register operations required when linking external objects which
+// make these calls. For GPR/FPR saves, the minimum register value is
+// 14, for VR it is 20.
+//
+// These are only used when linking such cgo code internally. Note, R12
+// and R0 may be used in different ways than regular ELF compliant
+// functions.
+TEXT runtime·elf_savegpr0(SB),NOSPLIT|NOFRAME,$0
+ // R0 holds the LR of the caller's caller, R1 holds save location
+ MOVD R14, -144(R1)
+ MOVD R15, -136(R1)
+ MOVD R16, -128(R1)
+ MOVD R17, -120(R1)
+ MOVD R18, -112(R1)
+ MOVD R19, -104(R1)
+ MOVD R20, -96(R1)
+ MOVD R21, -88(R1)
+ MOVD R22, -80(R1)
+ MOVD R23, -72(R1)
+ MOVD R24, -64(R1)
+ MOVD R25, -56(R1)
+ MOVD R26, -48(R1)
+ MOVD R27, -40(R1)
+ MOVD R28, -32(R1)
+ MOVD R29, -24(R1)
+ MOVD g, -16(R1)
+ MOVD R31, -8(R1)
+ MOVD R0, 16(R1)
+ RET
+TEXT runtime·elf_restgpr0(SB),NOSPLIT|NOFRAME,$0
+ // R1 holds save location.
This returns to the LR saved on stack (bypassing the caller) + MOVD -144(R1), R14 + MOVD -136(R1), R15 + MOVD -128(R1), R16 + MOVD -120(R1), R17 + MOVD -112(R1), R18 + MOVD -104(R1), R19 + MOVD -96(R1), R20 + MOVD -88(R1), R21 + MOVD -80(R1), R22 + MOVD -72(R1), R23 + MOVD -64(R1), R24 + MOVD -56(R1), R25 + MOVD -48(R1), R26 + MOVD -40(R1), R27 + MOVD -32(R1), R28 + MOVD -24(R1), R29 + MOVD -16(R1), g + MOVD -8(R1), R31 + MOVD 16(R1), R0 // Load and return to saved LR + MOVD R0, LR + RET +TEXT runtime·elf_savegpr1(SB),NOSPLIT|NOFRAME,$0 + // R12 holds the save location + MOVD R14, -144(R12) + MOVD R15, -136(R12) + MOVD R16, -128(R12) + MOVD R17, -120(R12) + MOVD R18, -112(R12) + MOVD R19, -104(R12) + MOVD R20, -96(R12) + MOVD R21, -88(R12) + MOVD R22, -80(R12) + MOVD R23, -72(R12) + MOVD R24, -64(R12) + MOVD R25, -56(R12) + MOVD R26, -48(R12) + MOVD R27, -40(R12) + MOVD R28, -32(R12) + MOVD R29, -24(R12) + MOVD g, -16(R12) + MOVD R31, -8(R12) + RET +TEXT runtime·elf_restgpr1(SB),NOSPLIT|NOFRAME,$0 + // R12 holds the save location + MOVD -144(R12), R14 + MOVD -136(R12), R15 + MOVD -128(R12), R16 + MOVD -120(R12), R17 + MOVD -112(R12), R18 + MOVD -104(R12), R19 + MOVD -96(R12), R20 + MOVD -88(R12), R21 + MOVD -80(R12), R22 + MOVD -72(R12), R23 + MOVD -64(R12), R24 + MOVD -56(R12), R25 + MOVD -48(R12), R26 + MOVD -40(R12), R27 + MOVD -32(R12), R28 + MOVD -24(R12), R29 + MOVD -16(R12), g + MOVD -8(R12), R31 + RET +TEXT runtime·elf_savefpr(SB),NOSPLIT|NOFRAME,$0 + // R0 holds the LR of the caller's caller, R1 holds save location + FMOVD F14, -144(R1) + FMOVD F15, -136(R1) + FMOVD F16, -128(R1) + FMOVD F17, -120(R1) + FMOVD F18, -112(R1) + FMOVD F19, -104(R1) + FMOVD F20, -96(R1) + FMOVD F21, -88(R1) + FMOVD F22, -80(R1) + FMOVD F23, -72(R1) + FMOVD F24, -64(R1) + FMOVD F25, -56(R1) + FMOVD F26, -48(R1) + FMOVD F27, -40(R1) + FMOVD F28, -32(R1) + FMOVD F29, -24(R1) + FMOVD F30, -16(R1) + FMOVD F31, -8(R1) + MOVD R0, 16(R1) + RET +TEXT runtime·elf_restfpr(SB),NOSPLIT|NOFRAME,$0 + // R1 holds save location. 
This returns to the LR saved on stack (bypassing the caller) + FMOVD -144(R1), F14 + FMOVD -136(R1), F15 + FMOVD -128(R1), F16 + FMOVD -120(R1), F17 + FMOVD -112(R1), F18 + FMOVD -104(R1), F19 + FMOVD -96(R1), F20 + FMOVD -88(R1), F21 + FMOVD -80(R1), F22 + FMOVD -72(R1), F23 + FMOVD -64(R1), F24 + FMOVD -56(R1), F25 + FMOVD -48(R1), F26 + FMOVD -40(R1), F27 + FMOVD -32(R1), F28 + FMOVD -24(R1), F29 + FMOVD -16(R1), F30 + FMOVD -8(R1), F31 + MOVD 16(R1), R0 // Load and return to saved LR + MOVD R0, LR + RET +TEXT runtime·elf_savevr(SB),NOSPLIT|NOFRAME,$0 + // R0 holds the save location, R12 is clobbered + MOVD $-192, R12 + STVX V20, (R0+R12) + MOVD $-176, R12 + STVX V21, (R0+R12) + MOVD $-160, R12 + STVX V22, (R0+R12) + MOVD $-144, R12 + STVX V23, (R0+R12) + MOVD $-128, R12 + STVX V24, (R0+R12) + MOVD $-112, R12 + STVX V25, (R0+R12) + MOVD $-96, R12 + STVX V26, (R0+R12) + MOVD $-80, R12 + STVX V27, (R0+R12) + MOVD $-64, R12 + STVX V28, (R0+R12) + MOVD $-48, R12 + STVX V29, (R0+R12) + MOVD $-32, R12 + STVX V30, (R0+R12) + MOVD $-16, R12 + STVX V31, (R0+R12) + RET +TEXT runtime·elf_restvr(SB),NOSPLIT|NOFRAME,$0 + // R0 holds the save location, R12 is clobbered + MOVD $-192, R12 + LVX (R0+R12), V20 + MOVD $-176, R12 + LVX (R0+R12), V21 + MOVD $-160, R12 + LVX (R0+R12), V22 + MOVD $-144, R12 + LVX (R0+R12), V23 + MOVD $-128, R12 + LVX (R0+R12), V24 + MOVD $-112, R12 + LVX (R0+R12), V25 + MOVD $-96, R12 + LVX (R0+R12), V26 + MOVD $-80, R12 + LVX (R0+R12), V27 + MOVD $-64, R12 + LVX (R0+R12), V28 + MOVD $-48, R12 + LVX (R0+R12), V29 + MOVD $-32, R12 + LVX (R0+R12), V30 + MOVD $-16, R12 + LVX (R0+R12), V31 + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/asm_riscv64.s b/platform/dbops/binaries/go/go/src/runtime/asm_riscv64.s new file mode 100644 index 0000000000000000000000000000000000000000..491635b1cf1a3de3eb941e1e99ec592ed453635b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asm_riscv64.s @@ -0,0 +1,963 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "funcdata.h" +#include "textflag.h" + +// func rt0_go() +TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0 + // X2 = stack; A0 = argc; A1 = argv + SUB $24, X2 + MOV A0, 8(X2) // argc + MOV A1, 16(X2) // argv + + // create istack out of the given (operating system) stack. + // _cgo_init may update stackguard. + MOV $runtime·g0(SB), g + MOV $(-64*1024), T0 + ADD T0, X2, T1 + MOV T1, g_stackguard0(g) + MOV T1, g_stackguard1(g) + MOV T1, (g_stack+stack_lo)(g) + MOV X2, (g_stack+stack_hi)(g) + + // if there is a _cgo_init, call it using the gcc ABI. 
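+ // _cgo_init is non-nil only when cgo is linked in; it performs the
+ // C-side setup and receives setg_gcc (defined below) so C code can
+ // install the current g. A Go-level sketch of the call sequence that
+ // follows (hedged; the exact C prototype lives in runtime/cgo):
+ //
+ //	if _cgo_init != nil {
+ //		_cgo_init(g0, setg_gcc, unused, unused) // args in A0-A3
+ //	}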
+ MOV _cgo_init(SB), T0 + BEQ T0, ZERO, nocgo + + MOV ZERO, A3 // arg 3: not used + MOV ZERO, A2 // arg 2: not used + MOV $setg_gcc<>(SB), A1 // arg 1: setg + MOV g, A0 // arg 0: G + JALR RA, T0 + +nocgo: + // update stackguard after _cgo_init + MOV (g_stack+stack_lo)(g), T0 + ADD $const_stackGuard, T0 + MOV T0, g_stackguard0(g) + MOV T0, g_stackguard1(g) + + // set the per-goroutine and per-mach "registers" + MOV $runtime·m0(SB), T0 + + // save m->g0 = g0 + MOV g, m_g0(T0) + // save m0 to g0->m + MOV T0, g_m(g) + + CALL runtime·check(SB) + + // args are already prepared + CALL runtime·args(SB) + CALL runtime·osinit(SB) + CALL runtime·schedinit(SB) + + // create a new goroutine to start program + MOV $runtime·mainPC(SB), T0 // entry + SUB $16, X2 + MOV T0, 8(X2) + MOV ZERO, 0(X2) + CALL runtime·newproc(SB) + ADD $16, X2 + + // start this M + CALL runtime·mstart(SB) + + WORD $0 // crash if reached + RET + +TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0 + CALL runtime·mstart0(SB) + RET // not reached + +// void setg_gcc(G*); set g called from gcc with g in A0 +TEXT setg_gcc<>(SB),NOSPLIT,$0-0 + MOV A0, g + CALL runtime·save_g(SB) + RET + +// func cputicks() int64 +TEXT runtime·cputicks(SB),NOSPLIT,$0-8 + // RDTIME to emulate cpu ticks + // RDCYCLE reads counter that is per HART(core) based + // according to the riscv manual, see issue 46737 + RDTIME A0 + MOV A0, ret+0(FP) + RET + +// systemstack_switch is a dummy routine that systemstack leaves at the bottom +// of the G stack. We need to distinguish the routine that +// lives at the bottom of the G stack from the one that lives +// at the top of the system stack because the one at the top of +// the system stack terminates the stack walk (see topofstack()). +TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0 + UNDEF + JALR RA, ZERO // make sure this function is not leaf + RET + +// func systemstack(fn func()) +TEXT runtime·systemstack(SB), NOSPLIT, $0-8 + MOV fn+0(FP), CTXT // CTXT = fn + MOV g_m(g), T0 // T0 = m + + MOV m_gsignal(T0), T1 // T1 = gsignal + BEQ g, T1, noswitch + + MOV m_g0(T0), T1 // T1 = g0 + BEQ g, T1, noswitch + + MOV m_curg(T0), T2 + BEQ g, T2, switch + + // Bad: g is not gsignal, not g0, not curg. What is it? + // Hide call from linker nosplit analysis. + MOV $runtime·badsystemstack(SB), T1 + JALR RA, T1 + +switch: + // save our state in g->sched. Pretend to + // be systemstack_switch if the G stack is scanned. + CALL gosave_systemstack_switch<>(SB) + + // switch to g0 + MOV T1, g + CALL runtime·save_g(SB) + MOV (g_sched+gobuf_sp)(g), T0 + MOV T0, X2 + + // call target function + MOV 0(CTXT), T1 // code pointer + JALR RA, T1 + + // switch back to g + MOV g_m(g), T0 + MOV m_curg(T0), g + CALL runtime·save_g(SB) + MOV (g_sched+gobuf_sp)(g), X2 + MOV ZERO, (g_sched+gobuf_sp)(g) + RET + +noswitch: + // already on m stack, just call directly + // Using a tail call here cleans up tracebacks since we won't stop + // at an intermediate systemstack. 
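+ // A Go-level sketch of this fast path (hedged; onSystemStack is a
+ // hypothetical helper standing in for the register checks above):
+ //
+ //	func systemstack(fn func()) {
+ //		if onSystemStack() {
+ //			fn() // tail call through fn's code pointer
+ //			return
+ //		}
+ //		// otherwise switch to g0, call fn, switch back (above)
+ //	}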
+ MOV 0(CTXT), T1 // code pointer + ADD $8, X2 + JMP (T1) + +// func switchToCrashStack0(fn func()) +TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-8 + MOV X10, CTXT // context register + MOV g_m(g), X11 // curm + + // set g to gcrash + MOV $runtime·gcrash(SB), g // g = &gcrash + CALL runtime·save_g(SB) // clobbers X31 + MOV X11, g_m(g) // g.m = curm + MOV g, m_g0(X11) // curm.g0 = g + + // switch to crashstack + MOV (g_stack+stack_hi)(g), X11 + SUB $(4*8), X11 + MOV X11, X2 + + // call target function + MOV 0(CTXT), X10 + JALR X1, X10 + + // should never return + CALL runtime·abort(SB) + UNDEF + +/* + * support for morestack + */ + +// Called during function prolog when more stack is needed. +// Called with return address (i.e. caller's PC) in X5 (aka T0), +// and the LR register contains the caller's LR. +// +// The traceback routines see morestack on a g0 as being +// the top of a stack (for example, morestack calling newstack +// calling the scheduler calling newm calling gc), so we must +// record an argument size. For that purpose, it has no arguments. + +// func morestack() +TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 + // Called from f. + // Set g->sched to context in f. + MOV X2, (g_sched+gobuf_sp)(g) + MOV T0, (g_sched+gobuf_pc)(g) + MOV RA, (g_sched+gobuf_lr)(g) + MOV CTXT, (g_sched+gobuf_ctxt)(g) + + // Cannot grow scheduler stack (m->g0). + MOV g_m(g), A0 + MOV m_g0(A0), A1 + BNE g, A1, 3(PC) + CALL runtime·badmorestackg0(SB) + CALL runtime·abort(SB) + + // Cannot grow signal stack (m->gsignal). + MOV m_gsignal(A0), A1 + BNE g, A1, 3(PC) + CALL runtime·badmorestackgsignal(SB) + CALL runtime·abort(SB) + + // Called from f. + // Set m->morebuf to f's caller. + MOV RA, (m_morebuf+gobuf_pc)(A0) // f's caller's PC + MOV X2, (m_morebuf+gobuf_sp)(A0) // f's caller's SP + MOV g, (m_morebuf+gobuf_g)(A0) + + // Call newstack on m->g0's stack. + MOV m_g0(A0), g + CALL runtime·save_g(SB) + MOV (g_sched+gobuf_sp)(g), X2 + // Create a stack frame on g0 to call newstack. + MOV ZERO, -8(X2) // Zero saved LR in frame + SUB $8, X2 + CALL runtime·newstack(SB) + + // Not reached, but make sure the return PC from the call to newstack + // is still in this function, and not the beginning of the next. + UNDEF + +// func morestack_noctxt() +TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 + // Force SPWRITE. This function doesn't actually write SP, + // but it is called with a special calling convention where + // the caller doesn't save LR on stack but passes it as a + // register, and the unwinder currently doesn't understand. + // Make it SPWRITE to stop unwinding. 
(See issue 54332) + MOV X2, X2 + + MOV ZERO, CTXT + JMP runtime·morestack(SB) + +// AES hashing not implemented for riscv64 +TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32 + JMP runtime·memhashFallback(SB) +TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·strhashFallback(SB) +TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·memhash32Fallback(SB) +TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·memhash64Fallback(SB) + +// func return0() +TEXT runtime·return0(SB), NOSPLIT, $0 + MOV $0, A0 + RET + +// restore state from Gobuf; longjmp + +// func gogo(buf *gobuf) +TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8 + MOV buf+0(FP), T0 + MOV gobuf_g(T0), T1 + MOV 0(T1), ZERO // make sure g != nil + JMP gogo<>(SB) + +TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0 + MOV T1, g + CALL runtime·save_g(SB) + + MOV gobuf_sp(T0), X2 + MOV gobuf_lr(T0), RA + MOV gobuf_ret(T0), A0 + MOV gobuf_ctxt(T0), CTXT + MOV ZERO, gobuf_sp(T0) + MOV ZERO, gobuf_ret(T0) + MOV ZERO, gobuf_lr(T0) + MOV ZERO, gobuf_ctxt(T0) + MOV gobuf_pc(T0), T0 + JALR ZERO, T0 + +// func procyield(cycles uint32) +TEXT runtime·procyield(SB),NOSPLIT,$0-0 + RET + +// Switch to m->g0's stack, call fn(g). +// Fn must never return. It should gogo(&g->sched) +// to keep running g. + +// func mcall(fn func(*g)) +TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 + MOV X10, CTXT + + // Save caller state in g->sched + MOV X2, (g_sched+gobuf_sp)(g) + MOV RA, (g_sched+gobuf_pc)(g) + MOV ZERO, (g_sched+gobuf_lr)(g) + + // Switch to m->g0 & its stack, call fn. + MOV g, X10 + MOV g_m(g), T1 + MOV m_g0(T1), g + CALL runtime·save_g(SB) + BNE g, X10, 2(PC) + JMP runtime·badmcall(SB) + MOV 0(CTXT), T1 // code pointer + MOV (g_sched+gobuf_sp)(g), X2 // sp = m->g0->sched.sp + // we don't need special macro for regabi since arg0(X10) = g + SUB $16, X2 + MOV X10, 8(X2) // setup g + MOV ZERO, 0(X2) // clear return address + JALR RA, T1 + JMP runtime·badmcall2(SB) + +// Save state of caller into g->sched, +// but using fake PC from systemstack_switch. +// Must only be called from functions with no locals ($0) +// or else unwinding from systemstack_switch is incorrect. +// Smashes X31. +TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 + MOV $runtime·systemstack_switch(SB), X31 + ADD $8, X31 // get past prologue + MOV X31, (g_sched+gobuf_pc)(g) + MOV X2, (g_sched+gobuf_sp)(g) + MOV ZERO, (g_sched+gobuf_lr)(g) + MOV ZERO, (g_sched+gobuf_ret)(g) + // Assert ctxt is zero. See func save. + MOV (g_sched+gobuf_ctxt)(g), X31 + BEQ ZERO, X31, 2(PC) + CALL runtime·abort(SB) + RET + +// func asmcgocall_no_g(fn, arg unsafe.Pointer) +// Call fn(arg) aligned appropriately for the gcc ABI. +// Called on a system stack, and there may be no g yet (during needm). +TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16 + MOV fn+0(FP), X5 + MOV arg+8(FP), X10 + JALR RA, (X5) + RET + +// func asmcgocall(fn, arg unsafe.Pointer) int32 +// Call fn(arg) on the scheduler stack, +// aligned appropriately for the gcc ABI. +// See cgocall.go for more details. +TEXT ·asmcgocall(SB),NOSPLIT,$0-20 + MOV fn+0(FP), X5 + MOV arg+8(FP), X10 + + MOV X2, X8 // save original stack pointer + MOV g, X9 + + // Figure out if we need to switch to m->g0 stack. + // We get called to create new OS threads too, and those + // come in on the m->g0 stack already. Or we might already + // be on the m->gsignal stack. 
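+ // In Go terms, the compares below amount to (a hedged sketch):
+ //
+ //	if g == g.m.gsignal || g == g.m.g0 {
+ //		// already on a system stack; keep the current SP
+ //	} else {
+ //		// save state and borrow g0's stack for the C call
+ //	}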
+ MOV g_m(g), X6 + MOV m_gsignal(X6), X7 + BEQ X7, g, g0 + MOV m_g0(X6), X7 + BEQ X7, g, g0 + + CALL gosave_systemstack_switch<>(SB) + MOV X7, g + CALL runtime·save_g(SB) + MOV (g_sched+gobuf_sp)(g), X2 + + // Now on a scheduling stack (a pthread-created stack). +g0: + // Save room for two of our pointers. + SUB $16, X2 + MOV X9, 0(X2) // save old g on stack + MOV (g_stack+stack_hi)(X9), X9 + SUB X8, X9, X8 + MOV X8, 8(X2) // save depth in old g stack (can't just save SP, as stack might be copied during a callback) + + JALR RA, (X5) + + // Restore g, stack pointer. X10 is return value. + MOV 0(X2), g + CALL runtime·save_g(SB) + MOV (g_stack+stack_hi)(g), X5 + MOV 8(X2), X6 + SUB X6, X5, X6 + MOV X6, X2 + + MOVW X10, ret+16(FP) + RET + +// func asminit() +TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 + RET + +// reflectcall: call a function with the given argument list +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). +// we don't have variable-sized frames, so we use a small number +// of constant-sized-frame functions to encode a few bits of size in the pc. +// Caution: ugly multiline assembly macros in your future! + +#define DISPATCH(NAME,MAXSIZE) \ + MOV $MAXSIZE, T1 \ + BLTU T1, T0, 3(PC) \ + MOV $NAME(SB), T2; \ + JALR ZERO, T2 +// Note: can't just "BR NAME(SB)" - bad inlining results. + +// func call(stackArgsType *rtype, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). +TEXT reflect·call(SB), NOSPLIT, $0-0 + JMP ·reflectcall(SB) + +// func call(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). +TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48 + MOVWU frameSize+32(FP), T0 + DISPATCH(runtime·call16, 16) + DISPATCH(runtime·call32, 32) + DISPATCH(runtime·call64, 64) + DISPATCH(runtime·call128, 128) + DISPATCH(runtime·call256, 256) + DISPATCH(runtime·call512, 512) + DISPATCH(runtime·call1024, 1024) + DISPATCH(runtime·call2048, 2048) + DISPATCH(runtime·call4096, 4096) + DISPATCH(runtime·call8192, 8192) + DISPATCH(runtime·call16384, 16384) + DISPATCH(runtime·call32768, 32768) + DISPATCH(runtime·call65536, 65536) + DISPATCH(runtime·call131072, 131072) + DISPATCH(runtime·call262144, 262144) + DISPATCH(runtime·call524288, 524288) + DISPATCH(runtime·call1048576, 1048576) + DISPATCH(runtime·call2097152, 2097152) + DISPATCH(runtime·call4194304, 4194304) + DISPATCH(runtime·call8388608, 8388608) + DISPATCH(runtime·call16777216, 16777216) + DISPATCH(runtime·call33554432, 33554432) + DISPATCH(runtime·call67108864, 67108864) + DISPATCH(runtime·call134217728, 134217728) + DISPATCH(runtime·call268435456, 268435456) + DISPATCH(runtime·call536870912, 536870912) + DISPATCH(runtime·call1073741824, 1073741824) + MOV $runtime·badreflectcall(SB), T2 + JALR ZERO, T2 + +#define CALLFN(NAME,MAXSIZE) \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ + NO_LOCAL_POINTERS; \ + /* copy arguments to stack */ \ + MOV stackArgs+16(FP), A1; \ + MOVWU stackArgsSize+24(FP), A2; \ + MOV X2, A3; \ + ADD $8, A3; \ + ADD A3, A2; \ + BEQ A3, A2, 6(PC); \ + MOVBU (A1), A4; \ + ADD $1, A1; \ + MOVB A4, (A3); \ + ADD $1, A3; \ + JMP -5(PC); \ + /* set up argument registers */ \ + MOV regArgs+40(FP), X25; \ + CALL ·unspillArgs(SB); \ + /* call function */ \ + MOV f+8(FP), CTXT; \ + MOV (CTXT), X25; \ + PCDATA $PCDATA_StackMapIndex, $0; \ + JALR RA, X25; \ + /* copy return values back */ \ + MOV regArgs+40(FP), X25; \ + CALL 
·spillArgs(SB); \ + MOV stackArgsType+0(FP), A5; \ + MOV stackArgs+16(FP), A1; \ + MOVWU stackArgsSize+24(FP), A2; \ + MOVWU stackRetOffset+28(FP), A4; \ + ADD $8, X2, A3; \ + ADD A4, A3; \ + ADD A4, A1; \ + SUB A4, A2; \ + CALL callRet<>(SB); \ + RET + +// callRet copies return values back at the end of call*. This is a +// separate function so it can allocate stack space for the arguments +// to reflectcallmove. It does not follow the Go ABI; it expects its +// arguments in registers. +TEXT callRet<>(SB), NOSPLIT, $40-0 + NO_LOCAL_POINTERS + MOV A5, 8(X2) + MOV A1, 16(X2) + MOV A3, 24(X2) + MOV A2, 32(X2) + MOV X25, 40(X2) + CALL runtime·reflectcallmove(SB) + RET + +CALLFN(·call16, 16) +CALLFN(·call32, 32) +CALLFN(·call64, 64) +CALLFN(·call128, 128) +CALLFN(·call256, 256) +CALLFN(·call512, 512) +CALLFN(·call1024, 1024) +CALLFN(·call2048, 2048) +CALLFN(·call4096, 4096) +CALLFN(·call8192, 8192) +CALLFN(·call16384, 16384) +CALLFN(·call32768, 32768) +CALLFN(·call65536, 65536) +CALLFN(·call131072, 131072) +CALLFN(·call262144, 262144) +CALLFN(·call524288, 524288) +CALLFN(·call1048576, 1048576) +CALLFN(·call2097152, 2097152) +CALLFN(·call4194304, 4194304) +CALLFN(·call8388608, 8388608) +CALLFN(·call16777216, 16777216) +CALLFN(·call33554432, 33554432) +CALLFN(·call67108864, 67108864) +CALLFN(·call134217728, 134217728) +CALLFN(·call268435456, 268435456) +CALLFN(·call536870912, 536870912) +CALLFN(·call1073741824, 1073741824) + +// Called from cgo wrappers, this function returns g->m->curg.stack.hi. +// Must obey the gcc calling convention. +TEXT _cgo_topofstack(SB),NOSPLIT,$8 + // g (X27) and REG_TMP (X31) might be clobbered by load_g. + // X27 is callee-save in the gcc calling convention, so save it. + MOV g, savedX27-8(SP) + + CALL runtime·load_g(SB) + MOV g_m(g), X5 + MOV m_curg(X5), X5 + MOV (g_stack+stack_hi)(X5), X10 // return value in X10 + + MOV savedX27-8(SP), g + RET + +// func goexit(neverCallThisFunction) +// The top-most function running on a goroutine +// returns to goexit+PCQuantum. +TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0 + MOV ZERO, ZERO // NOP + JMP runtime·goexit1(SB) // does not return + // traceback from goexit1 must hit code range of goexit + MOV ZERO, ZERO // NOP + +// func cgocallback(fn, frame unsafe.Pointer, ctxt uintptr) +// See cgocall.go for more details. +TEXT ·cgocallback(SB),NOSPLIT,$24-24 + NO_LOCAL_POINTERS + + // Skip cgocallbackg, just dropm when fn is nil, and frame is the saved g. + // It is used to dropm while thread is exiting. + MOV fn+0(FP), X7 + BNE ZERO, X7, loadg + // Restore the g from frame. + MOV frame+8(FP), g + JMP dropm + +loadg: + // Load m and g from thread-local storage. + MOVBU runtime·iscgo(SB), X5 + BEQ ZERO, X5, nocgo + CALL runtime·load_g(SB) +nocgo: + + // If g is nil, Go did not create the current thread, + // or if this thread never called into Go on pthread platforms. + // Call needm to obtain one for temporary use. + // In this case, we're running on the thread stack, so there's + // lots of space, but the linker doesn't know. Hide the call from + // the linker analysis by using an indirect call. + BEQ ZERO, g, needm + + MOV g_m(g), X5 + MOV X5, savedm-8(SP) + JMP havem + +needm: + MOV g, savedm-8(SP) // g is zero, so is m. + MOV $runtime·needAndBindM(SB), X6 + JALR RA, X6 + + // Set m->sched.sp = SP, so that if a panic happens + // during the function we are about to execute, it will + // have a valid SP to run on the g0 stack. 
+ // The next few lines (after the havem label) + // will save this SP onto the stack and then write + // the same SP back to m->sched.sp. That seems redundant, + // but if an unrecovered panic happens, unwindm will + // restore the g->sched.sp from the stack location + // and then systemstack will try to use it. If we don't set it here, + // that restored SP will be uninitialized (typically 0) and + // will not be usable. + MOV g_m(g), X5 + MOV m_g0(X5), X6 + MOV X2, (g_sched+gobuf_sp)(X6) + +havem: + // Now there's a valid m, and we're running on its m->g0. + // Save current m->g0->sched.sp on stack and then set it to SP. + // Save current sp in m->g0->sched.sp in preparation for + // switch back to m->curg stack. + // NOTE: unwindm knows that the saved g->sched.sp is at 8(X2) aka savedsp-24(SP). + MOV m_g0(X5), X6 + MOV (g_sched+gobuf_sp)(X6), X7 + MOV X7, savedsp-24(SP) // must match frame size + MOV X2, (g_sched+gobuf_sp)(X6) + + // Switch to m->curg stack and call runtime.cgocallbackg. + // Because we are taking over the execution of m->curg + // but *not* resuming what had been running, we need to + // save that information (m->curg->sched) so we can restore it. + // We can restore m->curg->sched.sp easily, because calling + // runtime.cgocallbackg leaves SP unchanged upon return. + // To save m->curg->sched.pc, we push it onto the curg stack and + // open a frame the same size as cgocallback's g0 frame. + // Once we switch to the curg stack, the pushed PC will appear + // to be the return PC of cgocallback, so that the traceback + // will seamlessly trace back into the earlier calls. + MOV m_curg(X5), g + CALL runtime·save_g(SB) + MOV (g_sched+gobuf_sp)(g), X6 // prepare stack as X6 + MOV (g_sched+gobuf_pc)(g), X7 + MOV X7, -(24+8)(X6) // "saved LR"; must match frame size + // Gather our arguments into registers. + MOV fn+0(FP), X7 + MOV frame+8(FP), X8 + MOV ctxt+16(FP), X9 + MOV $-(24+8)(X6), X2 // switch stack; must match frame size + MOV X7, 8(X2) + MOV X8, 16(X2) + MOV X9, 24(X2) + CALL runtime·cgocallbackg(SB) + + // Restore g->sched (== m->curg->sched) from saved values. + MOV 0(X2), X7 + MOV X7, (g_sched+gobuf_pc)(g) + MOV $(24+8)(X2), X6 // must match frame size + MOV X6, (g_sched+gobuf_sp)(g) + + // Switch back to m->g0's stack and restore m->g0->sched.sp. + // (Unlike m->curg, the g0 goroutine never uses sched.pc, + // so we do not have to restore it.) + MOV g_m(g), X5 + MOV m_g0(X5), g + CALL runtime·save_g(SB) + MOV (g_sched+gobuf_sp)(g), X2 + MOV savedsp-24(SP), X6 // must match frame size + MOV X6, (g_sched+gobuf_sp)(g) + + // If the m on entry was nil, we called needm above to borrow an m, + // 1. for the duration of the call on non-pthread platforms, + // 2. or the duration of the C thread alive on pthread platforms. + // If the m on entry wasn't nil, + // 1. the thread might be a Go thread, + // 2. or it wasn't the first call from a C thread on pthread platforms, + // since then we skip dropm to reuse the m in the first call. + MOV savedm-8(SP), X5 + BNE ZERO, X5, droppedm + + // Skip dropm to reuse it in the next call, when a pthread key has been created. + MOV _cgo_pthread_key_created(SB), X5 + // It means cgo is disabled when _cgo_pthread_key_created is a nil pointer, need dropm. + BEQ ZERO, X5, dropm + MOV (X5), X5 + BNE ZERO, X5, droppedm + +dropm: + MOV $runtime·dropm(SB), X6 + JALR RA, X6 +droppedm: + + // Done! 
+ RET + +TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0 + EBREAK + RET + +TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0 + EBREAK + RET + +// void setg(G*); set g. for use by needm. +TEXT runtime·setg(SB), NOSPLIT, $0-8 + MOV gg+0(FP), g + // This only happens if iscgo, so jump straight to save_g + CALL runtime·save_g(SB) + RET + +TEXT ·checkASM(SB),NOSPLIT,$0-1 + MOV $1, T0 + MOV T0, ret+0(FP) + RET + +// spillArgs stores return values from registers to a *internal/abi.RegArgs in X25. +TEXT ·spillArgs(SB),NOSPLIT,$0-0 + MOV X10, (0*8)(X25) + MOV X11, (1*8)(X25) + MOV X12, (2*8)(X25) + MOV X13, (3*8)(X25) + MOV X14, (4*8)(X25) + MOV X15, (5*8)(X25) + MOV X16, (6*8)(X25) + MOV X17, (7*8)(X25) + MOV X8, (8*8)(X25) + MOV X9, (9*8)(X25) + MOV X18, (10*8)(X25) + MOV X19, (11*8)(X25) + MOV X20, (12*8)(X25) + MOV X21, (13*8)(X25) + MOV X22, (14*8)(X25) + MOV X23, (15*8)(X25) + MOVD F10, (16*8)(X25) + MOVD F11, (17*8)(X25) + MOVD F12, (18*8)(X25) + MOVD F13, (19*8)(X25) + MOVD F14, (20*8)(X25) + MOVD F15, (21*8)(X25) + MOVD F16, (22*8)(X25) + MOVD F17, (23*8)(X25) + MOVD F8, (24*8)(X25) + MOVD F9, (25*8)(X25) + MOVD F18, (26*8)(X25) + MOVD F19, (27*8)(X25) + MOVD F20, (28*8)(X25) + MOVD F21, (29*8)(X25) + MOVD F22, (30*8)(X25) + MOVD F23, (31*8)(X25) + RET + +// unspillArgs loads args into registers from a *internal/abi.RegArgs in X25. +TEXT ·unspillArgs(SB),NOSPLIT,$0-0 + MOV (0*8)(X25), X10 + MOV (1*8)(X25), X11 + MOV (2*8)(X25), X12 + MOV (3*8)(X25), X13 + MOV (4*8)(X25), X14 + MOV (5*8)(X25), X15 + MOV (6*8)(X25), X16 + MOV (7*8)(X25), X17 + MOV (8*8)(X25), X8 + MOV (9*8)(X25), X9 + MOV (10*8)(X25), X18 + MOV (11*8)(X25), X19 + MOV (12*8)(X25), X20 + MOV (13*8)(X25), X21 + MOV (14*8)(X25), X22 + MOV (15*8)(X25), X23 + MOVD (16*8)(X25), F10 + MOVD (17*8)(X25), F11 + MOVD (18*8)(X25), F12 + MOVD (19*8)(X25), F13 + MOVD (20*8)(X25), F14 + MOVD (21*8)(X25), F15 + MOVD (22*8)(X25), F16 + MOVD (23*8)(X25), F17 + MOVD (24*8)(X25), F8 + MOVD (25*8)(X25), F9 + MOVD (26*8)(X25), F18 + MOVD (27*8)(X25), F19 + MOVD (28*8)(X25), F20 + MOVD (29*8)(X25), F21 + MOVD (30*8)(X25), F22 + MOVD (31*8)(X25), F23 + RET + +// gcWriteBarrier informs the GC about heap pointer writes. +// +// gcWriteBarrier does NOT follow the Go ABI. It accepts the +// number of bytes of buffer needed in X24, and returns a pointer +// to the buffer space in X24. +// It clobbers X31 aka T6 (the linker temp register - REG_TMP). +// The act of CALLing gcWriteBarrier will clobber RA (LR). +// It does not clobber any other general-purpose registers, +// but may clobber others (e.g., floating point registers). +TEXT gcWriteBarrier<>(SB),NOSPLIT,$208 + // Save the registers clobbered by the fast path. + MOV A0, 24*8(X2) + MOV A1, 25*8(X2) +retry: + MOV g_m(g), A0 + MOV m_p(A0), A0 + MOV (p_wbBuf+wbBuf_next)(A0), A1 + MOV (p_wbBuf+wbBuf_end)(A0), T6 // T6 is linker temp register (REG_TMP) + // Increment wbBuf.next position. + ADD X24, A1 + // Is the buffer full? + BLTU T6, A1, flush + // Commit to the larger buffer. + MOV A1, (p_wbBuf+wbBuf_next)(A0) + // Make the return value (the original next position) + SUB X24, A1, X24 + // Restore registers. + MOV 24*8(X2), A0 + MOV 25*8(X2), A1 + RET + +flush: + // Save all general purpose registers since these could be + // clobbered by wbBufFlush and were not saved by the caller. 
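+ // wbBufFlush is an ordinary Go call (func wbBufFlush()), so every
+ // general-purpose register not fixed by the runtime must be preserved
+ // by hand; only ZERO, LR, SP, GP, TP, g (X27), the temp register
+ // (X31), and the already-saved A0/A1 are skipped below.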
+ MOV T0, 1*8(X2) + MOV T1, 2*8(X2) + // X0 is zero register + // X1 is LR, saved by prologue + // X2 is SP + // X3 is GP + // X4 is TP + MOV X7, 3*8(X2) + MOV X8, 4*8(X2) + MOV X9, 5*8(X2) + // X10 already saved (A0) + // X11 already saved (A1) + MOV X12, 6*8(X2) + MOV X13, 7*8(X2) + MOV X14, 8*8(X2) + MOV X15, 9*8(X2) + MOV X16, 10*8(X2) + MOV X17, 11*8(X2) + MOV X18, 12*8(X2) + MOV X19, 13*8(X2) + MOV X20, 14*8(X2) + MOV X21, 15*8(X2) + MOV X22, 16*8(X2) + MOV X23, 17*8(X2) + MOV X24, 18*8(X2) + MOV X25, 19*8(X2) + MOV X26, 20*8(X2) + // X27 is g. + MOV X28, 21*8(X2) + MOV X29, 22*8(X2) + MOV X30, 23*8(X2) + // X31 is tmp register. + + CALL runtime·wbBufFlush(SB) + + MOV 1*8(X2), T0 + MOV 2*8(X2), T1 + MOV 3*8(X2), X7 + MOV 4*8(X2), X8 + MOV 5*8(X2), X9 + MOV 6*8(X2), X12 + MOV 7*8(X2), X13 + MOV 8*8(X2), X14 + MOV 9*8(X2), X15 + MOV 10*8(X2), X16 + MOV 11*8(X2), X17 + MOV 12*8(X2), X18 + MOV 13*8(X2), X19 + MOV 14*8(X2), X20 + MOV 15*8(X2), X21 + MOV 16*8(X2), X22 + MOV 17*8(X2), X23 + MOV 18*8(X2), X24 + MOV 19*8(X2), X25 + MOV 20*8(X2), X26 + MOV 21*8(X2), X28 + MOV 22*8(X2), X29 + MOV 23*8(X2), X30 + + JMP retry + +TEXT runtime·gcWriteBarrier1(SB),NOSPLIT,$0 + MOV $8, X24 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier2(SB),NOSPLIT,$0 + MOV $16, X24 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier3(SB),NOSPLIT,$0 + MOV $24, X24 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier4(SB),NOSPLIT,$0 + MOV $32, X24 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier5(SB),NOSPLIT,$0 + MOV $40, X24 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier6(SB),NOSPLIT,$0 + MOV $48, X24 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier7(SB),NOSPLIT,$0 + MOV $56, X24 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0 + MOV $64, X24 + JMP gcWriteBarrier<>(SB) + +// Note: these functions use a special calling convention to save generated code space. +// Arguments are passed in registers (ssa/gen/RISCV64Ops.go), but the space for those +// arguments are allocated in the caller's stack frame. +// These stubs write the args into that stack space and then tail call to the +// corresponding runtime handler. +// The tail call makes these stubs disappear in backtraces. 
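+//
+// For example (an illustrative sketch, not code from this file), a Go
+// function such as
+//
+//	func get(s []int, i int) int { return s[i] }
+//
+// compiles to an inline bounds check that, on failure, leaves the bad
+// index in T0 and the slice length in T1 and jumps to runtime·panicIndex.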
+TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
+ MOV T0, X10
+ MOV T1, X11
+ JMP runtime·goPanicIndex(SB)
+TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
+ MOV T0, X10
+ MOV T1, X11
+ JMP runtime·goPanicIndexU(SB)
+TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
+ MOV T1, X10
+ MOV T2, X11
+ JMP runtime·goPanicSliceAlen(SB)
+TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
+ MOV T1, X10
+ MOV T2, X11
+ JMP runtime·goPanicSliceAlenU(SB)
+TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
+ MOV T1, X10
+ MOV T2, X11
+ JMP runtime·goPanicSliceAcap(SB)
+TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
+ MOV T1, X10
+ MOV T2, X11
+ JMP runtime·goPanicSliceAcapU(SB)
+TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
+ MOV T0, X10
+ MOV T1, X11
+ JMP runtime·goPanicSliceB(SB)
+TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
+ MOV T0, X10
+ MOV T1, X11
+ JMP runtime·goPanicSliceBU(SB)
+TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
+ MOV T2, X10
+ MOV T3, X11
+ JMP runtime·goPanicSlice3Alen(SB)
+TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
+ MOV T2, X10
+ MOV T3, X11
+ JMP runtime·goPanicSlice3AlenU(SB)
+TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
+ MOV T2, X10
+ MOV T3, X11
+ JMP runtime·goPanicSlice3Acap(SB)
+TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
+ MOV T2, X10
+ MOV T3, X11
+ JMP runtime·goPanicSlice3AcapU(SB)
+TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
+ MOV T1, X10
+ MOV T2, X11
+ JMP runtime·goPanicSlice3B(SB)
+TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
+ MOV T1, X10
+ MOV T2, X11
+ JMP runtime·goPanicSlice3BU(SB)
+TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
+ MOV T0, X10
+ MOV T1, X11
+ JMP runtime·goPanicSlice3C(SB)
+TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
+ MOV T0, X10
+ MOV T1, X11
+ JMP runtime·goPanicSlice3CU(SB)
+TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
+ MOV T2, X10
+ MOV T3, X11
+ JMP runtime·goPanicSliceConvert(SB)
+
+DATA runtime·mainPC+0(SB)/8,$runtime·main(SB)
+GLOBL runtime·mainPC(SB),RODATA,$8
diff --git a/platform/dbops/binaries/go/go/src/runtime/asm_s390x.s b/platform/dbops/binaries/go/go/src/runtime/asm_s390x.s
new file mode 100644
index 0000000000000000000000000000000000000000..a8e1424bf1813ded69495643380b2fb0c740ae3c
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/asm_s390x.s
@@ -0,0 +1,951 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "funcdata.h"
+#include "textflag.h"
+
+// _rt0_s390x_lib is common startup code for s390x systems when
+// using -buildmode=c-archive or -buildmode=c-shared. The linker will
+// arrange to invoke this function as a global constructor (for
+// c-archive) or when the shared library is loaded (for c-shared).
+// We expect argc and argv to be passed in the usual C ABI registers
+// R2 and R3.
+TEXT _rt0_s390x_lib(SB), NOSPLIT|NOFRAME, $0
+ MOVD R2, _rt0_s390x_lib_argc<>(SB)
+ MOVD R3, _rt0_s390x_lib_argv<>(SB)
+
+ // Save R6-R15 in the register save area of the calling function.
+ STMG R6, R15, 48(R15)
+
+ // Allocate 80 bytes on the stack.
+ MOVD $-80(R15), R15
+
+ // Save F8-F15 in our stack frame.
+ FMOVD F8, 16(R15)
+ FMOVD F9, 24(R15)
+ FMOVD F10, 32(R15)
+ FMOVD F11, 40(R15)
+ FMOVD F12, 48(R15)
+ FMOVD F13, 56(R15)
+ FMOVD F14, 64(R15)
+ FMOVD F15, 72(R15)
+
+ // Synchronous initialization.
+ MOVD $runtime·libpreinit(SB), R1
+ BL R1
+
+ // Create a new thread to finish Go runtime initialization.
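+ // A hedged Go-level sketch of the two branches below: prefer the
+ // cgo-provided thread creation hook, else fall back to the runtime's
+ // own newosproc with an 8MB stack.
+ //
+ //	if _cgo_sys_thread_create != nil {
+ //		_cgo_sys_thread_create(_rt0_s390x_lib_go, nil)
+ //	} else {
+ //		newosproc(0x800000, _rt0_s390x_lib_go) // stacksize, fn
+ //	}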
+ MOVD _cgo_sys_thread_create(SB), R1
+ CMP R1, $0
+ BEQ nocgo
+ MOVD $_rt0_s390x_lib_go(SB), R2
+ MOVD $0, R3
+ BL R1
+ BR restore
+
+nocgo:
+ MOVD $0x800000, R1 // stacksize
+ MOVD R1, 0(R15)
+ MOVD $_rt0_s390x_lib_go(SB), R1
+ MOVD R1, 8(R15) // fn
+ MOVD $runtime·newosproc(SB), R1
+ BL R1
+
+restore:
+ // Restore F8-F15 from our stack frame.
+ FMOVD 16(R15), F8
+ FMOVD 24(R15), F9
+ FMOVD 32(R15), F10
+ FMOVD 40(R15), F11
+ FMOVD 48(R15), F12
+ FMOVD 56(R15), F13
+ FMOVD 64(R15), F14
+ FMOVD 72(R15), F15
+ MOVD $80(R15), R15
+
+ // Restore R6-R15.
+ LMG 48(R15), R6, R15
+ RET
+
+// _rt0_s390x_lib_go initializes the Go runtime.
+// This is started in a separate thread by _rt0_s390x_lib.
+TEXT _rt0_s390x_lib_go(SB), NOSPLIT|NOFRAME, $0
+ MOVD _rt0_s390x_lib_argc<>(SB), R2
+ MOVD _rt0_s390x_lib_argv<>(SB), R3
+ MOVD $runtime·rt0_go(SB), R1
+ BR R1
+
+DATA _rt0_s390x_lib_argc<>(SB)/8, $0
+GLOBL _rt0_s390x_lib_argc<>(SB), NOPTR, $8
+DATA _rt0_s390x_lib_argv<>(SB)/8, $0
+GLOBL _rt0_s390x_lib_argv<>(SB), NOPTR, $8
+
+TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
+ // R2 = argc; R3 = argv; R11 = temp; R13 = g; R15 = stack pointer
+ // C TLS base pointer in AR0:AR1
+
+ // initialize essential registers
+ XOR R0, R0
+
+ SUB $24, R15
+ MOVW R2, 8(R15) // argc
+ MOVD R3, 16(R15) // argv
+
+ // create istack out of the given (operating system) stack.
+ // _cgo_init may update stackguard.
+ MOVD $runtime·g0(SB), g
+ MOVD R15, R11
+ SUB $(64*1024), R11
+ MOVD R11, g_stackguard0(g)
+ MOVD R11, g_stackguard1(g)
+ MOVD R11, (g_stack+stack_lo)(g)
+ MOVD R15, (g_stack+stack_hi)(g)
+
+ // if there is a _cgo_init, call it using the gcc ABI.
+ MOVD _cgo_init(SB), R11
+ CMPBEQ R11, $0, nocgo
+ MOVW AR0, R4 // (AR0 << 32 | AR1) is the TLS base pointer; MOVD is translated to EAR
+ SLD $32, R4, R4
+ MOVW AR1, R4 // arg 2: TLS base pointer
+ MOVD $setg_gcc<>(SB), R3 // arg 1: setg
+ MOVD g, R2 // arg 0: G
+ // C functions expect 160 bytes of space on caller stack frame
+ // and an 8-byte aligned stack pointer
+ MOVD R15, R9 // save current stack (R9 is preserved in the Linux ABI)
+ SUB $160, R15 // reserve 160 bytes
+ MOVD $~7, R6
+ AND R6, R15 // 8-byte align
+ BL R11 // this call clobbers volatile registers according to Linux ABI (R0-R5, R14)
+ MOVD R9, R15 // restore stack
+ XOR R0, R0 // zero R0
+
+nocgo:
+ // update stackguard after _cgo_init
+ MOVD (g_stack+stack_lo)(g), R2
+ ADD $const_stackGuard, R2
+ MOVD R2, g_stackguard0(g)
+ MOVD R2, g_stackguard1(g)
+
+ // set the per-goroutine and per-mach "registers"
+ MOVD $runtime·m0(SB), R2
+
+ // save m->g0 = g0
+ MOVD g, m_g0(R2)
+ // save m0 to g0->m
+ MOVD R2, g_m(g)
+
+ BL runtime·check(SB)
+
+ // argc/argv are already prepared on stack
+ BL runtime·args(SB)
+ BL runtime·checkS390xCPU(SB)
+ BL runtime·osinit(SB)
+ BL runtime·schedinit(SB)
+
+ // create a new goroutine to start program
+ MOVD $runtime·mainPC(SB), R2 // entry
+ SUB $16, R15
+ MOVD R2, 8(R15)
+ MOVD $0, 0(R15)
+ BL runtime·newproc(SB)
+ ADD $16, R15
+
+ // start this M
+ BL runtime·mstart(SB)
+
+ MOVD $0, 1(R0)
+ RET
+
+DATA runtime·mainPC+0(SB)/8,$runtime·main(SB)
+GLOBL runtime·mainPC(SB),RODATA,$8
+
+TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
+ BRRK
+ RET
+
+TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
+ RET
+
+TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
+ CALL runtime·mstart0(SB)
+ RET // not reached
+
+/*
+ * go-routine
+ */
+
+// void gogo(Gobuf*)
+// restore state from Gobuf; longjmp
+TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8
+ MOVD buf+0(FP), R5
+ MOVD gobuf_g(R5), R6
+ MOVD
0(R6), R7 // make sure g != nil + BR gogo<>(SB) + +TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0 + MOVD R6, g + BL runtime·save_g(SB) + + MOVD 0(g), R4 + MOVD gobuf_sp(R5), R15 + MOVD gobuf_lr(R5), LR + MOVD gobuf_ret(R5), R3 + MOVD gobuf_ctxt(R5), R12 + MOVD $0, gobuf_sp(R5) + MOVD $0, gobuf_ret(R5) + MOVD $0, gobuf_lr(R5) + MOVD $0, gobuf_ctxt(R5) + CMP R0, R0 // set condition codes for == test, needed by stack split + MOVD gobuf_pc(R5), R6 + BR (R6) + +// void mcall(fn func(*g)) +// Switch to m->g0's stack, call fn(g). +// Fn must never return. It should gogo(&g->sched) +// to keep running g. +TEXT runtime·mcall(SB), NOSPLIT, $-8-8 + // Save caller state in g->sched + MOVD R15, (g_sched+gobuf_sp)(g) + MOVD LR, (g_sched+gobuf_pc)(g) + MOVD $0, (g_sched+gobuf_lr)(g) + + // Switch to m->g0 & its stack, call fn. + MOVD g, R3 + MOVD g_m(g), R8 + MOVD m_g0(R8), g + BL runtime·save_g(SB) + CMP g, R3 + BNE 2(PC) + BR runtime·badmcall(SB) + MOVD fn+0(FP), R12 // context + MOVD 0(R12), R4 // code pointer + MOVD (g_sched+gobuf_sp)(g), R15 // sp = m->g0->sched.sp + SUB $16, R15 + MOVD R3, 8(R15) + MOVD $0, 0(R15) + BL (R4) + BR runtime·badmcall2(SB) + +// systemstack_switch is a dummy routine that systemstack leaves at the bottom +// of the G stack. We need to distinguish the routine that +// lives at the bottom of the G stack from the one that lives +// at the top of the system stack because the one at the top of +// the system stack terminates the stack walk (see topofstack()). +TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0 + UNDEF + BL (LR) // make sure this function is not leaf + RET + +// func systemstack(fn func()) +TEXT runtime·systemstack(SB), NOSPLIT, $0-8 + MOVD fn+0(FP), R3 // R3 = fn + MOVD R3, R12 // context + MOVD g_m(g), R4 // R4 = m + + MOVD m_gsignal(R4), R5 // R5 = gsignal + CMPBEQ g, R5, noswitch + + MOVD m_g0(R4), R5 // R5 = g0 + CMPBEQ g, R5, noswitch + + MOVD m_curg(R4), R6 + CMPBEQ g, R6, switch + + // Bad: g is not gsignal, not g0, not curg. What is it? + // Hide call from linker nosplit analysis. + MOVD $runtime·badsystemstack(SB), R3 + BL (R3) + BL runtime·abort(SB) + +switch: + // save our state in g->sched. Pretend to + // be systemstack_switch if the G stack is scanned. + BL gosave_systemstack_switch<>(SB) + + // switch to g0 + MOVD R5, g + BL runtime·save_g(SB) + MOVD (g_sched+gobuf_sp)(g), R15 + + // call target function + MOVD 0(R12), R3 // code pointer + BL (R3) + + // switch back to g + MOVD g_m(g), R3 + MOVD m_curg(R3), g + BL runtime·save_g(SB) + MOVD (g_sched+gobuf_sp)(g), R15 + MOVD $0, (g_sched+gobuf_sp)(g) + RET + +noswitch: + // already on m stack, just call directly + // Using a tail call here cleans up tracebacks since we won't stop + // at an intermediate systemstack. + MOVD 0(R12), R3 // code pointer + MOVD 0(R15), LR // restore LR + ADD $8, R15 + BR (R3) + +/* + * support for morestack + */ + +// Called during function prolog when more stack is needed. +// Caller has already loaded: +// R3: framesize, R4: argsize, R5: LR +// +// The traceback routines see morestack on a g0 as being +// the top of a stack (for example, morestack calling newstack +// calling the scheduler calling newm calling gc), so we must +// record an argument size. For that purpose, it has no arguments. +TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 + // Cannot grow scheduler stack (m->g0). + MOVD g_m(g), R7 + MOVD m_g0(R7), R8 + CMPBNE g, R8, 3(PC) + BL runtime·badmorestackg0(SB) + BL runtime·abort(SB) + + // Cannot grow signal stack (m->gsignal). 
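+ // As with g0 above, reaching morestack while running on gsignal means
+ // a frame that should have been NOSPLIT tried to grow the stack; that
+ // is a runtime bug, so abort rather than attempt recovery.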
+ MOVD m_gsignal(R7), R8 + CMP g, R8 + BNE 3(PC) + BL runtime·badmorestackgsignal(SB) + BL runtime·abort(SB) + + // Called from f. + // Set g->sched to context in f. + MOVD R15, (g_sched+gobuf_sp)(g) + MOVD LR, R8 + MOVD R8, (g_sched+gobuf_pc)(g) + MOVD R5, (g_sched+gobuf_lr)(g) + MOVD R12, (g_sched+gobuf_ctxt)(g) + + // Called from f. + // Set m->morebuf to f's caller. + MOVD R5, (m_morebuf+gobuf_pc)(R7) // f's caller's PC + MOVD R15, (m_morebuf+gobuf_sp)(R7) // f's caller's SP + MOVD g, (m_morebuf+gobuf_g)(R7) + + // Call newstack on m->g0's stack. + MOVD m_g0(R7), g + BL runtime·save_g(SB) + MOVD (g_sched+gobuf_sp)(g), R15 + // Create a stack frame on g0 to call newstack. + MOVD $0, -8(R15) // Zero saved LR in frame + SUB $8, R15 + BL runtime·newstack(SB) + + // Not reached, but make sure the return PC from the call to newstack + // is still in this function, and not the beginning of the next. + UNDEF + +TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 + // Force SPWRITE. This function doesn't actually write SP, + // but it is called with a special calling convention where + // the caller doesn't save LR on stack but passes it as a + // register (R5), and the unwinder currently doesn't understand. + // Make it SPWRITE to stop unwinding. (See issue 54332) + MOVD R15, R15 + + MOVD $0, R12 + BR runtime·morestack(SB) + +// reflectcall: call a function with the given argument list +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). +// we don't have variable-sized frames, so we use a small number +// of constant-sized-frame functions to encode a few bits of size in the pc. +// Caution: ugly multiline assembly macros in your future! + +#define DISPATCH(NAME,MAXSIZE) \ + MOVD $MAXSIZE, R4; \ + CMP R3, R4; \ + BGT 3(PC); \ + MOVD $NAME(SB), R5; \ + BR (R5) +// Note: can't just "BR NAME(SB)" - bad inlining results. 
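+//
+// In Go terms the dispatch chain below behaves like (a hedged sketch):
+//
+//	switch {
+//	case frameSize <= 16:
+//		// jump to runtime·call16
+//	case frameSize <= 32:
+//		// jump to runtime·call32
+//	// ... doubling up to 1073741824 ...
+//	default:
+//		// jump to runtime·badreflectcall
+//	}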
+ +TEXT ·reflectcall(SB), NOSPLIT, $-8-48 + MOVWZ frameSize+32(FP), R3 + DISPATCH(runtime·call16, 16) + DISPATCH(runtime·call32, 32) + DISPATCH(runtime·call64, 64) + DISPATCH(runtime·call128, 128) + DISPATCH(runtime·call256, 256) + DISPATCH(runtime·call512, 512) + DISPATCH(runtime·call1024, 1024) + DISPATCH(runtime·call2048, 2048) + DISPATCH(runtime·call4096, 4096) + DISPATCH(runtime·call8192, 8192) + DISPATCH(runtime·call16384, 16384) + DISPATCH(runtime·call32768, 32768) + DISPATCH(runtime·call65536, 65536) + DISPATCH(runtime·call131072, 131072) + DISPATCH(runtime·call262144, 262144) + DISPATCH(runtime·call524288, 524288) + DISPATCH(runtime·call1048576, 1048576) + DISPATCH(runtime·call2097152, 2097152) + DISPATCH(runtime·call4194304, 4194304) + DISPATCH(runtime·call8388608, 8388608) + DISPATCH(runtime·call16777216, 16777216) + DISPATCH(runtime·call33554432, 33554432) + DISPATCH(runtime·call67108864, 67108864) + DISPATCH(runtime·call134217728, 134217728) + DISPATCH(runtime·call268435456, 268435456) + DISPATCH(runtime·call536870912, 536870912) + DISPATCH(runtime·call1073741824, 1073741824) + MOVD $runtime·badreflectcall(SB), R5 + BR (R5) + +#define CALLFN(NAME,MAXSIZE) \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ + NO_LOCAL_POINTERS; \ + /* copy arguments to stack */ \ + MOVD stackArgs+16(FP), R4; \ + MOVWZ stackArgsSize+24(FP), R5; \ + MOVD $stack-MAXSIZE(SP), R6; \ +loopArgs: /* copy 256 bytes at a time */ \ + CMP R5, $256; \ + BLT tailArgs; \ + SUB $256, R5; \ + MVC $256, 0(R4), 0(R6); \ + MOVD $256(R4), R4; \ + MOVD $256(R6), R6; \ + BR loopArgs; \ +tailArgs: /* copy remaining bytes */ \ + CMP R5, $0; \ + BEQ callFunction; \ + SUB $1, R5; \ + EXRL $callfnMVC<>(SB), R5; \ +callFunction: \ + MOVD f+8(FP), R12; \ + MOVD (R12), R8; \ + PCDATA $PCDATA_StackMapIndex, $0; \ + BL (R8); \ + /* copy return values back */ \ + MOVD stackArgsType+0(FP), R7; \ + MOVD stackArgs+16(FP), R6; \ + MOVWZ stackArgsSize+24(FP), R5; \ + MOVD $stack-MAXSIZE(SP), R4; \ + MOVWZ stackRetOffset+28(FP), R1; \ + ADD R1, R4; \ + ADD R1, R6; \ + SUB R1, R5; \ + BL callRet<>(SB); \ + RET + +// callRet copies return values back at the end of call*. This is a +// separate function so it can allocate stack space for the arguments +// to reflectcallmove. It does not follow the Go ABI; it expects its +// arguments in registers. +TEXT callRet<>(SB), NOSPLIT, $40-0 + MOVD R7, 8(R15) + MOVD R6, 16(R15) + MOVD R4, 24(R15) + MOVD R5, 32(R15) + MOVD $0, 40(R15) + BL runtime·reflectcallmove(SB) + RET + +CALLFN(·call16, 16) +CALLFN(·call32, 32) +CALLFN(·call64, 64) +CALLFN(·call128, 128) +CALLFN(·call256, 256) +CALLFN(·call512, 512) +CALLFN(·call1024, 1024) +CALLFN(·call2048, 2048) +CALLFN(·call4096, 4096) +CALLFN(·call8192, 8192) +CALLFN(·call16384, 16384) +CALLFN(·call32768, 32768) +CALLFN(·call65536, 65536) +CALLFN(·call131072, 131072) +CALLFN(·call262144, 262144) +CALLFN(·call524288, 524288) +CALLFN(·call1048576, 1048576) +CALLFN(·call2097152, 2097152) +CALLFN(·call4194304, 4194304) +CALLFN(·call8388608, 8388608) +CALLFN(·call16777216, 16777216) +CALLFN(·call33554432, 33554432) +CALLFN(·call67108864, 67108864) +CALLFN(·call134217728, 134217728) +CALLFN(·call268435456, 268435456) +CALLFN(·call536870912, 536870912) +CALLFN(·call1073741824, 1073741824) + +// Not a function: target for EXRL (execute relative long) instruction. +TEXT callfnMVC<>(SB),NOSPLIT|NOFRAME,$0-0 + MVC $1, 0(R4), 0(R6) + +TEXT runtime·procyield(SB),NOSPLIT,$0-0 + RET + +// Save state of caller into g->sched, +// but using fake PC from systemstack_switch. 
+// Must only be called from functions with no locals ($0) +// or else unwinding from systemstack_switch is incorrect. +// Smashes R1. +TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 + MOVD $runtime·systemstack_switch(SB), R1 + ADD $16, R1 // get past prologue + MOVD R1, (g_sched+gobuf_pc)(g) + MOVD R15, (g_sched+gobuf_sp)(g) + MOVD $0, (g_sched+gobuf_lr)(g) + MOVD $0, (g_sched+gobuf_ret)(g) + // Assert ctxt is zero. See func save. + MOVD (g_sched+gobuf_ctxt)(g), R1 + CMPBEQ R1, $0, 2(PC) + BL runtime·abort(SB) + RET + +// func asmcgocall(fn, arg unsafe.Pointer) int32 +// Call fn(arg) on the scheduler stack, +// aligned appropriately for the gcc ABI. +// See cgocall.go for more details. +TEXT ·asmcgocall(SB),NOSPLIT,$0-20 + // R2 = argc; R3 = argv; R11 = temp; R13 = g; R15 = stack pointer + // C TLS base pointer in AR0:AR1 + MOVD fn+0(FP), R3 + MOVD arg+8(FP), R4 + + MOVD R15, R2 // save original stack pointer + MOVD g, R5 + + // Figure out if we need to switch to m->g0 stack. + // We get called to create new OS threads too, and those + // come in on the m->g0 stack already. Or we might already + // be on the m->gsignal stack. + MOVD g_m(g), R6 + MOVD m_gsignal(R6), R7 + CMPBEQ R7, g, g0 + MOVD m_g0(R6), R7 + CMPBEQ R7, g, g0 + BL gosave_systemstack_switch<>(SB) + MOVD R7, g + BL runtime·save_g(SB) + MOVD (g_sched+gobuf_sp)(g), R15 + + // Now on a scheduling stack (a pthread-created stack). +g0: + // Save room for two of our pointers, plus 160 bytes of callee + // save area that lives on the caller stack. + SUB $176, R15 + MOVD $~7, R6 + AND R6, R15 // 8-byte alignment for gcc ABI + MOVD R5, 168(R15) // save old g on stack + MOVD (g_stack+stack_hi)(R5), R5 + SUB R2, R5 + MOVD R5, 160(R15) // save depth in old g stack (can't just save SP, as stack might be copied during a callback) + MOVD $0, 0(R15) // clear back chain pointer (TODO can we give it real back trace information?) + MOVD R4, R2 // arg in R2 + BL R3 // can clobber: R0-R5, R14, F0-F3, F5, F7-F15 + + XOR R0, R0 // set R0 back to 0. + // Restore g, stack pointer. + MOVD 168(R15), g + BL runtime·save_g(SB) + MOVD (g_stack+stack_hi)(g), R5 + MOVD 160(R15), R6 + SUB R6, R5 + MOVD R5, R15 + + MOVW R2, ret+16(FP) + RET + +// cgocallback(fn, frame unsafe.Pointer, ctxt uintptr) +// See cgocall.go for more details. +TEXT ·cgocallback(SB),NOSPLIT,$24-24 + NO_LOCAL_POINTERS + + // Skip cgocallbackg, just dropm when fn is nil, and frame is the saved g. + // It is used to dropm while thread is exiting. + MOVD fn+0(FP), R1 + CMPBNE R1, $0, loadg + // Restore the g from frame. + MOVD frame+8(FP), g + BR dropm + +loadg: + // Load m and g from thread-local storage. + MOVB runtime·iscgo(SB), R3 + CMPBEQ R3, $0, nocgo + BL runtime·load_g(SB) + +nocgo: + // If g is nil, Go did not create the current thread, + // or if this thread never called into Go on pthread platforms. + // Call needm to obtain one for temporary use. + // In this case, we're running on the thread stack, so there's + // lots of space, but the linker doesn't know. Hide the call from + // the linker analysis by using an indirect call. + CMPBEQ g, $0, needm + + MOVD g_m(g), R8 + MOVD R8, savedm-8(SP) + BR havem + +needm: + MOVD g, savedm-8(SP) // g is zero, so is m. + MOVD $runtime·needAndBindM(SB), R3 + BL (R3) + + // Set m->sched.sp = SP, so that if a panic happens + // during the function we are about to execute, it will + // have a valid SP to run on the g0 stack. 
+ // The next few lines (after the havem label) + // will save this SP onto the stack and then write + // the same SP back to m->sched.sp. That seems redundant, + // but if an unrecovered panic happens, unwindm will + // restore the g->sched.sp from the stack location + // and then systemstack will try to use it. If we don't set it here, + // that restored SP will be uninitialized (typically 0) and + // will not be usable. + MOVD g_m(g), R8 + MOVD m_g0(R8), R3 + MOVD R15, (g_sched+gobuf_sp)(R3) + +havem: + // Now there's a valid m, and we're running on its m->g0. + // Save current m->g0->sched.sp on stack and then set it to SP. + // Save current sp in m->g0->sched.sp in preparation for + // switch back to m->curg stack. + // NOTE: unwindm knows that the saved g->sched.sp is at 8(R1) aka savedsp-16(SP). + MOVD m_g0(R8), R3 + MOVD (g_sched+gobuf_sp)(R3), R4 + MOVD R4, savedsp-24(SP) // must match frame size + MOVD R15, (g_sched+gobuf_sp)(R3) + + // Switch to m->curg stack and call runtime.cgocallbackg. + // Because we are taking over the execution of m->curg + // but *not* resuming what had been running, we need to + // save that information (m->curg->sched) so we can restore it. + // We can restore m->curg->sched.sp easily, because calling + // runtime.cgocallbackg leaves SP unchanged upon return. + // To save m->curg->sched.pc, we push it onto the curg stack and + // open a frame the same size as cgocallback's g0 frame. + // Once we switch to the curg stack, the pushed PC will appear + // to be the return PC of cgocallback, so that the traceback + // will seamlessly trace back into the earlier calls. + MOVD m_curg(R8), g + BL runtime·save_g(SB) + MOVD (g_sched+gobuf_sp)(g), R4 // prepare stack as R4 + MOVD (g_sched+gobuf_pc)(g), R5 + MOVD R5, -(24+8)(R4) // "saved LR"; must match frame size + // Gather our arguments into registers. + MOVD fn+0(FP), R1 + MOVD frame+8(FP), R2 + MOVD ctxt+16(FP), R3 + MOVD $-(24+8)(R4), R15 // switch stack; must match frame size + MOVD R1, 8(R15) + MOVD R2, 16(R15) + MOVD R3, 24(R15) + BL runtime·cgocallbackg(SB) + + // Restore g->sched (== m->curg->sched) from saved values. + MOVD 0(R15), R5 + MOVD R5, (g_sched+gobuf_pc)(g) + MOVD $(24+8)(R15), R4 // must match frame size + MOVD R4, (g_sched+gobuf_sp)(g) + + // Switch back to m->g0's stack and restore m->g0->sched.sp. + // (Unlike m->curg, the g0 goroutine never uses sched.pc, + // so we do not have to restore it.) + MOVD g_m(g), R8 + MOVD m_g0(R8), g + BL runtime·save_g(SB) + MOVD (g_sched+gobuf_sp)(g), R15 + MOVD savedsp-24(SP), R4 // must match frame size + MOVD R4, (g_sched+gobuf_sp)(g) + + // If the m on entry was nil, we called needm above to borrow an m, + // 1. for the duration of the call on non-pthread platforms, + // 2. or the duration of the C thread alive on pthread platforms. + // If the m on entry wasn't nil, + // 1. the thread might be a Go thread, + // 2. or it wasn't the first call from a C thread on pthread platforms, + // since then we skip dropm to reuse the m in the first call. + MOVD savedm-8(SP), R6 + CMPBNE R6, $0, droppedm + + // Skip dropm to reuse it in the next call, when a pthread key has been created. + MOVD _cgo_pthread_key_created(SB), R6 + // It means cgo is disabled when _cgo_pthread_key_created is a nil pointer, need dropm. + CMPBEQ R6, $0, dropm + MOVD (R6), R6 + CMPBNE R6, $0, droppedm + +dropm: + MOVD $runtime·dropm(SB), R3 + BL (R3) +droppedm: + + // Done! + RET + +// void setg(G*); set g. for use by needm. 
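+// Pure Go execution never needs this: g lives in its dedicated register
+// (R13 on s390x) and is only re-seeded from TLS on entries from C code.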
+TEXT runtime·setg(SB), NOSPLIT, $0-8 + MOVD gg+0(FP), g + // This only happens if iscgo, so jump straight to save_g + BL runtime·save_g(SB) + RET + +// void setg_gcc(G*); set g in C TLS. +// Must obey the gcc calling convention. +TEXT setg_gcc<>(SB),NOSPLIT|NOFRAME,$0-0 + // The standard prologue clobbers LR (R14), which is callee-save in + // the C ABI, so we have to use NOFRAME and save LR ourselves. + MOVD LR, R1 + // Also save g, R10, and R11 since they're callee-save in C ABI + MOVD R10, R3 + MOVD g, R4 + MOVD R11, R5 + + MOVD R2, g + BL runtime·save_g(SB) + + MOVD R5, R11 + MOVD R4, g + MOVD R3, R10 + MOVD R1, LR + RET + +TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0 + MOVW (R0), R0 + UNDEF + +// int64 runtime·cputicks(void) +TEXT runtime·cputicks(SB),NOSPLIT,$0-8 + // The TOD clock on s390 counts from the year 1900 in ~250ps intervals. + // This means that since about 1972 the msb has been set, making the + // result of a call to STORE CLOCK (stck) a negative number. + // We clear the msb to make it positive. + STCK ret+0(FP) // serialises before and after call + MOVD ret+0(FP), R3 // R3 will wrap to 0 in the year 2043 + SLD $1, R3 + SRD $1, R3 + MOVD R3, ret+0(FP) + RET + +// AES hashing not implemented for s390x +TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32 + JMP runtime·memhashFallback(SB) +TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·strhashFallback(SB) +TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·memhash32Fallback(SB) +TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·memhash64Fallback(SB) + +TEXT runtime·return0(SB), NOSPLIT, $0 + MOVW $0, R3 + RET + +// Called from cgo wrappers, this function returns g->m->curg.stack.hi. +// Must obey the gcc calling convention. +TEXT _cgo_topofstack(SB),NOSPLIT|NOFRAME,$0 + // g (R13), R10, R11 and LR (R14) are callee-save in the C ABI, so save them + MOVD g, R1 + MOVD R10, R3 + MOVD LR, R4 + MOVD R11, R5 + + BL runtime·load_g(SB) // clobbers g (R13), R10, R11 + MOVD g_m(g), R2 + MOVD m_curg(R2), R2 + MOVD (g_stack+stack_hi)(R2), R2 + + MOVD R1, g + MOVD R3, R10 + MOVD R4, LR + MOVD R5, R11 + RET + +// The top-most function running on a goroutine +// returns to goexit+PCQuantum. +TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0 + BYTE $0x07; BYTE $0x00; // 2-byte nop + BL runtime·goexit1(SB) // does not return + // traceback from goexit1 must hit code range of goexit + BYTE $0x07; BYTE $0x00; // 2-byte nop + +TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0 + // Stores are already ordered on s390x, so this is just a + // compile barrier. + RET + +// This is called from .init_array and follows the platform, not Go, ABI. +// We are overly conservative. We could only save the registers we use. +// However, since this function is only called once per loaded module +// performance is unimportant. +TEXT runtime·addmoduledata(SB),NOSPLIT|NOFRAME,$0-0 + // Save R6-R15 in the register save area of the calling function. + // Don't bother saving F8-F15 as we aren't doing any calls. + STMG R6, R15, 48(R15) + + // append the argument (passed in R2, as per the ELF ABI) to the + // moduledata linked list. + MOVD runtime·lastmoduledatap(SB), R1 + MOVD R2, moduledata_next(R1) + MOVD R2, runtime·lastmoduledatap(SB) + + // Restore R6-R15. + LMG 48(R15), R6, R15 + RET + +TEXT ·checkASM(SB),NOSPLIT,$0-1 + MOVB $1, ret+0(FP) + RET + +// gcWriteBarrier informs the GC about heap pointer writes. +// +// gcWriteBarrier does NOT follow the Go ABI. 
It accepts the +// number of bytes of buffer needed in R9, and returns a pointer +// to the buffer space in R9. +// It clobbers R10 (the temp register) and R1 (used by PLT stub). +// It does not clobber any other general-purpose registers, +// but may clobber others (e.g., floating point registers). +TEXT gcWriteBarrier<>(SB),NOSPLIT,$96 + // Save the registers clobbered by the fast path. + MOVD R4, 96(R15) +retry: + MOVD g_m(g), R1 + MOVD m_p(R1), R1 + // Increment wbBuf.next position. + MOVD R9, R4 + ADD (p_wbBuf+wbBuf_next)(R1), R4 + // Is the buffer full? + MOVD (p_wbBuf+wbBuf_end)(R1), R10 + CMPUBGT R4, R10, flush + // Commit to the larger buffer. + MOVD R4, (p_wbBuf+wbBuf_next)(R1) + // Make return value (the original next position) + SUB R9, R4, R9 + // Restore registers. + MOVD 96(R15), R4 + RET + +flush: + // Save all general purpose registers since these could be + // clobbered by wbBufFlush and were not saved by the caller. + STMG R2, R3, 8(R15) + MOVD R0, 24(R15) + // R1 already saved. + // R4 already saved. + STMG R5, R12, 32(R15) // save R5 - R12 + // R13 is g. + // R14 is LR. + // R15 is SP. + + CALL runtime·wbBufFlush(SB) + + LMG 8(R15), R2, R3 // restore R2 - R3 + MOVD 24(R15), R0 // restore R0 + LMG 32(R15), R5, R12 // restore R5 - R12 + JMP retry + +TEXT runtime·gcWriteBarrier1(SB),NOSPLIT,$0 + MOVD $8, R9 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier2(SB),NOSPLIT,$0 + MOVD $16, R9 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier3(SB),NOSPLIT,$0 + MOVD $24, R9 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier4(SB),NOSPLIT,$0 + MOVD $32, R9 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier5(SB),NOSPLIT,$0 + MOVD $40, R9 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier6(SB),NOSPLIT,$0 + MOVD $48, R9 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier7(SB),NOSPLIT,$0 + MOVD $56, R9 + JMP gcWriteBarrier<>(SB) +TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0 + MOVD $64, R9 + JMP gcWriteBarrier<>(SB) + +// Note: these functions use a special calling convention to save generated code space. +// Arguments are passed in registers, but the space for those arguments are allocated +// in the caller's stack frame. These stubs write the args into that stack space and +// then tail call to the corresponding runtime handler. +// The tail call makes these stubs disappear in backtraces. 
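+//
+// The Go-side handlers they jump to have plain Go signatures, e.g. (from
+// the argument spills below):
+//
+//	func goPanicIndex(x int, y int) // x = offending index, y = length
+//
+// so each stub only stores x and y into its frame before the tail call.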
+TEXT runtime·panicIndex(SB),NOSPLIT,$0-16 + MOVD R0, x+0(FP) + MOVD R1, y+8(FP) + JMP runtime·goPanicIndex(SB) +TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16 + MOVD R0, x+0(FP) + MOVD R1, y+8(FP) + JMP runtime·goPanicIndexU(SB) +TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16 + MOVD R1, x+0(FP) + MOVD R2, y+8(FP) + JMP runtime·goPanicSliceAlen(SB) +TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16 + MOVD R1, x+0(FP) + MOVD R2, y+8(FP) + JMP runtime·goPanicSliceAlenU(SB) +TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16 + MOVD R1, x+0(FP) + MOVD R2, y+8(FP) + JMP runtime·goPanicSliceAcap(SB) +TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16 + MOVD R1, x+0(FP) + MOVD R2, y+8(FP) + JMP runtime·goPanicSliceAcapU(SB) +TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16 + MOVD R0, x+0(FP) + MOVD R1, y+8(FP) + JMP runtime·goPanicSliceB(SB) +TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16 + MOVD R0, x+0(FP) + MOVD R1, y+8(FP) + JMP runtime·goPanicSliceBU(SB) +TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16 + MOVD R2, x+0(FP) + MOVD R3, y+8(FP) + JMP runtime·goPanicSlice3Alen(SB) +TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16 + MOVD R2, x+0(FP) + MOVD R3, y+8(FP) + JMP runtime·goPanicSlice3AlenU(SB) +TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16 + MOVD R2, x+0(FP) + MOVD R3, y+8(FP) + JMP runtime·goPanicSlice3Acap(SB) +TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16 + MOVD R2, x+0(FP) + MOVD R3, y+8(FP) + JMP runtime·goPanicSlice3AcapU(SB) +TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16 + MOVD R1, x+0(FP) + MOVD R2, y+8(FP) + JMP runtime·goPanicSlice3B(SB) +TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16 + MOVD R1, x+0(FP) + MOVD R2, y+8(FP) + JMP runtime·goPanicSlice3BU(SB) +TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16 + MOVD R0, x+0(FP) + MOVD R1, y+8(FP) + JMP runtime·goPanicSlice3C(SB) +TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16 + MOVD R0, x+0(FP) + MOVD R1, y+8(FP) + JMP runtime·goPanicSlice3CU(SB) +TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16 + MOVD R2, x+0(FP) + MOVD R3, y+8(FP) + JMP runtime·goPanicSliceConvert(SB) diff --git a/platform/dbops/binaries/go/go/src/runtime/asm_wasm.s b/platform/dbops/binaries/go/go/src/runtime/asm_wasm.s new file mode 100644 index 0000000000000000000000000000000000000000..b44a4f7dd4983d0319d08ceb1b953f594a851187 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/asm_wasm.s @@ -0,0 +1,558 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "go_asm.h" +#include "go_tls.h" +#include "funcdata.h" +#include "textflag.h" + +TEXT runtime·rt0_go(SB), NOSPLIT|NOFRAME|TOPFRAME, $0 + // save m->g0 = g0 + MOVD $runtime·g0(SB), runtime·m0+m_g0(SB) + // save m0 to g0->m + MOVD $runtime·m0(SB), runtime·g0+g_m(SB) + // set g to g0 + MOVD $runtime·g0(SB), g + CALLNORESUME runtime·check(SB) +#ifdef GOOS_js + CALLNORESUME runtime·args(SB) +#endif + CALLNORESUME runtime·osinit(SB) + CALLNORESUME runtime·schedinit(SB) + MOVD $runtime·mainPC(SB), 0(SP) + CALLNORESUME runtime·newproc(SB) + CALL runtime·mstart(SB) // WebAssembly stack will unwind when switching to another goroutine + UNDEF + +TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0 + CALL runtime·mstart0(SB) + RET // not reached + +DATA runtime·mainPC+0(SB)/8,$runtime·main(SB) +GLOBL runtime·mainPC(SB),RODATA,$8 + +// func checkASM() bool +TEXT ·checkASM(SB), NOSPLIT, $0-1 + MOVB $1, ret+0(FP) + RET + +TEXT runtime·gogo(SB), NOSPLIT, $0-8 + MOVD buf+0(FP), R0 + MOVD gobuf_g(R0), R1 + MOVD 0(R1), R2 // make sure g != nil + MOVD R1, g + MOVD gobuf_sp(R0), SP + + // Put target PC at -8(SP), wasm_pc_f_loop will pick it up + Get SP + I32Const $8 + I32Sub + I64Load gobuf_pc(R0) + I64Store $0 + + MOVD gobuf_ret(R0), RET0 + MOVD gobuf_ctxt(R0), CTXT + // clear to help garbage collector + MOVD $0, gobuf_sp(R0) + MOVD $0, gobuf_ret(R0) + MOVD $0, gobuf_ctxt(R0) + + I32Const $1 + Return + +// func mcall(fn func(*g)) +// Switch to m->g0's stack, call fn(g). +// Fn must never return. It should gogo(&g->sched) +// to keep running g. +TEXT runtime·mcall(SB), NOSPLIT, $0-8 + // CTXT = fn + MOVD fn+0(FP), CTXT + // R1 = g.m + MOVD g_m(g), R1 + // R2 = g0 + MOVD m_g0(R1), R2 + + // save state in g->sched + MOVD 0(SP), g_sched+gobuf_pc(g) // caller's PC + MOVD $fn+0(FP), g_sched+gobuf_sp(g) // caller's SP + + // if g == g0 call badmcall + Get g + Get R2 + I64Eq + If + JMP runtime·badmcall(SB) + End + + // switch to g0's stack + I64Load (g_sched+gobuf_sp)(R2) + I64Const $8 + I64Sub + I32WrapI64 + Set SP + + // set arg to current g + MOVD g, 0(SP) + + // switch to g0 + MOVD R2, g + + // call fn + Get CTXT + I32WrapI64 + I64Load $0 + CALL + + Get SP + I32Const $8 + I32Add + Set SP + + JMP runtime·badmcall2(SB) + +// func systemstack(fn func()) +TEXT runtime·systemstack(SB), NOSPLIT, $0-8 + // R0 = fn + MOVD fn+0(FP), R0 + // R1 = g.m + MOVD g_m(g), R1 + // R2 = g0 + MOVD m_g0(R1), R2 + + // if g == g0 + Get g + Get R2 + I64Eq + If + // no switch: + MOVD R0, CTXT + + Get CTXT + I32WrapI64 + I64Load $0 + JMP + End + + // if g != m.curg + Get g + I64Load m_curg(R1) + I64Ne + If + CALLNORESUME runtime·badsystemstack(SB) + CALLNORESUME runtime·abort(SB) + End + + // switch: + + // save state in g->sched. Pretend to + // be systemstack_switch if the G stack is scanned. 
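+ // Editorial note (not in the original source): the Go-level contract is + // roughly + // + //	systemstack(func() { + //		// runs on g0's stack; must not grow the user goroutine's stack + //	}) + // + // and the gobuf stored just below is what lets traceback and the GC + // still walk the calling goroutine while fn runs on g0.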
+ MOVD $runtime·systemstack_switch(SB), g_sched+gobuf_pc(g) + + MOVD SP, g_sched+gobuf_sp(g) + + // switch to g0 + MOVD R2, g + + // make it look like mstart called systemstack on g0, to stop traceback + I64Load (g_sched+gobuf_sp)(R2) + I64Const $8 + I64Sub + Set R3 + + MOVD $runtime·mstart(SB), 0(R3) + MOVD R3, SP + + // call fn + MOVD R0, CTXT + + Get CTXT + I32WrapI64 + I64Load $0 + CALL + + // switch back to g + MOVD g_m(g), R1 + MOVD m_curg(R1), R2 + MOVD R2, g + MOVD g_sched+gobuf_sp(R2), SP + MOVD $0, g_sched+gobuf_sp(R2) + RET + +TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0 + RET + +TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0 + UNDEF + +// AES hashing not implemented for wasm +TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32 + JMP runtime·memhashFallback(SB) +TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·strhashFallback(SB) +TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·memhash32Fallback(SB) +TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·memhash64Fallback(SB) + +TEXT runtime·return0(SB), NOSPLIT, $0-0 + MOVD $0, RET0 + RET + +TEXT runtime·asminit(SB), NOSPLIT, $0-0 + // No per-thread init. + RET + +TEXT ·publicationBarrier(SB), NOSPLIT, $0-0 + RET + +TEXT runtime·procyield(SB), NOSPLIT, $0-0 // FIXME + RET + +TEXT runtime·breakpoint(SB), NOSPLIT, $0-0 + UNDEF + +// func switchToCrashStack0(fn func()) +TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-8 + MOVD fn+0(FP), CTXT // context register + MOVD g_m(g), R2 // curm + + // set g to gcrash + MOVD $runtime·gcrash(SB), g // g = &gcrash + MOVD R2, g_m(g) // g.m = curm + MOVD g, m_g0(R2) // curm.g0 = g + + // switch to crashstack + I64Load (g_stack+stack_hi)(g) + I64Const $(-4*8) + I64Add + I32WrapI64 + Set SP + + // call target function + Get CTXT + I32WrapI64 + I64Load $0 + CALL + + // should never return + CALL runtime·abort(SB) + UNDEF + +// Called during function prolog when more stack is needed. +// +// The traceback routines see morestack on a g0 as being +// the top of a stack (for example, morestack calling newstack +// calling the scheduler calling newm calling gc), so we must +// record an argument size. For that purpose, it has no arguments. +TEXT runtime·morestack(SB), NOSPLIT, $0-0 + // R1 = g.m + MOVD g_m(g), R1 + + // R2 = g0 + MOVD m_g0(R1), R2 + + // Set g->sched to context in f. + NOP SP // tell vet SP changed - stop checking offsets + MOVD 0(SP), g_sched+gobuf_pc(g) + MOVD $8(SP), g_sched+gobuf_sp(g) // f's SP + MOVD CTXT, g_sched+gobuf_ctxt(g) + + // Cannot grow scheduler stack (m->g0). + Get g + Get R2 + I64Eq + If + CALLNORESUME runtime·badmorestackg0(SB) + CALLNORESUME runtime·abort(SB) + End + + // Cannot grow signal stack (m->gsignal). + Get g + I64Load m_gsignal(R1) + I64Eq + If + CALLNORESUME runtime·badmorestackgsignal(SB) + CALLNORESUME runtime·abort(SB) + End + + // Called from f. + // Set m->morebuf to f's caller. + MOVD 8(SP), m_morebuf+gobuf_pc(R1) + MOVD $16(SP), m_morebuf+gobuf_sp(R1) // f's caller's SP + MOVD g, m_morebuf+gobuf_g(R1) + + // Call newstack on m->g0's stack. + MOVD R2, g + MOVD g_sched+gobuf_sp(R2), SP + CALL runtime·newstack(SB) + UNDEF // crash if newstack returns + +// morestack but not preserving ctxt. 
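+// +// A hedged sketch of the overall stack-growth protocol (editorial note, not +// in the original source): +// +//	function prologue: SP below g.stackguard0 +//	  -> morestack / morestack_noctxt (saves f's PC/SP/ctxt into g.sched) +//	  -> newstack on g0 (allocates a larger stack and copies the frames) +//	  -> gogo(&g.sched) (restarts f's prologue on the new stack)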
+TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0 + MOVD $0, CTXT + JMP runtime·morestack(SB) + +TEXT ·asmcgocall(SB), NOSPLIT, $0-0 + UNDEF + +#define DISPATCH(NAME, MAXSIZE) \ + Get R0; \ + I64Const $MAXSIZE; \ + I64LeU; \ + If; \ + JMP NAME(SB); \ + End + +TEXT ·reflectcall(SB), NOSPLIT, $0-48 + I64Load fn+8(FP) + I64Eqz + If + CALLNORESUME runtime·sigpanic(SB) + End + + MOVW frameSize+32(FP), R0 + + DISPATCH(runtime·call16, 16) + DISPATCH(runtime·call32, 32) + DISPATCH(runtime·call64, 64) + DISPATCH(runtime·call128, 128) + DISPATCH(runtime·call256, 256) + DISPATCH(runtime·call512, 512) + DISPATCH(runtime·call1024, 1024) + DISPATCH(runtime·call2048, 2048) + DISPATCH(runtime·call4096, 4096) + DISPATCH(runtime·call8192, 8192) + DISPATCH(runtime·call16384, 16384) + DISPATCH(runtime·call32768, 32768) + DISPATCH(runtime·call65536, 65536) + DISPATCH(runtime·call131072, 131072) + DISPATCH(runtime·call262144, 262144) + DISPATCH(runtime·call524288, 524288) + DISPATCH(runtime·call1048576, 1048576) + DISPATCH(runtime·call2097152, 2097152) + DISPATCH(runtime·call4194304, 4194304) + DISPATCH(runtime·call8388608, 8388608) + DISPATCH(runtime·call16777216, 16777216) + DISPATCH(runtime·call33554432, 33554432) + DISPATCH(runtime·call67108864, 67108864) + DISPATCH(runtime·call134217728, 134217728) + DISPATCH(runtime·call268435456, 268435456) + DISPATCH(runtime·call536870912, 536870912) + DISPATCH(runtime·call1073741824, 1073741824) + JMP runtime·badreflectcall(SB) + +#define CALLFN(NAME, MAXSIZE) \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ + NO_LOCAL_POINTERS; \ + MOVW stackArgsSize+24(FP), R0; \ + \ + Get R0; \ + I64Eqz; \ + Not; \ + If; \ + Get SP; \ + I64Load stackArgs+16(FP); \ + I32WrapI64; \ + I64Load stackArgsSize+24(FP); \ + I32WrapI64; \ + MemoryCopy; \ + End; \ + \ + MOVD f+8(FP), CTXT; \ + Get CTXT; \ + I32WrapI64; \ + I64Load $0; \ + CALL; \ + \ + I64Load32U stackRetOffset+28(FP); \ + Set R0; \ + \ + MOVD stackArgsType+0(FP), RET0; \ + \ + I64Load stackArgs+16(FP); \ + Get R0; \ + I64Add; \ + Set RET1; \ + \ + Get SP; \ + I64ExtendI32U; \ + Get R0; \ + I64Add; \ + Set RET2; \ + \ + I64Load32U stackArgsSize+24(FP); \ + Get R0; \ + I64Sub; \ + Set RET3; \ + \ + CALL callRet<>(SB); \ + RET + +// callRet copies return values back at the end of call*. This is a +// separate function so it can allocate stack space for the arguments +// to reflectcallmove. It does not follow the Go ABI; it expects its +// arguments in registers. 
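+// +// Roughly (an editorial sketch, not in the original source), the register +// protocol set up by CALLFN above is: +// +//	RET0 = stackArgsType +//	RET1 = stackArgs + stackRetOffset (destination for the results) +//	RET2 = SP + stackRetOffset (the results as written by the callee) +//	RET3 = stackArgsSize - stackRetOffset (number of result bytes) +// +// callRet spills these into its own frame and calls reflectcallmove, which +// performs the typed copy with the required write barriers.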
+TEXT callRet<>(SB), NOSPLIT, $40-0 + NO_LOCAL_POINTERS + MOVD RET0, 0(SP) + MOVD RET1, 8(SP) + MOVD RET2, 16(SP) + MOVD RET3, 24(SP) + MOVD $0, 32(SP) + CALL runtime·reflectcallmove(SB) + RET + +CALLFN(·call16, 16) +CALLFN(·call32, 32) +CALLFN(·call64, 64) +CALLFN(·call128, 128) +CALLFN(·call256, 256) +CALLFN(·call512, 512) +CALLFN(·call1024, 1024) +CALLFN(·call2048, 2048) +CALLFN(·call4096, 4096) +CALLFN(·call8192, 8192) +CALLFN(·call16384, 16384) +CALLFN(·call32768, 32768) +CALLFN(·call65536, 65536) +CALLFN(·call131072, 131072) +CALLFN(·call262144, 262144) +CALLFN(·call524288, 524288) +CALLFN(·call1048576, 1048576) +CALLFN(·call2097152, 2097152) +CALLFN(·call4194304, 4194304) +CALLFN(·call8388608, 8388608) +CALLFN(·call16777216, 16777216) +CALLFN(·call33554432, 33554432) +CALLFN(·call67108864, 67108864) +CALLFN(·call134217728, 134217728) +CALLFN(·call268435456, 268435456) +CALLFN(·call536870912, 536870912) +CALLFN(·call1073741824, 1073741824) + +TEXT runtime·goexit(SB), NOSPLIT|TOPFRAME, $0-0 + NOP // first PC of goexit is skipped + CALL runtime·goexit1(SB) // does not return + UNDEF + +TEXT runtime·cgocallback(SB), NOSPLIT, $0-24 + UNDEF + +// gcWriteBarrier informs the GC about heap pointer writes. +// +// gcWriteBarrier does NOT follow the Go ABI. It accepts the +// number of bytes of buffer needed as a wasm argument +// (put on the TOS by the caller, lives in local R0 in this body) +// and returns a pointer to the buffer space as a wasm result +// (left on the TOS in this body, appears on the wasm stack +// in the caller). +TEXT gcWriteBarrier<>(SB), NOSPLIT, $0 + Loop + // R3 = g.m + MOVD g_m(g), R3 + // R4 = p + MOVD m_p(R3), R4 + // R5 = wbBuf.next + MOVD p_wbBuf+wbBuf_next(R4), R5 + + // Increment wbBuf.next + Get R5 + Get R0 + I64Add + Set R5 + + // Is the buffer full? + Get R5 + I64Load (p_wbBuf+wbBuf_end)(R4) + I64LeU + If + // Commit to the larger buffer. + MOVD R5, p_wbBuf+wbBuf_next(R4) + + // Make return value (the original next position) + Get R5 + Get R0 + I64Sub + + Return + End + + // Flush + CALLNORESUME runtime·wbBufFlush(SB) + + // Retry + Br $0 + End + +TEXT runtime·gcWriteBarrier1(SB),NOSPLIT,$0 + I64Const $8 + Call gcWriteBarrier<>(SB) + Return +TEXT runtime·gcWriteBarrier2(SB),NOSPLIT,$0 + I64Const $16 + Call gcWriteBarrier<>(SB) + Return +TEXT runtime·gcWriteBarrier3(SB),NOSPLIT,$0 + I64Const $24 + Call gcWriteBarrier<>(SB) + Return +TEXT runtime·gcWriteBarrier4(SB),NOSPLIT,$0 + I64Const $32 + Call gcWriteBarrier<>(SB) + Return +TEXT runtime·gcWriteBarrier5(SB),NOSPLIT,$0 + I64Const $40 + Call gcWriteBarrier<>(SB) + Return +TEXT runtime·gcWriteBarrier6(SB),NOSPLIT,$0 + I64Const $48 + Call gcWriteBarrier<>(SB) + Return +TEXT runtime·gcWriteBarrier7(SB),NOSPLIT,$0 + I64Const $56 + Call gcWriteBarrier<>(SB) + Return +TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0 + I64Const $64 + Call gcWriteBarrier<>(SB) + Return + +TEXT wasm_pc_f_loop(SB),NOSPLIT,$0 +// Call the function for the current PC_F. Repeat until PAUSE != 0 indicates pause or exit. +// The WebAssembly stack may unwind, e.g. when switching goroutines. +// The Go stack on the linear memory is then used to jump to the correct functions +// with this loop, without having to restore the full WebAssembly stack. +// It is expected to have a pending call before entering the loop, so check PAUSE first. 
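+// In Go-like pseudocode (an added sketch, not in the original source): +// +//	if PAUSE == 0 { +//		for { +//			f := table[PC_F] // PC_F and PC_B live at -8(SP) +//			f(PC_B) +//			if PAUSE != 0 { +//				break +//			} +//		} +//	} +//	PAUSE = 0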
+ Get PAUSE + I32Eqz + If + loop: + Loop + // Get PC_B & PC_F from -8(SP) + Get SP + I32Const $8 + I32Sub + I32Load16U $0 // PC_B + + Get SP + I32Const $8 + I32Sub + I32Load16U $2 // PC_F + + CallIndirect $0 + Drop + + Get PAUSE + I32Eqz + BrIf loop + End + End + + I32Const $0 + Set PAUSE + + Return + +TEXT wasm_export_lib(SB),NOSPLIT,$0 + UNDEF diff --git a/platform/dbops/binaries/go/go/src/runtime/atomic_arm64.s b/platform/dbops/binaries/go/go/src/runtime/atomic_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..21b4d8ccd5951c9b0452a848ad0616b3683715d5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/atomic_arm64.s @@ -0,0 +1,9 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0 + DMB $0xe // DMB ST + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/atomic_loong64.s b/platform/dbops/binaries/go/go/src/runtime/atomic_loong64.s new file mode 100644 index 0000000000000000000000000000000000000000..4818a827de4d20a90e1e8d7bf55e4085c100ba52 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/atomic_loong64.s @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0 + DBAR + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/atomic_mips64x.s b/platform/dbops/binaries/go/go/src/runtime/atomic_mips64x.s new file mode 100644 index 0000000000000000000000000000000000000000..dd6380ce409e51e4b7076e060a0dd244a1ea70de --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/atomic_mips64x.s @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le + +#include "textflag.h" + +#define SYNC WORD $0xf + +TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0 + SYNC + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/atomic_mipsx.s b/platform/dbops/binaries/go/go/src/runtime/atomic_mipsx.s new file mode 100644 index 0000000000000000000000000000000000000000..ac255fe7e626382265c4f040c32c62916177d818 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/atomic_mipsx.s @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips || mipsle + +#include "textflag.h" + +TEXT ·publicationBarrier(SB),NOSPLIT,$0 + SYNC + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/atomic_pointer.go b/platform/dbops/binaries/go/go/src/runtime/atomic_pointer.go new file mode 100644 index 0000000000000000000000000000000000000000..b61bf0b8b2a193fb0a8fdb15c6808a183b77646f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/atomic_pointer.go @@ -0,0 +1,114 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/goexperiment" + "runtime/internal/atomic" + "unsafe" +) + +// These functions cannot have go:noescape annotations, +// because while ptr does not escape, new does. 
+// If new is marked as not escaping, the compiler will make incorrect +// escape analysis decisions about the pointer value being stored. + +// atomicwb performs a write barrier before an atomic pointer write. +// The caller should guard the call with "if writeBarrier.enabled". +// +//go:nosplit +func atomicwb(ptr *unsafe.Pointer, new unsafe.Pointer) { + slot := (*uintptr)(unsafe.Pointer(ptr)) + buf := getg().m.p.ptr().wbBuf.get2() + buf[0] = *slot + buf[1] = uintptr(new) +} + +// atomicstorep performs *ptr = new atomically and invokes a write barrier. +// +//go:nosplit +func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) { + if writeBarrier.enabled { + atomicwb((*unsafe.Pointer)(ptr), new) + } + if goexperiment.CgoCheck2 { + cgoCheckPtrWrite((*unsafe.Pointer)(ptr), new) + } + atomic.StorepNoWB(noescape(ptr), new) +} + +// atomic_storePointer is the implementation of runtime/internal/atomic.UnsafePointer.Store +// (like StoreNoWB but with the write barrier). +// +//go:nosplit +//go:linkname atomic_storePointer runtime/internal/atomic.storePointer +func atomic_storePointer(ptr *unsafe.Pointer, new unsafe.Pointer) { + atomicstorep(unsafe.Pointer(ptr), new) +} + +// atomic_casPointer is the implementation of runtime/internal/atomic.UnsafePointer.CompareAndSwap +// (like CompareAndSwapNoWB but with the write barrier). +// +//go:nosplit +//go:linkname atomic_casPointer runtime/internal/atomic.casPointer +func atomic_casPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool { + if writeBarrier.enabled { + atomicwb(ptr, new) + } + if goexperiment.CgoCheck2 { + cgoCheckPtrWrite(ptr, new) + } + return atomic.Casp1(ptr, old, new) +} + +// Like above, but implement in terms of sync/atomic's uintptr operations. +// We cannot just call the runtime routines, because the race detector expects +// to be able to intercept the sync/atomic forms but not the runtime forms.
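+// +// For orientation (an editorial sketch, not part of the original file): a +// user-level store such as +// +//	var p unsafe.Pointer +//	atomic.StorePointer(&p, unsafe.Pointer(new(int))) // package sync/atomic +// +// lands in sync_atomic_StorePointer below via go:linkname, which runs the +// write barrier (and the cgocheck instrumentation, if enabled) before +// delegating to the uintptr store that the race detector can intercept.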
+ +//go:linkname sync_atomic_StoreUintptr sync/atomic.StoreUintptr +func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr) + +//go:linkname sync_atomic_StorePointer sync/atomic.StorePointer +//go:nosplit +func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) { + if writeBarrier.enabled { + atomicwb(ptr, new) + } + if goexperiment.CgoCheck2 { + cgoCheckPtrWrite(ptr, new) + } + sync_atomic_StoreUintptr((*uintptr)(unsafe.Pointer(ptr)), uintptr(new)) +} + +//go:linkname sync_atomic_SwapUintptr sync/atomic.SwapUintptr +func sync_atomic_SwapUintptr(ptr *uintptr, new uintptr) uintptr + +//go:linkname sync_atomic_SwapPointer sync/atomic.SwapPointer +//go:nosplit +func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer { + if writeBarrier.enabled { + atomicwb(ptr, new) + } + if goexperiment.CgoCheck2 { + cgoCheckPtrWrite(ptr, new) + } + old := unsafe.Pointer(sync_atomic_SwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(new))) + return old +} + +//go:linkname sync_atomic_CompareAndSwapUintptr sync/atomic.CompareAndSwapUintptr +func sync_atomic_CompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool + +//go:linkname sync_atomic_CompareAndSwapPointer sync/atomic.CompareAndSwapPointer +//go:nosplit +func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool { + if writeBarrier.enabled { + atomicwb(ptr, new) + } + if goexperiment.CgoCheck2 { + cgoCheckPtrWrite(ptr, new) + } + return sync_atomic_CompareAndSwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(old), uintptr(new)) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/atomic_ppc64x.s b/platform/dbops/binaries/go/go/src/runtime/atomic_ppc64x.s new file mode 100644 index 0000000000000000000000000000000000000000..4742b6cf56789de50d90b5960f16301e06899482 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/atomic_ppc64x.s @@ -0,0 +1,14 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +#include "textflag.h" + +TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0 + // LWSYNC is the "export" barrier recommended by Power ISA + // v2.07 book II, appendix B.2.2.2. + // LWSYNC is a load/load, load/store, and store/store barrier. + LWSYNC + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/atomic_riscv64.s b/platform/dbops/binaries/go/go/src/runtime/atomic_riscv64.s new file mode 100644 index 0000000000000000000000000000000000000000..544a7c59727239f92a4637546f269f9738f9589c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/atomic_riscv64.s @@ -0,0 +1,10 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// func publicationBarrier() +TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0 + FENCE + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/auxv_none.go b/platform/dbops/binaries/go/go/src/runtime/auxv_none.go new file mode 100644 index 0000000000000000000000000000000000000000..5d473cab5ca482c1a16fc491caef8869e7a2717e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/auxv_none.go @@ -0,0 +1,10 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !linux && !darwin && !dragonfly && !freebsd && !netbsd && !solaris + +package runtime + +func sysargs(argc int32, argv **byte) { +} diff --git a/platform/dbops/binaries/go/go/src/runtime/callers_test.go b/platform/dbops/binaries/go/go/src/runtime/callers_test.go new file mode 100644 index 0000000000000000000000000000000000000000..49a1d5a6f7612439311f668eaba072dd5a3263ed --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/callers_test.go @@ -0,0 +1,489 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "reflect" + "runtime" + "strings" + "testing" +) + +func f1(pan bool) []uintptr { + return f2(pan) // line 15 +} + +func f2(pan bool) []uintptr { + return f3(pan) // line 19 +} + +func f3(pan bool) []uintptr { + if pan { + panic("f3") // line 24 + } + ret := make([]uintptr, 20) + return ret[:runtime.Callers(0, ret)] // line 27 +} + +func testCallers(t *testing.T, pcs []uintptr, pan bool) { + m := make(map[string]int, len(pcs)) + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + if frame.Function != "" { + m[frame.Function] = frame.Line + } + if !more { + break + } + } + + var seen []string + for k := range m { + seen = append(seen, k) + } + t.Logf("functions seen: %s", strings.Join(seen, " ")) + + var f3Line int + if pan { + f3Line = 24 + } else { + f3Line = 27 + } + want := []struct { + name string + line int + }{ + {"f1", 15}, + {"f2", 19}, + {"f3", f3Line}, + } + for _, w := range want { + if got := m["runtime_test."+w.name]; got != w.line { + t.Errorf("%s is line %d, want %d", w.name, got, w.line) + } + } +} + +func testCallersEqual(t *testing.T, pcs []uintptr, want []string) { + t.Helper() + + got := make([]string, 0, len(want)) + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + if !more || len(got) >= len(want) { + break + } + got = append(got, frame.Function) + } + if !reflect.DeepEqual(want, got) { + t.Fatalf("wanted %v, got %v", want, got) + } +} + +func TestCallers(t *testing.T) { + testCallers(t, f1(false), false) +} + +func TestCallersPanic(t *testing.T) { + // Make sure we don't have any extra frames on the stack (due to + // open-coded defer processing) + want := []string{"runtime.Callers", "runtime_test.TestCallersPanic.func1", + "runtime.gopanic", "runtime_test.f3", "runtime_test.f2", "runtime_test.f1", + "runtime_test.TestCallersPanic"} + + defer func() { + if r := recover(); r == nil { + t.Fatal("did not panic") + } + pcs := make([]uintptr, 20) + pcs = pcs[:runtime.Callers(0, pcs)] + testCallers(t, pcs, true) + testCallersEqual(t, pcs, want) + }() + f1(true) +} + +func TestCallersDoublePanic(t *testing.T) { + // Make sure we don't have any extra frames on the stack (due to + // open-coded defer processing) + want := []string{"runtime.Callers", "runtime_test.TestCallersDoublePanic.func1.1", + "runtime.gopanic", "runtime_test.TestCallersDoublePanic.func1", "runtime.gopanic", "runtime_test.TestCallersDoublePanic"} + + defer func() { + defer func() { + pcs := make([]uintptr, 20) + pcs = pcs[:runtime.Callers(0, pcs)] + if recover() == nil { + t.Fatal("did not panic") + } + testCallersEqual(t, pcs, want) + }() + if recover() == nil { + t.Fatal("did not panic") + } + panic(2) + }() + panic(1) +} + +// Test that a defer after a successful recovery looks like it is called directly +// from the function with the defers. 
+func TestCallersAfterRecovery(t *testing.T) { + want := []string{"runtime.Callers", "runtime_test.TestCallersAfterRecovery.func1", "runtime_test.TestCallersAfterRecovery"} + + defer func() { + pcs := make([]uintptr, 20) + pcs = pcs[:runtime.Callers(0, pcs)] + testCallersEqual(t, pcs, want) + }() + defer func() { + if recover() == nil { + t.Fatal("did not recover from panic") + } + }() + panic(1) +} + +func TestCallersAbortedPanic(t *testing.T) { + want := []string{"runtime.Callers", "runtime_test.TestCallersAbortedPanic.func2", "runtime_test.TestCallersAbortedPanic"} + + defer func() { + r := recover() + if r != nil { + t.Fatalf("should be no panic remaining to recover") + } + }() + + defer func() { + // panic1 was aborted/replaced by panic2, so when panic2 was + // recovered, there is no remaining panic on the stack. + pcs := make([]uintptr, 20) + pcs = pcs[:runtime.Callers(0, pcs)] + testCallersEqual(t, pcs, want) + }() + defer func() { + r := recover() + if r != "panic2" { + t.Fatalf("got %v, wanted %v", r, "panic2") + } + }() + defer func() { + // panic2 aborts/replaces panic1, because it is a recursive panic + // that is not recovered within the defer function called by + // panic1 panicking sequence + panic("panic2") + }() + panic("panic1") +} + +func TestCallersAbortedPanic2(t *testing.T) { + want := []string{"runtime.Callers", "runtime_test.TestCallersAbortedPanic2.func2", "runtime_test.TestCallersAbortedPanic2"} + defer func() { + r := recover() + if r != nil { + t.Fatalf("should be no panic remaining to recover") + } + }() + defer func() { + pcs := make([]uintptr, 20) + pcs = pcs[:runtime.Callers(0, pcs)] + testCallersEqual(t, pcs, want) + }() + func() { + defer func() { + r := recover() + if r != "panic2" { + t.Fatalf("got %v, wanted %v", r, "panic2") + } + }() + func() { + defer func() { + // Again, panic2 aborts/replaces panic1 + panic("panic2") + }() + panic("panic1") + }() + }() +} + +func TestCallersNilPointerPanic(t *testing.T) { + // Make sure we don't have any extra frames on the stack (due to + // open-coded defer processing) + want := []string{"runtime.Callers", "runtime_test.TestCallersNilPointerPanic.func1", + "runtime.gopanic", "runtime.panicmem", "runtime.sigpanic", + "runtime_test.TestCallersNilPointerPanic"} + + defer func() { + if r := recover(); r == nil { + t.Fatal("did not panic") + } + pcs := make([]uintptr, 20) + pcs = pcs[:runtime.Callers(0, pcs)] + testCallersEqual(t, pcs, want) + }() + var p *int + if *p == 3 { + t.Fatal("did not see nil pointer panic") + } +} + +func TestCallersDivZeroPanic(t *testing.T) { + // Make sure we don't have any extra frames on the stack (due to + // open-coded defer processing) + want := []string{"runtime.Callers", "runtime_test.TestCallersDivZeroPanic.func1", + "runtime.gopanic", "runtime.panicdivide", + "runtime_test.TestCallersDivZeroPanic"} + + defer func() { + if r := recover(); r == nil { + t.Fatal("did not panic") + } + pcs := make([]uintptr, 20) + pcs = pcs[:runtime.Callers(0, pcs)] + testCallersEqual(t, pcs, want) + }() + var n int + if 5/n == 1 { + t.Fatal("did not see divide-by-zero panic") + } +} + +func TestCallersDeferNilFuncPanic(t *testing.T) { + // Make sure we don't have any extra frames on the stack. We cut off the check + // at runtime.sigpanic, because non-open-coded defers (which may be used in + // non-opt or race checker mode) include an extra 'deferreturn' frame (which is + // where the nil pointer deref happens).
+ state := 1 + want := []string{"runtime.Callers", "runtime_test.TestCallersDeferNilFuncPanic.func1", + "runtime.gopanic", "runtime.panicmem", "runtime.sigpanic"} + + defer func() { + if r := recover(); r == nil { + t.Fatal("did not panic") + } + pcs := make([]uintptr, 20) + pcs = pcs[:runtime.Callers(0, pcs)] + testCallersEqual(t, pcs, want) + if state == 1 { + t.Fatal("nil defer func panicked at defer time rather than function exit time") + } + + }() + var f func() + defer f() + // Use the value of 'state' to make sure nil defer func f causes panic at + // function exit, rather than at the defer statement. + state = 2 +} + +// Same test, but forcing non-open-coded defer by putting the defer in a loop. See +// issue #36050 +func TestCallersDeferNilFuncPanicWithLoop(t *testing.T) { + state := 1 + want := []string{"runtime.Callers", "runtime_test.TestCallersDeferNilFuncPanicWithLoop.func1", + "runtime.gopanic", "runtime.panicmem", "runtime.sigpanic", "runtime.deferreturn", "runtime_test.TestCallersDeferNilFuncPanicWithLoop"} + + defer func() { + if r := recover(); r == nil { + t.Fatal("did not panic") + } + pcs := make([]uintptr, 20) + pcs = pcs[:runtime.Callers(0, pcs)] + testCallersEqual(t, pcs, want) + if state == 1 { + t.Fatal("nil defer func panicked at defer time rather than function exit time") + } + + }() + + for i := 0; i < 1; i++ { + var f func() + defer f() + } + // Use the value of 'state' to make sure nil defer func f causes panic at + // function exit, rather than at the defer statement. + state = 2 +} + +// issue #51988 +// Func.Endlineno was lost when instantiating generic functions, leading to incorrect +// stack trace positions. +func TestCallersEndlineno(t *testing.T) { + testNormalEndlineno(t) + testGenericEndlineno[int](t) +} + +func testNormalEndlineno(t *testing.T) { + defer testCallerLine(t, callerLine(t, 0)+1) +} + +func testGenericEndlineno[_ any](t *testing.T) { + defer testCallerLine(t, callerLine(t, 0)+1) +} + +func testCallerLine(t *testing.T, want int) { + if have := callerLine(t, 1); have != want { + t.Errorf("callerLine(1) returned %d, but want %d\n", have, want) + } +} + +func callerLine(t *testing.T, skip int) int { + _, _, line, ok := runtime.Caller(skip + 1) + if !ok { + t.Fatalf("runtime.Caller(%d) failed", skip+1) + } + return line +} + +func BenchmarkCallers(b *testing.B) { + b.Run("cached", func(b *testing.B) { + // Very pcvalueCache-friendly, no inlining. + callersCached(b, 100) + }) + b.Run("inlined", func(b *testing.B) { + // Some inlining, still pretty cache-friendly. 
+ callersInlined(b, 100) + }) + b.Run("no-cache", func(b *testing.B) { + // Cache-hostile + callersNoCache(b, 100) + }) +} + +func callersCached(b *testing.B, n int) int { + if n <= 0 { + pcs := make([]uintptr, 32) + b.ResetTimer() + for i := 0; i < b.N; i++ { + runtime.Callers(0, pcs) + } + b.StopTimer() + return 0 + } + return 1 + callersCached(b, n-1) +} + +func callersInlined(b *testing.B, n int) int { + if n <= 0 { + pcs := make([]uintptr, 32) + b.ResetTimer() + for i := 0; i < b.N; i++ { + runtime.Callers(0, pcs) + } + b.StopTimer() + return 0 + } + return 1 + callersInlined1(b, n-1) +} +func callersInlined1(b *testing.B, n int) int { return callersInlined2(b, n) } +func callersInlined2(b *testing.B, n int) int { return callersInlined3(b, n) } +func callersInlined3(b *testing.B, n int) int { return callersInlined4(b, n) } +func callersInlined4(b *testing.B, n int) int { return callersInlined(b, n) } + +func callersNoCache(b *testing.B, n int) int { + if n <= 0 { + pcs := make([]uintptr, 32) + b.ResetTimer() + for i := 0; i < b.N; i++ { + runtime.Callers(0, pcs) + } + b.StopTimer() + return 0 + } + switch n % 16 { + case 0: + return 1 + callersNoCache(b, n-1) + case 1: + return 1 + callersNoCache(b, n-1) + case 2: + return 1 + callersNoCache(b, n-1) + case 3: + return 1 + callersNoCache(b, n-1) + case 4: + return 1 + callersNoCache(b, n-1) + case 5: + return 1 + callersNoCache(b, n-1) + case 6: + return 1 + callersNoCache(b, n-1) + case 7: + return 1 + callersNoCache(b, n-1) + case 8: + return 1 + callersNoCache(b, n-1) + case 9: + return 1 + callersNoCache(b, n-1) + case 10: + return 1 + callersNoCache(b, n-1) + case 11: + return 1 + callersNoCache(b, n-1) + case 12: + return 1 + callersNoCache(b, n-1) + case 13: + return 1 + callersNoCache(b, n-1) + case 14: + return 1 + callersNoCache(b, n-1) + default: + return 1 + callersNoCache(b, n-1) + } +} + +func BenchmarkFPCallers(b *testing.B) { + b.Run("cached", func(b *testing.B) { + // Very pcvalueCache-friendly, no inlining. + fpCallersCached(b, 100) + }) +} + +func fpCallersCached(b *testing.B, n int) int { + if n <= 0 { + pcs := make([]uintptr, 32) + b.ResetTimer() + for i := 0; i < b.N; i++ { + runtime.FPCallers(pcs) + } + b.StopTimer() + return 0 + } + return 1 + fpCallersCached(b, n-1) +} + +func TestFPUnwindAfterRecovery(t *testing.T) { + if !runtime.FramePointerEnabled { + t.Skip("frame pointers not supported for this architecture") + } + // Make sure that frame pointer unwinding succeeds from a deferred + // function run after recovering from a panic. It can fail if the + // recovery does not properly restore the caller's frame pointer before + // running the remaining deferred functions. + // + // This test does not verify the accuracy of the call stack (it + // currently includes a frame from runtime.deferreturn which would + // normally be omitted). It is only intended to check that producing the + // call stack won't crash. + defer func() { + pcs := make([]uintptr, 32) + for i := range pcs { + // If runtime.recovery doesn't properly restore the + // frame pointer before returning control to this + // function, it will point somewhere lower in the stack + // from one of the frames of runtime.gopanic() or one of + // its callees prior to recovery. So, we put some + // non-zero values on the stack to ensure that frame + // pointer unwinding will crash if it sees the old, + // invalid frame pointer.
+ pcs[i] = 10 + } + runtime.FPCallers(pcs) + t.Logf("%v", pcs) + }() + defer func() { + if recover() == nil { + t.Fatal("did not recover from panic") + } + }() + panic(1) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/cgo.go b/platform/dbops/binaries/go/go/src/runtime/cgo.go new file mode 100644 index 0000000000000000000000000000000000000000..40c8c748d3e56ed5c1b2fb0d6c2f2fb200e55827 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/cgo.go @@ -0,0 +1,71 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +//go:cgo_export_static main + +// Filled in by runtime/cgo when linked into binary. + +//go:linkname _cgo_init _cgo_init +//go:linkname _cgo_thread_start _cgo_thread_start +//go:linkname _cgo_sys_thread_create _cgo_sys_thread_create +//go:linkname _cgo_notify_runtime_init_done _cgo_notify_runtime_init_done +//go:linkname _cgo_callers _cgo_callers +//go:linkname _cgo_set_context_function _cgo_set_context_function +//go:linkname _cgo_yield _cgo_yield +//go:linkname _cgo_pthread_key_created _cgo_pthread_key_created +//go:linkname _cgo_bindm _cgo_bindm +//go:linkname _cgo_getstackbound _cgo_getstackbound + +var ( + _cgo_init unsafe.Pointer + _cgo_thread_start unsafe.Pointer + _cgo_sys_thread_create unsafe.Pointer + _cgo_notify_runtime_init_done unsafe.Pointer + _cgo_callers unsafe.Pointer + _cgo_set_context_function unsafe.Pointer + _cgo_yield unsafe.Pointer + _cgo_pthread_key_created unsafe.Pointer + _cgo_bindm unsafe.Pointer + _cgo_getstackbound unsafe.Pointer +) + +// iscgo is set to true by the runtime/cgo package +var iscgo bool + +// set_crosscall2 is set by the runtime/cgo package +var set_crosscall2 func() + +// cgoHasExtraM is set on startup when an extra M is created for cgo. +// The extra M must be created before any C/C++ code calls cgocallback. +var cgoHasExtraM bool + +// cgoUse is called by cgo-generated code (using go:linkname to get at +// an unexported name). The calls serve two purposes: +// 1) they are opaque to escape analysis, so the argument is considered to +// escape to the heap. +// 2) they keep the argument alive until the call site; the call is emitted after +// the end of the (presumed) use of the argument by C. +// cgoUse should not actually be called (see cgoAlwaysFalse). +func cgoUse(any) { throw("cgoUse should not be called") } + +// cgoAlwaysFalse is a boolean value that is always false. +// The cgo-generated code says if cgoAlwaysFalse { cgoUse(p) }. +// The compiler cannot see that cgoAlwaysFalse is always false, +// so it emits the test and keeps the call, giving the desired +// escape analysis result. The test is cheaper than the call. +var cgoAlwaysFalse bool + +var cgo_yield = &_cgo_yield + +func cgoNoCallback(v bool) { + g := getg() + if g.nocgocallback && v { + panic("runtime: unexpected setting cgoNoCallback") + } + g.nocgocallback = v +} diff --git a/platform/dbops/binaries/go/go/src/runtime/cgo_mmap.go b/platform/dbops/binaries/go/go/src/runtime/cgo_mmap.go new file mode 100644 index 0000000000000000000000000000000000000000..36d776e628143e284ebc203e45ca6b18e3afb761 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/cgo_mmap.go @@ -0,0 +1,70 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Support for memory sanitizer. See runtime/cgo/mmap.go. 
+ +//go:build (linux && (amd64 || arm64 || loong64)) || (freebsd && amd64) + +package runtime + +import "unsafe" + +// _cgo_mmap is filled in by runtime/cgo when it is linked into the +// program, so it is only non-nil when using cgo. +// +//go:linkname _cgo_mmap _cgo_mmap +var _cgo_mmap unsafe.Pointer + +// _cgo_munmap is filled in by runtime/cgo when it is linked into the +// program, so it is only non-nil when using cgo. +// +//go:linkname _cgo_munmap _cgo_munmap +var _cgo_munmap unsafe.Pointer + +// mmap is used to route the mmap system call through C code when using cgo, to +// support sanitizer interceptors. Don't allow stack splits, since this function +// (used by sysAlloc) is called in a lot of low-level parts of the runtime and +// callers often assume it won't acquire any locks. +// +//go:nosplit +func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) { + if _cgo_mmap != nil { + // Make ret a uintptr so that writing to it in the + // function literal does not trigger a write barrier. + // A write barrier here could break because of the way + // that mmap uses the same value both as a pointer and + // an errno value. + var ret uintptr + systemstack(func() { + ret = callCgoMmap(addr, n, prot, flags, fd, off) + }) + if ret < 4096 { + return nil, int(ret) + } + return unsafe.Pointer(ret), 0 + } + return sysMmap(addr, n, prot, flags, fd, off) +} + +func munmap(addr unsafe.Pointer, n uintptr) { + if _cgo_munmap != nil { + systemstack(func() { callCgoMunmap(addr, n) }) + return + } + sysMunmap(addr, n) +} + +// sysMmap calls the mmap system call. It is implemented in assembly. +func sysMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int) + +// callCgoMmap calls the mmap function in the runtime/cgo package +// using the GCC calling convention. It is implemented in assembly. +func callCgoMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) uintptr + +// sysMunmap calls the munmap system call. It is implemented in assembly. +func sysMunmap(addr unsafe.Pointer, n uintptr) + +// callCgoMunmap calls the munmap function in the runtime/cgo package +// using the GCC calling convention. It is implemented in assembly. +func callCgoMunmap(addr unsafe.Pointer, n uintptr) diff --git a/platform/dbops/binaries/go/go/src/runtime/cgo_ppc64x.go b/platform/dbops/binaries/go/go/src/runtime/cgo_ppc64x.go new file mode 100644 index 0000000000000000000000000000000000000000..c723213809e6d01478efe4f867e783caa7d9961b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/cgo_ppc64x.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +package runtime + +// crosscall_ppc64 calls into the runtime to set up the registers the +// Go runtime expects and so the symbol it calls needs to be exported +// for external linking to work. +// +//go:cgo_export_static _cgo_reginit diff --git a/platform/dbops/binaries/go/go/src/runtime/cgo_sigaction.go b/platform/dbops/binaries/go/go/src/runtime/cgo_sigaction.go new file mode 100644 index 0000000000000000000000000000000000000000..9500c522059580be42f8d7e6c97e3b95eb8576f3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/cgo_sigaction.go @@ -0,0 +1,94 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Support for sanitizers. See runtime/cgo/sigaction.go. + +//go:build (linux && amd64) || (freebsd && amd64) || (linux && arm64) || (linux && ppc64le) + +package runtime + +import "unsafe" + +// _cgo_sigaction is filled in by runtime/cgo when it is linked into the +// program, so it is only non-nil when using cgo. +// +//go:linkname _cgo_sigaction _cgo_sigaction +var _cgo_sigaction unsafe.Pointer + +//go:nosplit +//go:nowritebarrierrec +func sigaction(sig uint32, new, old *sigactiont) { + // racewalk.go avoids adding sanitizing instrumentation to package runtime, + // but we might be calling into instrumented C functions here, + // so we need the pointer parameters to be properly marked. + // + // Mark the input as having been written before the call + // and the output as read after. + if msanenabled && new != nil { + msanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new)) + } + if asanenabled && new != nil { + asanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new)) + } + if _cgo_sigaction == nil || inForkedChild { + sysSigaction(sig, new, old) + } else { + // We need to call _cgo_sigaction, which means we need a big enough stack + // for C. To complicate matters, we may be in libpreinit (before the + // runtime has been initialized) or in an asynchronous signal handler (with + // the current thread in transition between goroutines, or with the g0 + // system stack already in use). + + var ret int32 + + var g *g + if mainStarted { + g = getg() + } + sp := uintptr(unsafe.Pointer(&sig)) + switch { + case g == nil: + // No g: we're on a C stack or a signal stack. + ret = callCgoSigaction(uintptr(sig), new, old) + case sp < g.stack.lo || sp >= g.stack.hi: + // We're no longer on g's stack, so we must be handling a signal. It's + // possible that we interrupted the thread during a transition between g + // and g0, so we should stay on the current stack to avoid corrupting g0. + ret = callCgoSigaction(uintptr(sig), new, old) + default: + // We're running on g's stack, so either we're not in a signal handler or + // the signal handler has set the correct g. If we're on gsignal or g0, + // systemstack will make the call directly; otherwise, it will switch to + // g0 to ensure we have enough room to call a libc function. + // + // The function literal that we pass to systemstack is not nosplit, but + // that's ok: we'll be running on a fresh, clean system stack so the stack + // check will always succeed anyway. + systemstack(func() { + ret = callCgoSigaction(uintptr(sig), new, old) + }) + } + + const EINVAL = 22 + if ret == EINVAL { + // libc reserves certain signals — normally 32-33 — for pthreads, and + // returns EINVAL for sigaction calls on those signals. If we get EINVAL, + // fall back to making the syscall directly. + sysSigaction(sig, new, old) + } + } + + if msanenabled && old != nil { + msanread(unsafe.Pointer(old), unsafe.Sizeof(*old)) + } + if asanenabled && old != nil { + asanread(unsafe.Pointer(old), unsafe.Sizeof(*old)) + } +} + +// callCgoSigaction calls the sigaction function in the runtime/cgo package +// using the GCC calling convention. It is implemented in assembly. 
+// +//go:noescape +func callCgoSigaction(sig uintptr, new, old *sigactiont) int32 diff --git a/platform/dbops/binaries/go/go/src/runtime/cgocall.go b/platform/dbops/binaries/go/go/src/runtime/cgocall.go new file mode 100644 index 0000000000000000000000000000000000000000..0d3cc40903a3937e5807084fef82fac72f90ded2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/cgocall.go @@ -0,0 +1,769 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Cgo call and callback support. +// +// To call into the C function f from Go, the cgo-generated code calls +// runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a +// gcc-compiled function written by cgo. +// +// runtime.cgocall (below) calls entersyscall so as not to block +// other goroutines or the garbage collector, and then calls +// runtime.asmcgocall(_cgo_Cfunc_f, frame). +// +// runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack +// (assumed to be an operating system-allocated stack, so safe to run +// gcc-compiled code on) and calls _cgo_Cfunc_f(frame). +// +// _cgo_Cfunc_f invokes the actual C function f with arguments +// taken from the frame structure, records the results in the frame, +// and returns to runtime.asmcgocall. +// +// After it regains control, runtime.asmcgocall switches back to the +// original g (m->curg)'s stack and returns to runtime.cgocall. +// +// After it regains control, runtime.cgocall calls exitsyscall, which blocks +// until this m can run Go code without violating the $GOMAXPROCS limit, +// and then unlocks g from m. +// +// The above description skipped over the possibility of the gcc-compiled +// function f calling back into Go. If that happens, we continue down +// the rabbit hole during the execution of f. +// +// To make it possible for gcc-compiled C code to call a Go function p.GoF, +// cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't +// know about packages). The gcc-compiled C function f calls GoF. +// +// GoF initializes "frame", a structure containing all of its +// arguments and slots for p.GoF's results. It calls +// crosscall2(_cgoexp_GoF, frame, framesize, ctxt) using the gcc ABI. +// +// crosscall2 (in cgo/asm_$GOARCH.s) is a four-argument adapter from +// the gcc function call ABI to the gc function call ABI. At this +// point we're in the Go runtime, but we're still running on m.g0's +// stack and outside the $GOMAXPROCS limit. crosscall2 calls +// runtime.cgocallback(_cgoexp_GoF, frame, ctxt) using the gc ABI. +// (crosscall2's framesize argument is no longer used, but there's one +// case where SWIG calls crosscall2 directly and expects to pass this +// argument. See _cgo_panic.) +// +// runtime.cgocallback (in asm_$GOARCH.s) switches from m.g0's stack +// to the original g (m.curg)'s stack, on which it calls +// runtime.cgocallbackg(_cgoexp_GoF, frame, ctxt). As part of the +// stack switch, runtime.cgocallback saves the current SP as +// m.g0.sched.sp, so that any use of m.g0's stack during the execution +// of the callback will be done below the existing stack frames. +// Before overwriting m.g0.sched.sp, it pushes the old value on the +// m.g0 stack, so that it can be restored later. +// +// runtime.cgocallbackg (below) is now running on a real goroutine +// stack (not an m.g0 stack). First it calls runtime.exitsyscall, which will +// block until the $GOMAXPROCS limit allows running this goroutine. 
+// Once exitsyscall has returned, it is safe to do things like call the memory +// allocator or invoke the Go callback function. runtime.cgocallbackg +// first defers a function to unwind m.g0.sched.sp, so that if p.GoF +// panics, m.g0.sched.sp will be restored to its old value: the m.g0 stack +// and the m.curg stack will be unwound in lock step. +// Then it calls _cgoexp_GoF(frame). +// +// _cgoexp_GoF, which was generated by cmd/cgo, unpacks the arguments +// from frame, calls p.GoF, writes the results back to frame, and +// returns. Now we start unwinding this whole process. +// +// runtime.cgocallbackg pops but does not execute the deferred +// function to unwind m.g0.sched.sp, calls runtime.entersyscall, and +// returns to runtime.cgocallback. +// +// After it regains control, runtime.cgocallback switches back to +// m.g0's stack (the pointer is still in m.g0.sched.sp), restores the old +// m.g0.sched.sp value from the stack, and returns to crosscall2. +// +// crosscall2 restores the callee-save registers for gcc and returns +// to GoF, which unpacks any result values and returns to f. + +package runtime + +import ( + "internal/goarch" + "internal/goexperiment" + "runtime/internal/sys" + "unsafe" +) + +// Addresses collected in a cgo backtrace when crashing. +// Length must match arg.Max in x_cgo_callers in runtime/cgo/gcc_traceback.c. +type cgoCallers [32]uintptr + +// argset matches runtime/cgo/linux_syscall.c:argset_t +type argset struct { + args unsafe.Pointer + retval uintptr +} + +// wrapper for syscall package to call cgocall for libc (cgo) calls. +// +//go:linkname syscall_cgocaller syscall.cgocaller +//go:nosplit +//go:uintptrescapes +func syscall_cgocaller(fn unsafe.Pointer, args ...uintptr) uintptr { + as := argset{args: unsafe.Pointer(&args[0])} + cgocall(fn, unsafe.Pointer(&as)) + return as.retval +} + +var ncgocall uint64 // number of cgo calls in total for dead m + +// Call from Go to C. +// +// This must be nosplit because it's used for syscalls on some +// platforms. Syscalls may have untyped arguments on the stack, so +// it's not safe to grow or scan the stack. +// +//go:nosplit +func cgocall(fn, arg unsafe.Pointer) int32 { + if !iscgo && GOOS != "solaris" && GOOS != "illumos" && GOOS != "windows" { + throw("cgocall unavailable") + } + + if fn == nil { + throw("cgocall nil") + } + + if raceenabled { + racereleasemerge(unsafe.Pointer(&racecgosync)) + } + + mp := getg().m + mp.ncgocall++ + + // Reset traceback. + mp.cgoCallers[0] = 0 + + // Announce we are entering a system call + // so that the scheduler knows to create another + // M to run goroutines while we are in the + // foreign code. + // + // The call to asmcgocall is guaranteed not to + // grow the stack and does not allocate memory, + // so it is safe to call while "in a system call", outside + // the $GOMAXPROCS accounting. + // + // fn may call back into Go code, in which case we'll exit the + // "system call", run the Go code (which may grow the stack), + // and then re-enter the "system call" reusing the PC and SP + // saved by entersyscall here. + entersyscall() + + // Tell asynchronous preemption that we're entering external + // code. We do this after entersyscall because this may block + // and cause an async preemption to fail, but at this point a + // sync preemption will succeed (though this is not a matter + // of correctness). 
+ osPreemptExtEnter(mp) + + mp.incgo = true + // We use ncgo as a check during execution tracing for whether there is + // any C on the call stack, which there will be after this point. If + // there isn't, we can use frame pointer unwinding to collect call + // stacks efficiently. This will be the case for the first Go-to-C call + // on a stack, so it's preferable to update it here, after we emit a + // trace event in entersyscall above. + mp.ncgo++ + + errno := asmcgocall(fn, arg) + + // Update accounting before exitsyscall because exitsyscall may + // reschedule us on to a different M. + mp.incgo = false + mp.ncgo-- + + osPreemptExtExit(mp) + + exitsyscall() + + // Note that raceacquire must be called only after exitsyscall has + // wired this M to a P. + if raceenabled { + raceacquire(unsafe.Pointer(&racecgosync)) + } + + // From the garbage collector's perspective, time can move + // backwards in the sequence above. If there's a callback into + // Go code, GC will see this function at the call to + // asmcgocall. When the Go call later returns to C, the + // syscall PC/SP is rolled back and the GC sees this function + // back at the call to entersyscall. Normally, fn and arg + // would be live at entersyscall and dead at asmcgocall, so if + // time moved backwards, GC would see these arguments as dead + // and then live. Prevent these undead arguments from crashing + // GC by forcing them to stay live across this time warp. + KeepAlive(fn) + KeepAlive(arg) + KeepAlive(mp) + + return errno +} + +// Set or reset the system stack bounds for a callback on sp. +// +// Must be nosplit because it is called by needm prior to fully initializing +// the M. +// +//go:nosplit +func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) { + g0 := mp.g0 + + inBound := sp > g0.stack.lo && sp <= g0.stack.hi + if mp.ncgo > 0 && !inBound { + // ncgo > 0 indicates that this M was in Go further up the stack + // (it called C and is now receiving a callback). + // + // !inBound indicates that we were called with SP outside the + // expected system stack bounds (C changed the stack out from + // under us between the cgocall and cgocallback?). + // + // It is not safe for the C call to change the stack out from + // under us, so throw. + + // Note that this case isn't possible for signal == true, as + // that is always passing a new M from needm. + + // Stack is bogus, but reset the bounds anyway so we can print. + hi := g0.stack.hi + lo := g0.stack.lo + g0.stack.hi = sp + 1024 + g0.stack.lo = sp - 32*1024 + g0.stackguard0 = g0.stack.lo + stackGuard + g0.stackguard1 = g0.stackguard0 + + print("M ", mp.id, " procid ", mp.procid, " runtime: cgocallback with sp=", hex(sp), " out of bounds [", hex(lo), ", ", hex(hi), "]") + print("\n") + exit(2) + } + + if !mp.isextra { + // We allocated the stack for standard Ms. Don't replace the + // stack bounds with estimated ones when we already initialized + // with the exact ones. + return + } + + // This M does not have Go further up the stack. However, it may have + // previously called into Go, initializing the stack bounds. Between + // that call returning and now the stack may have changed (perhaps the + // C thread is running a coroutine library). We need to update the + // stack bounds for this case. + // + // N.B. we need to update the stack bounds even if SP appears to + // already be in bounds. Our "bounds" may actually be estimated dummy + // bounds (below). 
The actual stack bounds could have shifted but still + // have partial overlap with our dummy bounds. If we failed to update + // in that case, we could find ourselves seemingly called near the + // bottom of the stack bounds, where we quickly run out of space. + + // Set the stack bounds to match the current stack. We don't + // actually know how big the stack is, just as we don't know how big + // any scheduling stack is, but we assume there's at least 32 kB. If we + // can get a more accurate stack bound from pthread, use that, provided + // it actually contains SP. + g0.stack.hi = sp + 1024 + g0.stack.lo = sp - 32*1024 + if !signal && _cgo_getstackbound != nil { + // Don't adjust if called from the signal handler. + // We are on the signal stack, not the pthread stack. + // (We could get the stack bounds from sigaltstack, but + // we're getting out of the signal handler very soon + // anyway. Not worth it.) + var bounds [2]uintptr + asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds)) + // getstackbound is an unsupported no-op on Windows. + // + // Don't use these bounds if they don't contain SP. Perhaps we + // were called by something not using the standard thread + // stack. + if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] { + g0.stack.lo = bounds[0] + g0.stack.hi = bounds[1] + } + } + g0.stackguard0 = g0.stack.lo + stackGuard + g0.stackguard1 = g0.stackguard0 +} + +// Call from C back to Go. fn must point to an ABIInternal Go entry-point. +// +//go:nosplit +func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) { + gp := getg() + if gp != gp.m.curg { + println("runtime: bad g in cgocallback") + exit(2) + } + + sp := gp.m.g0.sched.sp // system sp saved by cgocallback. + callbackUpdateSystemStack(gp.m, sp, false) + + // The call from C is on gp.m's g0 stack, so we must ensure + // that we stay on that M. We have to do this before calling + // exitsyscall, since it would otherwise be free to move us to + // a different M. The call to unlockOSThread is in this function + // after cgocallbackg1, or in the case of panicking, in unwindm. + lockOSThread() + + checkm := gp.m + + // Save current syscall parameters, so m.syscall can be + // used again if the callback decides to make a syscall. + syscall := gp.m.syscall + + // entersyscall saves the caller's SP to allow the GC to trace the Go + // stack. However, since we're returning to an earlier stack frame and + // need to pair with the entersyscall() call made by cgocall, we must + // save syscall* and let reentersyscall restore them. + savedsp := unsafe.Pointer(gp.syscallsp) + savedpc := gp.syscallpc + exitsyscall() // coming out of cgo call + gp.m.incgo = false + if gp.m.isextra { + gp.m.isExtraInC = false + } + + osPreemptExtExit(gp.m) + + if gp.nocgocallback { + panic("runtime: function marked with #cgo nocallback called back into Go") + } + + cgocallbackg1(fn, frame, ctxt) + + // At this point we're about to call unlockOSThread. + // The following code must not change to a different m. + // This is enforced by checking incgo in the schedule function.
+ gp.m.incgo = true + unlockOSThread() + + if gp.m.isextra { + gp.m.isExtraInC = true + } + + if gp.m != checkm { + throw("m changed unexpectedly in cgocallbackg") + } + + osPreemptExtEnter(gp.m) + + // going back to cgo call + reentersyscall(savedpc, uintptr(savedsp)) + + gp.m.syscall = syscall +} + +func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) { + gp := getg() + + if gp.m.needextram || extraMWaiters.Load() > 0 { + gp.m.needextram = false + systemstack(newextram) + } + + if ctxt != 0 { + s := append(gp.cgoCtxt, ctxt) + + // Now we need to set gp.cgoCtxt = s, but we could get + // a SIGPROF signal while manipulating the slice, and + // the SIGPROF handler could pick up gp.cgoCtxt while + // tracing up the stack. We need to ensure that the + // handler always sees a valid slice, so set the + // values in an order such that it always does. + p := (*slice)(unsafe.Pointer(&gp.cgoCtxt)) + atomicstorep(unsafe.Pointer(&p.array), unsafe.Pointer(&s[0])) + p.cap = cap(s) + p.len = len(s) + + defer func(gp *g) { + // Decrease the length of the slice by one, safely. + p := (*slice)(unsafe.Pointer(&gp.cgoCtxt)) + p.len-- + }(gp) + } + + if gp.m.ncgo == 0 { + // The C call to Go came from a thread not currently running + // any Go. In the case of -buildmode=c-archive or c-shared, + // this call may be coming in before package initialization + // is complete. Wait until it is. + <-main_init_done + } + + // Check whether the profiler needs to be turned on or off; this route to + // run Go code does not use runtime.execute, so bypasses the check there. + hz := sched.profilehz + if gp.m.profilehz != hz { + setThreadCPUProfiler(hz) + } + + // Add entry to defer stack in case of panic. + restore := true + defer unwindm(&restore) + + if raceenabled { + raceacquire(unsafe.Pointer(&racecgosync)) + } + + // Invoke callback. This function is generated by cmd/cgo and + // will unpack the argument frame and call the Go function. + var cb func(frame unsafe.Pointer) + cbFV := funcval{uintptr(fn)} + *(*unsafe.Pointer)(unsafe.Pointer(&cb)) = noescape(unsafe.Pointer(&cbFV)) + cb(frame) + + if raceenabled { + racereleasemerge(unsafe.Pointer(&racecgosync)) + } + + // Do not unwind m->g0->sched.sp. + // Our caller, cgocallback, will do that. + restore = false +} + +func unwindm(restore *bool) { + if *restore { + // Restore sp saved by cgocallback during + // unwind of g's stack (see comment at top of file). + mp := acquirem() + sched := &mp.g0.sched + sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + alignUp(sys.MinFrameSize, sys.StackAlign))) + + // Do the accounting that cgocall will not have a chance to do + // during an unwind. + // + // In the case where a Go call originates from C, ncgo is 0 + // and there is no matching cgocall to end. + if mp.ncgo > 0 { + mp.incgo = false + mp.ncgo-- + osPreemptExtExit(mp) + } + + // Undo the call to lockOSThread in cgocallbackg, only on the + // panicking path. In normal return case cgocallbackg will call + // unlockOSThread, ensuring no preemption point after the unlock. + // Here we don't need to worry about preemption, because we're + // panicking out of the callback and unwinding the g0 stack, + // instead of reentering cgo (which requires the same thread). + unlockOSThread() + + releasem(mp) + } +} + +// called from assembly. +func badcgocallback() { + throw("misaligned stack in cgocallback") +} + +// called from (incomplete) assembly. 
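+
+// The restore flag threaded through cgocallbackg1 and unwindm above is an
+// instance of the arm-then-disarm defer idiom. A stand-alone sketch of the
+// same pattern (all names here are illustrative, not runtime APIs):
+//
+//	func doStep() (err error) {
+//		undo := true
+//		defer func() {
+//			if undo {
+//				rollback() // hypothetical cleanup for the failure path
+//			}
+//		}()
+//		if err := step(); err != nil { // hypothetical unit of work
+//			return err // undo is still true, so rollback runs
+//		}
+//		undo = false // success: disarm the deferred cleanup
+//		return nil
+//	}
+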
+func cgounimpl() { + throw("cgo not implemented") +} + +var racecgosync uint64 // represents possible synchronization in C code + +// Pointer checking for cgo code. + +// We want to detect all cases where a program that does not use +// unsafe makes a cgo call passing a Go pointer to memory that +// contains an unpinned Go pointer. Here a Go pointer is defined as a +// pointer to memory allocated by the Go runtime. Programs that use +// unsafe can evade this restriction easily, so we don't try to catch +// them. The cgo program will rewrite all possibly bad pointer +// arguments to call cgoCheckPointer, where we can catch cases of a Go +// pointer pointing to an unpinned Go pointer. + +// Complicating matters, taking the address of a slice or array +// element permits the C program to access all elements of the slice +// or array. In that case we will see a pointer to a single element, +// but we need to check the entire data structure. + +// The cgoCheckPointer call takes additional arguments indicating that +// it was called on an address expression. An additional argument of +// true means that it only needs to check a single element. An +// additional argument of a slice or array means that it needs to +// check the entire slice/array, but nothing else. Otherwise, the +// pointer could be anything, and we check the entire heap object, +// which is conservative but safe. + +// When and if we implement a moving garbage collector, +// cgoCheckPointer will pin the pointer for the duration of the cgo +// call. (This is necessary but not sufficient; the cgo program will +// also have to change to pin Go pointers that cannot point to Go +// pointers.) + +// cgoCheckPointer checks if the argument contains a Go pointer that +// points to an unpinned Go pointer, and panics if it does. +func cgoCheckPointer(ptr any, arg any) { + if !goexperiment.CgoCheck2 && debug.cgocheck == 0 { + return + } + + ep := efaceOf(&ptr) + t := ep._type + + top := true + if arg != nil && (t.Kind_&kindMask == kindPtr || t.Kind_&kindMask == kindUnsafePointer) { + p := ep.data + if t.Kind_&kindDirectIface == 0 { + p = *(*unsafe.Pointer)(p) + } + if p == nil || !cgoIsGoPointer(p) { + return + } + aep := efaceOf(&arg) + switch aep._type.Kind_ & kindMask { + case kindBool: + if t.Kind_&kindMask == kindUnsafePointer { + // We don't know the type of the element. + break + } + pt := (*ptrtype)(unsafe.Pointer(t)) + cgoCheckArg(pt.Elem, p, true, false, cgoCheckPointerFail) + return + case kindSlice: + // Check the slice rather than the pointer. + ep = aep + t = ep._type + case kindArray: + // Check the array rather than the pointer. + // Pass top as false since we have a pointer + // to the array. + ep = aep + t = ep._type + top = false + default: + throw("can't happen") + } + } + + cgoCheckArg(t, ep.data, t.Kind_&kindDirectIface == 0, top, cgoCheckPointerFail) +} + +const cgoCheckPointerFail = "cgo argument has Go pointer to unpinned Go pointer" +const cgoResultFail = "cgo result is unpinned Go pointer or points to unpinned Go pointer" + +// cgoCheckArg is the real work of cgoCheckPointer. The argument p +// is either a pointer to the value (of type t), or the value itself, +// depending on indir. The top parameter is whether we are at the top +// level, where Go pointers are allowed. Go pointers to pinned objects are +// allowed as long as they don't reference other unpinned pointers. 
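+
+// Viewed from user code, the checks here enforce the documented cgo
+// pointer-passing rules. A hedged sketch of both sides of the rule, assuming
+// a cgo file that declares a C function use(void *p); the box type is
+// illustrative:
+//
+//	type box struct{ p *int }
+//
+//	x := 42
+//	b := box{p: &x}
+//	C.use(unsafe.Pointer(&b)) // panics under cgocheck: Go pointer to unpinned Go pointer
+//
+//	var pin runtime.Pinner
+//	pin.Pin(&x)               // pin the inner pointee first...
+//	C.use(unsafe.Pointer(&b)) // ...and the same call is now permitted
+//	pin.Unpin()
+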
+func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { + if t.PtrBytes == 0 || p == nil { + // If the type has no pointers there is nothing to do. + return + } + + switch t.Kind_ & kindMask { + default: + throw("can't happen") + case kindArray: + at := (*arraytype)(unsafe.Pointer(t)) + if !indir { + if at.Len != 1 { + throw("can't happen") + } + cgoCheckArg(at.Elem, p, at.Elem.Kind_&kindDirectIface == 0, top, msg) + return + } + for i := uintptr(0); i < at.Len; i++ { + cgoCheckArg(at.Elem, p, true, top, msg) + p = add(p, at.Elem.Size_) + } + case kindChan, kindMap: + // These types contain internal pointers that will + // always be allocated in the Go heap. It's never OK + // to pass them to C. + panic(errorString(msg)) + case kindFunc: + if indir { + p = *(*unsafe.Pointer)(p) + } + if !cgoIsGoPointer(p) { + return + } + panic(errorString(msg)) + case kindInterface: + it := *(**_type)(p) + if it == nil { + return + } + // A type known at compile time is OK since it's + // constant. A type not known at compile time will be + // in the heap and will not be OK. + if inheap(uintptr(unsafe.Pointer(it))) { + panic(errorString(msg)) + } + p = *(*unsafe.Pointer)(add(p, goarch.PtrSize)) + if !cgoIsGoPointer(p) { + return + } + if !top && !isPinned(p) { + panic(errorString(msg)) + } + cgoCheckArg(it, p, it.Kind_&kindDirectIface == 0, false, msg) + case kindSlice: + st := (*slicetype)(unsafe.Pointer(t)) + s := (*slice)(p) + p = s.array + if p == nil || !cgoIsGoPointer(p) { + return + } + if !top && !isPinned(p) { + panic(errorString(msg)) + } + if st.Elem.PtrBytes == 0 { + return + } + for i := 0; i < s.cap; i++ { + cgoCheckArg(st.Elem, p, true, false, msg) + p = add(p, st.Elem.Size_) + } + case kindString: + ss := (*stringStruct)(p) + if !cgoIsGoPointer(ss.str) { + return + } + if !top && !isPinned(ss.str) { + panic(errorString(msg)) + } + case kindStruct: + st := (*structtype)(unsafe.Pointer(t)) + if !indir { + if len(st.Fields) != 1 { + throw("can't happen") + } + cgoCheckArg(st.Fields[0].Typ, p, st.Fields[0].Typ.Kind_&kindDirectIface == 0, top, msg) + return + } + for _, f := range st.Fields { + if f.Typ.PtrBytes == 0 { + continue + } + cgoCheckArg(f.Typ, add(p, f.Offset), true, top, msg) + } + case kindPtr, kindUnsafePointer: + if indir { + p = *(*unsafe.Pointer)(p) + if p == nil { + return + } + } + + if !cgoIsGoPointer(p) { + return + } + if !top && !isPinned(p) { + panic(errorString(msg)) + } + + cgoCheckUnknownPointer(p, msg) + } +} + +// cgoCheckUnknownPointer is called for an arbitrary pointer into Go +// memory. It checks whether that Go memory contains any other +// pointer into unpinned Go memory. If it does, we panic. +// The return values are unused but useful to see in panic tracebacks. 
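+
+// Note that the kindSlice case above checks all s.cap elements, not just the
+// one whose address was taken, matching the rule that handing C the address
+// of one element exposes the whole backing array. Illustrative sketch (use
+// is a hypothetical C function):
+//
+//	s := make([]*int, 4)
+//	s[3] = new(int)              // a Go pointer far from the passed element
+//	C.use(unsafe.Pointer(&s[0])) // still panics: the check walks the full array
+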
+func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) { + if inheap(uintptr(p)) { + b, span, _ := findObject(uintptr(p), 0, 0) + base = b + if base == 0 { + return + } + if goexperiment.AllocHeaders { + tp := span.typePointersOfUnchecked(base) + for { + var addr uintptr + if tp, addr = tp.next(base + span.elemsize); addr == 0 { + break + } + pp := *(*unsafe.Pointer)(unsafe.Pointer(addr)) + if cgoIsGoPointer(pp) && !isPinned(pp) { + panic(errorString(msg)) + } + } + } else { + n := span.elemsize + hbits := heapBitsForAddr(base, n) + for { + var addr uintptr + if hbits, addr = hbits.next(); addr == 0 { + break + } + pp := *(*unsafe.Pointer)(unsafe.Pointer(addr)) + if cgoIsGoPointer(pp) && !isPinned(pp) { + panic(errorString(msg)) + } + } + } + return + } + + for _, datap := range activeModules() { + if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) { + // We have no way to know the size of the object. + // We have to assume that it might contain a pointer. + panic(errorString(msg)) + } + // In the text or noptr sections, we know that the + // pointer does not point to a Go pointer. + } + + return +} + +// cgoIsGoPointer reports whether the pointer is a Go pointer--a +// pointer to Go memory. We only care about Go memory that might +// contain pointers. +// +//go:nosplit +//go:nowritebarrierrec +func cgoIsGoPointer(p unsafe.Pointer) bool { + if p == nil { + return false + } + + if inHeapOrStack(uintptr(p)) { + return true + } + + for _, datap := range activeModules() { + if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) { + return true + } + } + + return false +} + +// cgoInRange reports whether p is between start and end. +// +//go:nosplit +//go:nowritebarrierrec +func cgoInRange(p unsafe.Pointer, start, end uintptr) bool { + return start <= uintptr(p) && uintptr(p) < end +} + +// cgoCheckResult is called to check the result parameter of an +// exported Go function. It panics if the result is or contains any +// other pointer into unpinned Go memory. +func cgoCheckResult(val any) { + if !goexperiment.CgoCheck2 && debug.cgocheck == 0 { + return + } + + ep := efaceOf(&val) + t := ep._type + cgoCheckArg(t, ep.data, t.Kind_&kindDirectIface == 0, false, cgoResultFail) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/cgocallback.go b/platform/dbops/binaries/go/go/src/runtime/cgocallback.go new file mode 100644 index 0000000000000000000000000000000000000000..59953f1ceed0e3e6f2bd0e9df02cda684c080dcc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/cgocallback.go @@ -0,0 +1,13 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// These functions are called from C code via cgo/callbacks.go. + +// Panic. + +func _cgo_panic_internal(p *byte) { + panic(gostringnocopy(p)) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/cgocheck.go b/platform/dbops/binaries/go/go/src/runtime/cgocheck.go new file mode 100644 index 0000000000000000000000000000000000000000..3d6de4f855dc335ea699dc8dcf79ee157c78fcbd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/cgocheck.go @@ -0,0 +1,306 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code to check that pointer writes follow the cgo rules. 
+// These functions are invoked when GOEXPERIMENT=cgocheck2 is enabled. + +package runtime + +import ( + "internal/goarch" + "internal/goexperiment" + "unsafe" +) + +const cgoWriteBarrierFail = "unpinned Go pointer stored into non-Go memory" + +// cgoCheckPtrWrite is called whenever a pointer is stored into memory. +// It throws if the program is storing an unpinned Go pointer into non-Go +// memory. +// +// This is called from generated code when GOEXPERIMENT=cgocheck2 is enabled. +// +//go:nosplit +//go:nowritebarrier +func cgoCheckPtrWrite(dst *unsafe.Pointer, src unsafe.Pointer) { + if !mainStarted { + // Something early in startup hates this function. + // Don't start doing any actual checking until the + // runtime has set itself up. + return + } + if !cgoIsGoPointer(src) { + return + } + if cgoIsGoPointer(unsafe.Pointer(dst)) { + return + } + + // If we are running on the system stack then dst might be an + // address on the stack, which is OK. + gp := getg() + if gp == gp.m.g0 || gp == gp.m.gsignal { + return + } + + // Allocating memory can write to various mfixalloc structs + // that look like they are non-Go memory. + if gp.m.mallocing != 0 { + return + } + + // If the object is pinned, it's safe to store it in C memory. The GC + // ensures it will not be moved or freed. + if isPinned(src) { + return + } + + // It's OK if writing to memory allocated by persistentalloc. + // Do this check last because it is more expensive and rarely true. + // If it is false the expense doesn't matter since we are crashing. + if inPersistentAlloc(uintptr(unsafe.Pointer(dst))) { + return + } + + systemstack(func() { + println("write of unpinned Go pointer", hex(uintptr(src)), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst)))) + throw(cgoWriteBarrierFail) + }) +} + +// cgoCheckMemmove is called when moving a block of memory. +// It throws if the program is copying a block that contains an unpinned Go +// pointer into non-Go memory. +// +// This is called from generated code when GOEXPERIMENT=cgocheck2 is enabled. +// +//go:nosplit +//go:nowritebarrier +func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer) { + cgoCheckMemmove2(typ, dst, src, 0, typ.Size_) +} + +// cgoCheckMemmove2 is called when moving a block of memory. +// dst and src point off bytes into the value to copy. +// size is the number of bytes to copy. +// It throws if the program is copying a block that contains an unpinned Go +// pointer into non-Go memory. +// +//go:nosplit +//go:nowritebarrier +func cgoCheckMemmove2(typ *_type, dst, src unsafe.Pointer, off, size uintptr) { + if typ.PtrBytes == 0 { + return + } + if !cgoIsGoPointer(src) { + return + } + if cgoIsGoPointer(dst) { + return + } + cgoCheckTypedBlock(typ, src, off, size) +} + +// cgoCheckSliceCopy is called when copying n elements of a slice. +// src and dst are pointers to the first element of the slice. +// typ is the element type of the slice. +// It throws if the program is copying slice elements that contain unpinned Go +// pointers into non-Go memory. +// +//go:nosplit +//go:nowritebarrier +func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int) { + if typ.PtrBytes == 0 { + return + } + if !cgoIsGoPointer(src) { + return + } + if cgoIsGoPointer(dst) { + return + } + p := src + for i := 0; i < n; i++ { + cgoCheckTypedBlock(typ, p, 0, typ.Size_) + p = add(p, typ.Size_) + } +} + +// cgoCheckTypedBlock checks the block of memory at src, for up to size bytes, +// and throws if it finds an unpinned Go pointer. 
The type of the memory is typ, +// and src is off bytes into that type. +// +//go:nosplit +//go:nowritebarrier +func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) { + // Anything past typ.PtrBytes is not a pointer. + if typ.PtrBytes <= off { + return + } + if ptrdataSize := typ.PtrBytes - off; size > ptrdataSize { + size = ptrdataSize + } + + if typ.Kind_&kindGCProg == 0 { + cgoCheckBits(src, typ.GCData, off, size) + return + } + + // The type has a GC program. Try to find GC bits somewhere else. + for _, datap := range activeModules() { + if cgoInRange(src, datap.data, datap.edata) { + doff := uintptr(src) - datap.data + cgoCheckBits(add(src, -doff), datap.gcdatamask.bytedata, off+doff, size) + return + } + if cgoInRange(src, datap.bss, datap.ebss) { + boff := uintptr(src) - datap.bss + cgoCheckBits(add(src, -boff), datap.gcbssmask.bytedata, off+boff, size) + return + } + } + + s := spanOfUnchecked(uintptr(src)) + if s.state.get() == mSpanManual { + // There are no heap bits for value stored on the stack. + // For a channel receive src might be on the stack of some + // other goroutine, so we can't unwind the stack even if + // we wanted to. + // We can't expand the GC program without extra storage + // space we can't easily get. + // Fortunately we have the type information. + systemstack(func() { + cgoCheckUsingType(typ, src, off, size) + }) + return + } + + // src must be in the regular heap. + if goexperiment.AllocHeaders { + tp := s.typePointersOf(uintptr(src), size) + for { + var addr uintptr + if tp, addr = tp.next(uintptr(src) + size); addr == 0 { + break + } + v := *(*unsafe.Pointer)(unsafe.Pointer(addr)) + if cgoIsGoPointer(v) && !isPinned(v) { + throw(cgoWriteBarrierFail) + } + } + } else { + hbits := heapBitsForAddr(uintptr(src), size) + for { + var addr uintptr + if hbits, addr = hbits.next(); addr == 0 { + break + } + v := *(*unsafe.Pointer)(unsafe.Pointer(addr)) + if cgoIsGoPointer(v) && !isPinned(v) { + throw(cgoWriteBarrierFail) + } + } + } +} + +// cgoCheckBits checks the block of memory at src, for up to size +// bytes, and throws if it finds an unpinned Go pointer. The gcbits mark each +// pointer value. The src pointer is off bytes into the gcbits. +// +//go:nosplit +//go:nowritebarrier +func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) { + skipMask := off / goarch.PtrSize / 8 + skipBytes := skipMask * goarch.PtrSize * 8 + ptrmask := addb(gcbits, skipMask) + src = add(src, skipBytes) + off -= skipBytes + size += off + var bits uint32 + for i := uintptr(0); i < size; i += goarch.PtrSize { + if i&(goarch.PtrSize*8-1) == 0 { + bits = uint32(*ptrmask) + ptrmask = addb(ptrmask, 1) + } else { + bits >>= 1 + } + if off > 0 { + off -= goarch.PtrSize + } else { + if bits&1 != 0 { + v := *(*unsafe.Pointer)(add(src, i)) + if cgoIsGoPointer(v) && !isPinned(v) { + throw(cgoWriteBarrierFail) + } + } + } + } +} + +// cgoCheckUsingType is like cgoCheckTypedBlock, but is a last ditch +// fall back to look for pointers in src using the type information. +// We only use this when looking at a value on the stack when the type +// uses a GC program, because otherwise it's more efficient to use the +// GC bits. This is called on the system stack. +// +//go:nowritebarrier +//go:systemstack +func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) { + if typ.PtrBytes == 0 { + return + } + + // Anything past typ.PtrBytes is not a pointer. 
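+
+	// (Aside: cgoCheckBits above consumes a pointer bitmap in the classic
+	// layout: one bit per pointer-sized word, eight words per byte, least
+	// significant bit first. A stand-alone reader for that layout, as a
+	// sketch; wordMayBePointer is not a runtime function:
+	//
+	//	func wordMayBePointer(gcbits []byte, word uintptr) bool {
+	//		return gcbits[word/8]>>(word%8)&1 != 0
+	//	}
+	// )
+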
+ if typ.PtrBytes <= off { + return + } + if ptrdataSize := typ.PtrBytes - off; size > ptrdataSize { + size = ptrdataSize + } + + if typ.Kind_&kindGCProg == 0 { + cgoCheckBits(src, typ.GCData, off, size) + return + } + switch typ.Kind_ & kindMask { + default: + throw("can't happen") + case kindArray: + at := (*arraytype)(unsafe.Pointer(typ)) + for i := uintptr(0); i < at.Len; i++ { + if off < at.Elem.Size_ { + cgoCheckUsingType(at.Elem, src, off, size) + } + src = add(src, at.Elem.Size_) + skipped := off + if skipped > at.Elem.Size_ { + skipped = at.Elem.Size_ + } + checked := at.Elem.Size_ - skipped + off -= skipped + if size <= checked { + return + } + size -= checked + } + case kindStruct: + st := (*structtype)(unsafe.Pointer(typ)) + for _, f := range st.Fields { + if off < f.Typ.Size_ { + cgoCheckUsingType(f.Typ, src, off, size) + } + src = add(src, f.Typ.Size_) + skipped := off + if skipped > f.Typ.Size_ { + skipped = f.Typ.Size_ + } + checked := f.Typ.Size_ - skipped + off -= skipped + if size <= checked { + return + } + size -= checked + } + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/chan.go b/platform/dbops/binaries/go/go/src/runtime/chan.go new file mode 100644 index 0000000000000000000000000000000000000000..ff9e2a9155af22cfccfdbbebb0bdd56df91d551c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/chan.go @@ -0,0 +1,851 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// This file contains the implementation of Go channels. + +// Invariants: +// At least one of c.sendq and c.recvq is empty, +// except for the case of an unbuffered channel with a single goroutine +// blocked on it for both sending and receiving using a select statement, +// in which case the length of c.sendq and c.recvq is limited only by the +// size of the select statement. +// +// For buffered channels, also: +// c.qcount > 0 implies that c.recvq is empty. +// c.qcount < c.dataqsiz implies that c.sendq is empty. + +import ( + "internal/abi" + "runtime/internal/atomic" + "runtime/internal/math" + "unsafe" +) + +const ( + maxAlign = 8 + hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1)) + debugChan = false +) + +type hchan struct { + qcount uint // total data in the queue + dataqsiz uint // size of the circular queue + buf unsafe.Pointer // points to an array of dataqsiz elements + elemsize uint16 + closed uint32 + elemtype *_type // element type + sendx uint // send index + recvx uint // receive index + recvq waitq // list of recv waiters + sendq waitq // list of send waiters + + // lock protects all fields in hchan, as well as several + // fields in sudogs blocked on this channel. + // + // Do not change another G's status while holding this lock + // (in particular, do not ready a G), as this can deadlock + // with stack shrinking. + lock mutex +} + +type waitq struct { + first *sudog + last *sudog +} + +//go:linkname reflect_makechan reflect.makechan +func reflect_makechan(t *chantype, size int) *hchan { + return makechan(t, size) +} + +func makechan64(t *chantype, size int64) *hchan { + if int64(int(size)) != size { + panic(plainError("makechan: size out of range")) + } + + return makechan(t, int(size)) +} + +func makechan(t *chantype, size int) *hchan { + elem := t.Elem + + // compiler checks this but be safe. 
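+
+	// (Aside: the hchanSize constant above rounds unsafe.Sizeof(hchan{}) up
+	// to a multiple of maxAlign with the two's-complement identity
+	// x + (-x & (a-1)). A worked sketch; roundUp is illustrative:
+	//
+	//	// e.g. x = 90, a = 8: -90 & 7 == 6, and 90 + 6 == 96.
+	//	func roundUp(x, a uintptr) uintptr {
+	//		return x + (-x & (a - 1))
+	//	}
+	// )
+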
+	if elem.Size_ >= 1<<16 {
+		throw("makechan: invalid channel element type")
+	}
+	if hchanSize%maxAlign != 0 || elem.Align_ > maxAlign {
+		throw("makechan: bad alignment")
+	}
+
+	mem, overflow := math.MulUintptr(elem.Size_, uintptr(size))
+	if overflow || mem > maxAlloc-hchanSize || size < 0 {
+		panic(plainError("makechan: size out of range"))
+	}
+
+	// Hchan does not contain pointers interesting for GC when the elements stored in buf do not contain pointers.
+	// buf points into the same allocation, elemtype is persistent.
+	// SudoG's are referenced from their owning thread so they can't be collected.
+	// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
+	var c *hchan
+	switch {
+	case mem == 0:
+		// Queue or element size is zero.
+		c = (*hchan)(mallocgc(hchanSize, nil, true))
+		// Race detector uses this location for synchronization.
+		c.buf = c.raceaddr()
+	case elem.PtrBytes == 0:
+		// Elements do not contain pointers.
+		// Allocate hchan and buf in one call.
+		c = (*hchan)(mallocgc(hchanSize+mem, nil, true))
+		c.buf = add(unsafe.Pointer(c), hchanSize)
+	default:
+		// Elements contain pointers.
+		c = new(hchan)
+		c.buf = mallocgc(mem, elem, true)
+	}
+
+	c.elemsize = uint16(elem.Size_)
+	c.elemtype = elem
+	c.dataqsiz = uint(size)
+	lockInit(&c.lock, lockRankHchan)
+
+	if debugChan {
+		print("makechan: chan=", c, "; elemsize=", elem.Size_, "; dataqsiz=", size, "\n")
+	}
+	return c
+}
+
+// chanbuf(c, i) is pointer to the i'th slot in the buffer.
+func chanbuf(c *hchan, i uint) unsafe.Pointer {
+	return add(c.buf, uintptr(i)*uintptr(c.elemsize))
+}
+
+// full reports whether a send on c would block (that is, the channel is full).
+// It uses a single word-sized read of mutable state, so although
+// the answer is instantaneously true, the correct answer may have changed
+// by the time the calling function receives the return value.
+func full(c *hchan) bool {
+	// c.dataqsiz is immutable (never written after the channel is created)
+	// so it is safe to read at any time during channel operation.
+	if c.dataqsiz == 0 {
+		// Assumes that a pointer read is relaxed-atomic.
+		return c.recvq.first == nil
+	}
+	// Assumes that a uint read is relaxed-atomic.
+	return c.qcount == c.dataqsiz
+}
+
+// entry point for c <- x from compiled code.
+//
+//go:nosplit
+func chansend1(c *hchan, elem unsafe.Pointer) {
+	chansend(c, elem, true, getcallerpc())
+}
+
+/*
+ * generic single channel send/recv
+ * If block is false,
+ * then the protocol will not
+ * sleep but return if it could
+ * not complete.
+ *
+ * sleep can wake up with g.param == nil
+ * when a channel involved in the sleep has
+ * been closed. it is easiest to loop and re-run
+ * the operation; we'll see that it's now closed.
+ */
+func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
+	if c == nil {
+		if !block {
+			return false
+		}
+		gopark(nil, nil, waitReasonChanSendNilChan, traceBlockForever, 2)
+		throw("unreachable")
+	}
+
+	if debugChan {
+		print("chansend: chan=", c, "\n")
+	}
+
+	if raceenabled {
+		racereadpc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(chansend))
+	}
+
+	// Fast path: check for failed non-blocking operation without acquiring the lock.
+	//
+	// After observing that the channel is not closed, we observe that the channel is
+	// not ready for sending. Each of these observations is a single word-sized read
+	// (first c.closed and second full()).
+ // Because a closed channel cannot transition from 'ready for sending' to + // 'not ready for sending', even if the channel is closed between the two observations, + // they imply a moment between the two when the channel was both not yet closed + // and not ready for sending. We behave as if we observed the channel at that moment, + // and report that the send cannot proceed. + // + // It is okay if the reads are reordered here: if we observe that the channel is not + // ready for sending and then observe that it is not closed, that implies that the + // channel wasn't closed during the first observation. However, nothing here + // guarantees forward progress. We rely on the side effects of lock release in + // chanrecv() and closechan() to update this thread's view of c.closed and full(). + if !block && c.closed == 0 && full(c) { + return false + } + + var t0 int64 + if blockprofilerate > 0 { + t0 = cputicks() + } + + lock(&c.lock) + + if c.closed != 0 { + unlock(&c.lock) + panic(plainError("send on closed channel")) + } + + if sg := c.recvq.dequeue(); sg != nil { + // Found a waiting receiver. We pass the value we want to send + // directly to the receiver, bypassing the channel buffer (if any). + send(c, sg, ep, func() { unlock(&c.lock) }, 3) + return true + } + + if c.qcount < c.dataqsiz { + // Space is available in the channel buffer. Enqueue the element to send. + qp := chanbuf(c, c.sendx) + if raceenabled { + racenotify(c, c.sendx, nil) + } + typedmemmove(c.elemtype, qp, ep) + c.sendx++ + if c.sendx == c.dataqsiz { + c.sendx = 0 + } + c.qcount++ + unlock(&c.lock) + return true + } + + if !block { + unlock(&c.lock) + return false + } + + // Block on the channel. Some receiver will complete our operation for us. + gp := getg() + mysg := acquireSudog() + mysg.releasetime = 0 + if t0 != 0 { + mysg.releasetime = -1 + } + // No stack splits between assigning elem and enqueuing mysg + // on gp.waiting where copystack can find it. + mysg.elem = ep + mysg.waitlink = nil + mysg.g = gp + mysg.isSelect = false + mysg.c = c + gp.waiting = mysg + gp.param = nil + c.sendq.enqueue(mysg) + // Signal to anyone trying to shrink our stack that we're about + // to park on a channel. The window between when this G's status + // changes and when we set gp.activeStackChans is not safe for + // stack shrinking. + gp.parkingOnChan.Store(true) + gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceBlockChanSend, 2) + // Ensure the value being sent is kept alive until the + // receiver copies it out. The sudog has a pointer to the + // stack object, but sudogs aren't considered as roots of the + // stack tracer. + KeepAlive(ep) + + // someone woke us up. + if mysg != gp.waiting { + throw("G waiting list is corrupted") + } + gp.waiting = nil + gp.activeStackChans = false + closed := !mysg.success + gp.param = nil + if mysg.releasetime > 0 { + blockevent(mysg.releasetime-t0, 2) + } + mysg.c = nil + releaseSudog(mysg) + if closed { + if c.closed == 0 { + throw("chansend: spurious wakeup") + } + panic(plainError("send on closed channel")) + } + return true +} + +// send processes a send operation on an empty channel c. +// The value ep sent by the sender is copied to the receiver sg. +// The receiver is then woken up to go on its merry way. +// Channel c must be empty and locked. send unlocks c with unlockf. +// sg must already be dequeued from c. +// ep must be non-nil and point to the heap or the caller's stack. 
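+
+// The sendx/recvx bookkeeping in chansend above (and in recv below) is a
+// fixed-size ring buffer that wraps with a comparison instead of a modulo.
+// A minimal sketch of the same index discipline (the ring type is
+// illustrative):
+//
+//	type ring struct {
+//		buf []int
+//		x   uint // next slot to fill
+//	}
+//
+//	func (r *ring) put(v int) {
+//		r.buf[r.x] = v
+//		r.x++
+//		if r.x == uint(len(r.buf)) {
+//			r.x = 0 // wrap; equivalent to r.x %= uint(len(r.buf))
+//		}
+//	}
+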
+func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) { + if raceenabled { + if c.dataqsiz == 0 { + racesync(c, sg) + } else { + // Pretend we go through the buffer, even though + // we copy directly. Note that we need to increment + // the head/tail locations only when raceenabled. + racenotify(c, c.recvx, nil) + racenotify(c, c.recvx, sg) + c.recvx++ + if c.recvx == c.dataqsiz { + c.recvx = 0 + } + c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz + } + } + if sg.elem != nil { + sendDirect(c.elemtype, sg, ep) + sg.elem = nil + } + gp := sg.g + unlockf() + gp.param = unsafe.Pointer(sg) + sg.success = true + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp, skip+1) +} + +// Sends and receives on unbuffered or empty-buffered channels are the +// only operations where one running goroutine writes to the stack of +// another running goroutine. The GC assumes that stack writes only +// happen when the goroutine is running and are only done by that +// goroutine. Using a write barrier is sufficient to make up for +// violating that assumption, but the write barrier has to work. +// typedmemmove will call bulkBarrierPreWrite, but the target bytes +// are not in the heap, so that will not help. We arrange to call +// memmove and typeBitsBulkBarrier instead. + +func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) { + // src is on our stack, dst is a slot on another stack. + + // Once we read sg.elem out of sg, it will no longer + // be updated if the destination's stack gets copied (shrunk). + // So make sure that no preemption points can happen between read & use. + dst := sg.elem + typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_) + // No need for cgo write barrier checks because dst is always + // Go memory. + memmove(dst, src, t.Size_) +} + +func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) { + // dst is on our stack or the heap, src is on another stack. + // The channel is locked, so src will not move during this + // operation. + src := sg.elem + typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_) + memmove(dst, src, t.Size_) +} + +func closechan(c *hchan) { + if c == nil { + panic(plainError("close of nil channel")) + } + + lock(&c.lock) + if c.closed != 0 { + unlock(&c.lock) + panic(plainError("close of closed channel")) + } + + if raceenabled { + callerpc := getcallerpc() + racewritepc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(closechan)) + racerelease(c.raceaddr()) + } + + c.closed = 1 + + var glist gList + + // release all readers + for { + sg := c.recvq.dequeue() + if sg == nil { + break + } + if sg.elem != nil { + typedmemclr(c.elemtype, sg.elem) + sg.elem = nil + } + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + gp := sg.g + gp.param = unsafe.Pointer(sg) + sg.success = false + if raceenabled { + raceacquireg(gp, c.raceaddr()) + } + glist.push(gp) + } + + // release all writers (they will panic) + for { + sg := c.sendq.dequeue() + if sg == nil { + break + } + sg.elem = nil + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + gp := sg.g + gp.param = unsafe.Pointer(sg) + sg.success = false + if raceenabled { + raceacquireg(gp, c.raceaddr()) + } + glist.push(gp) + } + unlock(&c.lock) + + // Ready all Gs now that we've dropped the channel lock. + for !glist.empty() { + gp := glist.pop() + gp.schedlink = 0 + goready(gp, 3) + } +} + +// empty reports whether a read from c would block (that is, the channel is +// empty). It uses a single atomic read of mutable state. 
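+
+// The closechan behavior above is what users observe as close semantics:
+// values already in the buffer are still delivered, later receives yield the
+// zero value with ok == false, and parked senders panic. A runnable
+// illustration:
+//
+//	c := make(chan int, 1)
+//	c <- 7
+//	close(c)
+//	v, ok := <-c // 7, true: the buffered value survives close
+//	v, ok = <-c  // 0, false: drained and closed
+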
+func empty(c *hchan) bool {
+	// c.dataqsiz is immutable.
+	if c.dataqsiz == 0 {
+		return atomic.Loadp(unsafe.Pointer(&c.sendq.first)) == nil
+	}
+	return atomic.Loaduint(&c.qcount) == 0
+}
+
+// entry points for <- c from compiled code.
+//
+//go:nosplit
+func chanrecv1(c *hchan, elem unsafe.Pointer) {
+	chanrecv(c, elem, true)
+}
+
+//go:nosplit
+func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool) {
+	_, received = chanrecv(c, elem, true)
+	return
+}
+
+// chanrecv receives on channel c and writes the received data to ep.
+// ep may be nil, in which case received data is ignored.
+// If block == false and no elements are available, returns (false, false).
+// Otherwise, if c is closed, zeros *ep and returns (true, false).
+// Otherwise, fills in *ep with an element and returns (true, true).
+// A non-nil ep must point to the heap or the caller's stack.
+func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
+	// raceenabled: don't need to check ep, as it is always on the stack
+	// or is new memory allocated by reflect.
+
+	if debugChan {
+		print("chanrecv: chan=", c, "\n")
+	}
+
+	if c == nil {
+		if !block {
+			return
+		}
+		gopark(nil, nil, waitReasonChanReceiveNilChan, traceBlockForever, 2)
+		throw("unreachable")
+	}
+
+	// Fast path: check for failed non-blocking operation without acquiring the lock.
+	if !block && empty(c) {
+		// After observing that the channel is not ready for receiving, we observe whether the
+		// channel is closed.
+		//
+		// Reordering of these checks could lead to incorrect behavior when racing with a close.
+		// For example, if the channel was open and not empty, was closed, and then drained,
+		// reordered reads could incorrectly indicate "open and empty". To prevent reordering,
+		// we use atomic loads for both checks, and rely on emptying and closing to happen in
+		// separate critical sections under the same lock. This assumption fails when closing
+		// an unbuffered channel with a blocked send, but that is an error condition anyway.
+		if atomic.Load(&c.closed) == 0 {
+			// Because a channel cannot be reopened, the later observation of the channel
+			// being not closed implies that it was also not closed at the moment of the
+			// first observation. We behave as if we observed the channel at that moment
+			// and report that the receive cannot proceed.
+			return
+		}
+		// The channel is irreversibly closed. Re-check whether the channel has any pending data
+		// to receive, which could have arrived between the empty and closed checks above.
+		// Sequential consistency is also required here, when racing with such a send.
+		if empty(c) {
+			// The channel is irreversibly closed and empty.
+			if raceenabled {
+				raceacquire(c.raceaddr())
+			}
+			if ep != nil {
+				typedmemclr(c.elemtype, ep)
+			}
+			return true, false
+		}
+	}
+
+	var t0 int64
+	if blockprofilerate > 0 {
+		t0 = cputicks()
+	}
+
+	lock(&c.lock)
+
+	if c.closed != 0 {
+		if c.qcount == 0 {
+			if raceenabled {
+				raceacquire(c.raceaddr())
+			}
+			unlock(&c.lock)
+			if ep != nil {
+				typedmemclr(c.elemtype, ep)
+			}
+			return true, false
+		}
+		// The channel has been closed, but the channel's buffer still has data.
+	} else {
+		// The channel is not closed, so look for a waiting sender.
+		if sg := c.sendq.dequeue(); sg != nil {
+			// Found a waiting sender. If buffer is size 0, receive value
+			// directly from sender. Otherwise, receive from head of queue
+			// and add sender's value to the tail of the queue (both map to
+			// the same buffer slot because the queue is full).
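+
+			// One way to see the comment above: the rotation keeps FIFO
+			// order even while a sender is parked. Sketch (the
+			// synchronization is elided for brevity):
+			//
+			//	c := make(chan int, 1)
+			//	c <- 1
+			//	go func() { c <- 2 }() // parks: buffer is full
+			//	// once the sender is parked, <-c yields 1 (head of queue)
+			//	// and 2 is placed into the freed buffer slot
+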
+ recv(c, sg, ep, func() { unlock(&c.lock) }, 3) + return true, true + } + } + + if c.qcount > 0 { + // Receive directly from queue + qp := chanbuf(c, c.recvx) + if raceenabled { + racenotify(c, c.recvx, nil) + } + if ep != nil { + typedmemmove(c.elemtype, ep, qp) + } + typedmemclr(c.elemtype, qp) + c.recvx++ + if c.recvx == c.dataqsiz { + c.recvx = 0 + } + c.qcount-- + unlock(&c.lock) + return true, true + } + + if !block { + unlock(&c.lock) + return false, false + } + + // no sender available: block on this channel. + gp := getg() + mysg := acquireSudog() + mysg.releasetime = 0 + if t0 != 0 { + mysg.releasetime = -1 + } + // No stack splits between assigning elem and enqueuing mysg + // on gp.waiting where copystack can find it. + mysg.elem = ep + mysg.waitlink = nil + gp.waiting = mysg + mysg.g = gp + mysg.isSelect = false + mysg.c = c + gp.param = nil + c.recvq.enqueue(mysg) + // Signal to anyone trying to shrink our stack that we're about + // to park on a channel. The window between when this G's status + // changes and when we set gp.activeStackChans is not safe for + // stack shrinking. + gp.parkingOnChan.Store(true) + gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceBlockChanRecv, 2) + + // someone woke us up + if mysg != gp.waiting { + throw("G waiting list is corrupted") + } + gp.waiting = nil + gp.activeStackChans = false + if mysg.releasetime > 0 { + blockevent(mysg.releasetime-t0, 2) + } + success := mysg.success + gp.param = nil + mysg.c = nil + releaseSudog(mysg) + return true, success +} + +// recv processes a receive operation on a full channel c. +// There are 2 parts: +// 1. The value sent by the sender sg is put into the channel +// and the sender is woken up to go on its merry way. +// 2. The value received by the receiver (the current G) is +// written to ep. +// +// For synchronous channels, both values are the same. +// For asynchronous channels, the receiver gets its data from +// the channel buffer and the sender's data is put in the +// channel buffer. +// Channel c must be full and locked. recv unlocks c with unlockf. +// sg must already be dequeued from c. +// A non-nil ep must point to the heap or the caller's stack. +func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) { + if c.dataqsiz == 0 { + if raceenabled { + racesync(c, sg) + } + if ep != nil { + // copy data from sender + recvDirect(c.elemtype, sg, ep) + } + } else { + // Queue is full. Take the item at the + // head of the queue. Make the sender enqueue + // its item at the tail of the queue. Since the + // queue is full, those are both the same slot. + qp := chanbuf(c, c.recvx) + if raceenabled { + racenotify(c, c.recvx, nil) + racenotify(c, c.recvx, sg) + } + // copy data from queue to receiver + if ep != nil { + typedmemmove(c.elemtype, ep, qp) + } + // copy data from sender to queue + typedmemmove(c.elemtype, qp, sg.elem) + c.recvx++ + if c.recvx == c.dataqsiz { + c.recvx = 0 + } + c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz + } + sg.elem = nil + gp := sg.g + unlockf() + gp.param = unsafe.Pointer(sg) + sg.success = true + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp, skip+1) +} + +func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool { + // There are unlocked sudogs that point into gp's stack. Stack + // copying must lock the channels of those sudogs. + // Set activeStackChans here instead of before we try parking + // because we could self-deadlock in stack growth on the + // channel lock. 
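+
+	// The discipline below (store the flags, then unlock) is the usual
+	// publish-by-unlock pattern: releasing a mutex makes every earlier
+	// store visible to the next acquirer. Stand-alone sketch (mu and
+	// ready are illustrative):
+	//
+	//	mu.Lock()
+	//	ready = true // published no later than...
+	//	mu.Unlock()  // ...this release
+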
+ gp.activeStackChans = true + // Mark that it's safe for stack shrinking to occur now, + // because any thread acquiring this G's stack for shrinking + // is guaranteed to observe activeStackChans after this store. + gp.parkingOnChan.Store(false) + // Make sure we unlock after setting activeStackChans and + // unsetting parkingOnChan. The moment we unlock chanLock + // we risk gp getting readied by a channel operation and + // so gp could continue running before everything before + // the unlock is visible (even to gp itself). + unlock((*mutex)(chanLock)) + return true +} + +// compiler implements +// +// select { +// case c <- v: +// ... foo +// default: +// ... bar +// } +// +// as +// +// if selectnbsend(c, v) { +// ... foo +// } else { +// ... bar +// } +func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool) { + return chansend(c, elem, false, getcallerpc()) +} + +// compiler implements +// +// select { +// case v, ok = <-c: +// ... foo +// default: +// ... bar +// } +// +// as +// +// if selected, ok = selectnbrecv(&v, c); selected { +// ... foo +// } else { +// ... bar +// } +func selectnbrecv(elem unsafe.Pointer, c *hchan) (selected, received bool) { + return chanrecv(c, elem, false) +} + +//go:linkname reflect_chansend reflect.chansend0 +func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool) { + return chansend(c, elem, !nb, getcallerpc()) +} + +//go:linkname reflect_chanrecv reflect.chanrecv +func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) { + return chanrecv(c, elem, !nb) +} + +//go:linkname reflect_chanlen reflect.chanlen +func reflect_chanlen(c *hchan) int { + if c == nil { + return 0 + } + return int(c.qcount) +} + +//go:linkname reflectlite_chanlen internal/reflectlite.chanlen +func reflectlite_chanlen(c *hchan) int { + if c == nil { + return 0 + } + return int(c.qcount) +} + +//go:linkname reflect_chancap reflect.chancap +func reflect_chancap(c *hchan) int { + if c == nil { + return 0 + } + return int(c.dataqsiz) +} + +//go:linkname reflect_chanclose reflect.chanclose +func reflect_chanclose(c *hchan) { + closechan(c) +} + +func (q *waitq) enqueue(sgp *sudog) { + sgp.next = nil + x := q.last + if x == nil { + sgp.prev = nil + q.first = sgp + q.last = sgp + return + } + sgp.prev = x + x.next = sgp + q.last = sgp +} + +func (q *waitq) dequeue() *sudog { + for { + sgp := q.first + if sgp == nil { + return nil + } + y := sgp.next + if y == nil { + q.first = nil + q.last = nil + } else { + y.prev = nil + q.first = y + sgp.next = nil // mark as removed (see dequeueSudoG) + } + + // if a goroutine was put on this queue because of a + // select, there is a small window between the goroutine + // being woken up by a different case and it grabbing the + // channel locks. Once it has the lock + // it removes itself from the queue, so we won't see it after that. + // We use a flag in the G struct to tell us when someone + // else has won the race to signal this goroutine but the goroutine + // hasn't removed itself from the queue yet. + if sgp.isSelect && !sgp.g.selectDone.CompareAndSwap(0, 1) { + continue + } + + return sgp + } +} + +func (c *hchan) raceaddr() unsafe.Pointer { + // Treat read-like and write-like operations on the channel to + // happen at this address. Avoid using the address of qcount + // or dataqsiz, because the len() and cap() builtins read + // those addresses, and we don't want them racing with + // operations like close(). 
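+
+	// (Aside: the selectDone CompareAndSwap in dequeue above is the
+	// standard "exactly one winner" idiom. A stand-alone sketch using the
+	// public sync/atomic API:
+	//
+	//	var won atomic.Bool
+	//	claim := func() bool { return won.CompareAndSwap(false, true) }
+	//	// any number of goroutines may call claim; exactly one gets true
+	//
+	// In dequeue, a loser of that race is simply skipped and the scan
+	// continues.)
+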
+	return unsafe.Pointer(&c.buf)
+}
+
+func racesync(c *hchan, sg *sudog) {
+	racerelease(chanbuf(c, 0))
+	raceacquireg(sg.g, chanbuf(c, 0))
+	racereleaseg(sg.g, chanbuf(c, 0))
+	raceacquire(chanbuf(c, 0))
+}
+
+// Notify the race detector of a send or receive involving buffer entry idx
+// and a channel c or its communicating partner sg.
+// This function handles the special case of c.elemsize==0.
+func racenotify(c *hchan, idx uint, sg *sudog) {
+	// We could have passed the unsafe.Pointer corresponding to entry idx
+	// instead of idx itself. However, in a future version of this function,
+	// we can use idx to better handle the case of elemsize==0.
+	// A future improvement to the detector is to call TSan with c and idx:
+	// this way, Go will continue to not allocate buffer entries for channels
+	// of elemsize==0, yet the race detector can be made to handle multiple
+	// sync objects underneath the hood (one sync object per idx).
+	qp := chanbuf(c, idx)
+	// When elemsize==0, we don't allocate a full buffer for the channel.
+	// Instead of individual buffer entries, the race detector uses the
+	// c.buf as the only buffer entry. This simplification prevents us from
+	// following the memory model's happens-before rules (rules that are
+	// implemented in racereleaseacquire). Instead, we accumulate happens-before
+	// information in the synchronization object associated with c.buf.
+	if c.elemsize == 0 {
+		if sg == nil {
+			raceacquire(qp)
+			racerelease(qp)
+		} else {
+			raceacquireg(sg.g, qp)
+			racereleaseg(sg.g, qp)
+		}
+	} else {
+		if sg == nil {
+			racereleaseacquire(qp)
+		} else {
+			racereleaseacquireg(sg.g, qp)
+		}
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/chan_test.go b/platform/dbops/binaries/go/go/src/runtime/chan_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..526d45bb43013675593ba87265bc0cad176b0741
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/chan_test.go
@@ -0,0 +1,1222 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+	"internal/testenv"
+	"math"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"testing"
+	"time"
+)
+
+func TestChan(t *testing.T) {
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+	N := 200
+	if testing.Short() {
+		N = 20
+	}
+	for chanCap := 0; chanCap < N; chanCap++ {
+		{
+			// Ensure that receive from empty chan blocks.
+			c := make(chan int, chanCap)
+			recv1 := false
+			go func() {
+				_ = <-c
+				recv1 = true
+			}()
+			recv2 := false
+			go func() {
+				_, _ = <-c
+				recv2 = true
+			}()
+			time.Sleep(time.Millisecond)
+			if recv1 || recv2 {
+				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
+			}
+			// Ensure that non-blocking receive does not block.
+			select {
+			case _ = <-c:
+				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
+			default:
+			}
+			select {
+			case _, _ = <-c:
+				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
+			default:
+			}
+			c <- 0
+			c <- 0
+		}
+
+		{
+			// Ensure that send to full chan blocks.
+			c := make(chan int, chanCap)
+			for i := 0; i < chanCap; i++ {
+				c <- i
+			}
+			sent := uint32(0)
+			go func() {
+				c <- 0
+				atomic.StoreUint32(&sent, 1)
+			}()
+			time.Sleep(time.Millisecond)
+			if atomic.LoadUint32(&sent) != 0 {
+				t.Fatalf("chan[%d]: send to full chan", chanCap)
+			}
+			// Ensure that non-blocking send does not block.
+ select { + case c <- 0: + t.Fatalf("chan[%d]: send to full chan", chanCap) + default: + } + <-c + } + + { + // Ensure that we receive 0 from closed chan. + c := make(chan int, chanCap) + for i := 0; i < chanCap; i++ { + c <- i + } + close(c) + for i := 0; i < chanCap; i++ { + v := <-c + if v != i { + t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i) + } + } + if v := <-c; v != 0 { + t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0) + } + if v, ok := <-c; v != 0 || ok { + t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false) + } + } + + { + // Ensure that close unblocks receive. + c := make(chan int, chanCap) + done := make(chan bool) + go func() { + v, ok := <-c + done <- v == 0 && ok == false + }() + time.Sleep(time.Millisecond) + close(c) + if !<-done { + t.Fatalf("chan[%d]: received non zero from closed chan", chanCap) + } + } + + { + // Send 100 integers, + // ensure that we receive them non-corrupted in FIFO order. + c := make(chan int, chanCap) + go func() { + for i := 0; i < 100; i++ { + c <- i + } + }() + for i := 0; i < 100; i++ { + v := <-c + if v != i { + t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i) + } + } + + // Same, but using recv2. + go func() { + for i := 0; i < 100; i++ { + c <- i + } + }() + for i := 0; i < 100; i++ { + v, ok := <-c + if !ok { + t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i) + } + if v != i { + t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i) + } + } + + // Send 1000 integers in 4 goroutines, + // ensure that we receive what we send. + const P = 4 + const L = 1000 + for p := 0; p < P; p++ { + go func() { + for i := 0; i < L; i++ { + c <- i + } + }() + } + done := make(chan map[int]int) + for p := 0; p < P; p++ { + go func() { + recv := make(map[int]int) + for i := 0; i < L; i++ { + v := <-c + recv[v] = recv[v] + 1 + } + done <- recv + }() + } + recv := make(map[int]int) + for p := 0; p < P; p++ { + for k, v := range <-done { + recv[k] = recv[k] + v + } + } + if len(recv) != L { + t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L) + } + for _, v := range recv { + if v != P { + t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P) + } + } + } + + { + // Test len/cap. + c := make(chan int, chanCap) + if len(c) != 0 || cap(c) != chanCap { + t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c)) + } + for i := 0; i < chanCap; i++ { + c <- i + } + if len(c) != chanCap || cap(c) != chanCap { + t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c)) + } + } + + } +} + +func TestNonblockRecvRace(t *testing.T) { + n := 10000 + if testing.Short() { + n = 100 + } + for i := 0; i < n; i++ { + c := make(chan int, 1) + c <- 1 + go func() { + select { + case <-c: + default: + t.Error("chan is not ready") + } + }() + close(c) + <-c + if t.Failed() { + return + } + } +} + +// This test checks that select acts on the state of the channels at one +// moment in the execution, not over a smeared time window. +// In the test, one goroutine does: +// +// create c1, c2 +// make c1 ready for receiving +// create second goroutine +// make c2 ready for receiving +// make c1 no longer ready for receiving (if possible) +// +// The second goroutine does a non-blocking select receiving from c1 and c2. 
+// From the time the second goroutine is created, at least one of c1 and c2 +// is always ready for receiving, so the select in the second goroutine must +// always receive from one or the other. It must never execute the default case. +func TestNonblockSelectRace(t *testing.T) { + n := 100000 + if testing.Short() { + n = 1000 + } + done := make(chan bool, 1) + for i := 0; i < n; i++ { + c1 := make(chan int, 1) + c2 := make(chan int, 1) + c1 <- 1 + go func() { + select { + case <-c1: + case <-c2: + default: + done <- false + return + } + done <- true + }() + c2 <- 1 + select { + case <-c1: + default: + } + if !<-done { + t.Fatal("no chan is ready") + } + } +} + +// Same as TestNonblockSelectRace, but close(c2) replaces c2 <- 1. +func TestNonblockSelectRace2(t *testing.T) { + n := 100000 + if testing.Short() { + n = 1000 + } + done := make(chan bool, 1) + for i := 0; i < n; i++ { + c1 := make(chan int, 1) + c2 := make(chan int) + c1 <- 1 + go func() { + select { + case <-c1: + case <-c2: + default: + done <- false + return + } + done <- true + }() + close(c2) + select { + case <-c1: + default: + } + if !<-done { + t.Fatal("no chan is ready") + } + } +} + +func TestSelfSelect(t *testing.T) { + // Ensure that send/recv on the same chan in select + // does not crash nor deadlock. + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) + for _, chanCap := range []int{0, 10} { + var wg sync.WaitGroup + wg.Add(2) + c := make(chan int, chanCap) + for p := 0; p < 2; p++ { + p := p + go func() { + defer wg.Done() + for i := 0; i < 1000; i++ { + if p == 0 || i%2 == 0 { + select { + case c <- p: + case v := <-c: + if chanCap == 0 && v == p { + t.Errorf("self receive") + return + } + } + } else { + select { + case v := <-c: + if chanCap == 0 && v == p { + t.Errorf("self receive") + return + } + case c <- p: + } + } + } + }() + } + wg.Wait() + } +} + +func TestSelectStress(t *testing.T) { + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10)) + var c [4]chan int + c[0] = make(chan int) + c[1] = make(chan int) + c[2] = make(chan int, 2) + c[3] = make(chan int, 3) + N := int(1e5) + if testing.Short() { + N /= 10 + } + // There are 4 goroutines that send N values on each of the chans, + // + 4 goroutines that receive N values on each of the chans, + // + 1 goroutine that sends N values on each of the chans in a single select, + // + 1 goroutine that receives N values on each of the chans in a single select. + // All these sends, receives and selects interact chaotically at runtime, + // but we are careful that this whole construct does not deadlock. 
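+
+	// The send/recv goroutines below retire a finished case by setting its
+	// channel variable to nil: a send or receive on a nil channel blocks
+	// forever, so select never picks that case again. Minimal illustration:
+	//
+	//	var c chan int // nil
+	//	select {
+	//	case <-c: // never selected
+	//	case <-time.After(time.Millisecond):
+	//	}
+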
+ var wg sync.WaitGroup + wg.Add(10) + for k := 0; k < 4; k++ { + k := k + go func() { + for i := 0; i < N; i++ { + c[k] <- 0 + } + wg.Done() + }() + go func() { + for i := 0; i < N; i++ { + <-c[k] + } + wg.Done() + }() + } + go func() { + var n [4]int + c1 := c + for i := 0; i < 4*N; i++ { + select { + case c1[3] <- 0: + n[3]++ + if n[3] == N { + c1[3] = nil + } + case c1[2] <- 0: + n[2]++ + if n[2] == N { + c1[2] = nil + } + case c1[0] <- 0: + n[0]++ + if n[0] == N { + c1[0] = nil + } + case c1[1] <- 0: + n[1]++ + if n[1] == N { + c1[1] = nil + } + } + } + wg.Done() + }() + go func() { + var n [4]int + c1 := c + for i := 0; i < 4*N; i++ { + select { + case <-c1[0]: + n[0]++ + if n[0] == N { + c1[0] = nil + } + case <-c1[1]: + n[1]++ + if n[1] == N { + c1[1] = nil + } + case <-c1[2]: + n[2]++ + if n[2] == N { + c1[2] = nil + } + case <-c1[3]: + n[3]++ + if n[3] == N { + c1[3] = nil + } + } + } + wg.Done() + }() + wg.Wait() +} + +func TestSelectFairness(t *testing.T) { + const trials = 10000 + if runtime.GOOS == "linux" && runtime.GOARCH == "ppc64le" { + testenv.SkipFlaky(t, 22047) + } + c1 := make(chan byte, trials+1) + c2 := make(chan byte, trials+1) + for i := 0; i < trials+1; i++ { + c1 <- 1 + c2 <- 2 + } + c3 := make(chan byte) + c4 := make(chan byte) + out := make(chan byte) + done := make(chan byte) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + var b byte + select { + case b = <-c3: + case b = <-c4: + case b = <-c1: + case b = <-c2: + } + select { + case out <- b: + case <-done: + return + } + } + }() + cnt1, cnt2 := 0, 0 + for i := 0; i < trials; i++ { + switch b := <-out; b { + case 1: + cnt1++ + case 2: + cnt2++ + default: + t.Fatalf("unexpected value %d on channel", b) + } + } + // If the select in the goroutine is fair, + // cnt1 and cnt2 should be about the same value. + // See if we're more than 10 sigma away from the expected value. + // 10 sigma is a lot, but we're ok with some systematic bias as + // long as it isn't too severe. 
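+
+	// Concretely, with trials = 10000 a fair select makes cnt1 binomial
+	// with mean = 10000 * 0.5 = 5000 and stddev = sqrt(10000 * 0.25) = 50,
+	// so the 10 sigma bound below fails only on a split worse than
+	// roughly 5500/4500.
+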
+ const mean = trials * 0.5 + const variance = trials * 0.5 * (1 - 0.5) + stddev := math.Sqrt(variance) + if math.Abs(float64(cnt1-mean)) > 10*stddev { + t.Errorf("unfair select: in %d trials, results were %d, %d", trials, cnt1, cnt2) + } + close(done) + wg.Wait() +} + +func TestChanSendInterface(t *testing.T) { + type mt struct{} + m := &mt{} + c := make(chan any, 1) + c <- m + select { + case c <- m: + default: + } + select { + case c <- m: + case c <- &mt{}: + default: + } +} + +func TestPseudoRandomSend(t *testing.T) { + n := 100 + for _, chanCap := range []int{0, n} { + c := make(chan int, chanCap) + l := make([]int, n) + var m sync.Mutex + m.Lock() + go func() { + for i := 0; i < n; i++ { + runtime.Gosched() + l[i] = <-c + } + m.Unlock() + }() + for i := 0; i < n; i++ { + select { + case c <- 1: + case c <- 0: + } + } + m.Lock() // wait + n0 := 0 + n1 := 0 + for _, i := range l { + n0 += (i + 1) % 2 + n1 += i + } + if n0 <= n/10 || n1 <= n/10 { + t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap) + } + } +} + +func TestMultiConsumer(t *testing.T) { + const nwork = 23 + const niter = 271828 + + pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31} + + q := make(chan int, nwork*3) + r := make(chan int, nwork*3) + + // workers + var wg sync.WaitGroup + for i := 0; i < nwork; i++ { + wg.Add(1) + go func(w int) { + for v := range q { + // mess with the fifo-ish nature of range + if pn[w%len(pn)] == v { + runtime.Gosched() + } + r <- v + } + wg.Done() + }(i) + } + + // feeder & closer + expect := 0 + go func() { + for i := 0; i < niter; i++ { + v := pn[i%len(pn)] + expect += v + q <- v + } + close(q) // no more work + wg.Wait() // workers done + close(r) // ... so there can be no more results + }() + + // consume & check + n := 0 + s := 0 + for v := range r { + n++ + s += v + } + if n != niter || s != expect { + t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)", + expect, s, niter, n) + } +} + +func TestShrinkStackDuringBlockedSend(t *testing.T) { + // make sure that channel operations still work when we are + // blocked on a channel send and we shrink the stack. + // NOTE: this test probably won't fail unless stack1.go:stackDebug + // is set to >= 1. + const n = 10 + c := make(chan int) + done := make(chan struct{}) + + go func() { + for i := 0; i < n; i++ { + c <- i + // use lots of stack, briefly. + stackGrowthRecursive(20) + } + done <- struct{}{} + }() + + for i := 0; i < n; i++ { + x := <-c + if x != i { + t.Errorf("bad channel read: want %d, got %d", i, x) + } + // Waste some time so sender can finish using lots of stack + // and block in channel send. + time.Sleep(1 * time.Millisecond) + // trigger GC which will shrink the stack of the sender. + runtime.GC() + } + <-done +} + +func TestNoShrinkStackWhileParking(t *testing.T) { + if runtime.GOOS == "netbsd" && runtime.GOARCH == "arm64" { + testenv.SkipFlaky(t, 49382) + } + if runtime.GOOS == "openbsd" { + testenv.SkipFlaky(t, 51482) + } + + // The goal of this test is to trigger a "racy sudog adjustment" + // throw. Basically, there's a window between when a goroutine + // becomes available for preemption for stack scanning (and thus, + // stack shrinking) but before the goroutine has fully parked on a + // channel. See issue 40641 for more details on the problem. + // + // The way we try to induce this failure is to set up two + // goroutines: a sender and a receiver that communicate across + // a channel. 
We try to set up a situation where the sender + // grows its stack temporarily then *fully* blocks on a channel + // often. Meanwhile a GC is triggered so that we try to get a + // mark worker to shrink the sender's stack and race with the + // sender parking. + // + // Unfortunately the race window here is so small that we + // either need a ridiculous number of iterations, or we add + // "usleep(1000)" to park_m, just before the unlockf call. + const n = 10 + send := func(c chan<- int, done chan struct{}) { + for i := 0; i < n; i++ { + c <- i + // Use lots of stack briefly so that + // the GC is going to want to shrink us + // when it scans us. Make sure not to + // do any function calls otherwise + // in order to avoid us shrinking ourselves + // when we're preempted. + stackGrowthRecursive(20) + } + done <- struct{}{} + } + recv := func(c <-chan int, done chan struct{}) { + for i := 0; i < n; i++ { + // Sleep here so that the sender always + // fully blocks. + time.Sleep(10 * time.Microsecond) + <-c + } + done <- struct{}{} + } + for i := 0; i < n*20; i++ { + c := make(chan int) + done := make(chan struct{}) + go recv(c, done) + go send(c, done) + // Wait a little bit before triggering + // the GC to make sure the sender and + // receiver have gotten into their groove. + time.Sleep(50 * time.Microsecond) + runtime.GC() + <-done + <-done + } +} + +func TestSelectDuplicateChannel(t *testing.T) { + // This test makes sure we can queue a G on + // the same channel multiple times. + c := make(chan int) + d := make(chan int) + e := make(chan int) + + // goroutine A + go func() { + select { + case <-c: + case <-c: + case <-d: + } + e <- 9 + }() + time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c + + // goroutine B + go func() { + <-c + }() + time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing + + d <- 7 // wake up A, it dequeues itself from c. This operation used to corrupt c.recvq. + <-e // A tells us it's done + c <- 8 // wake up B. This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B) +} + +func TestSelectStackAdjust(t *testing.T) { + // Test that channel receive slots that contain local stack + // pointers are adjusted correctly by stack shrinking. + c := make(chan *int) + d := make(chan *int) + ready1 := make(chan bool) + ready2 := make(chan bool) + + f := func(ready chan bool, dup bool) { + // Temporarily grow the stack to 10K. + stackGrowthRecursive((10 << 10) / (128 * 8)) + + // We're ready to trigger GC and stack shrink. + ready <- true + + val := 42 + var cx *int + cx = &val + + var c2 chan *int + var d2 chan *int + if dup { + c2 = c + d2 = d + } + + // Receive from d. cx won't be affected. + select { + case cx = <-c: + case <-c2: + case <-d: + case <-d2: + } + + // Check that pointer in cx was adjusted correctly. + if cx != &val { + t.Error("cx no longer points to val") + } else if val != 42 { + t.Error("val changed") + } else { + *cx = 43 + if val != 43 { + t.Error("changing *cx failed to change val") + } + } + ready <- true + } + + go f(ready1, false) + go f(ready2, true) + + // Let the goroutines get into the select. + <-ready1 + <-ready2 + time.Sleep(10 * time.Millisecond) + + // Force concurrent GC to shrink the stacks. + runtime.GC() + + // Wake selects. 
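+	// Closing d readies the <-d case (and <-d2 in the dup goroutine),
+	// so both selects complete without assigning to cx.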
+ close(d) + <-ready1 + <-ready2 +} + +type struct0 struct{} + +func BenchmarkMakeChan(b *testing.B) { + b.Run("Byte", func(b *testing.B) { + var x chan byte + for i := 0; i < b.N; i++ { + x = make(chan byte, 8) + } + close(x) + }) + b.Run("Int", func(b *testing.B) { + var x chan int + for i := 0; i < b.N; i++ { + x = make(chan int, 8) + } + close(x) + }) + b.Run("Ptr", func(b *testing.B) { + var x chan *byte + for i := 0; i < b.N; i++ { + x = make(chan *byte, 8) + } + close(x) + }) + b.Run("Struct", func(b *testing.B) { + b.Run("0", func(b *testing.B) { + var x chan struct0 + for i := 0; i < b.N; i++ { + x = make(chan struct0, 8) + } + close(x) + }) + b.Run("32", func(b *testing.B) { + var x chan struct32 + for i := 0; i < b.N; i++ { + x = make(chan struct32, 8) + } + close(x) + }) + b.Run("40", func(b *testing.B) { + var x chan struct40 + for i := 0; i < b.N; i++ { + x = make(chan struct40, 8) + } + close(x) + }) + }) +} + +func BenchmarkChanNonblocking(b *testing.B) { + myc := make(chan int) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + select { + case <-myc: + default: + } + } + }) +} + +func BenchmarkSelectUncontended(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + myc1 := make(chan int, 1) + myc2 := make(chan int, 1) + myc1 <- 0 + for pb.Next() { + select { + case <-myc1: + myc2 <- 0 + case <-myc2: + myc1 <- 0 + } + } + }) +} + +func BenchmarkSelectSyncContended(b *testing.B) { + myc1 := make(chan int) + myc2 := make(chan int) + myc3 := make(chan int) + done := make(chan int) + b.RunParallel(func(pb *testing.PB) { + go func() { + for { + select { + case myc1 <- 0: + case myc2 <- 0: + case myc3 <- 0: + case <-done: + return + } + } + }() + for pb.Next() { + select { + case <-myc1: + case <-myc2: + case <-myc3: + } + } + }) + close(done) +} + +func BenchmarkSelectAsyncContended(b *testing.B) { + procs := runtime.GOMAXPROCS(0) + myc1 := make(chan int, procs) + myc2 := make(chan int, procs) + b.RunParallel(func(pb *testing.PB) { + myc1 <- 0 + for pb.Next() { + select { + case <-myc1: + myc2 <- 0 + case <-myc2: + myc1 <- 0 + } + } + }) +} + +func BenchmarkSelectNonblock(b *testing.B) { + myc1 := make(chan int) + myc2 := make(chan int) + myc3 := make(chan int, 1) + myc4 := make(chan int, 1) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + select { + case <-myc1: + default: + } + select { + case myc2 <- 0: + default: + } + select { + case <-myc3: + default: + } + select { + case myc4 <- 0: + default: + } + } + }) +} + +func BenchmarkChanUncontended(b *testing.B) { + const C = 100 + b.RunParallel(func(pb *testing.PB) { + myc := make(chan int, C) + for pb.Next() { + for i := 0; i < C; i++ { + myc <- 0 + } + for i := 0; i < C; i++ { + <-myc + } + } + }) +} + +func BenchmarkChanContended(b *testing.B) { + const C = 100 + myc := make(chan int, C*runtime.GOMAXPROCS(0)) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + for i := 0; i < C; i++ { + myc <- 0 + } + for i := 0; i < C; i++ { + <-myc + } + } + }) +} + +func benchmarkChanSync(b *testing.B, work int) { + const CallsPerSched = 1000 + procs := 2 + N := int32(b.N / CallsPerSched / procs * procs) + c := make(chan bool, procs) + myc := make(chan int) + for p := 0; p < procs; p++ { + go func() { + for { + i := atomic.AddInt32(&N, -1) + if i < 0 { + break + } + for g := 0; g < CallsPerSched; g++ { + if i%2 == 0 { + <-myc + localWork(work) + myc <- 0 + localWork(work) + } else { + myc <- 0 + localWork(work) + <-myc + localWork(work) + } + } + } + c <- true + }() + } + for p := 0; p < procs; p++ { + <-c + } 
+} + +func BenchmarkChanSync(b *testing.B) { + benchmarkChanSync(b, 0) +} + +func BenchmarkChanSyncWork(b *testing.B) { + benchmarkChanSync(b, 1000) +} + +func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) { + const CallsPerSched = 1000 + procs := runtime.GOMAXPROCS(-1) + N := int32(b.N / CallsPerSched) + c := make(chan bool, 2*procs) + myc := make(chan int, chanSize) + for p := 0; p < procs; p++ { + go func() { + foo := 0 + for atomic.AddInt32(&N, -1) >= 0 { + for g := 0; g < CallsPerSched; g++ { + for i := 0; i < localWork; i++ { + foo *= 2 + foo /= 2 + } + myc <- 1 + } + } + myc <- 0 + c <- foo == 42 + }() + go func() { + foo := 0 + for { + v := <-myc + if v == 0 { + break + } + for i := 0; i < localWork; i++ { + foo *= 2 + foo /= 2 + } + } + c <- foo == 42 + }() + } + for p := 0; p < procs; p++ { + <-c + <-c + } +} + +func BenchmarkChanProdCons0(b *testing.B) { + benchmarkChanProdCons(b, 0, 0) +} + +func BenchmarkChanProdCons10(b *testing.B) { + benchmarkChanProdCons(b, 10, 0) +} + +func BenchmarkChanProdCons100(b *testing.B) { + benchmarkChanProdCons(b, 100, 0) +} + +func BenchmarkChanProdConsWork0(b *testing.B) { + benchmarkChanProdCons(b, 0, 100) +} + +func BenchmarkChanProdConsWork10(b *testing.B) { + benchmarkChanProdCons(b, 10, 100) +} + +func BenchmarkChanProdConsWork100(b *testing.B) { + benchmarkChanProdCons(b, 100, 100) +} + +func BenchmarkSelectProdCons(b *testing.B) { + const CallsPerSched = 1000 + procs := runtime.GOMAXPROCS(-1) + N := int32(b.N / CallsPerSched) + c := make(chan bool, 2*procs) + myc := make(chan int, 128) + myclose := make(chan bool) + for p := 0; p < procs; p++ { + go func() { + // Producer: sends to myc. + foo := 0 + // Intended to not fire during benchmarking. + mytimer := time.After(time.Hour) + for atomic.AddInt32(&N, -1) >= 0 { + for g := 0; g < CallsPerSched; g++ { + // Model some local work. + for i := 0; i < 100; i++ { + foo *= 2 + foo /= 2 + } + select { + case myc <- 1: + case <-mytimer: + case <-myclose: + } + } + } + myc <- 0 + c <- foo == 42 + }() + go func() { + // Consumer: receives from myc. + foo := 0 + // Intended to not fire during benchmarking. + mytimer := time.After(time.Hour) + loop: + for { + select { + case v := <-myc: + if v == 0 { + break loop + } + case <-mytimer: + case <-myclose: + } + // Model some local work. 
+ for i := 0; i < 100; i++ { + foo *= 2 + foo /= 2 + } + } + c <- foo == 42 + }() + } + for p := 0; p < procs; p++ { + <-c + <-c + } +} + +func BenchmarkReceiveDataFromClosedChan(b *testing.B) { + count := b.N + ch := make(chan struct{}, count) + for i := 0; i < count; i++ { + ch <- struct{}{} + } + close(ch) + + b.ResetTimer() + for range ch { + } +} + +func BenchmarkChanCreation(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + myc := make(chan int, 1) + myc <- 0 + <-myc + } + }) +} + +func BenchmarkChanSem(b *testing.B) { + type Empty struct{} + myc := make(chan Empty, runtime.GOMAXPROCS(0)) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + myc <- Empty{} + <-myc + } + }) +} + +func BenchmarkChanPopular(b *testing.B) { + const n = 1000 + c := make(chan bool) + var a []chan bool + var wg sync.WaitGroup + wg.Add(n) + for j := 0; j < n; j++ { + d := make(chan bool) + a = append(a, d) + go func() { + for i := 0; i < b.N; i++ { + select { + case <-c: + case <-d: + } + } + wg.Done() + }() + } + for i := 0; i < b.N; i++ { + for _, d := range a { + d <- true + } + } + wg.Wait() +} + +func BenchmarkChanClosed(b *testing.B) { + c := make(chan struct{}) + close(c) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + select { + case <-c: + default: + b.Error("Unreachable") + } + } + }) +} + +var ( + alwaysFalse = false + workSink = 0 +) + +func localWork(w int) { + foo := 0 + for i := 0; i < w; i++ { + foo /= (foo + 1) + } + if alwaysFalse { + workSink += foo + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/chanbarrier_test.go b/platform/dbops/binaries/go/go/src/runtime/chanbarrier_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d4795748bf1ffdb696e5a65a518b710dfc482930 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/chanbarrier_test.go @@ -0,0 +1,83 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime_test + +import ( + "runtime" + "sync" + "testing" +) + +type response struct { +} + +type myError struct { +} + +func (myError) Error() string { return "" } + +func doRequest(useSelect bool) (*response, error) { + type async struct { + resp *response + err error + } + ch := make(chan *async, 0) + done := make(chan struct{}, 0) + + if useSelect { + go func() { + select { + case ch <- &async{resp: nil, err: myError{}}: + case <-done: + } + }() + } else { + go func() { + ch <- &async{resp: nil, err: myError{}} + }() + } + + r := <-ch + runtime.Gosched() + return r.resp, r.err +} + +func TestChanSendSelectBarrier(t *testing.T) { + testChanSendBarrier(true) +} + +func TestChanSendBarrier(t *testing.T) { + testChanSendBarrier(false) +} + +func testChanSendBarrier(useSelect bool) { + var wg sync.WaitGroup + var globalMu sync.Mutex + outer := 100 + inner := 100000 + if testing.Short() || runtime.GOARCH == "wasm" { + outer = 10 + inner = 1000 + } + for i := 0; i < outer; i++ { + wg.Add(1) + go func() { + defer wg.Done() + var garbage []byte + for j := 0; j < inner; j++ { + _, err := doRequest(useSelect) + _, ok := err.(myError) + if !ok { + panic(1) + } + garbage = make([]byte, 1<<10) + } + globalMu.Lock() + global = garbage + globalMu.Unlock() + }() + } + wg.Wait() +} diff --git a/platform/dbops/binaries/go/go/src/runtime/checkptr.go b/platform/dbops/binaries/go/go/src/runtime/checkptr.go new file mode 100644 index 0000000000000000000000000000000000000000..3c49645a446ffc7c331f5b4f02c41fb0a42367e8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/checkptr.go @@ -0,0 +1,109 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func checkptrAlignment(p unsafe.Pointer, elem *_type, n uintptr) { + // nil pointer is always suitably aligned (#47430). + if p == nil { + return + } + + // Check that (*[n]elem)(p) is appropriately aligned. + // Note that we allow unaligned pointers if the types they point to contain + // no pointers themselves. See issue 37298. + // TODO(mdempsky): What about fieldAlign? + if elem.PtrBytes != 0 && uintptr(p)&(uintptr(elem.Align_)-1) != 0 { + throw("checkptr: misaligned pointer conversion") + } + + // Check that (*[n]elem)(p) doesn't straddle multiple heap objects. + // TODO(mdempsky): Fix #46938 so we don't need to worry about overflow here. + if checkptrStraddles(p, n*elem.Size_) { + throw("checkptr: converted pointer straddles multiple allocations") + } +} + +// checkptrStraddles reports whether the first size-bytes of memory +// addressed by ptr is known to straddle more than one Go allocation. +func checkptrStraddles(ptr unsafe.Pointer, size uintptr) bool { + if size <= 1 { + return false + } + + // Check that add(ptr, size-1) won't overflow. This avoids the risk + // of producing an illegal pointer value (assuming ptr is legal). + if uintptr(ptr) >= -(size - 1) { + return true + } + end := add(ptr, size-1) + + // TODO(mdempsky): Detect when [ptr, end] contains Go allocations, + // but neither ptr nor end point into one themselves. 
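+	// Comparing the allocation bases of the first and last byte is
+	// sufficient for now: if they differ, the span necessarily crosses
+	// an allocation boundary.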
+ + return checkptrBase(ptr) != checkptrBase(end) +} + +func checkptrArithmetic(p unsafe.Pointer, originals []unsafe.Pointer) { + if 0 < uintptr(p) && uintptr(p) < minLegalPointer { + throw("checkptr: pointer arithmetic computed bad pointer value") + } + + // Check that if the computed pointer p points into a heap + // object, then one of the original pointers must have pointed + // into the same object. + base := checkptrBase(p) + if base == 0 { + return + } + + for _, original := range originals { + if base == checkptrBase(original) { + return + } + } + + throw("checkptr: pointer arithmetic result points to invalid allocation") +} + +// checkptrBase returns the base address for the allocation containing +// the address p. +// +// Importantly, if p1 and p2 point into the same variable, then +// checkptrBase(p1) == checkptrBase(p2). However, the converse/inverse +// is not necessarily true as allocations can have trailing padding, +// and multiple variables may be packed into a single allocation. +func checkptrBase(p unsafe.Pointer) uintptr { + // stack + if gp := getg(); gp.stack.lo <= uintptr(p) && uintptr(p) < gp.stack.hi { + // TODO(mdempsky): Walk the stack to identify the + // specific stack frame or even stack object that p + // points into. + // + // In the mean time, use "1" as a pseudo-address to + // represent the stack. This is an invalid address on + // all platforms, so it's guaranteed to be distinct + // from any of the addresses we might return below. + return 1 + } + + // heap (must check after stack because of #35068) + if base, _, _ := findObject(uintptr(p), 0, 0); base != 0 { + return base + } + + // data or bss + for _, datap := range activeModules() { + if datap.data <= uintptr(p) && uintptr(p) < datap.edata { + return datap.data + } + if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss { + return datap.bss + } + } + + return 0 +} diff --git a/platform/dbops/binaries/go/go/src/runtime/checkptr_test.go b/platform/dbops/binaries/go/go/src/runtime/checkptr_test.go new file mode 100644 index 0000000000000000000000000000000000000000..811c0f035534205684740b9569d0c26785315cd3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/checkptr_test.go @@ -0,0 +1,108 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "internal/testenv" + "os/exec" + "strings" + "testing" +) + +func TestCheckPtr(t *testing.T) { + // This test requires rebuilding packages with -d=checkptr=1, + // so it's somewhat slow. 
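+	// (Roughly the same instrumentation can be reproduced by hand with
+	// `go test -gcflags=all=-d=checkptr=1`; the flag value here simply
+	// mirrors the buildTestProg call below.)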
+ if testing.Short() { + t.Skip("skipping test in -short mode") + } + + t.Parallel() + testenv.MustHaveGoRun(t) + + exe, err := buildTestProg(t, "testprog", "-gcflags=all=-d=checkptr=1") + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + cmd string + want string + }{ + {"CheckPtrAlignmentPtr", "fatal error: checkptr: misaligned pointer conversion\n"}, + {"CheckPtrAlignmentNoPtr", ""}, + {"CheckPtrAlignmentNilPtr", ""}, + {"CheckPtrArithmetic", "fatal error: checkptr: pointer arithmetic result points to invalid allocation\n"}, + {"CheckPtrArithmetic2", "fatal error: checkptr: pointer arithmetic result points to invalid allocation\n"}, + {"CheckPtrSize", "fatal error: checkptr: converted pointer straddles multiple allocations\n"}, + {"CheckPtrSmall", "fatal error: checkptr: pointer arithmetic computed bad pointer value\n"}, + {"CheckPtrSliceOK", ""}, + {"CheckPtrSliceFail", "fatal error: checkptr: unsafe.Slice result straddles multiple allocations\n"}, + {"CheckPtrStringOK", ""}, + {"CheckPtrStringFail", "fatal error: checkptr: unsafe.String result straddles multiple allocations\n"}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.cmd, func(t *testing.T) { + t.Parallel() + got, err := testenv.CleanCmdEnv(exec.Command(exe, tc.cmd)).CombinedOutput() + if err != nil { + t.Log(err) + } + if tc.want == "" { + if len(got) > 0 { + t.Errorf("output:\n%s\nwant no output", got) + } + return + } + if !strings.HasPrefix(string(got), tc.want) { + t.Errorf("output:\n%s\n\nwant output starting with: %s", got, tc.want) + } + }) + } +} + +func TestCheckPtr2(t *testing.T) { + // This test requires rebuilding packages with -d=checkptr=2, + // so it's somewhat slow. + if testing.Short() { + t.Skip("skipping test in -short mode") + } + + t.Parallel() + testenv.MustHaveGoRun(t) + + exe, err := buildTestProg(t, "testprog", "-gcflags=all=-d=checkptr=2") + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + cmd string + want string + }{ + {"CheckPtrAlignmentNested", "fatal error: checkptr: converted pointer straddles multiple allocations\n"}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.cmd, func(t *testing.T) { + t.Parallel() + got, err := testenv.CleanCmdEnv(exec.Command(exe, tc.cmd)).CombinedOutput() + if err != nil { + t.Log(err) + } + if tc.want == "" { + if len(got) > 0 { + t.Errorf("output:\n%s\nwant no output", got) + } + return + } + if !strings.HasPrefix(string(got), tc.want) { + t.Errorf("output:\n%s\n\nwant output starting with: %s", got, tc.want) + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/closure_test.go b/platform/dbops/binaries/go/go/src/runtime/closure_test.go new file mode 100644 index 0000000000000000000000000000000000000000..741c932eabde0d35f035312f87ac5d1504096986 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/closure_test.go @@ -0,0 +1,54 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime_test + +import "testing" + +var s int + +func BenchmarkCallClosure(b *testing.B) { + for i := 0; i < b.N; i++ { + s += func(ii int) int { return 2 * ii }(i) + } +} + +func BenchmarkCallClosure1(b *testing.B) { + for i := 0; i < b.N; i++ { + j := i + s += func(ii int) int { return 2*ii + j }(i) + } +} + +var ss *int + +func BenchmarkCallClosure2(b *testing.B) { + for i := 0; i < b.N; i++ { + j := i + s += func() int { + ss = &j + return 2 + }() + } +} + +func addr1(x int) *int { + return func() *int { return &x }() +} + +func BenchmarkCallClosure3(b *testing.B) { + for i := 0; i < b.N; i++ { + ss = addr1(i) + } +} + +func addr2() (x int, p *int) { + return 0, func() *int { return &x }() +} + +func BenchmarkCallClosure4(b *testing.B) { + for i := 0; i < b.N; i++ { + _, ss = addr2() + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/compiler.go b/platform/dbops/binaries/go/go/src/runtime/compiler.go new file mode 100644 index 0000000000000000000000000000000000000000..f430a27719684559706586d10cbe9ef816dccb80 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/compiler.go @@ -0,0 +1,12 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// Compiler is the name of the compiler toolchain that built the +// running binary. Known toolchains are: +// +// gc Also known as cmd/compile. +// gccgo The gccgo front end, part of the GCC compiler suite. +const Compiler = "gc" diff --git a/platform/dbops/binaries/go/go/src/runtime/complex.go b/platform/dbops/binaries/go/go/src/runtime/complex.go new file mode 100644 index 0000000000000000000000000000000000000000..07c596fc0bb19833ba4a739ef55e1a500573507b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/complex.go @@ -0,0 +1,61 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// inf2one returns a signed 1 if f is an infinity and a signed 0 otherwise. +// The sign of the result is the sign of f. +func inf2one(f float64) float64 { + g := 0.0 + if isInf(f) { + g = 1.0 + } + return copysign(g, f) +} + +func complex128div(n complex128, m complex128) complex128 { + var e, f float64 // complex(e, f) = n/m + + // Algorithm for robust complex division as described in + // Robert L. Smith: Algorithm 116: Complex division. Commun. ACM 5(8): 435 (1962). + if abs(real(m)) >= abs(imag(m)) { + ratio := imag(m) / real(m) + denom := real(m) + ratio*imag(m) + e = (real(n) + imag(n)*ratio) / denom + f = (imag(n) - real(n)*ratio) / denom + } else { + ratio := real(m) / imag(m) + denom := imag(m) + ratio*real(m) + e = (real(n)*ratio + imag(n)) / denom + f = (imag(n)*ratio - real(n)) / denom + } + + if isNaN(e) && isNaN(f) { + // Correct final result to infinities and zeros if applicable. + // Matches C99: ISO/IEC 9899:1999 - G.5.1 Multiplicative operators. 
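+		// Three recovery cases follow, mirroring Annex G: a non-NaN
+		// numerator over a zero denominator becomes an infinity, an
+		// infinite numerator over a finite denominator becomes an
+		// infinity, and a finite numerator over an infinite denominator
+		// becomes a zero. For example, (1+1i)/(0+0i) yields (+Inf)+(+Inf)i.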
+ + a, b := real(n), imag(n) + c, d := real(m), imag(m) + + switch { + case m == 0 && (!isNaN(a) || !isNaN(b)): + e = copysign(inf, c) * a + f = copysign(inf, c) * b + + case (isInf(a) || isInf(b)) && isFinite(c) && isFinite(d): + a = inf2one(a) + b = inf2one(b) + e = inf * (a*c + b*d) + f = inf * (b*c - a*d) + + case (isInf(c) || isInf(d)) && isFinite(a) && isFinite(b): + c = inf2one(c) + d = inf2one(d) + e = 0 * (a*c + b*d) + f = 0 * (b*c - a*d) + } + } + + return complex(e, f) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/complex_test.go b/platform/dbops/binaries/go/go/src/runtime/complex_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f41e6a3570140cf71bbade054b7e5591b3ebc927 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/complex_test.go @@ -0,0 +1,67 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "math/cmplx" + "testing" +) + +var result complex128 + +func BenchmarkComplex128DivNormal(b *testing.B) { + d := 15 + 2i + n := 32 + 3i + res := 0i + for i := 0; i < b.N; i++ { + n += 0.1i + res += n / d + } + result = res +} + +func BenchmarkComplex128DivNisNaN(b *testing.B) { + d := cmplx.NaN() + n := 32 + 3i + res := 0i + for i := 0; i < b.N; i++ { + n += 0.1i + res += n / d + } + result = res +} + +func BenchmarkComplex128DivDisNaN(b *testing.B) { + d := 15 + 2i + n := cmplx.NaN() + res := 0i + for i := 0; i < b.N; i++ { + d += 0.1i + res += n / d + } + result = res +} + +func BenchmarkComplex128DivNisInf(b *testing.B) { + d := 15 + 2i + n := cmplx.Inf() + res := 0i + for i := 0; i < b.N; i++ { + d += 0.1i + res += n / d + } + result = res +} + +func BenchmarkComplex128DivDisInf(b *testing.B) { + d := cmplx.Inf() + n := 32 + 3i + res := 0i + for i := 0; i < b.N; i++ { + n += 0.1i + res += n / d + } + result = res +} diff --git a/platform/dbops/binaries/go/go/src/runtime/conv_wasm_test.go b/platform/dbops/binaries/go/go/src/runtime/conv_wasm_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5054fca04dc40a8b65c30e9451112d907c166ed1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/conv_wasm_test.go @@ -0,0 +1,128 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "testing" +) + +var res int64 +var ures uint64 + +func TestFloatTruncation(t *testing.T) { + testdata := []struct { + input float64 + convInt64 int64 + convUInt64 uint64 + overflow bool + }{ + // max +- 1 + { + input: 0x7fffffffffffffff, + convInt64: -0x8000000000000000, + convUInt64: 0x8000000000000000, + }, + // For out-of-bounds conversion, the result is implementation-dependent. + // This test verifies the implementation of wasm architecture. 
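+		// Near 2^63 adjacent float64 values are 1024 apart (2048 apart
+		// near 2^64), since a float64 has 53 significand bits. That
+		// spacing is why the truncation boundaries below fall at ...fc00
+		// and ...f800.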
+ { + input: 0x8000000000000000, + convInt64: -0x8000000000000000, + convUInt64: 0x8000000000000000, + }, + { + input: 0x7ffffffffffffffe, + convInt64: -0x8000000000000000, + convUInt64: 0x8000000000000000, + }, + // neg max +- 1 + { + input: -0x8000000000000000, + convInt64: -0x8000000000000000, + convUInt64: 0x8000000000000000, + }, + { + input: -0x8000000000000001, + convInt64: -0x8000000000000000, + convUInt64: 0x8000000000000000, + }, + { + input: -0x7fffffffffffffff, + convInt64: -0x8000000000000000, + convUInt64: 0x8000000000000000, + }, + // trunc point +- 1 + { + input: 0x7ffffffffffffdff, + convInt64: 0x7ffffffffffffc00, + convUInt64: 0x7ffffffffffffc00, + }, + { + input: 0x7ffffffffffffe00, + convInt64: -0x8000000000000000, + convUInt64: 0x8000000000000000, + }, + { + input: 0x7ffffffffffffdfe, + convInt64: 0x7ffffffffffffc00, + convUInt64: 0x7ffffffffffffc00, + }, + // neg trunc point +- 1 + { + input: -0x7ffffffffffffdff, + convInt64: -0x7ffffffffffffc00, + convUInt64: 0x8000000000000000, + }, + { + input: -0x7ffffffffffffe00, + convInt64: -0x8000000000000000, + convUInt64: 0x8000000000000000, + }, + { + input: -0x7ffffffffffffdfe, + convInt64: -0x7ffffffffffffc00, + convUInt64: 0x8000000000000000, + }, + // umax +- 1 + { + input: 0xffffffffffffffff, + convInt64: -0x8000000000000000, + convUInt64: 0x8000000000000000, + }, + { + input: 0x10000000000000000, + convInt64: -0x8000000000000000, + convUInt64: 0x8000000000000000, + }, + { + input: 0xfffffffffffffffe, + convInt64: -0x8000000000000000, + convUInt64: 0x8000000000000000, + }, + // umax trunc +- 1 + { + input: 0xfffffffffffffbff, + convInt64: -0x8000000000000000, + convUInt64: 0xfffffffffffff800, + }, + { + input: 0xfffffffffffffc00, + convInt64: -0x8000000000000000, + convUInt64: 0x8000000000000000, + }, + { + input: 0xfffffffffffffbfe, + convInt64: -0x8000000000000000, + convUInt64: 0xfffffffffffff800, + }, + } + for _, item := range testdata { + if got, want := int64(item.input), item.convInt64; got != want { + t.Errorf("int64(%f): got %x, want %x", item.input, got, want) + } + if got, want := uint64(item.input), item.convUInt64; got != want { + t.Errorf("uint64(%f): got %x, want %x", item.input, got, want) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/coro.go b/platform/dbops/binaries/go/go/src/runtime/coro.go new file mode 100644 index 0000000000000000000000000000000000000000..0d6666e343b520105fe03f7e110a3277414802db --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/coro.go @@ -0,0 +1,165 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +// A coro represents extra concurrency without extra parallelism, +// as would be needed for a coroutine implementation. +// The coro does not represent a specific coroutine, only the ability +// to do coroutine-style control transfers. +// It can be thought of as like a special channel that always has +// a goroutine blocked on it. If another goroutine calls coroswitch(c), +// the caller becomes the goroutine blocked in c, and the goroutine +// formerly blocked in c starts running. +// These switches continue until a call to coroexit(c), +// which ends the use of the coro by releasing the blocked +// goroutine in c and exiting the current goroutine. 
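+//
+// As a rough sketch of the intended control flow (hypothetical caller;
+// newcoro and coroswitch are the real functions defined below):
+//
+//	c := newcoro(func(c *coro) {
+//		// ... produce a value ...
+//		coroswitch(c) // hand control back
+//		// ... produce the next value ...
+//	})
+//	coroswitch(c) // run the body up to its next coroswitch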
+// +// Coros are heap allocated and garbage collected, so that user code +// can hold a pointer to a coro without causing potential dangling +// pointer errors. +type coro struct { + gp guintptr + f func(*coro) +} + +//go:linkname newcoro + +// newcoro creates a new coro containing a +// goroutine blocked waiting to run f +// and returns that coro. +func newcoro(f func(*coro)) *coro { + c := new(coro) + c.f = f + pc := getcallerpc() + gp := getg() + systemstack(func() { + start := corostart + startfv := *(**funcval)(unsafe.Pointer(&start)) + gp = newproc1(startfv, gp, pc) + }) + gp.coroarg = c + gp.waitreason = waitReasonCoroutine + casgstatus(gp, _Grunnable, _Gwaiting) + c.gp.set(gp) + return c +} + +//go:linkname corostart + +// corostart is the entry func for a new coroutine. +// It runs the coroutine user function f passed to corostart +// and then calls coroexit to remove the extra concurrency. +func corostart() { + gp := getg() + c := gp.coroarg + gp.coroarg = nil + + c.f(c) + coroexit(c) +} + +// coroexit is like coroswitch but closes the coro +// and exits the current goroutine +func coroexit(c *coro) { + gp := getg() + gp.coroarg = c + gp.coroexit = true + mcall(coroswitch_m) +} + +//go:linkname coroswitch + +// coroswitch switches to the goroutine blocked on c +// and then blocks the current goroutine on c. +func coroswitch(c *coro) { + gp := getg() + gp.coroarg = c + mcall(coroswitch_m) +} + +// coroswitch_m is the implementation of coroswitch +// that runs on the m stack. +// +// Note: Coroutine switches are expected to happen at +// an order of magnitude (or more) higher frequency +// than regular goroutine switches, so this path is heavily +// optimized to remove unnecessary work. +// The fast path here is three CAS: the one at the top on gp.atomicstatus, +// the one in the middle to choose the next g, +// and the one at the bottom on gnext.atomicstatus. +// It is important not to add more atomic operations or other +// expensive operations to the fast path. +func coroswitch_m(gp *g) { + // TODO(rsc,mknyszek): add tracing support in a lightweight manner. + // Probably the tracer will need a global bool (set and cleared during STW) + // that this code can check to decide whether to use trace.gen.Load(); + // we do not want to do the atomic load all the time, especially when + // tracer use is relatively rare. + c := gp.coroarg + gp.coroarg = nil + exit := gp.coroexit + gp.coroexit = false + mp := gp.m + + if exit { + gdestroy(gp) + gp = nil + } else { + // If we can CAS ourselves directly from running to waiting, so do, + // keeping the control transfer as lightweight as possible. + gp.waitreason = waitReasonCoroutine + if !gp.atomicstatus.CompareAndSwap(_Grunning, _Gwaiting) { + // The CAS failed: use casgstatus, which will take care of + // coordinating with the garbage collector about the state change. + casgstatus(gp, _Grunning, _Gwaiting) + } + + // Clear gp.m. + setMNoWB(&gp.m, nil) + } + + // The goroutine stored in c is the one to run next. + // Swap it with ourselves. + var gnext *g + for { + // Note: this is a racy load, but it will eventually + // get the right value, and if it gets the wrong value, + // the c.gp.cas will fail, so no harm done other than + // a wasted loop iteration. + // The cas will also sync c.gp's + // memory enough that the next iteration of the racy load + // should see the correct value. + // We are avoiding the atomic load to keep this path + // as lightweight as absolutely possible. 
+ // (The atomic load is free on x86 but not free elsewhere.) + next := c.gp + if next.ptr() == nil { + throw("coroswitch on exited coro") + } + var self guintptr + self.set(gp) + if c.gp.cas(next, self) { + gnext = next.ptr() + break + } + } + + // Start running next, without heavy scheduling machinery. + // Set mp.curg and gnext.m and then update scheduling state + // directly if possible. + setGNoWB(&mp.curg, gnext) + setMNoWB(&gnext.m, mp) + if !gnext.atomicstatus.CompareAndSwap(_Gwaiting, _Grunning) { + // The CAS failed: use casgstatus, which will take care of + // coordinating with the garbage collector about the state change. + casgstatus(gnext, _Gwaiting, _Grunnable) + casgstatus(gnext, _Grunnable, _Grunning) + } + + // Switch to gnext. Does not return. + gogo(&gnext.sched) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/covercounter.go b/platform/dbops/binaries/go/go/src/runtime/covercounter.go new file mode 100644 index 0000000000000000000000000000000000000000..72842bdd94c88853206804c19ab6afb168928472 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/covercounter.go @@ -0,0 +1,26 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/coverage/rtcov" + "unsafe" +) + +//go:linkname runtime_coverage_getCovCounterList runtime/coverage.getCovCounterList +func runtime_coverage_getCovCounterList() []rtcov.CovCounterBlob { + res := []rtcov.CovCounterBlob{} + u32sz := unsafe.Sizeof(uint32(0)) + for datap := &firstmoduledata; datap != nil; datap = datap.next { + if datap.covctrs == datap.ecovctrs { + continue + } + res = append(res, rtcov.CovCounterBlob{ + Counters: (*uint32)(unsafe.Pointer(datap.covctrs)), + Len: uint64((datap.ecovctrs - datap.covctrs) / u32sz), + }) + } + return res +} diff --git a/platform/dbops/binaries/go/go/src/runtime/covermeta.go b/platform/dbops/binaries/go/go/src/runtime/covermeta.go new file mode 100644 index 0000000000000000000000000000000000000000..54ef42ae1ff440bdcde58bd3700290383f074351 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/covermeta.go @@ -0,0 +1,72 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/coverage/rtcov" + "unsafe" +) + +// covMeta is the top-level container for bits of state related to +// code coverage meta-data in the runtime. +var covMeta struct { + // metaList contains the list of currently registered meta-data + // blobs for the running program. + metaList []rtcov.CovMetaBlob + + // pkgMap records mappings from hard-coded package IDs to + // slots in the covMetaList above. + pkgMap map[int]int + + // Set to true if we discover a package mapping glitch. + hardCodedListNeedsUpdating bool +} + +// addCovMeta is invoked during package "init" functions by the +// compiler when compiling for coverage instrumentation; here 'p' is a +// meta-data blob of length 'dlen' for the package in question, 'hash' +// is a compiler-computed md5.sum for the blob, 'pkpath' is the +// package path, 'pkid' is the hard-coded ID that the compiler is +// using for the package (or -1 if the compiler doesn't think a +// hard-coded ID is needed), and 'cmode'/'cgran' are the coverage +// counter mode and granularity requested by the user. Return value is +// the ID for the package for use by the package code itself. 
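+// Schematically, compiler-generated init code calls it along the lines
+// of (hypothetical values):
+//
+//	pkgID = addCovMeta(unsafe.Pointer(&meta[0]), uint32(len(meta)),
+//		hash, "example.com/pkg", 3, cmode, cgran)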
+func addCovMeta(p unsafe.Pointer, dlen uint32, hash [16]byte, pkpath string, pkid int, cmode uint8, cgran uint8) uint32 { + slot := len(covMeta.metaList) + covMeta.metaList = append(covMeta.metaList, + rtcov.CovMetaBlob{ + P: (*byte)(p), + Len: dlen, + Hash: hash, + PkgPath: pkpath, + PkgID: pkid, + CounterMode: cmode, + CounterGranularity: cgran, + }) + if pkid != -1 { + if covMeta.pkgMap == nil { + covMeta.pkgMap = make(map[int]int) + } + if _, ok := covMeta.pkgMap[pkid]; ok { + throw("runtime.addCovMeta: coverage package map collision") + } + // Record the real slot (position on meta-list) for this + // package; we'll use the map to fix things up later on. + covMeta.pkgMap[pkid] = slot + } + + // ID zero is reserved as invalid. + return uint32(slot + 1) +} + +//go:linkname runtime_coverage_getCovMetaList runtime/coverage.getCovMetaList +func runtime_coverage_getCovMetaList() []rtcov.CovMetaBlob { + return covMeta.metaList +} + +//go:linkname runtime_coverage_getCovPkgMap runtime/coverage.getCovPkgMap +func runtime_coverage_getCovPkgMap() map[int]int { + return covMeta.pkgMap +} diff --git a/platform/dbops/binaries/go/go/src/runtime/cpuflags.go b/platform/dbops/binaries/go/go/src/runtime/cpuflags.go new file mode 100644 index 0000000000000000000000000000000000000000..bbe93c5bea2d7e46d752b25ebfca2e7b1640bdd6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/cpuflags.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/cpu" + "unsafe" +) + +// Offsets into internal/cpu records for use in assembly. +const ( + offsetX86HasAVX = unsafe.Offsetof(cpu.X86.HasAVX) + offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2) + offsetX86HasERMS = unsafe.Offsetof(cpu.X86.HasERMS) + offsetX86HasRDTSCP = unsafe.Offsetof(cpu.X86.HasRDTSCP) + + offsetARMHasIDIVA = unsafe.Offsetof(cpu.ARM.HasIDIVA) + + offsetMIPS64XHasMSA = unsafe.Offsetof(cpu.MIPS64X.HasMSA) +) + +var ( + // Set in runtime.cpuinit. + // TODO: deprecate these; use internal/cpu directly. + x86HasPOPCNT bool + x86HasSSE41 bool + x86HasFMA bool + + armHasVFPv4 bool + + arm64HasATOMICS bool +) diff --git a/platform/dbops/binaries/go/go/src/runtime/cpuflags_amd64.go b/platform/dbops/binaries/go/go/src/runtime/cpuflags_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..8cca4bca8f0b5d08d8173dab921014d894ce8560 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/cpuflags_amd64.go @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/cpu" +) + +var useAVXmemmove bool + +func init() { + // Let's remove stepping and reserved fields + processor := processorVersionInfo & 0x0FFF3FF0 + + isIntelBridgeFamily := isIntel && + processor == 0x206A0 || + processor == 0x206D0 || + processor == 0x306A0 || + processor == 0x306E0 + + useAVXmemmove = cpu.X86.HasAVX && !isIntelBridgeFamily +} diff --git a/platform/dbops/binaries/go/go/src/runtime/cpuflags_arm64.go b/platform/dbops/binaries/go/go/src/runtime/cpuflags_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..2ed1811456131fa9946f36c9fae6d4bbd6eb9f63 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/cpuflags_arm64.go @@ -0,0 +1,17 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/cpu" +) + +var arm64UseAlignedLoads bool + +func init() { + if cpu.ARM64.IsNeoverse { + arm64UseAlignedLoads = true + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/cpuprof.go b/platform/dbops/binaries/go/go/src/runtime/cpuprof.go new file mode 100644 index 0000000000000000000000000000000000000000..b2898ba9094e108d5ce0df74ba9d0424dcfafda9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/cpuprof.go @@ -0,0 +1,241 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// CPU profiling. +// +// The signal handler for the profiling clock tick adds a new stack trace +// to a log of recent traces. The log is read by a user goroutine that +// turns it into formatted profile data. If the reader does not keep up +// with the log, those writes will be recorded as a count of lost records. +// The actual profile buffer is in profbuf.go. + +package runtime + +import ( + "internal/abi" + "runtime/internal/sys" + "unsafe" +) + +const ( + maxCPUProfStack = 64 + + // profBufWordCount is the size of the CPU profile buffer's storage for the + // header and stack of each sample, measured in 64-bit words. Every sample + // has a required header of two words. With a small additional header (a + // word or two) and stacks at the profiler's maximum length of 64 frames, + // that capacity can support 1900 samples or 19 thread-seconds at a 100 Hz + // sample rate, at a cost of 1 MiB. + profBufWordCount = 1 << 17 + // profBufTagCount is the size of the CPU profile buffer's storage for the + // goroutine tags associated with each sample. A capacity of 1<<14 means + // room for 16k samples, or 160 thread-seconds at a 100 Hz sample rate. + profBufTagCount = 1 << 14 +) + +type cpuProfile struct { + lock mutex + on bool // profiling is on + log *profBuf // profile events written here + + // extra holds extra stacks accumulated in addNonGo + // corresponding to profiling signals arriving on + // non-Go-created threads. Those stacks are written + // to log the next time a normal Go thread gets the + // signal handler. + // Assuming the stacks are 2 words each (we don't get + // a full traceback from those threads), plus one word + // size for framing, 100 Hz profiling would generate + // 300 words per second. + // Hopefully a normal Go thread will get the profiling + // signal at least once every few seconds. + extra [1000]uintptr + numExtra int + lostExtra uint64 // count of frames lost because extra is full + lostAtomic uint64 // count of frames lost because of being in atomic64 on mips/arm; updated racily +} + +var cpuprof cpuProfile + +// SetCPUProfileRate sets the CPU profiling rate to hz samples per second. +// If hz <= 0, SetCPUProfileRate turns off profiling. +// If the profiler is on, the rate cannot be changed without first turning it off. +// +// Most clients should use the [runtime/pprof] package or +// the [testing] package's -test.cpuprofile flag instead of calling +// SetCPUProfileRate directly. +func SetCPUProfileRate(hz int) { + // Clamp hz to something reasonable. 
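+	// (runtime/pprof requests 100 Hz by default; the 1000000 ceiling
+	// below is a sanity bound rather than a practically useful rate.)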
+ if hz < 0 { + hz = 0 + } + if hz > 1000000 { + hz = 1000000 + } + + lock(&cpuprof.lock) + if hz > 0 { + if cpuprof.on || cpuprof.log != nil { + print("runtime: cannot set cpu profile rate until previous profile has finished.\n") + unlock(&cpuprof.lock) + return + } + + cpuprof.on = true + cpuprof.log = newProfBuf(1, profBufWordCount, profBufTagCount) + hdr := [1]uint64{uint64(hz)} + cpuprof.log.write(nil, nanotime(), hdr[:], nil) + setcpuprofilerate(int32(hz)) + } else if cpuprof.on { + setcpuprofilerate(0) + cpuprof.on = false + cpuprof.addExtra() + cpuprof.log.close() + } + unlock(&cpuprof.lock) +} + +// add adds the stack trace to the profile. +// It is called from signal handlers and other limited environments +// and cannot allocate memory or acquire locks that might be +// held at the time of the signal, nor can it use substantial amounts +// of stack. +// +//go:nowritebarrierrec +func (p *cpuProfile) add(tagPtr *unsafe.Pointer, stk []uintptr) { + // Simple cas-lock to coordinate with setcpuprofilerate. + for !prof.signalLock.CompareAndSwap(0, 1) { + // TODO: Is it safe to osyield here? https://go.dev/issue/52672 + osyield() + } + + if prof.hz.Load() != 0 { // implies cpuprof.log != nil + if p.numExtra > 0 || p.lostExtra > 0 || p.lostAtomic > 0 { + p.addExtra() + } + hdr := [1]uint64{1} + // Note: write "knows" that the argument is &gp.labels, + // because otherwise its write barrier behavior may not + // be correct. See the long comment there before + // changing the argument here. + cpuprof.log.write(tagPtr, nanotime(), hdr[:], stk) + } + + prof.signalLock.Store(0) +} + +// addNonGo adds the non-Go stack trace to the profile. +// It is called from a non-Go thread, so we cannot use much stack at all, +// nor do anything that needs a g or an m. +// In particular, we can't call cpuprof.log.write. +// Instead, we copy the stack into cpuprof.extra, +// which will be drained the next time a Go thread +// gets the signal handling event. +// +//go:nosplit +//go:nowritebarrierrec +func (p *cpuProfile) addNonGo(stk []uintptr) { + // Simple cas-lock to coordinate with SetCPUProfileRate. + // (Other calls to add or addNonGo should be blocked out + // by the fact that only one SIGPROF can be handled by the + // process at a time. If not, this lock will serialize those too. + // The use of timer_create(2) on Linux to request process-targeted + // signals may have changed this.) + for !prof.signalLock.CompareAndSwap(0, 1) { + // TODO: Is it safe to osyield here? https://go.dev/issue/52672 + osyield() + } + + if cpuprof.numExtra+1+len(stk) < len(cpuprof.extra) { + i := cpuprof.numExtra + cpuprof.extra[i] = uintptr(1 + len(stk)) + copy(cpuprof.extra[i+1:], stk) + cpuprof.numExtra += 1 + len(stk) + } else { + cpuprof.lostExtra++ + } + + prof.signalLock.Store(0) +} + +// addExtra adds the "extra" profiling events, +// queued by addNonGo, to the profile log. +// addExtra is called either from a signal handler on a Go thread +// or from an ordinary goroutine; either way it can use stack +// and has a g. The world may be stopped, though. +func (p *cpuProfile) addExtra() { + // Copy accumulated non-Go profile events. + hdr := [1]uint64{1} + for i := 0; i < p.numExtra; { + p.log.write(nil, 0, hdr[:], p.extra[i+1:i+int(p.extra[i])]) + i += int(p.extra[i]) + } + p.numExtra = 0 + + // Report any lost events. 
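+	// Lost samples are synthesized as two-frame sentinel stacks
+	// (_LostExternalCode/_ExternalCode and
+	// _LostSIGPROFDuringAtomic64/_System), with the lost count carried
+	// in the sample header.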
+ if p.lostExtra > 0 { + hdr := [1]uint64{p.lostExtra} + lostStk := [2]uintptr{ + abi.FuncPCABIInternal(_LostExternalCode) + sys.PCQuantum, + abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum, + } + p.log.write(nil, 0, hdr[:], lostStk[:]) + p.lostExtra = 0 + } + + if p.lostAtomic > 0 { + hdr := [1]uint64{p.lostAtomic} + lostStk := [2]uintptr{ + abi.FuncPCABIInternal(_LostSIGPROFDuringAtomic64) + sys.PCQuantum, + abi.FuncPCABIInternal(_System) + sys.PCQuantum, + } + p.log.write(nil, 0, hdr[:], lostStk[:]) + p.lostAtomic = 0 + } + +} + +// CPUProfile panics. +// It formerly provided raw access to chunks of +// a pprof-format profile generated by the runtime. +// The details of generating that format have changed, +// so this functionality has been removed. +// +// Deprecated: Use the [runtime/pprof] package, +// or the handlers in the [net/http/pprof] package, +// or the [testing] package's -test.cpuprofile flag instead. +func CPUProfile() []byte { + panic("CPUProfile no longer available") +} + +//go:linkname runtime_pprof_runtime_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond +func runtime_pprof_runtime_cyclesPerSecond() int64 { + return ticksPerSecond() +} + +// readProfile, provided to runtime/pprof, returns the next chunk of +// binary CPU profiling stack trace data, blocking until data is available. +// If profiling is turned off and all the profile data accumulated while it was +// on has been returned, readProfile returns eof=true. +// The caller must save the returned data and tags before calling readProfile again. +// The returned data contains a whole number of records, and tags contains +// exactly one entry per record. +// +//go:linkname runtime_pprof_readProfile runtime/pprof.readProfile +func runtime_pprof_readProfile() ([]uint64, []unsafe.Pointer, bool) { + lock(&cpuprof.lock) + log := cpuprof.log + unlock(&cpuprof.lock) + readMode := profBufBlocking + if GOOS == "darwin" || GOOS == "ios" { + readMode = profBufNonBlocking // For #61768; on Darwin notes are not async-signal-safe. See sigNoteSetup in os_darwin.go. + } + data, tags, eof := log.read(readMode) + if len(data) == 0 && eof { + lock(&cpuprof.lock) + cpuprof.log = nil + unlock(&cpuprof.lock) + } + return data, tags, eof +} diff --git a/platform/dbops/binaries/go/go/src/runtime/cputicks.go b/platform/dbops/binaries/go/go/src/runtime/cputicks.go new file mode 100644 index 0000000000000000000000000000000000000000..2cf324033338d5d8699a32eabb8258be10493b4c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/cputicks.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !arm && !arm64 && !mips64 && !mips64le && !mips && !mipsle && !wasm + +package runtime + +// careful: cputicks is not guaranteed to be monotonic! In particular, we have +// noticed drift between cpus on certain os/arch combinations. See issue 8976. +func cputicks() int64 diff --git a/platform/dbops/binaries/go/go/src/runtime/crash_cgo_test.go b/platform/dbops/binaries/go/go/src/runtime/crash_cgo_test.go new file mode 100644 index 0000000000000000000000000000000000000000..304f1a75545c78adaf179a72057585bc46f69879 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/crash_cgo_test.go @@ -0,0 +1,897 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build cgo + +package runtime_test + +import ( + "fmt" + "internal/goexperiment" + "internal/goos" + "internal/platform" + "internal/testenv" + "os" + "os/exec" + "runtime" + "strconv" + "strings" + "testing" + "time" +) + +func TestCgoCrashHandler(t *testing.T) { + t.Parallel() + testCrashHandler(t, true) +} + +func TestCgoSignalDeadlock(t *testing.T) { + // Don't call t.Parallel, since too much work going on at the + // same time can cause the testprogcgo code to overrun its + // timeouts (issue #18598). + + if testing.Short() && runtime.GOOS == "windows" { + t.Skip("Skipping in short mode") // takes up to 64 seconds + } + got := runTestProg(t, "testprogcgo", "CgoSignalDeadlock") + want := "OK\n" + if got != want { + t.Fatalf("expected %q, but got:\n%s", want, got) + } +} + +func TestCgoTraceback(t *testing.T) { + t.Parallel() + got := runTestProg(t, "testprogcgo", "CgoTraceback") + want := "OK\n" + if got != want { + t.Fatalf("expected %q, but got:\n%s", want, got) + } +} + +func TestCgoCallbackGC(t *testing.T) { + t.Parallel() + switch runtime.GOOS { + case "plan9", "windows": + t.Skipf("no pthreads on %s", runtime.GOOS) + } + if testing.Short() { + switch { + case runtime.GOOS == "dragonfly": + t.Skip("see golang.org/issue/11990") + case runtime.GOOS == "linux" && runtime.GOARCH == "arm": + t.Skip("too slow for arm builders") + case runtime.GOOS == "linux" && (runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le"): + t.Skip("too slow for mips64x builders") + } + } + if testenv.Builder() == "darwin-amd64-10_14" { + // TODO(#23011): When the 10.14 builders are gone, remove this skip. + t.Skip("skipping due to platform bug on macOS 10.14; see https://golang.org/issue/43926") + } + got := runTestProg(t, "testprogcgo", "CgoCallbackGC") + want := "OK\n" + if got != want { + t.Fatalf("expected %q, but got:\n%s", want, got) + } +} + +func TestCgoExternalThreadPanic(t *testing.T) { + t.Parallel() + if runtime.GOOS == "plan9" { + t.Skipf("no pthreads on %s", runtime.GOOS) + } + got := runTestProg(t, "testprogcgo", "CgoExternalThreadPanic") + want := "panic: BOOM" + if !strings.Contains(got, want) { + t.Fatalf("want failure containing %q. output:\n%s\n", want, got) + } +} + +func TestCgoExternalThreadSIGPROF(t *testing.T) { + t.Parallel() + // issue 9456. + switch runtime.GOOS { + case "plan9", "windows": + t.Skipf("no pthreads on %s", runtime.GOOS) + } + + got := runTestProg(t, "testprogcgo", "CgoExternalThreadSIGPROF", "GO_START_SIGPROF_THREAD=1") + if want := "OK\n"; got != want { + t.Fatalf("expected %q, but got:\n%s", want, got) + } +} + +func TestCgoExternalThreadSignal(t *testing.T) { + t.Parallel() + // issue 10139 + switch runtime.GOOS { + case "plan9", "windows": + t.Skipf("no pthreads on %s", runtime.GOOS) + } + + got := runTestProg(t, "testprogcgo", "CgoExternalThreadSignal") + if want := "OK\n"; got != want { + if runtime.GOOS == "ios" && strings.Contains(got, "C signal did not crash as expected") { + testenv.SkipFlaky(t, 59913) + } + t.Fatalf("expected %q, but got:\n%s", want, got) + } +} + +func TestCgoDLLImports(t *testing.T) { + // test issue 9356 + if runtime.GOOS != "windows" { + t.Skip("skipping windows specific test") + } + got := runTestProg(t, "testprogcgo", "CgoDLLImportsMain") + want := "OK\n" + if got != want { + t.Fatalf("expected %q, but got %v", want, got) + } +} + +func TestCgoExecSignalMask(t *testing.T) { + t.Parallel() + // Test issue 13164. 
+ switch runtime.GOOS { + case "windows", "plan9": + t.Skipf("skipping signal mask test on %s", runtime.GOOS) + } + got := runTestProg(t, "testprogcgo", "CgoExecSignalMask", "GOTRACEBACK=system") + want := "OK\n" + if got != want { + t.Errorf("expected %q, got %v", want, got) + } +} + +func TestEnsureDropM(t *testing.T) { + t.Parallel() + // Test for issue 13881. + switch runtime.GOOS { + case "windows", "plan9": + t.Skipf("skipping dropm test on %s", runtime.GOOS) + } + got := runTestProg(t, "testprogcgo", "EnsureDropM") + want := "OK\n" + if got != want { + t.Errorf("expected %q, got %v", want, got) + } +} + +// Test for issue 14387. +// Test that the program that doesn't need any cgo pointer checking +// takes about the same amount of time with it as without it. +func TestCgoCheckBytes(t *testing.T) { + t.Parallel() + // Make sure we don't count the build time as part of the run time. + testenv.MustHaveGoBuild(t) + exe, err := buildTestProg(t, "testprogcgo") + if err != nil { + t.Fatal(err) + } + + // Try it 10 times to avoid flakiness. + const tries = 10 + var tot1, tot2 time.Duration + for i := 0; i < tries; i++ { + cmd := testenv.CleanCmdEnv(exec.Command(exe, "CgoCheckBytes")) + cmd.Env = append(cmd.Env, "GODEBUG=cgocheck=0", fmt.Sprintf("GO_CGOCHECKBYTES_TRY=%d", i)) + + start := time.Now() + cmd.Run() + d1 := time.Since(start) + + cmd = testenv.CleanCmdEnv(exec.Command(exe, "CgoCheckBytes")) + cmd.Env = append(cmd.Env, fmt.Sprintf("GO_CGOCHECKBYTES_TRY=%d", i)) + + start = time.Now() + cmd.Run() + d2 := time.Since(start) + + if d1*20 > d2 { + // The slow version (d2) was less than 20 times + // slower than the fast version (d1), so OK. + return + } + + tot1 += d1 + tot2 += d2 + } + + t.Errorf("cgo check too slow: got %v, expected at most %v", tot2/tries, (tot1/tries)*20) +} + +func TestCgoPanicDeadlock(t *testing.T) { + t.Parallel() + // test issue 14432 + got := runTestProg(t, "testprogcgo", "CgoPanicDeadlock") + want := "panic: cgo error\n\n" + if !strings.HasPrefix(got, want) { + t.Fatalf("output does not start with %q:\n%s", want, got) + } +} + +func TestCgoCCodeSIGPROF(t *testing.T) { + t.Parallel() + got := runTestProg(t, "testprogcgo", "CgoCCodeSIGPROF") + want := "OK\n" + if got != want { + t.Errorf("expected %q got %v", want, got) + } +} + +func TestCgoPprofCallback(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") // takes a full second + } + switch runtime.GOOS { + case "windows", "plan9": + t.Skipf("skipping cgo pprof callback test on %s", runtime.GOOS) + } + got := runTestProg(t, "testprogcgo", "CgoPprofCallback") + want := "OK\n" + if got != want { + t.Errorf("expected %q got %v", want, got) + } +} + +func TestCgoCrashTraceback(t *testing.T) { + t.Parallel() + switch platform := runtime.GOOS + "/" + runtime.GOARCH; platform { + case "darwin/amd64": + case "linux/amd64": + case "linux/arm64": + case "linux/ppc64le": + default: + t.Skipf("not yet supported on %s", platform) + } + got := runTestProg(t, "testprogcgo", "CrashTraceback") + for i := 1; i <= 3; i++ { + if !strings.Contains(got, fmt.Sprintf("cgo symbolizer:%d", i)) { + t.Errorf("missing cgo symbolizer:%d", i) + } + } +} + +func TestCgoCrashTracebackGo(t *testing.T) { + t.Parallel() + switch platform := runtime.GOOS + "/" + runtime.GOARCH; platform { + case "darwin/amd64": + case "linux/amd64": + case "linux/arm64": + case "linux/ppc64le": + default: + t.Skipf("not yet supported on %s", platform) + } + got := runTestProg(t, "testprogcgo", "CrashTracebackGo") + for i := 1; i <= 3; i++ { + 
want := fmt.Sprintf("main.h%d", i) + if !strings.Contains(got, want) { + t.Errorf("missing %s", want) + } + } +} + +func TestCgoTracebackContext(t *testing.T) { + t.Parallel() + got := runTestProg(t, "testprogcgo", "TracebackContext") + want := "OK\n" + if got != want { + t.Errorf("expected %q got %v", want, got) + } +} + +func TestCgoTracebackContextPreemption(t *testing.T) { + t.Parallel() + got := runTestProg(t, "testprogcgo", "TracebackContextPreemption") + want := "OK\n" + if got != want { + t.Errorf("expected %q got %v", want, got) + } +} + +func testCgoPprof(t *testing.T, buildArg, runArg, top, bottom string) { + t.Parallel() + if runtime.GOOS != "linux" || (runtime.GOARCH != "amd64" && runtime.GOARCH != "ppc64le" && runtime.GOARCH != "arm64") { + t.Skipf("not yet supported on %s/%s", runtime.GOOS, runtime.GOARCH) + } + testenv.MustHaveGoRun(t) + + exe, err := buildTestProg(t, "testprogcgo", buildArg) + if err != nil { + t.Fatal(err) + } + + cmd := testenv.CleanCmdEnv(exec.Command(exe, runArg)) + got, err := cmd.CombinedOutput() + if err != nil { + if testenv.Builder() == "linux-amd64-alpine" { + // See Issue 18243 and Issue 19938. + t.Skipf("Skipping failing test on Alpine (golang.org/issue/18243). Ignoring error: %v", err) + } + t.Fatalf("%s\n\n%v", got, err) + } + fn := strings.TrimSpace(string(got)) + defer os.Remove(fn) + + for try := 0; try < 2; try++ { + cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "tool", "pprof", "-tagignore=ignore", "-traces")) + // Check that pprof works both with and without explicit executable on command line. + if try == 0 { + cmd.Args = append(cmd.Args, exe, fn) + } else { + cmd.Args = append(cmd.Args, fn) + } + + found := false + for i, e := range cmd.Env { + if strings.HasPrefix(e, "PPROF_TMPDIR=") { + cmd.Env[i] = "PPROF_TMPDIR=" + os.TempDir() + found = true + break + } + } + if !found { + cmd.Env = append(cmd.Env, "PPROF_TMPDIR="+os.TempDir()) + } + + out, err := cmd.CombinedOutput() + t.Logf("%s:\n%s", cmd.Args, out) + if err != nil { + t.Error(err) + continue + } + + trace := findTrace(string(out), top) + if len(trace) == 0 { + t.Errorf("%s traceback missing.", top) + continue + } + if trace[len(trace)-1] != bottom { + t.Errorf("invalid traceback origin: got=%v; want=[%s ... %s]", trace, top, bottom) + } + } +} + +func TestCgoPprof(t *testing.T) { + testCgoPprof(t, "", "CgoPprof", "cpuHog", "runtime.main") +} + +func TestCgoPprofPIE(t *testing.T) { + testCgoPprof(t, "-buildmode=pie", "CgoPprof", "cpuHog", "runtime.main") +} + +func TestCgoPprofThread(t *testing.T) { + testCgoPprof(t, "", "CgoPprofThread", "cpuHogThread", "cpuHogThread2") +} + +func TestCgoPprofThreadNoTraceback(t *testing.T) { + testCgoPprof(t, "", "CgoPprofThreadNoTraceback", "cpuHogThread", "runtime._ExternalCode") +} + +func TestRaceProf(t *testing.T) { + if !platform.RaceDetectorSupported(runtime.GOOS, runtime.GOARCH) { + t.Skipf("skipping on %s/%s because race detector not supported", runtime.GOOS, runtime.GOARCH) + } + if runtime.GOOS == "windows" { + t.Skipf("skipping: test requires pthread support") + // TODO: Can this test be rewritten to use the C11 thread API instead? + } + + testenv.MustHaveGoRun(t) + + // This test requires building various packages with -race, so + // it's somewhat slow. 
+ if testing.Short() { + t.Skip("skipping test in -short mode") + } + + exe, err := buildTestProg(t, "testprogcgo", "-race") + if err != nil { + t.Fatal(err) + } + + got, err := testenv.CleanCmdEnv(exec.Command(exe, "CgoRaceprof")).CombinedOutput() + if err != nil { + t.Fatal(err) + } + want := "OK\n" + if string(got) != want { + t.Errorf("expected %q got %s", want, got) + } +} + +func TestRaceSignal(t *testing.T) { + if !platform.RaceDetectorSupported(runtime.GOOS, runtime.GOARCH) { + t.Skipf("skipping on %s/%s because race detector not supported", runtime.GOOS, runtime.GOARCH) + } + if runtime.GOOS == "windows" { + t.Skipf("skipping: test requires pthread support") + // TODO: Can this test be rewritten to use the C11 thread API instead? + } + if runtime.GOOS == "darwin" || runtime.GOOS == "ios" { + testenv.SkipFlaky(t, 60316) + } + + t.Parallel() + + testenv.MustHaveGoRun(t) + + // This test requires building various packages with -race, so + // it's somewhat slow. + if testing.Short() { + t.Skip("skipping test in -short mode") + } + + exe, err := buildTestProg(t, "testprogcgo", "-race") + if err != nil { + t.Fatal(err) + } + + got, err := testenv.CleanCmdEnv(testenv.Command(t, exe, "CgoRaceSignal")).CombinedOutput() + if err != nil { + t.Logf("%s\n", got) + t.Fatal(err) + } + want := "OK\n" + if string(got) != want { + t.Errorf("expected %q got %s", want, got) + } +} + +func TestCgoNumGoroutine(t *testing.T) { + switch runtime.GOOS { + case "windows", "plan9": + t.Skipf("skipping numgoroutine test on %s", runtime.GOOS) + } + t.Parallel() + got := runTestProg(t, "testprogcgo", "NumGoroutine") + want := "OK\n" + if got != want { + t.Errorf("expected %q got %v", want, got) + } +} + +func TestCatchPanic(t *testing.T) { + t.Parallel() + switch runtime.GOOS { + case "plan9", "windows": + t.Skipf("no signals on %s", runtime.GOOS) + case "darwin": + if runtime.GOARCH == "amd64" { + t.Skipf("crash() on darwin/amd64 doesn't raise SIGABRT") + } + } + + testenv.MustHaveGoRun(t) + + exe, err := buildTestProg(t, "testprogcgo") + if err != nil { + t.Fatal(err) + } + + for _, early := range []bool{true, false} { + cmd := testenv.CleanCmdEnv(exec.Command(exe, "CgoCatchPanic")) + // Make sure a panic results in a crash. 
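+		// GOTRACEBACK=crash makes the runtime raise SIGABRT after
+		// printing the traceback; the C SIGABRT handler installed by
+		// the test program is expected to catch it, so the process
+		// should still exit successfully.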
+		cmd.Env = append(cmd.Env, "GOTRACEBACK=crash")
+		if early {
+			// Tell testprogcgo to install an early signal handler for SIGABRT
+			cmd.Env = append(cmd.Env, "CGOCATCHPANIC_EARLY_HANDLER=1")
+		}
+		if out, err := cmd.CombinedOutput(); err != nil {
+			t.Errorf("testprogcgo CgoCatchPanic failed: %v\n%s", err, out)
+		}
+	}
+}
+
+func TestCgoLockOSThreadExit(t *testing.T) {
+	switch runtime.GOOS {
+	case "plan9", "windows":
+		t.Skipf("no pthreads on %s", runtime.GOOS)
+	}
+	t.Parallel()
+	testLockOSThreadExit(t, "testprogcgo")
+}
+
+func TestWindowsStackMemoryCgo(t *testing.T) {
+	if runtime.GOOS != "windows" {
+		t.Skip("skipping windows specific test")
+	}
+	testenv.SkipFlaky(t, 22575)
+	o := runTestProg(t, "testprogcgo", "StackMemory")
+	stackUsage, err := strconv.Atoi(o)
+	if err != nil {
+		t.Fatalf("Failed to read stack usage: %v", err)
+	}
+	if expected, got := 100<<10, stackUsage; got > expected {
+		t.Fatalf("expected < %d bytes of memory per thread, got %d", expected, got)
+	}
+}
+
+func TestSigStackSwapping(t *testing.T) {
+	switch runtime.GOOS {
+	case "plan9", "windows":
+		t.Skipf("no sigaltstack on %s", runtime.GOOS)
+	}
+	t.Parallel()
+	got := runTestProg(t, "testprogcgo", "SigStack")
+	want := "OK\n"
+	if got != want {
+		t.Errorf("expected %q got %v", want, got)
+	}
+}
+
+func TestCgoTracebackSigpanic(t *testing.T) {
+	// Test unwinding over a sigpanic in C code without a C
+	// symbolizer. See issue #23576.
+	if runtime.GOOS == "windows" {
+		// On Windows if we get an exception in C code, we let
+		// the Windows exception handler unwind it, rather
+		// than injecting a sigpanic.
+		t.Skip("no sigpanic in C on windows")
+	}
+	if runtime.GOOS == "ios" {
+		testenv.SkipFlaky(t, 59912)
+	}
+	t.Parallel()
+	got := runTestProg(t, "testprogcgo", "TracebackSigpanic")
+	t.Log(got)
+	// We should see the function that calls the C function.
+	want := "main.TracebackSigpanic"
+	if !strings.Contains(got, want) {
+		if runtime.GOOS == "android" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") {
+			testenv.SkipFlaky(t, 58794)
+		}
+		t.Errorf("did not see %q in output", want)
+	}
+	// We shouldn't inject a sigpanic call. (see issue 57698)
+	nowant := "runtime.sigpanic"
+	if strings.Contains(got, nowant) {
+		t.Errorf("unexpectedly saw %q in output", nowant)
+	}
+	// No runtime errors like "runtime: unexpected return pc".
+	nowant = "runtime: "
+	if strings.Contains(got, nowant) {
+		t.Errorf("unexpectedly saw %q in output", nowant)
+	}
+}
+
+func TestCgoPanicCallback(t *testing.T) {
+	t.Parallel()
+	got := runTestProg(t, "testprogcgo", "PanicCallback")
+	t.Log(got)
+	want := "panic: runtime error: invalid memory address or nil pointer dereference"
+	if !strings.Contains(got, want) {
+		t.Errorf("did not see %q in output", want)
+	}
+	want = "panic_callback"
+	if !strings.Contains(got, want) {
+		t.Errorf("did not see %q in output", want)
+	}
+	want = "PanicCallback"
+	if !strings.Contains(got, want) {
+		t.Errorf("did not see %q in output", want)
+	}
+	// No runtime errors like "runtime: unexpected return pc".
+	nowant := "runtime: "
+	if strings.Contains(got, nowant) {
+		t.Errorf("unexpectedly saw %q in output", nowant)
+	}
+}
+
+// Test that C code called via cgo can use large Windows thread stacks
+// and call back in to Go without crashing. See issue #20975.
+//
+// See also TestBigStackCallbackSyscall.
+func TestBigStackCallbackCgo(t *testing.T) {
+	if runtime.GOOS != "windows" {
+		t.Skip("skipping windows specific test")
+	}
+	t.Parallel()
+	got := runTestProg(t, "testprogcgo", "BigStack")
+	want := "OK\n"
+	if got != want {
+		t.Errorf("expected %q got %v", want, got)
+	}
+}
+
+func nextTrace(lines []string) ([]string, []string) {
+	var trace []string
+	for n, line := range lines {
+		if strings.HasPrefix(line, "---") {
+			return trace, lines[n+1:]
+		}
+		fields := strings.Fields(strings.TrimSpace(line))
+		if len(fields) == 0 {
+			continue
+		}
+		// Last field contains the function name.
+		trace = append(trace, fields[len(fields)-1])
+	}
+	return nil, nil
+}
+
+func findTrace(text, top string) []string {
+	lines := strings.Split(text, "\n")
+	_, lines = nextTrace(lines) // Skip the header.
+	for len(lines) > 0 {
+		var t []string
+		t, lines = nextTrace(lines)
+		if len(t) == 0 {
+			continue
+		}
+		if t[0] == top {
+			return t
+		}
+	}
+	return nil
+}
+
+func TestSegv(t *testing.T) {
+	switch runtime.GOOS {
+	case "plan9", "windows":
+		t.Skipf("no signals on %s", runtime.GOOS)
+	}
+
+	for _, test := range []string{"Segv", "SegvInCgo", "TgkillSegv", "TgkillSegvInCgo"} {
+		test := test
+
+		// The tgkill variants only run on Linux.
+		if runtime.GOOS != "linux" && strings.HasPrefix(test, "Tgkill") {
+			continue
+		}
+
+		t.Run(test, func(t *testing.T) {
+			if test == "SegvInCgo" && runtime.GOOS == "ios" {
+				testenv.SkipFlaky(t, 59947) // Don't even try, in case it times out.
+			}
+
+			t.Parallel()
+			prog := "testprog"
+			if strings.HasSuffix(test, "InCgo") {
+				prog = "testprogcgo"
+			}
+			got := runTestProg(t, prog, test)
+			t.Log(got)
+			want := "SIGSEGV"
+			if !strings.Contains(got, want) {
+				if runtime.GOOS == "darwin" && runtime.GOARCH == "amd64" && strings.Contains(got, "fatal: morestack on g0") {
+					testenv.SkipFlaky(t, 39457)
+				}
+				t.Errorf("did not see %q in output", want)
+			}
+
+			// No runtime errors like "runtime: unknown pc".
+			switch runtime.GOOS {
+			case "darwin", "ios", "illumos", "solaris":
+				// Runtime sometimes throws when generating the traceback.
+				testenv.SkipFlaky(t, 49182)
+			case "linux":
+				if runtime.GOARCH == "386" {
+					// Runtime throws when generating a traceback from
+					// a VDSO call via asmcgocall.
+					testenv.SkipFlaky(t, 50504)
+				}
+			}
+			if test == "SegvInCgo" && strings.Contains(got, "unknown pc") {
+				testenv.SkipFlaky(t, 50979)
+			}
+
+			for _, nowant := range []string{"fatal error: ", "runtime: "} {
+				if strings.Contains(got, nowant) {
+					if runtime.GOOS == "darwin" && strings.Contains(got, "0xb01dfacedebac1e") {
+						// See the comment in signal_darwin_amd64.go.
+						t.Skip("skipping due to Darwin handling of malformed addresses")
+					}
+					t.Errorf("unexpectedly saw %q in output", nowant)
+				}
+			}
+		})
+	}
+}
+
+func TestAbortInCgo(t *testing.T) {
+	switch runtime.GOOS {
+	case "plan9", "windows":
+		// N.B. On Windows, C abort() causes the program to exit
+		// without going through the runtime at all.
+		t.Skipf("no signals on %s", runtime.GOOS)
+	}
+
+	t.Parallel()
+	got := runTestProg(t, "testprogcgo", "Abort")
+	t.Log(got)
+	want := "SIGABRT"
+	if !strings.Contains(got, want) {
+		t.Errorf("did not see %q in output", want)
+	}
+	// No runtime errors like "runtime: unknown pc".
+	nowant := "runtime: "
+	if strings.Contains(got, nowant) {
+		t.Errorf("unexpectedly saw %q in output", nowant)
+	}
+}
+
+// TestEINTR tests that we handle EINTR correctly.
+// See issue #20400 and friends.
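+//
+// The test program makes blocking system calls from several
+// goroutines while C-installed signal handlers repeatedly interrupt
+// them; the runtime and standard library are expected to retry the
+// interrupted calls transparently.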
+func TestEINTR(t *testing.T) { + switch runtime.GOOS { + case "plan9", "windows": + t.Skipf("no EINTR on %s", runtime.GOOS) + case "linux": + if runtime.GOARCH == "386" { + // On linux-386 the Go signal handler sets + // a restorer function that is not preserved + // by the C sigaction call in the test, + // causing the signal handler to crash when + // returning the normal code. The test is not + // architecture-specific, so just skip on 386 + // rather than doing a complicated workaround. + t.Skip("skipping on linux-386; C sigaction does not preserve Go restorer") + } + } + + t.Parallel() + output := runTestProg(t, "testprogcgo", "EINTR") + want := "OK\n" + if output != want { + t.Fatalf("want %s, got %s\n", want, output) + } +} + +// Issue #42207. +func TestNeedmDeadlock(t *testing.T) { + switch runtime.GOOS { + case "plan9", "windows": + t.Skipf("no signals on %s", runtime.GOOS) + } + output := runTestProg(t, "testprogcgo", "NeedmDeadlock") + want := "OK\n" + if output != want { + t.Fatalf("want %s, got %s\n", want, output) + } +} + +func TestCgoNoCallback(t *testing.T) { + t.Skip("TODO(#56378): enable in Go 1.23") + got := runTestProg(t, "testprogcgo", "CgoNoCallback") + want := "function marked with #cgo nocallback called back into Go" + if !strings.Contains(got, want) { + t.Fatalf("did not see %q in output:\n%s", want, got) + } +} + +func TestCgoNoEscape(t *testing.T) { + t.Skip("TODO(#56378): enable in Go 1.23") + got := runTestProg(t, "testprogcgo", "CgoNoEscape") + want := "OK\n" + if got != want { + t.Fatalf("want %s, got %s\n", want, got) + } +} + +func TestCgoTracebackGoroutineProfile(t *testing.T) { + output := runTestProg(t, "testprogcgo", "GoroutineProfile") + want := "OK\n" + if output != want { + t.Fatalf("want %s, got %s\n", want, output) + } +} + +func TestCgoTraceParser(t *testing.T) { + // Test issue 29707. + switch runtime.GOOS { + case "plan9", "windows": + t.Skipf("no pthreads on %s", runtime.GOOS) + } + if goexperiment.ExecTracer2 { + t.Skip("skipping test that is covered elsewhere for the new execution tracer") + } + output := runTestProg(t, "testprogcgo", "CgoTraceParser") + want := "OK\n" + ErrTimeOrder := "ErrTimeOrder\n" + if output == ErrTimeOrder { + t.Skipf("skipping due to golang.org/issue/16755: %v", output) + } else if output != want { + t.Fatalf("want %s, got %s\n", want, output) + } +} + +func TestCgoTraceParserWithOneProc(t *testing.T) { + // Test issue 29707. 
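+	// Same as TestCgoTraceParser, but with the runtime restricted to
+	// a single P via GOMAXPROCS=1 in the child's environment.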
+ switch runtime.GOOS { + case "plan9", "windows": + t.Skipf("no pthreads on %s", runtime.GOOS) + } + if goexperiment.ExecTracer2 { + t.Skip("skipping test that is covered elsewhere for the new execution tracer") + } + output := runTestProg(t, "testprogcgo", "CgoTraceParser", "GOMAXPROCS=1") + want := "OK\n" + ErrTimeOrder := "ErrTimeOrder\n" + if output == ErrTimeOrder { + t.Skipf("skipping due to golang.org/issue/16755: %v", output) + } else if output != want { + t.Fatalf("GOMAXPROCS=1, want %s, got %s\n", want, output) + } +} + +func TestCgoSigfwd(t *testing.T) { + t.Parallel() + if !goos.IsUnix { + t.Skipf("no signals on %s", runtime.GOOS) + } + + got := runTestProg(t, "testprogcgo", "CgoSigfwd", "GO_TEST_CGOSIGFWD=1") + if want := "OK\n"; got != want { + t.Fatalf("expected %q, but got:\n%s", want, got) + } +} + +func TestDestructorCallback(t *testing.T) { + t.Parallel() + got := runTestProg(t, "testprogcgo", "DestructorCallback") + if want := "OK\n"; got != want { + t.Errorf("expected %q, but got:\n%s", want, got) + } +} + +func TestDestructorCallbackRace(t *testing.T) { + // This test requires building with -race, + // so it's somewhat slow. + if testing.Short() { + t.Skip("skipping test in -short mode") + } + + if !platform.RaceDetectorSupported(runtime.GOOS, runtime.GOARCH) { + t.Skipf("skipping on %s/%s because race detector not supported", runtime.GOOS, runtime.GOARCH) + } + + t.Parallel() + + exe, err := buildTestProg(t, "testprogcgo", "-race") + if err != nil { + t.Fatal(err) + } + + got, err := testenv.CleanCmdEnv(exec.Command(exe, "DestructorCallback")).CombinedOutput() + if err != nil { + t.Fatal(err) + } + + if want := "OK\n"; string(got) != want { + t.Errorf("expected %q, but got:\n%s", want, got) + } +} + +func TestEnsureBindM(t *testing.T) { + t.Parallel() + switch runtime.GOOS { + case "windows", "plan9": + t.Skipf("skipping bindm test on %s", runtime.GOOS) + } + got := runTestProg(t, "testprogcgo", "EnsureBindM") + want := "OK\n" + if got != want { + t.Errorf("expected %q, got %v", want, got) + } +} + +func TestStackSwitchCallback(t *testing.T) { + t.Parallel() + switch runtime.GOOS { + case "windows", "plan9", "android", "ios", "openbsd": // no getcontext + t.Skipf("skipping test on %s", runtime.GOOS) + } + got := runTestProg(t, "testprogcgo", "StackSwitchCallback") + skip := "SKIP\n" + if got == skip { + t.Skip("skipping on musl/bionic libc") + } + want := "OK\n" + if got != want { + t.Errorf("expected %q, got %v", want, got) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/crash_test.go b/platform/dbops/binaries/go/go/src/runtime/crash_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2ed0fd8f0718f84c862fd7f138fe066f3021cd69 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/crash_test.go @@ -0,0 +1,899 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime_test + +import ( + "bytes" + "errors" + "flag" + "fmt" + "internal/testenv" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +var toRemove []string + +func TestMain(m *testing.M) { + _, coreErrBefore := os.Stat("core") + + status := m.Run() + for _, file := range toRemove { + os.RemoveAll(file) + } + + _, coreErrAfter := os.Stat("core") + if coreErrBefore != nil && coreErrAfter == nil { + fmt.Fprintln(os.Stderr, "runtime.test: some test left a core file behind") + if status == 0 { + status = 1 + } + } + + os.Exit(status) +} + +var testprog struct { + sync.Mutex + dir string + target map[string]*buildexe +} + +type buildexe struct { + once sync.Once + exe string + err error +} + +func runTestProg(t *testing.T, binary, name string, env ...string) string { + if *flagQuick { + t.Skip("-quick") + } + + testenv.MustHaveGoBuild(t) + t.Helper() + + exe, err := buildTestProg(t, binary) + if err != nil { + t.Fatal(err) + } + + return runBuiltTestProg(t, exe, name, env...) +} + +func runBuiltTestProg(t *testing.T, exe, name string, env ...string) string { + t.Helper() + + if *flagQuick { + t.Skip("-quick") + } + + start := time.Now() + + cmd := testenv.CleanCmdEnv(testenv.Command(t, exe, name)) + cmd.Env = append(cmd.Env, env...) + if testing.Short() { + cmd.Env = append(cmd.Env, "RUNTIME_TEST_SHORT=1") + } + out, err := cmd.CombinedOutput() + if err == nil { + t.Logf("%v (%v): ok", cmd, time.Since(start)) + } else { + if _, ok := err.(*exec.ExitError); ok { + t.Logf("%v: %v", cmd, err) + } else if errors.Is(err, exec.ErrWaitDelay) { + t.Fatalf("%v: %v", cmd, err) + } else { + t.Fatalf("%v failed to start: %v", cmd, err) + } + } + return string(out) +} + +var serializeBuild = make(chan bool, 2) + +func buildTestProg(t *testing.T, binary string, flags ...string) (string, error) { + if *flagQuick { + t.Skip("-quick") + } + testenv.MustHaveGoBuild(t) + + testprog.Lock() + if testprog.dir == "" { + dir, err := os.MkdirTemp("", "go-build") + if err != nil { + t.Fatalf("failed to create temp directory: %v", err) + } + testprog.dir = dir + toRemove = append(toRemove, dir) + } + + if testprog.target == nil { + testprog.target = make(map[string]*buildexe) + } + name := binary + if len(flags) > 0 { + name += "_" + strings.Join(flags, "_") + } + target, ok := testprog.target[name] + if !ok { + target = &buildexe{} + testprog.target[name] = target + } + + dir := testprog.dir + + // Unlock testprog while actually building, so that other + // tests can look up executables that were already built. + testprog.Unlock() + + target.once.Do(func() { + // Only do two "go build"'s at a time, + // to keep load from getting too high. + serializeBuild <- true + defer func() { <-serializeBuild }() + + // Don't get confused if testenv.GoToolPath calls t.Skip. + target.err = errors.New("building test called t.Skip") + + exe := filepath.Join(dir, name+".exe") + + start := time.Now() + cmd := exec.Command(testenv.GoToolPath(t), append([]string{"build", "-o", exe}, flags...)...) 
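+		// The build command runs in the program's testdata directory
+		// (cmd.Dir is set just below), so "go build" resolves that
+		// program's own sources and module.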
+ t.Logf("running %v", cmd) + cmd.Dir = "testdata/" + binary + out, err := testenv.CleanCmdEnv(cmd).CombinedOutput() + if err != nil { + target.err = fmt.Errorf("building %s %v: %v\n%s", binary, flags, err, out) + } else { + t.Logf("built %v in %v", name, time.Since(start)) + target.exe = exe + target.err = nil + } + }) + + return target.exe, target.err +} + +func TestVDSO(t *testing.T) { + t.Parallel() + output := runTestProg(t, "testprog", "SignalInVDSO") + want := "success\n" + if output != want { + t.Fatalf("output:\n%s\n\nwanted:\n%s", output, want) + } +} + +func testCrashHandler(t *testing.T, cgo bool) { + type crashTest struct { + Cgo bool + } + var output string + if cgo { + output = runTestProg(t, "testprogcgo", "Crash") + } else { + output = runTestProg(t, "testprog", "Crash") + } + want := "main: recovered done\nnew-thread: recovered done\nsecond-new-thread: recovered done\nmain-again: recovered done\n" + if output != want { + t.Fatalf("output:\n%s\n\nwanted:\n%s", output, want) + } +} + +func TestCrashHandler(t *testing.T) { + testCrashHandler(t, false) +} + +func testDeadlock(t *testing.T, name string) { + // External linking brings in cgo, causing deadlock detection not working. + testenv.MustInternalLink(t, false) + + output := runTestProg(t, "testprog", name) + want := "fatal error: all goroutines are asleep - deadlock!\n" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + +func TestSimpleDeadlock(t *testing.T) { + testDeadlock(t, "SimpleDeadlock") +} + +func TestInitDeadlock(t *testing.T) { + testDeadlock(t, "InitDeadlock") +} + +func TestLockedDeadlock(t *testing.T) { + testDeadlock(t, "LockedDeadlock") +} + +func TestLockedDeadlock2(t *testing.T) { + testDeadlock(t, "LockedDeadlock2") +} + +func TestGoexitDeadlock(t *testing.T) { + // External linking brings in cgo, causing deadlock detection not working. + testenv.MustInternalLink(t, false) + + output := runTestProg(t, "testprog", "GoexitDeadlock") + want := "no goroutines (main called runtime.Goexit) - deadlock!" 
+ if !strings.Contains(output, want) { + t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want) + } +} + +func TestStackOverflow(t *testing.T) { + output := runTestProg(t, "testprog", "StackOverflow") + want := []string{ + "runtime: goroutine stack exceeds 1474560-byte limit\n", + "fatal error: stack overflow", + // information about the current SP and stack bounds + "runtime: sp=", + "stack=[", + } + if !strings.HasPrefix(output, want[0]) { + t.Errorf("output does not start with %q", want[0]) + } + for _, s := range want[1:] { + if !strings.Contains(output, s) { + t.Errorf("output does not contain %q", s) + } + } + if t.Failed() { + t.Logf("output:\n%s", output) + } +} + +func TestThreadExhaustion(t *testing.T) { + output := runTestProg(t, "testprog", "ThreadExhaustion") + want := "runtime: program exceeds 10-thread limit\nfatal error: thread exhaustion" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + +func TestRecursivePanic(t *testing.T) { + output := runTestProg(t, "testprog", "RecursivePanic") + want := `wrap: bad +panic: again + +` + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } + +} + +func TestRecursivePanic2(t *testing.T) { + output := runTestProg(t, "testprog", "RecursivePanic2") + want := `first panic +second panic +panic: third panic + +` + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } + +} + +func TestRecursivePanic3(t *testing.T) { + output := runTestProg(t, "testprog", "RecursivePanic3") + want := `panic: first panic + +` + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } + +} + +func TestRecursivePanic4(t *testing.T) { + output := runTestProg(t, "testprog", "RecursivePanic4") + want := `panic: first panic [recovered] + panic: second panic +` + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } + +} + +func TestRecursivePanic5(t *testing.T) { + output := runTestProg(t, "testprog", "RecursivePanic5") + want := `first panic +second panic +panic: third panic +` + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } + +} + +func TestGoexitCrash(t *testing.T) { + // External linking brings in cgo, causing deadlock detection not working. + testenv.MustInternalLink(t, false) + + output := runTestProg(t, "testprog", "GoexitExit") + want := "no goroutines (main called runtime.Goexit) - deadlock!" 
+ if !strings.Contains(output, want) { + t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want) + } +} + +func TestGoexitDefer(t *testing.T) { + c := make(chan struct{}) + go func() { + defer func() { + r := recover() + if r != nil { + t.Errorf("non-nil recover during Goexit") + } + c <- struct{}{} + }() + runtime.Goexit() + }() + // Note: if the defer fails to run, we will get a deadlock here + <-c +} + +func TestGoNil(t *testing.T) { + output := runTestProg(t, "testprog", "GoNil") + want := "go of nil func value" + if !strings.Contains(output, want) { + t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want) + } +} + +func TestMainGoroutineID(t *testing.T) { + output := runTestProg(t, "testprog", "MainGoroutineID") + want := "panic: test\n\ngoroutine 1 [running]:\n" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + +func TestNoHelperGoroutines(t *testing.T) { + output := runTestProg(t, "testprog", "NoHelperGoroutines") + matches := regexp.MustCompile(`goroutine [0-9]+ \[`).FindAllStringSubmatch(output, -1) + if len(matches) != 1 || matches[0][0] != "goroutine 1 [" { + t.Fatalf("want to see only goroutine 1, see:\n%s", output) + } +} + +func TestBreakpoint(t *testing.T) { + output := runTestProg(t, "testprog", "Breakpoint") + // If runtime.Breakpoint() is inlined, then the stack trace prints + // "runtime.Breakpoint(...)" instead of "runtime.Breakpoint()". + want := "runtime.Breakpoint(" + if !strings.Contains(output, want) { + t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want) + } +} + +func TestGoexitInPanic(t *testing.T) { + // External linking brings in cgo, causing deadlock detection not working. + testenv.MustInternalLink(t, false) + + // see issue 8774: this code used to trigger an infinite recursion + output := runTestProg(t, "testprog", "GoexitInPanic") + want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + +// Issue 14965: Runtime panics should be of type runtime.Error +func TestRuntimePanicWithRuntimeError(t *testing.T) { + testCases := [...]func(){ + 0: func() { + var m map[uint64]bool + m[1234] = true + }, + 1: func() { + ch := make(chan struct{}) + close(ch) + close(ch) + }, + 2: func() { + var ch = make(chan struct{}) + close(ch) + ch <- struct{}{} + }, + 3: func() { + var s = make([]int, 2) + _ = s[2] + }, + 4: func() { + n := -1 + _ = make(chan bool, n) + }, + 5: func() { + close((chan bool)(nil)) + }, + } + + for i, fn := range testCases { + got := panicValue(fn) + if _, ok := got.(runtime.Error); !ok { + t.Errorf("test #%d: recovered value %v(type %T) does not implement runtime.Error", i, got, got) + } + } +} + +func panicValue(fn func()) (recovered any) { + defer func() { + recovered = recover() + }() + fn() + return +} + +func TestPanicAfterGoexit(t *testing.T) { + // an uncaught panic should still work after goexit + output := runTestProg(t, "testprog", "PanicAfterGoexit") + want := "panic: hello" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + +func TestRecoveredPanicAfterGoexit(t *testing.T) { + // External linking brings in cgo, causing deadlock detection not working. + testenv.MustInternalLink(t, false) + + output := runTestProg(t, "testprog", "RecoveredPanicAfterGoexit") + want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!" 
+	if !strings.HasPrefix(output, want) {
+		t.Fatalf("output does not start with %q:\n%s", want, output)
+	}
+}
+
+func TestRecoverBeforePanicAfterGoexit(t *testing.T) {
+	// External linking brings in cgo, causing deadlock detection not working.
+	testenv.MustInternalLink(t, false)
+
+	t.Parallel()
+	output := runTestProg(t, "testprog", "RecoverBeforePanicAfterGoexit")
+	want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!"
+	if !strings.HasPrefix(output, want) {
+		t.Fatalf("output does not start with %q:\n%s", want, output)
+	}
+}
+
+func TestRecoverBeforePanicAfterGoexit2(t *testing.T) {
+	// External linking brings in cgo, causing deadlock detection not working.
+	testenv.MustInternalLink(t, false)
+
+	t.Parallel()
+	output := runTestProg(t, "testprog", "RecoverBeforePanicAfterGoexit2")
+	want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!"
+	if !strings.HasPrefix(output, want) {
+		t.Fatalf("output does not start with %q:\n%s", want, output)
+	}
+}
+
+func TestNetpollDeadlock(t *testing.T) {
+	t.Parallel()
+	output := runTestProg(t, "testprognet", "NetpollDeadlock")
+	want := "done\n"
+	if !strings.HasSuffix(output, want) {
+		t.Fatalf("output does not end with %q:\n%s", want, output)
+	}
+}
+
+func TestPanicTraceback(t *testing.T) {
+	t.Parallel()
+	output := runTestProg(t, "testprog", "PanicTraceback")
+	want := "panic: hello\n\tpanic: panic pt2\n\tpanic: panic pt1\n"
+	if !strings.HasPrefix(output, want) {
+		t.Fatalf("output does not start with %q:\n%s", want, output)
+	}
+
+	// Check functions in the traceback.
+	fns := []string{"main.pt1.func1", "panic", "main.pt2.func1", "panic", "main.pt2", "main.pt1"}
+	for _, fn := range fns {
+		re := regexp.MustCompile(`(?m)^` + regexp.QuoteMeta(fn) + `\(.*\n`)
+		idx := re.FindStringIndex(output)
+		if idx == nil {
+			t.Fatalf("expected %q function in traceback:\n%s", fn, output)
+		}
+		output = output[idx[1]:]
+	}
+}
+
+func testPanicDeadlock(t *testing.T, name string, want string) {
+	// test issue 14432
+	output := runTestProg(t, "testprog", name)
+	if !strings.HasPrefix(output, want) {
+		t.Fatalf("output does not start with %q:\n%s", want, output)
+	}
+}
+
+func TestPanicDeadlockGosched(t *testing.T) {
+	testPanicDeadlock(t, "GoschedInPanic", "panic: errorThatGosched\n\n")
+}
+
+func TestPanicDeadlockSyscall(t *testing.T) {
+	testPanicDeadlock(t, "SyscallInPanic", "1\n2\npanic: 3\n\n")
+}
+
+func TestPanicLoop(t *testing.T) {
+	output := runTestProg(t, "testprog", "PanicLoop")
+	if want := "panic while printing panic value"; !strings.Contains(output, want) {
+		t.Errorf("output does not contain %q:\n%s", want, output)
+	}
+}
+
+func TestMemPprof(t *testing.T) {
+	testenv.MustHaveGoRun(t)
+
+	exe, err := buildTestProg(t, "testprog")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	got, err := testenv.CleanCmdEnv(exec.Command(exe, "MemProf")).CombinedOutput()
+	if err != nil {
+		t.Fatalf("testprog failed: %s, output:\n%s", err, got)
+	}
+	fn := strings.TrimSpace(string(got))
+	defer os.Remove(fn)
+
+	for try := 0; try < 2; try++ {
+		cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "tool", "pprof", "-alloc_space", "-top"))
+		// Check that pprof works both with and without explicit executable on command line.
+ if try == 0 { + cmd.Args = append(cmd.Args, exe, fn) + } else { + cmd.Args = append(cmd.Args, fn) + } + found := false + for i, e := range cmd.Env { + if strings.HasPrefix(e, "PPROF_TMPDIR=") { + cmd.Env[i] = "PPROF_TMPDIR=" + os.TempDir() + found = true + break + } + } + if !found { + cmd.Env = append(cmd.Env, "PPROF_TMPDIR="+os.TempDir()) + } + + top, err := cmd.CombinedOutput() + t.Logf("%s:\n%s", cmd.Args, top) + if err != nil { + t.Error(err) + } else if !bytes.Contains(top, []byte("MemProf")) { + t.Error("missing MemProf in pprof output") + } + } +} + +var concurrentMapTest = flag.Bool("run_concurrent_map_tests", false, "also run flaky concurrent map tests") + +func TestConcurrentMapWrites(t *testing.T) { + if !*concurrentMapTest { + t.Skip("skipping without -run_concurrent_map_tests") + } + testenv.MustHaveGoRun(t) + output := runTestProg(t, "testprog", "concurrentMapWrites") + want := "fatal error: concurrent map writes" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} +func TestConcurrentMapReadWrite(t *testing.T) { + if !*concurrentMapTest { + t.Skip("skipping without -run_concurrent_map_tests") + } + testenv.MustHaveGoRun(t) + output := runTestProg(t, "testprog", "concurrentMapReadWrite") + want := "fatal error: concurrent map read and map write" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} +func TestConcurrentMapIterateWrite(t *testing.T) { + if !*concurrentMapTest { + t.Skip("skipping without -run_concurrent_map_tests") + } + testenv.MustHaveGoRun(t) + output := runTestProg(t, "testprog", "concurrentMapIterateWrite") + want := "fatal error: concurrent map iteration and map write" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + +type point struct { + x, y *int +} + +func (p *point) negate() { + *p.x = *p.x * -1 + *p.y = *p.y * -1 +} + +// Test for issue #10152. +func TestPanicInlined(t *testing.T) { + defer func() { + r := recover() + if r == nil { + t.Fatalf("recover failed") + } + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + buf = buf[:n] + if !bytes.Contains(buf, []byte("(*point).negate(")) { + t.Fatalf("expecting stack trace to contain call to (*point).negate()") + } + }() + + pt := new(point) + pt.negate() +} + +// Test for issues #3934 and #20018. +// We want to delay exiting until a panic print is complete. +func TestPanicRace(t *testing.T) { + testenv.MustHaveGoRun(t) + + exe, err := buildTestProg(t, "testprog") + if err != nil { + t.Fatal(err) + } + + // The test is intentionally racy, and in my testing does not + // produce the expected output about 0.05% of the time. + // So run the program in a loop and only fail the test if we + // get the wrong output ten times in a row. + const tries = 10 +retry: + for i := 0; i < tries; i++ { + got, err := testenv.CleanCmdEnv(exec.Command(exe, "PanicRace")).CombinedOutput() + if err == nil { + t.Logf("try %d: program exited successfully, should have failed", i+1) + continue + } + + if i > 0 { + t.Logf("try %d:\n", i+1) + } + t.Logf("%s\n", got) + + wants := []string{ + "panic: crash", + "PanicRace", + "created by ", + } + for _, want := range wants { + if !bytes.Contains(got, []byte(want)) { + t.Logf("did not find expected string %q", want) + continue retry + } + } + + // Test generated expected output. 
+		return
+	}
+	t.Errorf("test ran %d times without producing expected output", tries)
+}
+
+func TestBadTraceback(t *testing.T) {
+	output := runTestProg(t, "testprog", "BadTraceback")
+	for _, want := range []string{
+		"unexpected return pc",
+		"called from 0xbad",
+		"00000bad",    // Smashed LR in hex dump
+		"<main.badLR", // Symbolization in hex dump (badLR1 or badLR2)
+	} {
+		if !strings.Contains(output, want) {
+			t.Errorf("output does not contain %q:\n%s", want, output)
+		}
+	}
+}
+
+func init() {
+	if len(os.Args) >= 2 && os.Args[1] == "testPanicSystemstackInternal" {
+		// Complete any in-flight GCs and disable future ones. We're going to
+		// block goroutines on runtime locks, which aren't ever preemptible for the
+		// GC to scan them.
+		runtime.GC()
+		debug.SetGCPercent(-1)
+		// Get two threads running on the system stack with
+		// something recognizable in the stack trace.
+		runtime.GOMAXPROCS(2)
+		go testPanicSystemstackInternal()
+		testPanicSystemstackInternal()
+	}
+}
+
+func testPanicSystemstackInternal() {
+	runtime.BlockOnSystemStack()
+	os.Exit(1) // Should be unreachable.
+}
+
+func TestSignalExitStatus(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+	exe, err := buildTestProg(t, "testprog")
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = testenv.CleanCmdEnv(exec.Command(exe, "SignalExitStatus")).Run()
+	if err == nil {
+		t.Error("test program succeeded unexpectedly")
+	} else if ee, ok := err.(*exec.ExitError); !ok {
+		t.Errorf("error (%v) has type %T; expected exec.ExitError", err, err)
+	} else if ws, ok := ee.Sys().(syscall.WaitStatus); !ok {
+		t.Errorf("error.Sys (%v) has type %T; expected syscall.WaitStatus", ee.Sys(), ee.Sys())
+	} else if !ws.Signaled() || ws.Signal() != syscall.SIGTERM {
+		t.Errorf("got %v; expected SIGTERM", ee)
+	}
+}
+
+func TestSignalIgnoreSIGTRAP(t *testing.T) {
+	if runtime.GOOS == "openbsd" {
+		testenv.SkipFlaky(t, 49725)
+	}
+
+	output := runTestProg(t, "testprognet", "SignalIgnoreSIGTRAP")
+	want := "OK\n"
+	if output != want {
+		t.Fatalf("want %s, got %s\n", want, output)
+	}
+}
+
+func TestSignalDuringExec(t *testing.T) {
+	switch runtime.GOOS {
+	case "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd":
+	default:
+		t.Skipf("skipping test on %s", runtime.GOOS)
+	}
+	output := runTestProg(t, "testprognet", "SignalDuringExec")
+	want := "OK\n"
+	if output != want {
+		t.Fatalf("want %s, got %s\n", want, output)
+	}
+}
+
+func TestSignalM(t *testing.T) {
+	r, w, errno := runtime.Pipe()
+	if errno != 0 {
+		t.Fatal(syscall.Errno(errno))
+	}
+	defer func() {
+		runtime.Close(r)
+		runtime.Close(w)
+	}()
+	runtime.Closeonexec(r)
+	runtime.Closeonexec(w)
+
+	var want, got int64
+	var wg sync.WaitGroup
+	ready := make(chan *runtime.M)
+	wg.Add(1)
+	go func() {
+		runtime.LockOSThread()
+		want, got = runtime.WaitForSigusr1(r, w, func(mp *runtime.M) {
+			ready <- mp
+		})
+		runtime.UnlockOSThread()
+		wg.Done()
+	}()
+	waitingM := <-ready
+	runtime.SendSigusr1(waitingM)
+
+	timer := time.AfterFunc(time.Second, func() {
+		// Write 1 to tell WaitForSigusr1 that we timed out.
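+		// Any byte written to the pipe wakes WaitForSigusr1, which
+		// then reports got == -1, so the test fails with a message
+		// instead of hanging forever.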
+ bw := byte(1) + if n := runtime.Write(uintptr(w), unsafe.Pointer(&bw), 1); n != 1 { + t.Errorf("pipe write failed: %d", n) + } + }) + defer timer.Stop() + + wg.Wait() + if got == -1 { + t.Fatal("signalM signal not received") + } else if want != got { + t.Fatalf("signal sent to M %d, but received on M %d", want, got) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/create_file_nounix.go b/platform/dbops/binaries/go/go/src/runtime/create_file_nounix.go new file mode 100644 index 0000000000000000000000000000000000000000..60f75175a2a6a72d81b0a30c156b727f940b554e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/create_file_nounix.go @@ -0,0 +1,14 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !unix + +package runtime + +const canCreateFile = false + +func create(name *byte, perm int32) int32 { + throw("unimplemented") + return -1 +} diff --git a/platform/dbops/binaries/go/go/src/runtime/create_file_unix.go b/platform/dbops/binaries/go/go/src/runtime/create_file_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..7280810ed2b6c1dd6c7f1bf3848c21a606150504 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/create_file_unix.go @@ -0,0 +1,14 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package runtime + +const canCreateFile = true + +// create returns an fd to a write-only file. +func create(name *byte, perm int32) int32 { + return open(name, _O_CREAT|_O_WRONLY|_O_TRUNC, perm) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/debug.go b/platform/dbops/binaries/go/go/src/runtime/debug.go new file mode 100644 index 0000000000000000000000000000000000000000..3233ce8ee73718c1123594b28dfc71f255a9ee0b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/debug.go @@ -0,0 +1,126 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "runtime/internal/atomic" + "unsafe" +) + +// GOMAXPROCS sets the maximum number of CPUs that can be executing +// simultaneously and returns the previous setting. It defaults to +// the value of [runtime.NumCPU]. If n < 1, it does not change the current setting. +// This call will go away when the scheduler improves. +func GOMAXPROCS(n int) int { + if GOARCH == "wasm" && n > 1 { + n = 1 // WebAssembly has no threads yet, so only one CPU is possible. + } + + lock(&sched.lock) + ret := int(gomaxprocs) + unlock(&sched.lock) + if n <= 0 || n == ret { + return ret + } + + stw := stopTheWorldGC(stwGOMAXPROCS) + + // newprocs will be processed by startTheWorld + newprocs = int32(n) + + startTheWorldGC(stw) + return ret +} + +// NumCPU returns the number of logical CPUs usable by the current process. +// +// The set of available CPUs is checked by querying the operating system +// at process startup. Changes to operating system CPU allocation after +// process startup are not reflected. +func NumCPU() int { + return int(ncpu) +} + +// NumCgoCall returns the number of cgo calls made by the current process. 
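+// The total is the global counter accumulated from exited Ms plus
+// the per-M counters of all live Ms, so the value is only
+// approximate while cgo calls are in flight.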
+func NumCgoCall() int64 { + var n = int64(atomic.Load64(&ncgocall)) + for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink { + n += int64(mp.ncgocall) + } + return n +} + +func totalMutexWaitTimeNanos() int64 { + total := sched.totalMutexWaitTime.Load() + + total += sched.totalRuntimeLockWaitTime.Load() + for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink { + total += mp.mLockProfile.waitTime.Load() + } + + return total +} + +// NumGoroutine returns the number of goroutines that currently exist. +func NumGoroutine() int { + return int(gcount()) +} + +//go:linkname debug_modinfo runtime/debug.modinfo +func debug_modinfo() string { + return modinfo +} + +// mayMoreStackPreempt is a maymorestack hook that forces a preemption +// at every possible cooperative preemption point. +// +// This is valuable to apply to the runtime, which can be sensitive to +// preemption points. To apply this to all preemption points in the +// runtime and runtime-like code, use the following in bash or zsh: +// +// X=(-{gc,asm}flags={runtime/...,reflect,sync}=-d=maymorestack=runtime.mayMoreStackPreempt) GOFLAGS=${X[@]} +// +// This must be deeply nosplit because it is called from a function +// prologue before the stack is set up and because the compiler will +// call it from any splittable prologue (leading to infinite +// recursion). +// +// Ideally it should also use very little stack because the linker +// doesn't currently account for this in nosplit stack depth checking. +// +// Ensure mayMoreStackPreempt can be called for all ABIs. +// +//go:nosplit +//go:linkname mayMoreStackPreempt +func mayMoreStackPreempt() { + // Don't do anything on the g0 or gsignal stack. + gp := getg() + if gp == gp.m.g0 || gp == gp.m.gsignal { + return + } + // Force a preemption, unless the stack is already poisoned. + if gp.stackguard0 < stackPoisonMin { + gp.stackguard0 = stackPreempt + } +} + +// mayMoreStackMove is a maymorestack hook that forces stack movement +// at every possible point. +// +// See mayMoreStackPreempt. +// +//go:nosplit +//go:linkname mayMoreStackMove +func mayMoreStackMove() { + // Don't do anything on the g0 or gsignal stack. + gp := getg() + if gp == gp.m.g0 || gp == gp.m.gsignal { + return + } + // Force stack movement, unless the stack is already poisoned. + if gp.stackguard0 < stackPoisonMin { + gp.stackguard0 = stackForceMove + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/debug_test.go b/platform/dbops/binaries/go/go/src/runtime/debug_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1c00d2fb0d918b07fc44ed98996c9aa6f9ca3f41 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/debug_test.go @@ -0,0 +1,307 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: This test could be implemented on all (most?) UNIXes if we +// added syscall.Tgkill more widely. + +// We skip all of these tests under race mode because our test thread +// spends all of its time in the race runtime, which isn't a safe +// point. 
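+//
+// These tests drive runtime.InjectDebugCall, which interrupts a
+// goroutine with SIGTRAP (sent via syscall.Tgkill) and runs a
+// function in its context; this is the same protocol a debugger
+// uses to make a function call in a stopped process.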
+
+//go:build (amd64 || arm64 || ppc64le) && linux && !race
+
+package runtime_test
+
+import (
+	"fmt"
+	"internal/abi"
+	"math"
+	"os"
+	"regexp"
+	"runtime"
+	"runtime/debug"
+	"sync/atomic"
+	"syscall"
+	"testing"
+)
+
+func startDebugCallWorker(t *testing.T) (g *runtime.G, after func()) {
+	// This can deadlock if run under a debugger because it
+	// depends on catching SIGTRAP, which is usually swallowed by
+	// a debugger.
+	skipUnderDebugger(t)
+
+	// This can deadlock if there aren't enough threads or if a GC
+	// tries to interrupt an atomic loop (see issue #10958). Execute
+	// an extra GC to ensure even the sweep phase is done (out of
+	// caution to prevent #49370 from happening).
+	// TODO(mknyszek): This extra GC cycle is likely unnecessary
+	// because preemption (which may happen during the sweep phase)
+	// isn't much of an issue anymore thanks to asynchronous preemption.
+	// The biggest risk is having a write barrier in the debug call
+	// injection test code fire, because it runs in a signal handler
+	// and may not have a P.
+	//
+	// We use 8 Ps so there's room for the debug call worker,
+	// something that's trying to preempt the call worker, and the
+	// goroutine that's trying to stop the call worker.
+	ogomaxprocs := runtime.GOMAXPROCS(8)
+	ogcpercent := debug.SetGCPercent(-1)
+	runtime.GC()
+
+	// ready is a buffered channel so debugCallWorker won't block
+	// on sending to it. This makes it less likely we'll catch
+	// debugCallWorker while it's in the runtime.
+	ready := make(chan *runtime.G, 1)
+	var stop uint32
+	done := make(chan error)
+	go debugCallWorker(ready, &stop, done)
+	g = <-ready
+	return g, func() {
+		atomic.StoreUint32(&stop, 1)
+		err := <-done
+		if err != nil {
+			t.Fatal(err)
+		}
+		runtime.GOMAXPROCS(ogomaxprocs)
+		debug.SetGCPercent(ogcpercent)
+	}
+}
+
+func debugCallWorker(ready chan<- *runtime.G, stop *uint32, done chan<- error) {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	ready <- runtime.Getg()
+
+	x := 2
+	debugCallWorker2(stop, &x)
+	if x != 1 {
+		done <- fmt.Errorf("want x = 1, got %d; register pointer not adjusted?", x)
+	}
+	close(done)
+}
+
+// Don't inline this function, since we want to test adjusting
+// pointers in the arguments.
+//
+//go:noinline
+func debugCallWorker2(stop *uint32, x *int) {
+	for atomic.LoadUint32(stop) == 0 {
+		// Strongly encourage x to live in a register so we
+		// can test pointer register adjustment.
+		*x++
+	}
+	*x = 1
+}
+
+func debugCallTKill(tid int) error {
+	return syscall.Tgkill(syscall.Getpid(), tid, syscall.SIGTRAP)
+}
+
+// skipUnderDebugger skips the current test when running under a
+// debugger (specifically if this process has a tracer). This is
+// Linux-specific.
+func skipUnderDebugger(t *testing.T) {
+	pid := syscall.Getpid()
+	status, err := os.ReadFile(fmt.Sprintf("/proc/%d/status", pid))
+	if err != nil {
+		t.Logf("couldn't get proc tracer: %s", err)
+		return
+	}
+	re := regexp.MustCompile(`TracerPid:\s+([0-9]+)`)
+	sub := re.FindSubmatch(status)
+	if sub == nil {
+		t.Logf("couldn't find proc tracer PID")
+		return
+	}
+	if string(sub[1]) == "0" {
+		return
+	}
+	t.Skip("test will deadlock under a debugger")
+}
+
+func TestDebugCall(t *testing.T) {
+	g, after := startDebugCallWorker(t)
+	defer after()
+
+	type stackArgs struct {
+		x0    int
+		x1    float64
+		y0Ret int
+		y1Ret float64
+	}
+
+	// Inject a call into the debugCallWorker goroutine and test
+	// basic argument and result passing.
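+	// On platforms with a register-based ABI the values travel in
+	// abi.RegArgs; otherwise they are laid out in a stackArgs struct
+	// that mirrors the target function's argument frame.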
+ fn := func(x int, y float64) (y0Ret int, y1Ret float64) { + return x + 1, y + 1.0 + } + var args *stackArgs + var regs abi.RegArgs + intRegs := regs.Ints[:] + floatRegs := regs.Floats[:] + fval := float64(42.0) + if len(intRegs) > 0 { + intRegs[0] = 42 + floatRegs[0] = math.Float64bits(fval) + } else { + args = &stackArgs{ + x0: 42, + x1: 42.0, + } + } + + if _, err := runtime.InjectDebugCall(g, fn, ®s, args, debugCallTKill, false); err != nil { + t.Fatal(err) + } + var result0 int + var result1 float64 + if len(intRegs) > 0 { + result0 = int(intRegs[0]) + result1 = math.Float64frombits(floatRegs[0]) + } else { + result0 = args.y0Ret + result1 = args.y1Ret + } + if result0 != 43 { + t.Errorf("want 43, got %d", result0) + } + if result1 != fval+1 { + t.Errorf("want 43, got %f", result1) + } +} + +func TestDebugCallLarge(t *testing.T) { + g, after := startDebugCallWorker(t) + defer after() + + // Inject a call with a large call frame. + const N = 128 + var args struct { + in [N]int + out [N]int + } + fn := func(in [N]int) (out [N]int) { + for i := range in { + out[i] = in[i] + 1 + } + return + } + var want [N]int + for i := range args.in { + args.in[i] = i + want[i] = i + 1 + } + if _, err := runtime.InjectDebugCall(g, fn, nil, &args, debugCallTKill, false); err != nil { + t.Fatal(err) + } + if want != args.out { + t.Fatalf("want %v, got %v", want, args.out) + } +} + +func TestDebugCallGC(t *testing.T) { + g, after := startDebugCallWorker(t) + defer after() + + // Inject a call that performs a GC. + if _, err := runtime.InjectDebugCall(g, runtime.GC, nil, nil, debugCallTKill, false); err != nil { + t.Fatal(err) + } +} + +func TestDebugCallGrowStack(t *testing.T) { + g, after := startDebugCallWorker(t) + defer after() + + // Inject a call that grows the stack. debugCallWorker checks + // for stack pointer breakage. + if _, err := runtime.InjectDebugCall(g, func() { growStack(nil) }, nil, nil, debugCallTKill, false); err != nil { + t.Fatal(err) + } +} + +//go:nosplit +func debugCallUnsafePointWorker(gpp **runtime.G, ready, stop *uint32) { + // The nosplit causes this function to not contain safe-points + // except at calls. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + *gpp = runtime.Getg() + + for atomic.LoadUint32(stop) == 0 { + atomic.StoreUint32(ready, 1) + } +} + +func TestDebugCallUnsafePoint(t *testing.T) { + skipUnderDebugger(t) + + // This can deadlock if there aren't enough threads or if a GC + // tries to interrupt an atomic loop (see issue #10958). + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8)) + + // InjectDebugCall cannot be executed while a GC is actively in + // progress. Wait until the current GC is done, and turn it off. + // + // See #49370. + runtime.GC() + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + + // Test that the runtime refuses call injection at unsafe points. + var g *runtime.G + var ready, stop uint32 + defer atomic.StoreUint32(&stop, 1) + go debugCallUnsafePointWorker(&g, &ready, &stop) + for atomic.LoadUint32(&ready) == 0 { + runtime.Gosched() + } + + _, err := runtime.InjectDebugCall(g, func() {}, nil, nil, debugCallTKill, true) + if msg := "call not at safe point"; err == nil || err.Error() != msg { + t.Fatalf("want %q, got %s", msg, err) + } +} + +func TestDebugCallPanic(t *testing.T) { + skipUnderDebugger(t) + + // This can deadlock if there aren't enough threads. + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8)) + + // InjectDebugCall cannot be executed while a GC is actively in + // progress. 
Wait until the current GC is done, and turn it off. + // + // See #10958 and #49370. + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + // TODO(mknyszek): This extra GC cycle is likely unnecessary + // because preemption (which may happen during the sweep phase) + // isn't much of an issue anymore thanks to asynchronous preemption. + // The biggest risk is having a write barrier in the debug call + // injection test code fire, because it runs in a signal handler + // and may not have a P. + runtime.GC() + + ready := make(chan *runtime.G) + var stop uint32 + defer atomic.StoreUint32(&stop, 1) + go func() { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + ready <- runtime.Getg() + for atomic.LoadUint32(&stop) == 0 { + } + }() + g := <-ready + + p, err := runtime.InjectDebugCall(g, func() { panic("test") }, nil, nil, debugCallTKill, false) + if err != nil { + t.Fatal(err) + } + if ps, ok := p.(string); !ok || ps != "test" { + t.Fatalf("wanted panic %v, got %v", "test", p) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/debugcall.go b/platform/dbops/binaries/go/go/src/runtime/debugcall.go new file mode 100644 index 0000000000000000000000000000000000000000..5dd83063ff9cb8bb88f6e34d42ab162c9f106812 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/debugcall.go @@ -0,0 +1,266 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Though the debug call function feature is not enabled on +// ppc64, inserted ppc64 to avoid missing Go declaration error +// for debugCallPanicked while building runtime.test +//go:build amd64 || arm64 || ppc64le || ppc64 + +package runtime + +import ( + "internal/abi" + "unsafe" +) + +const ( + debugCallSystemStack = "executing on Go runtime stack" + debugCallUnknownFunc = "call from unknown function" + debugCallRuntime = "call from within the Go runtime" + debugCallUnsafePoint = "call not at safe point" +) + +func debugCallV2() +func debugCallPanicked(val any) + +// debugCallCheck checks whether it is safe to inject a debugger +// function call with return PC pc. If not, it returns a string +// explaining why. +// +//go:nosplit +func debugCallCheck(pc uintptr) string { + // No user calls from the system stack. + if getg() != getg().m.curg { + return debugCallSystemStack + } + if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) { + // Fast syscalls (nanotime) and racecall switch to the + // g0 stack without switching g. We can't safely make + // a call in this state. (We can't even safely + // systemstack.) + return debugCallSystemStack + } + + // Switch to the system stack to avoid overflowing the user + // stack. + var ret string + systemstack(func() { + f := findfunc(pc) + if !f.valid() { + ret = debugCallUnknownFunc + return + } + + name := funcname(f) + + switch name { + case "debugCall32", + "debugCall64", + "debugCall128", + "debugCall256", + "debugCall512", + "debugCall1024", + "debugCall2048", + "debugCall4096", + "debugCall8192", + "debugCall16384", + "debugCall32768", + "debugCall65536": + // These functions are allowed so that the debugger can initiate multiple function calls. + // See: https://golang.org/cl/161137/ + return + } + + // Disallow calls from the runtime. We could + // potentially make this condition tighter (e.g., not + // when locks are held), but there are enough tightly + // coded sequences (e.g., defer handling) that it's + // better to play it safe. 
+ if pfx := "runtime."; len(name) > len(pfx) && name[:len(pfx)] == pfx { + ret = debugCallRuntime + return + } + + // Check that this isn't an unsafe-point. + if pc != f.entry() { + pc-- + } + up := pcdatavalue(f, abi.PCDATA_UnsafePoint, pc) + if up != abi.UnsafePointSafe { + // Not at a safe point. + ret = debugCallUnsafePoint + } + }) + return ret +} + +// debugCallWrap starts a new goroutine to run a debug call and blocks +// the calling goroutine. On the goroutine, it prepares to recover +// panics from the debug call, and then calls the call dispatching +// function at PC dispatch. +// +// This must be deeply nosplit because there are untyped values on the +// stack from debugCallV2. +// +//go:nosplit +func debugCallWrap(dispatch uintptr) { + var lockedExt uint32 + callerpc := getcallerpc() + gp := getg() + + // Lock ourselves to the OS thread. + // + // Debuggers rely on us running on the same thread until we get to + // dispatch the function they asked as to. + // + // We're going to transfer this to the new G we just created. + lockOSThread() + + // Create a new goroutine to execute the call on. Run this on + // the system stack to avoid growing our stack. + systemstack(func() { + // TODO(mknyszek): It would be nice to wrap these arguments in an allocated + // closure and start the goroutine with that closure, but the compiler disallows + // implicit closure allocation in the runtime. + fn := debugCallWrap1 + newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc) + args := &debugCallWrapArgs{ + dispatch: dispatch, + callingG: gp, + } + newg.param = unsafe.Pointer(args) + + // Transfer locked-ness to the new goroutine. + // Save lock state to restore later. + mp := gp.m + if mp != gp.lockedm.ptr() { + throw("inconsistent lockedm") + } + // Save the external lock count and clear it so + // that it can't be unlocked from the debug call. + // Note: we already locked internally to the thread, + // so if we were locked before we're still locked now. + lockedExt = mp.lockedExt + mp.lockedExt = 0 + + mp.lockedg.set(newg) + newg.lockedm.set(mp) + gp.lockedm = 0 + + // Mark the calling goroutine as being at an async + // safe-point, since it has a few conservative frames + // at the bottom of the stack. This also prevents + // stack shrinks. + gp.asyncSafePoint = true + + // Stash newg away so we can execute it below (mcall's + // closure can't capture anything). + gp.schedlink.set(newg) + }) + + // Switch to the new goroutine. + mcall(func(gp *g) { + // Get newg. + newg := gp.schedlink.ptr() + gp.schedlink = 0 + + // Park the calling goroutine. + trace := traceAcquire() + casGToWaiting(gp, _Grunning, waitReasonDebugCall) + if trace.ok() { + trace.GoPark(traceBlockDebugCall, 1) + traceRelease(trace) + } + dropg() + + // Directly execute the new goroutine. The debug + // protocol will continue on the new goroutine, so + // it's important we not just let the scheduler do + // this or it may resume a different goroutine. + execute(newg, true) + }) + + // We'll resume here when the call returns. + + // Restore locked state. + mp := gp.m + mp.lockedExt = lockedExt + mp.lockedg.set(gp) + gp.lockedm.set(mp) + + // Undo the lockOSThread we did earlier. + unlockOSThread() + + gp.asyncSafePoint = false +} + +type debugCallWrapArgs struct { + dispatch uintptr + callingG *g +} + +// debugCallWrap1 is the continuation of debugCallWrap on the callee +// goroutine. 
+func debugCallWrap1() { + gp := getg() + args := (*debugCallWrapArgs)(gp.param) + dispatch, callingG := args.dispatch, args.callingG + gp.param = nil + + // Dispatch call and trap panics. + debugCallWrap2(dispatch) + + // Resume the caller goroutine. + getg().schedlink.set(callingG) + mcall(func(gp *g) { + callingG := gp.schedlink.ptr() + gp.schedlink = 0 + + // Unlock this goroutine from the M if necessary. The + // calling G will relock. + if gp.lockedm != 0 { + gp.lockedm = 0 + gp.m.lockedg = 0 + } + + // Switch back to the calling goroutine. At some point + // the scheduler will schedule us again and we'll + // finish exiting. + trace := traceAcquire() + casgstatus(gp, _Grunning, _Grunnable) + if trace.ok() { + trace.GoSched() + traceRelease(trace) + } + dropg() + lock(&sched.lock) + globrunqput(gp) + unlock(&sched.lock) + + trace = traceAcquire() + casgstatus(callingG, _Gwaiting, _Grunnable) + if trace.ok() { + trace.GoUnpark(callingG, 0) + traceRelease(trace) + } + execute(callingG, true) + }) +} + +func debugCallWrap2(dispatch uintptr) { + // Call the dispatch function and trap panics. + var dispatchF func() + dispatchFV := funcval{dispatch} + *(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV)) + + var ok bool + defer func() { + if !ok { + err := recover() + debugCallPanicked(err) + } + }() + dispatchF() + ok = true +} diff --git a/platform/dbops/binaries/go/go/src/runtime/debuglog.go b/platform/dbops/binaries/go/go/src/runtime/debuglog.go new file mode 100644 index 0000000000000000000000000000000000000000..873f1b45bddc8852fd22d4546c25e735a821c7ca --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/debuglog.go @@ -0,0 +1,831 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file provides an internal debug logging facility. The debug +// log is a lightweight, in-memory, per-M ring buffer. By default, the +// runtime prints the debug log on panic. +// +// To print something to the debug log, call dlog to obtain a dlogger +// and use the methods on that to add values. The values will be +// space-separated in the output (much like println). +// +// This facility can be enabled by passing -tags debuglog when +// building. Without this tag, dlog calls compile to nothing. + +package runtime + +import ( + "runtime/internal/atomic" + "runtime/internal/sys" + "unsafe" +) + +// debugLogBytes is the size of each per-M ring buffer. This is +// allocated off-heap to avoid blowing up the M and hence the GC'd +// heap size. +const debugLogBytes = 16 << 10 + +// debugLogStringLimit is the maximum number of bytes in a string. +// Above this, the string will be truncated with "..(n more bytes).." +const debugLogStringLimit = debugLogBytes / 8 + +// dlog returns a debug logger. The caller can use methods on the +// returned logger to add values, which will be space-separated in the +// final output, much like println. The caller must call end() to +// finish the message. +// +// dlog can be used from highly-constrained corners of the runtime: it +// is safe to use in the signal handler, from within the write +// barrier, from within the stack implementation, and in places that +// must be recursively nosplit. +// +// This will be compiled away if built without the debuglog build tag. +// However, argument construction may not be. 
If any of the arguments +// are not literals or trivial expressions, consider protecting the +// call with "if dlogEnabled". +// +//go:nosplit +//go:nowritebarrierrec +func dlog() *dlogger { + if !dlogEnabled { + return nil + } + + // Get the time. + tick, nano := uint64(cputicks()), uint64(nanotime()) + + // Try to get a cached logger. + l := getCachedDlogger() + + // If we couldn't get a cached logger, try to get one from the + // global pool. + if l == nil { + allp := (*uintptr)(unsafe.Pointer(&allDloggers)) + all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp))) + for l1 := all; l1 != nil; l1 = l1.allLink { + if l1.owned.Load() == 0 && l1.owned.CompareAndSwap(0, 1) { + l = l1 + break + } + } + } + + // If that failed, allocate a new logger. + if l == nil { + // Use sysAllocOS instead of sysAlloc because we want to interfere + // with the runtime as little as possible, and sysAlloc updates accounting. + l = (*dlogger)(sysAllocOS(unsafe.Sizeof(dlogger{}))) + if l == nil { + throw("failed to allocate debug log") + } + l.w.r.data = &l.w.data + l.owned.Store(1) + + // Prepend to allDloggers list. + headp := (*uintptr)(unsafe.Pointer(&allDloggers)) + for { + head := atomic.Loaduintptr(headp) + l.allLink = (*dlogger)(unsafe.Pointer(head)) + if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) { + break + } + } + } + + // If the time delta is getting too high, write a new sync + // packet. We set the limit so we don't write more than 6 + // bytes of delta in the record header. + const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets + if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit { + l.w.writeSync(tick, nano) + } + + // Reserve space for framing header. + l.w.ensure(debugLogHeaderSize) + l.w.write += debugLogHeaderSize + + // Write record header. + l.w.uvarint(tick - l.w.tick) + l.w.uvarint(nano - l.w.nano) + gp := getg() + if gp != nil && gp.m != nil && gp.m.p != 0 { + l.w.varint(int64(gp.m.p.ptr().id)) + } else { + l.w.varint(-1) + } + + return l +} + +// A dlogger writes to the debug log. +// +// To obtain a dlogger, call dlog(). When done with the dlogger, call +// end(). +type dlogger struct { + _ sys.NotInHeap + w debugLogWriter + + // allLink is the next dlogger in the allDloggers list. + allLink *dlogger + + // owned indicates that this dlogger is owned by an M. This is + // accessed atomically. + owned atomic.Uint32 +} + +// allDloggers is a list of all dloggers, linked through +// dlogger.allLink. This is accessed atomically. This is prepend only, +// so it doesn't need to protect against ABA races. +var allDloggers *dlogger + +//go:nosplit +func (l *dlogger) end() { + if !dlogEnabled { + return + } + + // Fill in framing header. + size := l.w.write - l.w.r.end + if !l.w.writeFrameAt(l.w.r.end, size) { + throw("record too large") + } + + // Commit the record. + l.w.r.end = l.w.write + + // Attempt to return this logger to the cache. + if putCachedDlogger(l) { + return + } + + // Return the logger to the global pool. 
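+ // (Storing 0 releases ownership; dlog reacquires loggers from the
+ // allDloggers list with a CompareAndSwap(0, 1).)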
+ l.owned.Store(0) +} + +const ( + debugLogUnknown = 1 + iota + debugLogBoolTrue + debugLogBoolFalse + debugLogInt + debugLogUint + debugLogHex + debugLogPtr + debugLogString + debugLogConstString + debugLogStringOverflow + + debugLogPC + debugLogTraceback +) + +//go:nosplit +func (l *dlogger) b(x bool) *dlogger { + if !dlogEnabled { + return l + } + if x { + l.w.byte(debugLogBoolTrue) + } else { + l.w.byte(debugLogBoolFalse) + } + return l +} + +//go:nosplit +func (l *dlogger) i(x int) *dlogger { + return l.i64(int64(x)) +} + +//go:nosplit +func (l *dlogger) i8(x int8) *dlogger { + return l.i64(int64(x)) +} + +//go:nosplit +func (l *dlogger) i16(x int16) *dlogger { + return l.i64(int64(x)) +} + +//go:nosplit +func (l *dlogger) i32(x int32) *dlogger { + return l.i64(int64(x)) +} + +//go:nosplit +func (l *dlogger) i64(x int64) *dlogger { + if !dlogEnabled { + return l + } + l.w.byte(debugLogInt) + l.w.varint(x) + return l +} + +//go:nosplit +func (l *dlogger) u(x uint) *dlogger { + return l.u64(uint64(x)) +} + +//go:nosplit +func (l *dlogger) uptr(x uintptr) *dlogger { + return l.u64(uint64(x)) +} + +//go:nosplit +func (l *dlogger) u8(x uint8) *dlogger { + return l.u64(uint64(x)) +} + +//go:nosplit +func (l *dlogger) u16(x uint16) *dlogger { + return l.u64(uint64(x)) +} + +//go:nosplit +func (l *dlogger) u32(x uint32) *dlogger { + return l.u64(uint64(x)) +} + +//go:nosplit +func (l *dlogger) u64(x uint64) *dlogger { + if !dlogEnabled { + return l + } + l.w.byte(debugLogUint) + l.w.uvarint(x) + return l +} + +//go:nosplit +func (l *dlogger) hex(x uint64) *dlogger { + if !dlogEnabled { + return l + } + l.w.byte(debugLogHex) + l.w.uvarint(x) + return l +} + +//go:nosplit +func (l *dlogger) p(x any) *dlogger { + if !dlogEnabled { + return l + } + l.w.byte(debugLogPtr) + if x == nil { + l.w.uvarint(0) + } else { + v := efaceOf(&x) + switch v._type.Kind_ & kindMask { + case kindChan, kindFunc, kindMap, kindPtr, kindUnsafePointer: + l.w.uvarint(uint64(uintptr(v.data))) + default: + throw("not a pointer type") + } + } + return l +} + +//go:nosplit +func (l *dlogger) s(x string) *dlogger { + if !dlogEnabled { + return l + } + + strData := unsafe.StringData(x) + datap := &firstmoduledata + if len(x) > 4 && datap.etext <= uintptr(unsafe.Pointer(strData)) && uintptr(unsafe.Pointer(strData)) < datap.end { + // String constants are in the rodata section, which + // isn't recorded in moduledata. But it has to be + // somewhere between etext and end. + l.w.byte(debugLogConstString) + l.w.uvarint(uint64(len(x))) + l.w.uvarint(uint64(uintptr(unsafe.Pointer(strData)) - datap.etext)) + } else { + l.w.byte(debugLogString) + // We can't use unsafe.Slice as it may panic, which isn't safe + // in this (potentially) nowritebarrier context. 
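+ // Build the slice header by hand instead: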
+ var b []byte + bb := (*slice)(unsafe.Pointer(&b)) + bb.array = unsafe.Pointer(strData) + bb.len, bb.cap = len(x), len(x) + if len(b) > debugLogStringLimit { + b = b[:debugLogStringLimit] + } + l.w.uvarint(uint64(len(b))) + l.w.bytes(b) + if len(b) != len(x) { + l.w.byte(debugLogStringOverflow) + l.w.uvarint(uint64(len(x) - len(b))) + } + } + return l +} + +//go:nosplit +func (l *dlogger) pc(x uintptr) *dlogger { + if !dlogEnabled { + return l + } + l.w.byte(debugLogPC) + l.w.uvarint(uint64(x)) + return l +} + +//go:nosplit +func (l *dlogger) traceback(x []uintptr) *dlogger { + if !dlogEnabled { + return l + } + l.w.byte(debugLogTraceback) + l.w.uvarint(uint64(len(x))) + for _, pc := range x { + l.w.uvarint(uint64(pc)) + } + return l +} + +// A debugLogWriter is a ring buffer of binary debug log records. +// +// A log record consists of a 2-byte framing header and a sequence of +// fields. The framing header gives the size of the record as a little +// endian 16-bit value. Each field starts with a byte indicating its +// type, followed by type-specific data. If the size in the framing +// header is 0, it's a sync record consisting of two little endian +// 64-bit values giving a new time base. +// +// Because this is a ring buffer, new records will eventually +// overwrite old records. Hence, it maintains a reader that consumes +// the log as it gets overwritten. That reader state is where an +// actual log reader would start. +type debugLogWriter struct { + _ sys.NotInHeap + write uint64 + data debugLogBuf + + // tick and nano are the time bases from the most recently + // written sync record. + tick, nano uint64 + + // r is a reader that consumes records as they get overwritten + // by the writer. It also acts as the initial reader state + // when printing the log. + r debugLogReader + + // buf is a scratch buffer for encoding. This is here to + // reduce stack usage. + buf [10]byte +} + +type debugLogBuf struct { + _ sys.NotInHeap + b [debugLogBytes]byte +} + +const ( + // debugLogHeaderSize is the number of bytes in the framing + // header of every dlog record. + debugLogHeaderSize = 2 + + // debugLogSyncSize is the number of bytes in a sync record. + debugLogSyncSize = debugLogHeaderSize + 2*8 +) + +//go:nosplit +func (l *debugLogWriter) ensure(n uint64) { + for l.write+n >= l.r.begin+uint64(len(l.data.b)) { + // Consume record at begin. + if l.r.skip() == ^uint64(0) { + // Wrapped around within a record. + // + // TODO(austin): It would be better to just + // eat the whole buffer at this point, but we + // have to communicate that to the reader + // somehow. 
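+ // Until then, a single record that wraps around the whole
+ // buffer is fatal: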
+ throw("record wrapped around") + } + } +} + +//go:nosplit +func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool { + l.data.b[pos%uint64(len(l.data.b))] = uint8(size) + l.data.b[(pos+1)%uint64(len(l.data.b))] = uint8(size >> 8) + return size <= 0xFFFF +} + +//go:nosplit +func (l *debugLogWriter) writeSync(tick, nano uint64) { + l.tick, l.nano = tick, nano + l.ensure(debugLogHeaderSize) + l.writeFrameAt(l.write, 0) + l.write += debugLogHeaderSize + l.writeUint64LE(tick) + l.writeUint64LE(nano) + l.r.end = l.write +} + +//go:nosplit +func (l *debugLogWriter) writeUint64LE(x uint64) { + var b [8]byte + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) + b[4] = byte(x >> 32) + b[5] = byte(x >> 40) + b[6] = byte(x >> 48) + b[7] = byte(x >> 56) + l.bytes(b[:]) +} + +//go:nosplit +func (l *debugLogWriter) byte(x byte) { + l.ensure(1) + pos := l.write + l.write++ + l.data.b[pos%uint64(len(l.data.b))] = x +} + +//go:nosplit +func (l *debugLogWriter) bytes(x []byte) { + l.ensure(uint64(len(x))) + pos := l.write + l.write += uint64(len(x)) + for len(x) > 0 { + n := copy(l.data.b[pos%uint64(len(l.data.b)):], x) + pos += uint64(n) + x = x[n:] + } +} + +//go:nosplit +func (l *debugLogWriter) varint(x int64) { + var u uint64 + if x < 0 { + u = (^uint64(x) << 1) | 1 // complement i, bit 0 is 1 + } else { + u = (uint64(x) << 1) // do not complement i, bit 0 is 0 + } + l.uvarint(u) +} + +//go:nosplit +func (l *debugLogWriter) uvarint(u uint64) { + i := 0 + for u >= 0x80 { + l.buf[i] = byte(u) | 0x80 + u >>= 7 + i++ + } + l.buf[i] = byte(u) + i++ + l.bytes(l.buf[:i]) +} + +type debugLogReader struct { + data *debugLogBuf + + // begin and end are the positions in the log of the beginning + // and end of the log data, modulo len(data). + begin, end uint64 + + // tick and nano are the current time base at begin. + tick, nano uint64 +} + +//go:nosplit +func (r *debugLogReader) skip() uint64 { + // Read size at pos. + if r.begin+debugLogHeaderSize > r.end { + return ^uint64(0) + } + size := uint64(r.readUint16LEAt(r.begin)) + if size == 0 { + // Sync packet. + r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize) + r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8) + size = debugLogSyncSize + } + if r.begin+size > r.end { + return ^uint64(0) + } + r.begin += size + return size +} + +//go:nosplit +func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 { + return uint16(r.data.b[pos%uint64(len(r.data.b))]) | + uint16(r.data.b[(pos+1)%uint64(len(r.data.b))])<<8 +} + +//go:nosplit +func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 { + var b [8]byte + for i := range b { + b[i] = r.data.b[pos%uint64(len(r.data.b))] + pos++ + } + return uint64(b[0]) | uint64(b[1])<<8 | + uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | + uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (r *debugLogReader) peek() (tick uint64) { + // Consume any sync records. + size := uint64(0) + for size == 0 { + if r.begin+debugLogHeaderSize > r.end { + return ^uint64(0) + } + size = uint64(r.readUint16LEAt(r.begin)) + if size != 0 { + break + } + if r.begin+debugLogSyncSize > r.end { + return ^uint64(0) + } + // Sync packet. + r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize) + r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8) + r.begin += debugLogSyncSize + } + + // Peek tick delta. 
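+ // (This open-codes uvarint decoding -- 7 payload bits per byte, the
+ // 0x80 bit marking continuation -- and then checks below that the
+ // varint stayed inside the record.)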
+ if r.begin+size > r.end { + return ^uint64(0) + } + pos := r.begin + debugLogHeaderSize + var u uint64 + for i := uint(0); ; i += 7 { + b := r.data.b[pos%uint64(len(r.data.b))] + pos++ + u |= uint64(b&^0x80) << i + if b&0x80 == 0 { + break + } + } + if pos > r.begin+size { + return ^uint64(0) + } + return r.tick + u +} + +func (r *debugLogReader) header() (end, tick, nano uint64, p int) { + // Read size. We've already skipped sync packets and checked + // bounds in peek. + size := uint64(r.readUint16LEAt(r.begin)) + end = r.begin + size + r.begin += debugLogHeaderSize + + // Read tick, nano, and p. + tick = r.uvarint() + r.tick + nano = r.uvarint() + r.nano + p = int(r.varint()) + + return +} + +func (r *debugLogReader) uvarint() uint64 { + var u uint64 + for i := uint(0); ; i += 7 { + b := r.data.b[r.begin%uint64(len(r.data.b))] + r.begin++ + u |= uint64(b&^0x80) << i + if b&0x80 == 0 { + break + } + } + return u +} + +func (r *debugLogReader) varint() int64 { + u := r.uvarint() + var v int64 + if u&1 == 0 { + v = int64(u >> 1) + } else { + v = ^int64(u >> 1) + } + return v +} + +func (r *debugLogReader) printVal() bool { + typ := r.data.b[r.begin%uint64(len(r.data.b))] + r.begin++ + + switch typ { + default: + print("\n") + return false + + case debugLogUnknown: + print("") + + case debugLogBoolTrue: + print(true) + + case debugLogBoolFalse: + print(false) + + case debugLogInt: + print(r.varint()) + + case debugLogUint: + print(r.uvarint()) + + case debugLogHex, debugLogPtr: + print(hex(r.uvarint())) + + case debugLogString: + sl := r.uvarint() + if r.begin+sl > r.end { + r.begin = r.end + print("") + break + } + for sl > 0 { + b := r.data.b[r.begin%uint64(len(r.data.b)):] + if uint64(len(b)) > sl { + b = b[:sl] + } + r.begin += uint64(len(b)) + sl -= uint64(len(b)) + gwrite(b) + } + + case debugLogConstString: + len, ptr := int(r.uvarint()), uintptr(r.uvarint()) + ptr += firstmoduledata.etext + // We can't use unsafe.String as it may panic, which isn't safe + // in this (potentially) nowritebarrier context. + str := stringStruct{ + str: unsafe.Pointer(ptr), + len: len, + } + s := *(*string)(unsafe.Pointer(&str)) + print(s) + + case debugLogStringOverflow: + print("..(", r.uvarint(), " more bytes)..") + + case debugLogPC: + printDebugLogPC(uintptr(r.uvarint()), false) + + case debugLogTraceback: + n := int(r.uvarint()) + for i := 0; i < n; i++ { + print("\n\t") + // gentraceback PCs are always return PCs. + // Convert them to call PCs. + // + // TODO(austin): Expand inlined frames. + printDebugLogPC(uintptr(r.uvarint()), true) + } + } + + return true +} + +// printDebugLog prints the debug log. +func printDebugLog() { + if !dlogEnabled { + return + } + + // This function should not panic or throw since it is used in + // the fatal panic path and this may deadlock. + + printlock() + + // Get the list of all debug logs. + allp := (*uintptr)(unsafe.Pointer(&allDloggers)) + all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp))) + + // Count the logs. + n := 0 + for l := all; l != nil; l = l.allLink { + n++ + } + if n == 0 { + printunlock() + return + } + + // Prepare read state for all logs. + type readState struct { + debugLogReader + first bool + lost uint64 + nextTick uint64 + } + // Use sysAllocOS instead of sysAlloc because we want to interfere + // with the runtime as little as possible, and sysAlloc updates accounting. 
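+ // (The raw block below is reinterpreted as a []readState of length n
+ // by slicing a large artificial array type.)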
+ state1 := sysAllocOS(unsafe.Sizeof(readState{}) * uintptr(n)) + if state1 == nil { + println("failed to allocate read state for", n, "logs") + printunlock() + return + } + state := (*[1 << 20]readState)(state1)[:n] + { + l := all + for i := range state { + s := &state[i] + s.debugLogReader = l.w.r + s.first = true + s.lost = l.w.r.begin + s.nextTick = s.peek() + l = l.allLink + } + } + + // Print records. + for { + // Find the next record. + var best struct { + tick uint64 + i int + } + best.tick = ^uint64(0) + for i := range state { + if state[i].nextTick < best.tick { + best.tick = state[i].nextTick + best.i = i + } + } + if best.tick == ^uint64(0) { + break + } + + // Print record. + s := &state[best.i] + if s.first { + print(">> begin log ", best.i) + if s.lost != 0 { + print("; lost first ", s.lost>>10, "KB") + } + print(" <<\n") + s.first = false + } + + end, _, nano, p := s.header() + oldEnd := s.end + s.end = end + + print("[") + var tmpbuf [21]byte + pnano := int64(nano) - runtimeInitTime + if pnano < 0 { + // Logged before runtimeInitTime was set. + pnano = 0 + } + pnanoBytes := itoaDiv(tmpbuf[:], uint64(pnano), 9) + print(slicebytetostringtmp((*byte)(noescape(unsafe.Pointer(&pnanoBytes[0]))), len(pnanoBytes))) + print(" P ", p, "] ") + + for i := 0; s.begin < s.end; i++ { + if i > 0 { + print(" ") + } + if !s.printVal() { + // Abort this P log. + print("") + end = oldEnd + break + } + } + println() + + // Move on to the next record. + s.begin = end + s.end = oldEnd + s.nextTick = s.peek() + } + + printunlock() +} + +// printDebugLogPC prints a single symbolized PC. If returnPC is true, +// pc is a return PC that must first be converted to a call PC. +func printDebugLogPC(pc uintptr, returnPC bool) { + fn := findfunc(pc) + if returnPC && (!fn.valid() || pc > fn.entry()) { + // TODO(austin): Don't back up if the previous frame + // was a sigpanic. + pc-- + } + + print(hex(pc)) + if !fn.valid() { + print(" [unknown PC]") + } else { + name := funcname(fn) + file, line := funcline(fn, pc) + print(" [", name, "+", hex(pc-fn.entry()), + " ", file, ":", line, "]") + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/debuglog_off.go b/platform/dbops/binaries/go/go/src/runtime/debuglog_off.go new file mode 100644 index 0000000000000000000000000000000000000000..fa3be39c70f83449c279caa070bc07aaa6f718c4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/debuglog_off.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !debuglog + +package runtime + +const dlogEnabled = false + +type dlogPerM struct{} + +func getCachedDlogger() *dlogger { + return nil +} + +func putCachedDlogger(l *dlogger) bool { + return false +} diff --git a/platform/dbops/binaries/go/go/src/runtime/debuglog_on.go b/platform/dbops/binaries/go/go/src/runtime/debuglog_on.go new file mode 100644 index 0000000000000000000000000000000000000000..b8150202251b36eeda9d16244e26da2aee0bab08 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/debuglog_on.go @@ -0,0 +1,45 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build debuglog + +package runtime + +const dlogEnabled = true + +// dlogPerM is the per-M debug log data. This is embedded in the m +// struct. 
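+// It caches at most one free dlogger per M, letting dlog skip the
+// global allDloggers scan in the common case.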
+type dlogPerM struct { + dlogCache *dlogger +} + +// getCachedDlogger returns a cached dlogger if it can do so +// efficiently, or nil otherwise. The returned dlogger will be owned. +func getCachedDlogger() *dlogger { + mp := acquirem() + // We don't return a cached dlogger if we're running on the + // signal stack in case the signal arrived while in + // get/putCachedDlogger. (Too bad we don't have non-atomic + // exchange!) + var l *dlogger + if getg() != mp.gsignal { + l = mp.dlogCache + mp.dlogCache = nil + } + releasem(mp) + return l +} + +// putCachedDlogger attempts to return l to the local cache. It +// returns false if this fails. +func putCachedDlogger(l *dlogger) bool { + mp := acquirem() + if getg() != mp.gsignal && mp.dlogCache == nil { + mp.dlogCache = l + releasem(mp) + return true + } + releasem(mp) + return false +} diff --git a/platform/dbops/binaries/go/go/src/runtime/debuglog_test.go b/platform/dbops/binaries/go/go/src/runtime/debuglog_test.go new file mode 100644 index 0000000000000000000000000000000000000000..18c54a81b93642a144fbca91ceeffff17570a690 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/debuglog_test.go @@ -0,0 +1,169 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO(austin): All of these tests are skipped if the debuglog build +// tag isn't provided. That means we basically never test debuglog. +// There are two potential ways around this: +// +// 1. Make these tests re-build the runtime test with the debuglog +// build tag and re-invoke themselves. +// +// 2. Always build the whole debuglog infrastructure and depend on +// linker dead-code elimination to drop it. This is easy for dlog() +// since there won't be any calls to it. For printDebugLog, we can +// make panic call a wrapper that is call printDebugLog if the +// debuglog build tag is set, or otherwise do nothing. Then tests +// could call printDebugLog directly. This is the right answer in +// principle, but currently our linker reads in all symbols +// regardless, so this would slow down and bloat all links. If the +// linker gets more efficient about this, we should revisit this +// approach. 
+ +package runtime_test + +import ( + "fmt" + "internal/testenv" + "regexp" + "runtime" + "strings" + "sync" + "sync/atomic" + "testing" +) + +func skipDebugLog(t *testing.T) { + if !runtime.DlogEnabled { + t.Skip("debug log disabled (rebuild with -tags debuglog)") + } +} + +func dlogCanonicalize(x string) string { + begin := regexp.MustCompile(`(?m)^>> begin log \d+ <<\n`) + x = begin.ReplaceAllString(x, "") + prefix := regexp.MustCompile(`(?m)^\[[^]]+\]`) + x = prefix.ReplaceAllString(x, "[]") + return x +} + +func TestDebugLog(t *testing.T) { + skipDebugLog(t) + runtime.ResetDebugLog() + runtime.Dlog().S("testing").End() + got := dlogCanonicalize(runtime.DumpDebugLog()) + if want := "[] testing\n"; got != want { + t.Fatalf("want %q, got %q", want, got) + } +} + +func TestDebugLogTypes(t *testing.T) { + skipDebugLog(t) + runtime.ResetDebugLog() + var varString = strings.Repeat("a", 4) + runtime.Dlog().B(true).B(false).I(-42).I16(0x7fff).U64(^uint64(0)).Hex(0xfff).P(nil).S(varString).S("const string").End() + got := dlogCanonicalize(runtime.DumpDebugLog()) + if want := "[] true false -42 32767 18446744073709551615 0xfff 0x0 aaaa const string\n"; got != want { + t.Fatalf("want %q, got %q", want, got) + } +} + +func TestDebugLogSym(t *testing.T) { + skipDebugLog(t) + runtime.ResetDebugLog() + pc, _, _, _ := runtime.Caller(0) + runtime.Dlog().PC(pc).End() + got := dlogCanonicalize(runtime.DumpDebugLog()) + want := regexp.MustCompile(`\[\] 0x[0-9a-f]+ \[runtime_test\.TestDebugLogSym\+0x[0-9a-f]+ .*/debuglog_test\.go:[0-9]+\]\n`) + if !want.MatchString(got) { + t.Fatalf("want matching %s, got %q", want, got) + } +} + +func TestDebugLogInterleaving(t *testing.T) { + skipDebugLog(t) + runtime.ResetDebugLog() + var wg sync.WaitGroup + done := int32(0) + wg.Add(1) + go func() { + // Encourage main goroutine to move around to + // different Ms and Ps. + for atomic.LoadInt32(&done) == 0 { + runtime.Gosched() + } + wg.Done() + }() + var want strings.Builder + for i := 0; i < 1000; i++ { + runtime.Dlog().I(i).End() + fmt.Fprintf(&want, "[] %d\n", i) + runtime.Gosched() + } + atomic.StoreInt32(&done, 1) + wg.Wait() + + gotFull := runtime.DumpDebugLog() + got := dlogCanonicalize(gotFull) + if got != want.String() { + // Since the timestamps are useful in understand + // failures of this test, we print the uncanonicalized + // output. + t.Fatalf("want %q, got (uncanonicalized) %q", want.String(), gotFull) + } +} + +func TestDebugLogWraparound(t *testing.T) { + skipDebugLog(t) + + // Make sure we don't switch logs so it's easier to fill one up. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + runtime.ResetDebugLog() + var longString = strings.Repeat("a", 128) + var want strings.Builder + for i, j := 0, 0; j < 2*runtime.DebugLogBytes; i, j = i+1, j+len(longString) { + runtime.Dlog().I(i).S(longString).End() + fmt.Fprintf(&want, "[] %d %s\n", i, longString) + } + log := runtime.DumpDebugLog() + + // Check for "lost" message. + lost := regexp.MustCompile(`^>> begin log \d+; lost first \d+KB <<\n`) + if !lost.MatchString(log) { + t.Fatalf("want matching %s, got %q", lost, log) + } + idx := lost.FindStringIndex(log) + // Strip lost message. + log = dlogCanonicalize(log[idx[1]:]) + + // Check log. 
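+ // Since the ring buffer wrapped, whatever survives must be a suffix
+ // of everything that was written.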
+ if !strings.HasSuffix(want.String(), log) { + t.Fatalf("wrong suffix:\n%s", log) + } +} + +func TestDebugLogLongString(t *testing.T) { + skipDebugLog(t) + + runtime.ResetDebugLog() + var longString = strings.Repeat("a", runtime.DebugLogStringLimit+1) + runtime.Dlog().S(longString).End() + got := dlogCanonicalize(runtime.DumpDebugLog()) + want := "[] " + strings.Repeat("a", runtime.DebugLogStringLimit) + " ..(1 more bytes)..\n" + if got != want { + t.Fatalf("want %q, got %q", want, got) + } +} + +// TestDebugLogBuild verifies that the runtime builds with -tags=debuglog. +func TestDebugLogBuild(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // It doesn't matter which program we build, anything will rebuild the + // runtime. + if _, err := buildTestProg(t, "testprog", "-tags=debuglog"); err != nil { + t.Fatal(err) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defer_test.go b/platform/dbops/binaries/go/go/src/runtime/defer_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d73202ae6a01091116d06a8a861cd8bd508b5729 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defer_test.go @@ -0,0 +1,517 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "reflect" + "runtime" + "testing" +) + +// Make sure open-coded defer exit code is not lost, even when there is an +// unconditional panic (hence no return from the function) +func TestUnconditionalPanic(t *testing.T) { + defer func() { + if recover() != "testUnconditional" { + t.Fatal("expected unconditional panic") + } + }() + panic("testUnconditional") +} + +var glob int = 3 + +// Test an open-coded defer and non-open-coded defer - make sure both defers run +// and call recover() +func TestOpenAndNonOpenDefers(t *testing.T) { + for { + // Non-open defer because in a loop + defer func(n int) { + if recover() != "testNonOpenDefer" { + t.Fatal("expected testNonOpen panic") + } + }(3) + if glob > 2 { + break + } + } + testOpen(t, 47) + panic("testNonOpenDefer") +} + +//go:noinline +func testOpen(t *testing.T, arg int) { + defer func(n int) { + if recover() != "testOpenDefer" { + t.Fatal("expected testOpen panic") + } + }(4) + if arg > 2 { + panic("testOpenDefer") + } +} + +// Test a non-open-coded defer and an open-coded defer - make sure both defers run +// and call recover() +func TestNonOpenAndOpenDefers(t *testing.T) { + testOpen(t, 47) + for { + // Non-open defer because in a loop + defer func(n int) { + if recover() != "testNonOpenDefer" { + t.Fatal("expected testNonOpen panic") + } + }(3) + if glob > 2 { + break + } + } + panic("testNonOpenDefer") +} + +var list []int + +// Make sure that conditional open-coded defers are activated correctly and run in +// the correct order. 
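+//
+// (Editor's sketch, not part of the original change: defers run
+// last-in-first-out, so with n = 8 below the appends are deferred in
+// the order 1, 2, 4 and run in the order 4, 2, 1. In miniature:
+//
+//	func f() (order []int) {
+//		defer func() { order = append(order, 1) }()
+//		defer func() { order = append(order, 2) }()
+//		defer func() { order = append(order, 4) }()
+//		return // order ends up as [4 2 1]
+//	}
+//
+// which matches the expected list in the test.)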
+func TestConditionalDefers(t *testing.T) { + list = make([]int, 0, 10) + + defer func() { + if recover() != "testConditional" { + t.Fatal("expected panic") + } + want := []int{4, 2, 1} + if !reflect.DeepEqual(want, list) { + t.Fatalf("wanted %v, got %v", want, list) + } + + }() + testConditionalDefers(8) +} + +func testConditionalDefers(n int) { + doappend := func(i int) { + list = append(list, i) + } + + defer doappend(1) + if n > 5 { + defer doappend(2) + if n > 8 { + defer doappend(3) + } else { + defer doappend(4) + } + } + panic("testConditional") +} + +// Test that there is no compile-time or run-time error if an open-coded defer +// call is removed by constant propagation and dead-code elimination. +func TestDisappearingDefer(t *testing.T) { + switch runtime.GOOS { + case "invalidOS": + defer func() { + t.Fatal("Defer shouldn't run") + }() + } +} + +// This tests an extra recursive panic behavior that is only specified in the +// code. Suppose a first panic P1 happens and starts processing defer calls. If a +// second panic P2 happens while processing defer call D in frame F, then defer +// call processing is restarted (with some potentially new defer calls created by +// D or its callees). If the defer processing reaches the started defer call D +// again in the defer stack, then the original panic P1 is aborted and cannot +// continue panic processing or be recovered. If the panic P2 does a recover at +// some point, it will naturally remove the original panic P1 from the stack +// (since the original panic had to be in frame F or a descendant of F). +func TestAbortedPanic(t *testing.T) { + defer func() { + r := recover() + if r != nil { + t.Fatalf("wanted nil recover, got %v", r) + } + }() + defer func() { + r := recover() + if r != "panic2" { + t.Fatalf("wanted %v, got %v", "panic2", r) + } + }() + defer func() { + panic("panic2") + }() + panic("panic1") +} + +// This tests that recover() does not succeed unless it is called directly from a +// defer function that is directly called by the panic. Here, we first call it +// from a defer function that is created by the defer function called directly by +// the panic. In +func TestRecoverMatching(t *testing.T) { + defer func() { + r := recover() + if r != "panic1" { + t.Fatalf("wanted %v, got %v", "panic1", r) + } + }() + defer func() { + defer func() { + // Shouldn't succeed, even though it is called directly + // from a defer function, since this defer function was + // not directly called by the panic. + r := recover() + if r != nil { + t.Fatalf("wanted nil recover, got %v", r) + } + }() + }() + panic("panic1") +} + +type nonSSAable [128]byte + +type bigStruct struct { + x, y, z, w, p, q int64 +} + +type containsBigStruct struct { + element bigStruct +} + +func mknonSSAable() nonSSAable { + globint1++ + return nonSSAable{0, 0, 0, 0, 5} +} + +var globint1, globint2, globint3 int + +//go:noinline +func sideeffect(n int64) int64 { + globint2++ + return n +} + +func sideeffect2(in containsBigStruct) containsBigStruct { + globint3++ + return in +} + +// Test that nonSSAable arguments to defer are handled correctly and only evaluated once. 
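+//
+// (Editor's sketch, not part of the original change: arguments to a
+// deferred call are evaluated once, at the defer statement, not when
+// the deferred function runs. In miniature:
+//
+//	func g() (got int) {
+//		x := 1
+//		defer func(v int) { got = v }(x) // v is evaluated here: 1
+//		x = 2
+//		return // got ends up 1, not 2
+//	}
+//
+// The test below checks the same property for non-SSAable values.)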
+func TestNonSSAableArgs(t *testing.T) { + globint1 = 0 + globint2 = 0 + globint3 = 0 + var save1 byte + var save2 int64 + var save3 int64 + var save4 int64 + + defer func() { + if globint1 != 1 { + t.Fatalf("globint1: wanted: 1, got %v", globint1) + } + if save1 != 5 { + t.Fatalf("save1: wanted: 5, got %v", save1) + } + if globint2 != 1 { + t.Fatalf("globint2: wanted: 1, got %v", globint2) + } + if save2 != 2 { + t.Fatalf("save2: wanted: 2, got %v", save2) + } + if save3 != 4 { + t.Fatalf("save3: wanted: 4, got %v", save3) + } + if globint3 != 1 { + t.Fatalf("globint3: wanted: 1, got %v", globint3) + } + if save4 != 4 { + t.Fatalf("save1: wanted: 4, got %v", save4) + } + }() + + // Test function returning a non-SSAable arg + defer func(n nonSSAable) { + save1 = n[4] + }(mknonSSAable()) + // Test composite literal that is not SSAable + defer func(b bigStruct) { + save2 = b.y + }(bigStruct{1, 2, 3, 4, 5, sideeffect(6)}) + + // Test struct field reference that is non-SSAable + foo := containsBigStruct{} + foo.element.z = 4 + defer func(element bigStruct) { + save3 = element.z + }(foo.element) + defer func(element bigStruct) { + save4 = element.z + }(sideeffect2(foo).element) +} + +//go:noinline +func doPanic() { + panic("Test panic") +} + +func TestDeferForFuncWithNoExit(t *testing.T) { + cond := 1 + defer func() { + if cond != 2 { + t.Fatalf("cond: wanted 2, got %v", cond) + } + if recover() != "Test panic" { + t.Fatal("Didn't find expected panic") + } + }() + x := 0 + // Force a stack copy, to make sure that the &cond pointer passed to defer + // function is properly updated. + growStackIter(&x, 1000) + cond = 2 + doPanic() + + // This function has no exit/return, since it ends with an infinite loop + for { + } +} + +// Test case approximating issue #37664, where a recursive function (interpreter) +// may do repeated recovers/re-panics until it reaches the frame where the panic +// can actually be handled. The recurseFnPanicRec() function is testing that there +// are no stale defer structs on the defer chain after the interpreter() sequence, +// by writing a bunch of 0xffffffffs into several recursive stack frames, and then +// doing a single panic-recover which would invoke any such stale defer structs. 
+func TestDeferWithRepeatedRepanics(t *testing.T) { + interpreter(0, 6, 2) + recurseFnPanicRec(0, 10) + interpreter(0, 5, 1) + recurseFnPanicRec(0, 10) + interpreter(0, 6, 3) + recurseFnPanicRec(0, 10) +} + +func interpreter(level int, maxlevel int, rec int) { + defer func() { + e := recover() + if e == nil { + return + } + if level != e.(int) { + //fmt.Fprintln(os.Stderr, "re-panicing, level", level) + panic(e) + } + //fmt.Fprintln(os.Stderr, "Recovered, level", level) + }() + if level+1 < maxlevel { + interpreter(level+1, maxlevel, rec) + } else { + //fmt.Fprintln(os.Stderr, "Initiating panic") + panic(rec) + } +} + +func recurseFnPanicRec(level int, maxlevel int) { + defer func() { + recover() + }() + recurseFn(level, maxlevel) +} + +var saveInt uint32 + +func recurseFn(level int, maxlevel int) { + a := [40]uint32{0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff} + if level+1 < maxlevel { + // Make sure a array is referenced, so it is not optimized away + saveInt = a[4] + recurseFn(level+1, maxlevel) + } else { + panic("recurseFn panic") + } +} + +// Try to reproduce issue #37688, where a pointer to an open-coded defer struct is +// mistakenly held, and that struct keeps a pointer to a stack-allocated defer +// struct, and that stack-allocated struct gets overwritten or the stack gets +// moved, so a memory error happens on GC. +func TestIssue37688(t *testing.T) { + for j := 0; j < 10; j++ { + g2() + g3() + } +} + +type foo struct { +} + +//go:noinline +func (f *foo) method1() { +} + +//go:noinline +func (f *foo) method2() { +} + +func g2() { + var a foo + ap := &a + // The loop forces this defer to be heap-allocated and the remaining two + // to be stack-allocated. + for i := 0; i < 1; i++ { + defer ap.method1() + } + defer ap.method2() + defer ap.method1() + ff1(ap, 1, 2, 3, 4, 5, 6, 7, 8, 9) + // Try to get the stack to be moved by growing it too large, so + // existing stack-allocated defer becomes invalid. 
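+ // (rec1 recurses 2000 frames deep, which is intended to force at
+ // least one stack growth and copy.)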
+ rec1(2000) +} + +func g3() { + // Mix up the stack layout by adding in an extra function frame + g2() +} + +var globstruct struct { + a, b, c, d, e, f, g, h, i int +} + +func ff1(ap *foo, a, b, c, d, e, f, g, h, i int) { + defer ap.method1() + + // Make a defer that has a very large set of args, hence big size for the + // defer record for the open-coded frame (which means it won't use the + // defer pool) + defer func(ap *foo, a, b, c, d, e, f, g, h, i int) { + if v := recover(); v != nil { + } + globstruct.a = a + globstruct.b = b + globstruct.c = c + globstruct.d = d + globstruct.e = e + globstruct.f = f + globstruct.g = g + globstruct.h = h + }(ap, a, b, c, d, e, f, g, h, i) + panic("ff1 panic") +} + +func rec1(max int) { + if max > 0 { + rec1(max - 1) + } +} + +func TestIssue43921(t *testing.T) { + defer func() { + expect(t, 1, recover()) + }() + func() { + // Prevent open-coded defers + for { + defer func() {}() + break + } + + defer func() { + defer func() { + expect(t, 4, recover()) + }() + panic(4) + }() + panic(1) + + }() +} + +func expect(t *testing.T, n int, err any) { + if n != err { + t.Fatalf("have %v, want %v", err, n) + } +} + +func TestIssue43920(t *testing.T) { + var steps int + + defer func() { + expect(t, 1, recover()) + }() + defer func() { + defer func() { + defer func() { + expect(t, 5, recover()) + }() + defer panic(5) + func() { + panic(4) + }() + }() + defer func() { + expect(t, 3, recover()) + }() + defer panic(3) + }() + func() { + defer step(t, &steps, 1) + panic(1) + }() +} + +func step(t *testing.T, steps *int, want int) { + *steps++ + if *steps != want { + t.Fatalf("have %v, want %v", *steps, want) + } +} + +func TestIssue43941(t *testing.T) { + var steps int = 7 + defer func() { + step(t, &steps, 14) + expect(t, 4, recover()) + }() + func() { + func() { + defer func() { + defer func() { + expect(t, 3, recover()) + }() + defer panic(3) + panic(2) + }() + defer func() { + expect(t, 1, recover()) + }() + defer panic(1) + }() + defer func() {}() + defer func() {}() + defer step(t, &steps, 10) + defer step(t, &steps, 9) + step(t, &steps, 8) + }() + func() { + defer step(t, &steps, 13) + defer step(t, &steps, 12) + func() { + defer step(t, &steps, 11) + panic(4) + }() + + // Code below isn't executed, + // but removing it breaks the test case. + defer func() {}() + defer panic(-1) + defer step(t, &steps, -1) + defer step(t, &steps, -1) + defer func() {}() + }() +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs1_linux.go b/platform/dbops/binaries/go/go/src/runtime/defs1_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..709f19e599910d2f8035a9956ef422cbdfa6f375 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs1_linux.go @@ -0,0 +1,40 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+//go:build ignore
+
+/*
+Input to cgo -cdefs
+
+GOARCH=amd64 cgo -cdefs defs.go defs1.go >amd64/defs.h
+*/
+
+package runtime
+
+/*
+#include <ucontext.h>
+#include <fcntl.h>
+#include <asm/signal.h>
+*/
+import "C"
+
+const (
+ O_RDONLY = C.O_RDONLY
+ O_NONBLOCK = C.O_NONBLOCK
+ O_CLOEXEC = C.O_CLOEXEC
+ SA_RESTORER = C.SA_RESTORER
+)
+
+type Usigset C.__sigset_t
+type Fpxreg C.struct__libc_fpxreg
+type Xmmreg C.struct__libc_xmmreg
+type Fpstate C.struct__libc_fpstate
+type Fpxreg1 C.struct__fpxreg
+type Xmmreg1 C.struct__xmmreg
+type Fpstate1 C.struct__fpstate
+type Fpreg1 C.struct__fpreg
+type StackT C.stack_t
+type Mcontext C.mcontext_t
+type Ucontext C.ucontext_t
+type Sigcontext C.struct_sigcontext
diff --git a/platform/dbops/binaries/go/go/src/runtime/defs1_netbsd_386.go b/platform/dbops/binaries/go/go/src/runtime/defs1_netbsd_386.go
new file mode 100644
index 0000000000000000000000000000000000000000..f7fe45b4ab64924ee704bd96dd7200d6c3a2be70
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/defs1_netbsd_386.go
@@ -0,0 +1,183 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_netbsd.go defs_netbsd_386.go
+
+package runtime
+
+const (
+ _EINTR = 0x4
+ _EFAULT = 0xe
+ _EAGAIN = 0x23
+
+ _O_WRONLY = 0x1
+ _O_NONBLOCK = 0x4
+ _O_CREAT = 0x200
+ _O_TRUNC = 0x400
+ _O_CLOEXEC = 0x400000
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x1000
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_DONTNEED = 0x4
+ _MADV_FREE = 0x6
+
+ _SA_SIGINFO = 0x40
+ _SA_RESTART = 0x2
+ _SA_ONSTACK = 0x1
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x10
+ _SIGSTOP = 0x11
+ _SIGTSTP = 0x12
+ _SIGCONT = 0x13
+ _SIGCHLD = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGIO = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGINFO = 0x1d
+ _SIGUSR1 = 0x1e
+ _SIGUSR2 = 0x1f
+
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EV_ADD = 0x1
+ _EV_DELETE = 0x2
+ _EV_CLEAR = 0x20
+ _EV_RECEIPT = 0
+ _EV_ERROR = 0x4000
+ _EV_EOF = 0x8000
+ _EVFILT_READ = 0x0
+ _EVFILT_WRITE = 0x1
+)
+
+type sigset struct {
+ __bits [4]uint32
+}
+
+type siginfo struct {
+ _signo int32
+ _code int32
+ _errno int32
+ _reason [20]byte
+}
+
+type stackt struct {
+ ss_sp uintptr
+ ss_size uintptr
+ ss_flags int32
+}
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int32
+}
+
+//go:nosplit
+func (ts *timespec) setNsec(ns int64) {
+ ts.tv_sec = int64(timediv(ns, 1e9, &ts.tv_nsec))
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int32
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = x
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type mcontextt struct {
+ __gregs [19]uint32
+ __fpregs [644]byte
+ _mc_tlsbase int32
+}
+
+type ucontextt struct {
+ uc_flags uint32
+ uc_link *ucontextt
+ uc_sigmask sigset
+ uc_stack stackt
+ uc_mcontext mcontextt
+ __uc_pad [4]int32
+}
+
+type keventt struct {
+ ident uint32
+ filter uint32
+ flags uint32
+ fflags uint32
+ data int64
+ udata *byte
+}
+
+// created by cgo 
-cdefs and then converted to Go +// cgo -cdefs defs_netbsd.go defs_netbsd_386.go + +const ( + _REG_GS = 0x0 + _REG_FS = 0x1 + _REG_ES = 0x2 + _REG_DS = 0x3 + _REG_EDI = 0x4 + _REG_ESI = 0x5 + _REG_EBP = 0x6 + _REG_ESP = 0x7 + _REG_EBX = 0x8 + _REG_EDX = 0x9 + _REG_ECX = 0xa + _REG_EAX = 0xb + _REG_TRAPNO = 0xc + _REG_ERR = 0xd + _REG_EIP = 0xe + _REG_CS = 0xf + _REG_EFL = 0x10 + _REG_UESP = 0x11 + _REG_SS = 0x12 +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs1_netbsd_amd64.go b/platform/dbops/binaries/go/go/src/runtime/defs1_netbsd_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..80908cd93185c2d4d598bf87b9e68acf88802fb1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs1_netbsd_amd64.go @@ -0,0 +1,195 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_netbsd.go defs_netbsd_amd64.go + +package runtime + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x400000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x6 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_RECEIPT = 0 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = 0x0 + _EVFILT_WRITE = 0x1 +) + +type sigset struct { + __bits [4]uint32 +} + +type siginfo struct { + _signo int32 + _code int32 + _errno int32 + _pad int32 + _reason [24]byte +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int32 + pad_cgo_0 [4]byte +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type mcontextt struct { + __gregs [26]uint64 + _mc_tlsbase uint64 + __fpregs [512]int8 +} + +type ucontextt struct { + uc_flags uint32 + pad_cgo_0 [4]byte + uc_link *ucontextt + uc_sigmask sigset + uc_stack stackt + uc_mcontext mcontextt +} + +type keventt struct { + ident uint64 + filter uint32 + flags uint32 + fflags uint32 + pad_cgo_0 [4]byte + data int64 + udata *byte +} + +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_netbsd.go defs_netbsd_amd64.go + +const ( + _REG_RDI = 0x0 + _REG_RSI = 0x1 + _REG_RDX = 0x2 + _REG_RCX = 0x3 + _REG_R8 = 0x4 + 
_REG_R9 = 0x5 + _REG_R10 = 0x6 + _REG_R11 = 0x7 + _REG_R12 = 0x8 + _REG_R13 = 0x9 + _REG_R14 = 0xa + _REG_R15 = 0xb + _REG_RBP = 0xc + _REG_RBX = 0xd + _REG_RAX = 0xe + _REG_GS = 0xf + _REG_FS = 0x10 + _REG_ES = 0x11 + _REG_DS = 0x12 + _REG_TRAPNO = 0x13 + _REG_ERR = 0x14 + _REG_RIP = 0x15 + _REG_CS = 0x16 + _REG_RFLAGS = 0x17 + _REG_RSP = 0x18 + _REG_SS = 0x19 +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs1_netbsd_arm.go b/platform/dbops/binaries/go/go/src/runtime/defs1_netbsd_arm.go new file mode 100644 index 0000000000000000000000000000000000000000..c63e592ff14573c10df284c04ac31103b962c299 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs1_netbsd_arm.go @@ -0,0 +1,188 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_netbsd.go defs_netbsd_arm.go + +package runtime + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x400000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x6 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_RECEIPT = 0 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = 0x0 + _EVFILT_WRITE = 0x1 +) + +type sigset struct { + __bits [4]uint32 +} + +type siginfo struct { + _signo int32 + _code int32 + _errno int32 + _reason uintptr + _reasonx [16]byte +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 +} + +type timespec struct { + tv_sec int64 + tv_nsec int32 + _ [4]byte // EABI +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = int64(timediv(ns, 1e9, &ts.tv_nsec)) +} + +type timeval struct { + tv_sec int64 + tv_usec int32 + _ [4]byte // EABI +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type mcontextt struct { + __gregs [17]uint32 + _ [4]byte // EABI + __fpu [272]byte // EABI + _mc_tlsbase uint32 + _ [4]byte // EABI +} + +type ucontextt struct { + uc_flags uint32 + uc_link *ucontextt + uc_sigmask sigset + uc_stack stackt + _ [4]byte // EABI + uc_mcontext mcontextt + __uc_pad [2]int32 +} + +type keventt struct { + ident uint32 + filter uint32 + flags uint32 + fflags uint32 + data int64 + udata *byte + _ [4]byte // EABI +} + +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_netbsd.go defs_netbsd_arm.go + +const ( + _REG_R0 = 0x0 + _REG_R1 = 0x1 + _REG_R2 = 0x2 + _REG_R3 = 0x3 + _REG_R4 = 
0x4 + _REG_R5 = 0x5 + _REG_R6 = 0x6 + _REG_R7 = 0x7 + _REG_R8 = 0x8 + _REG_R9 = 0x9 + _REG_R10 = 0xa + _REG_R11 = 0xb + _REG_R12 = 0xc + _REG_R13 = 0xd + _REG_R14 = 0xe + _REG_R15 = 0xf + _REG_CPSR = 0x10 +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs1_netbsd_arm64.go b/platform/dbops/binaries/go/go/src/runtime/defs1_netbsd_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..804b5b0b3f6738360d1288674acdb635b60e37f2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs1_netbsd_arm64.go @@ -0,0 +1,203 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_netbsd.go defs_netbsd_arm.go + +package runtime + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x400000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x6 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_RECEIPT = 0 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = 0x0 + _EVFILT_WRITE = 0x1 +) + +type sigset struct { + __bits [4]uint32 +} + +type siginfo struct { + _signo int32 + _code int32 + _errno int32 + _reason uintptr + _reasonx [16]byte +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int32 + _ [4]byte // EABI +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type mcontextt struct { + __gregs [35]uint64 + __fregs [4160]byte // _NFREG * 128 + 32 + 32 + _ [8]uint64 // future use +} + +type ucontextt struct { + uc_flags uint32 + uc_link *ucontextt + uc_sigmask sigset + uc_stack stackt + _ [4]byte // EABI + uc_mcontext mcontextt + __uc_pad [2]int32 +} + +type keventt struct { + ident uint64 + filter uint32 + flags uint32 + fflags uint32 + pad_cgo_0 [4]byte + data int64 + udata *byte +} + +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_netbsd.go defs_netbsd_arm.go + +const ( + _REG_X0 = 0 + _REG_X1 = 1 + _REG_X2 = 2 + _REG_X3 = 3 + _REG_X4 = 4 + _REG_X5 = 5 + _REG_X6 = 6 + _REG_X7 = 7 + _REG_X8 = 8 + _REG_X9 = 9 + _REG_X10 = 10 + _REG_X11 = 11 + _REG_X12 = 12 + _REG_X13 = 13 + _REG_X14 = 14 + _REG_X15 = 15 + _REG_X16 = 16 + _REG_X17 = 17 + _REG_X18 = 
18 + _REG_X19 = 19 + _REG_X20 = 20 + _REG_X21 = 21 + _REG_X22 = 22 + _REG_X23 = 23 + _REG_X24 = 24 + _REG_X25 = 25 + _REG_X26 = 26 + _REG_X27 = 27 + _REG_X28 = 28 + _REG_X29 = 29 + _REG_X30 = 30 + _REG_X31 = 31 + _REG_ELR = 32 + _REG_SPSR = 33 + _REG_TPIDR = 34 +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs1_solaris_amd64.go b/platform/dbops/binaries/go/go/src/runtime/defs1_solaris_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..9ebe5bb03e5f0df1a1a6efd4b6a086fe710d670a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs1_solaris_amd64.go @@ -0,0 +1,250 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_solaris.go defs_solaris_amd64.go + +package runtime + +const ( + _EINTR = 0x4 + _EBADF = 0x9 + _EFAULT = 0xe + _EAGAIN = 0xb + _EBUSY = 0x10 + _ETIME = 0x3e + _ETIMEDOUT = 0x91 + _EWOULDBLOCK = 0xb + _EINPROGRESS = 0x96 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x100 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x5 + + _SA_SIGINFO = 0x8 + _SA_RESTART = 0x4 + _SA_ONSTACK = 0x1 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x15 + _SIGSTOP = 0x17 + _SIGTSTP = 0x18 + _SIGCONT = 0x19 + _SIGCHLD = 0x12 + _SIGTTIN = 0x1a + _SIGTTOU = 0x1b + _SIGIO = 0x16 + _SIGXCPU = 0x1e + _SIGXFSZ = 0x1f + _SIGVTALRM = 0x1c + _SIGPROF = 0x1d + _SIGWINCH = 0x14 + _SIGUSR1 = 0x10 + _SIGUSR2 = 0x11 + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + __SC_PAGESIZE = 0xb + __SC_NPROCESSORS_ONLN = 0xf + + _PTHREAD_CREATE_DETACHED = 0x40 + + _FORK_NOSIGCHLD = 0x1 + _FORK_WAITPID = 0x2 + + _MAXHOSTNAMELEN = 0x100 + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x80 + _O_TRUNC = 0x200 + _O_CREAT = 0x100 + _O_CLOEXEC = 0x800000 + + _POLLIN = 0x1 + _POLLOUT = 0x4 + _POLLHUP = 0x10 + _POLLERR = 0x8 + + _PORT_SOURCE_FD = 0x4 + _PORT_SOURCE_ALERT = 0x5 + _PORT_ALERT_UPDATE = 0x2 +) + +type semt struct { + sem_count uint32 + sem_type uint16 + sem_magic uint16 + sem_pad1 [3]uint64 + sem_pad2 [2]uint64 +} + +type sigset struct { + __sigbits [4]uint32 +} + +type stackt struct { + ss_sp *byte + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte +} + +type siginfo struct { + si_signo int32 + si_code int32 + si_errno int32 + si_pad int32 + __data [240]byte +} + +type sigactiont struct { + sa_flags int32 + pad_cgo_0 [4]byte + _funcptr [8]byte + sa_mask sigset +} + +type fpregset struct { + fp_reg_set [528]byte +} + +type mcontext struct { + gregs [28]int64 + fpregs fpregset +} + +type ucontext struct { + uc_flags uint64 + uc_link *ucontext + uc_sigmask sigset + uc_stack stackt + pad_cgo_0 [8]byte + uc_mcontext mcontext + uc_filler [5]int64 + pad_cgo_1 [8]byte +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type itimerval struct { + 
it_interval timeval + it_value timeval +} + +type portevent struct { + portev_events int32 + portev_source uint16 + portev_pad uint16 + portev_object uint64 + portev_user *byte +} + +type pthread uint32 +type pthreadattr struct { + __pthread_attrp *byte +} + +type stat struct { + st_dev uint64 + st_ino uint64 + st_mode uint32 + st_nlink uint32 + st_uid uint32 + st_gid uint32 + st_rdev uint64 + st_size int64 + st_atim timespec + st_mtim timespec + st_ctim timespec + st_blksize int32 + pad_cgo_0 [4]byte + st_blocks int64 + st_fstype [16]int8 +} + +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_solaris.go defs_solaris_amd64.go + +const ( + _REG_RDI = 0x8 + _REG_RSI = 0x9 + _REG_RDX = 0xc + _REG_RCX = 0xd + _REG_R8 = 0x7 + _REG_R9 = 0x6 + _REG_R10 = 0x5 + _REG_R11 = 0x4 + _REG_R12 = 0x3 + _REG_R13 = 0x2 + _REG_R14 = 0x1 + _REG_R15 = 0x0 + _REG_RBP = 0xa + _REG_RBX = 0xb + _REG_RAX = 0xe + _REG_GS = 0x17 + _REG_FS = 0x16 + _REG_ES = 0x18 + _REG_DS = 0x19 + _REG_TRAPNO = 0xf + _REG_ERR = 0x10 + _REG_RIP = 0x11 + _REG_CS = 0x12 + _REG_RFLAGS = 0x13 + _REG_RSP = 0x14 + _REG_SS = 0x15 +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs2_linux.go b/platform/dbops/binaries/go/go/src/runtime/defs2_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..5d6730a7ad7fd8cf981d294e7c84f4eab148d986 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs2_linux.go @@ -0,0 +1,138 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* + * Input to cgo -cdefs + +GOARCH=386 go tool cgo -cdefs defs2_linux.go >defs_linux_386.h + +The asm header tricks we have to use for Linux on amd64 +(see defs.c and defs1.c) don't work here, so this is yet another +file. Sigh. +*/ + +package runtime + +/* +#cgo CFLAGS: -I/tmp/linux/arch/x86/include -I/tmp/linux/include -D_LOOSE_KERNEL_NAMES -D__ARCH_SI_UID_T=__kernel_uid32_t + +#define size_t __kernel_size_t +#define pid_t int +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// This is the sigaction structure from the Linux 2.1.68 kernel which +// is used with the rt_sigaction system call. For 386 this is not +// defined in any public header file. 
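+//
+// An illustrative call sequence (a sketch only; my_handler and
+// my_sigreturn_tramp are placeholder names, not part of these defs): the
+// runtime fills one of these in and hands it directly to the raw system
+// call, passing sizeof(sa.sa_mask) as the final argument that rt_sigaction
+// requires:
+//
+//	struct kernel_sigaction sa;
+//	sa.k_sa_handler = my_handler;                // placeholder handler
+//	sa.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTORER | SA_RESTART;
+//	sa.sa_restorer = my_sigreturn_tramp;         // placeholder trampoline
+//	sa.sa_mask = ~0ULL;                          // block all signals in the handler
+//	syscall(SYS_rt_sigaction, signo, &sa, 0, 8); // 8 == sizeof(sa.sa_mask)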
+ +struct kernel_sigaction { + __sighandler_t k_sa_handler; + unsigned long sa_flags; + void (*sa_restorer) (void); + unsigned long long sa_mask; +}; +*/ +import "C" + +const ( + EINTR = C.EINTR + EAGAIN = C.EAGAIN + ENOMEM = C.ENOMEM + + PROT_NONE = C.PROT_NONE + PROT_READ = C.PROT_READ + PROT_WRITE = C.PROT_WRITE + PROT_EXEC = C.PROT_EXEC + + MAP_ANON = C.MAP_ANONYMOUS + MAP_PRIVATE = C.MAP_PRIVATE + MAP_FIXED = C.MAP_FIXED + + MADV_DONTNEED = C.MADV_DONTNEED + MADV_FREE = C.MADV_FREE + MADV_HUGEPAGE = C.MADV_HUGEPAGE + MADV_NOHUGEPAGE = C.MADV_NOHUGEPAGE + + SA_RESTART = C.SA_RESTART + SA_ONSTACK = C.SA_ONSTACK + SA_RESTORER = C.SA_RESTORER + SA_SIGINFO = C.SA_SIGINFO + + SIGHUP = C.SIGHUP + SIGINT = C.SIGINT + SIGQUIT = C.SIGQUIT + SIGILL = C.SIGILL + SIGTRAP = C.SIGTRAP + SIGABRT = C.SIGABRT + SIGBUS = C.SIGBUS + SIGFPE = C.SIGFPE + SIGKILL = C.SIGKILL + SIGUSR1 = C.SIGUSR1 + SIGSEGV = C.SIGSEGV + SIGUSR2 = C.SIGUSR2 + SIGPIPE = C.SIGPIPE + SIGALRM = C.SIGALRM + SIGSTKFLT = C.SIGSTKFLT + SIGCHLD = C.SIGCHLD + SIGCONT = C.SIGCONT + SIGSTOP = C.SIGSTOP + SIGTSTP = C.SIGTSTP + SIGTTIN = C.SIGTTIN + SIGTTOU = C.SIGTTOU + SIGURG = C.SIGURG + SIGXCPU = C.SIGXCPU + SIGXFSZ = C.SIGXFSZ + SIGVTALRM = C.SIGVTALRM + SIGPROF = C.SIGPROF + SIGWINCH = C.SIGWINCH + SIGIO = C.SIGIO + SIGPWR = C.SIGPWR + SIGSYS = C.SIGSYS + + FPE_INTDIV = C.FPE_INTDIV + FPE_INTOVF = C.FPE_INTOVF + FPE_FLTDIV = C.FPE_FLTDIV + FPE_FLTOVF = C.FPE_FLTOVF + FPE_FLTUND = C.FPE_FLTUND + FPE_FLTRES = C.FPE_FLTRES + FPE_FLTINV = C.FPE_FLTINV + FPE_FLTSUB = C.FPE_FLTSUB + + BUS_ADRALN = C.BUS_ADRALN + BUS_ADRERR = C.BUS_ADRERR + BUS_OBJERR = C.BUS_OBJERR + + SEGV_MAPERR = C.SEGV_MAPERR + SEGV_ACCERR = C.SEGV_ACCERR + + ITIMER_REAL = C.ITIMER_REAL + ITIMER_VIRTUAL = C.ITIMER_VIRTUAL + ITIMER_PROF = C.ITIMER_PROF + + O_RDONLY = C.O_RDONLY + O_CLOEXEC = C.O_CLOEXEC +) + +type Fpreg C.struct__fpreg +type Fpxreg C.struct__fpxreg +type Xmmreg C.struct__xmmreg +type Fpstate C.struct__fpstate +type Timespec C.struct_timespec +type Timeval C.struct_timeval +type Sigaction C.struct_kernel_sigaction +type Siginfo C.siginfo_t +type StackT C.stack_t +type Sigcontext C.struct_sigcontext +type Ucontext C.struct_ucontext +type Itimerval C.struct_itimerval +type EpollEvent C.struct_epoll_event diff --git a/platform/dbops/binaries/go/go/src/runtime/defs3_linux.go b/platform/dbops/binaries/go/go/src/runtime/defs3_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..99479aad06191ebb496c0bde0a5fa72c26c41f17 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs3_linux.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* +Input to cgo -cdefs + +GOARCH=ppc64 cgo -cdefs defs_linux.go defs3_linux.go > defs_linux_ppc64.h +*/ + +package runtime + +/* +#define size_t __kernel_size_t +#define sigset_t __sigset_t // rename the sigset_t here otherwise cgo will complain about "inconsistent definitions for C.sigset_t" +#define _SYS_TYPES_H // avoid inclusion of sys/types.h +#include +#include +*/ +import "C" + +const ( + O_RDONLY = C.O_RDONLY + O_CLOEXEC = C.O_CLOEXEC + SA_RESTORER = 0 // unused +) + +type Usigset C.__sigset_t + +// types used in sigcontext +type Ptregs C.struct_pt_regs +type Gregset C.elf_gregset_t +type FPregset C.elf_fpregset_t +type Vreg C.elf_vrreg_t + +type StackT C.stack_t + +// PPC64 uses sigcontext in place of mcontext in ucontext. 
+// see https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/arch/powerpc/include/uapi/asm/ucontext.h +type Sigcontext C.struct_sigcontext +type Ucontext C.struct_ucontext diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_aix.go b/platform/dbops/binaries/go/go/src/runtime/defs_aix.go new file mode 100644 index 0000000000000000000000000000000000000000..2f28e534689bb341e871d8f789f1ddacfead44d1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_aix.go @@ -0,0 +1,172 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* +Input to cgo -godefs +GOARCH=ppc64 go tool cgo -godefs defs_aix.go > defs_aix_ppc64_tmp.go + +This is only a helper to create defs_aix_ppc64.go +Go runtime functions require the "linux" name of fields (ss_sp, si_addr, etc) +However, AIX structures don't provide such names and must be modified. + +TODO(aix): create a script to automatise defs_aix creation. + +Modifications made: + - sigset replaced by a [4]uint64 array + - add sigset_all variable + - siginfo.si_addr uintptr instead of *byte + - add (*timeval) set_usec + - stackt.ss_sp uintptr instead of *byte + - stackt.ss_size uintptr instead of uint64 + - sigcontext.sc_jmpbuf context64 instead of jumbuf + - ucontext.__extctx is a uintptr because we don't need extctx struct + - ucontext.uc_mcontext: replace jumbuf structure by context64 structure + - sigaction.sa_handler represents union field as both are uintptr + - tstate.* replace *byte by uintptr + + +*/ + +package runtime + +/* + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +*/ +import "C" + +const ( + _EPERM = C.EPERM + _ENOENT = C.ENOENT + _EINTR = C.EINTR + _EAGAIN = C.EAGAIN + _ENOMEM = C.ENOMEM + _EACCES = C.EACCES + _EFAULT = C.EFAULT + _EINVAL = C.EINVAL + _ETIMEDOUT = C.ETIMEDOUT + + _PROT_NONE = C.PROT_NONE + _PROT_READ = C.PROT_READ + _PROT_WRITE = C.PROT_WRITE + _PROT_EXEC = C.PROT_EXEC + + _MAP_ANON = C.MAP_ANONYMOUS + _MAP_PRIVATE = C.MAP_PRIVATE + _MAP_FIXED = C.MAP_FIXED + _MADV_DONTNEED = C.MADV_DONTNEED + + _SIGHUP = C.SIGHUP + _SIGINT = C.SIGINT + _SIGQUIT = C.SIGQUIT + _SIGILL = C.SIGILL + _SIGTRAP = C.SIGTRAP + _SIGABRT = C.SIGABRT + _SIGBUS = C.SIGBUS + _SIGFPE = C.SIGFPE + _SIGKILL = C.SIGKILL + _SIGUSR1 = C.SIGUSR1 + _SIGSEGV = C.SIGSEGV + _SIGUSR2 = C.SIGUSR2 + _SIGPIPE = C.SIGPIPE + _SIGALRM = C.SIGALRM + _SIGCHLD = C.SIGCHLD + _SIGCONT = C.SIGCONT + _SIGSTOP = C.SIGSTOP + _SIGTSTP = C.SIGTSTP + _SIGTTIN = C.SIGTTIN + _SIGTTOU = C.SIGTTOU + _SIGURG = C.SIGURG + _SIGXCPU = C.SIGXCPU + _SIGXFSZ = C.SIGXFSZ + _SIGVTALRM = C.SIGVTALRM + _SIGPROF = C.SIGPROF + _SIGWINCH = C.SIGWINCH + _SIGIO = C.SIGIO + _SIGPWR = C.SIGPWR + _SIGSYS = C.SIGSYS + _SIGTERM = C.SIGTERM + _SIGEMT = C.SIGEMT + _SIGWAITING = C.SIGWAITING + + _FPE_INTDIV = C.FPE_INTDIV + _FPE_INTOVF = C.FPE_INTOVF + _FPE_FLTDIV = C.FPE_FLTDIV + _FPE_FLTOVF = C.FPE_FLTOVF + _FPE_FLTUND = C.FPE_FLTUND + _FPE_FLTRES = C.FPE_FLTRES + _FPE_FLTINV = C.FPE_FLTINV + _FPE_FLTSUB = C.FPE_FLTSUB + + _BUS_ADRALN = C.BUS_ADRALN + _BUS_ADRERR = C.BUS_ADRERR + _BUS_OBJERR = C.BUS_OBJERR + + _SEGV_MAPERR = C.SEGV_MAPERR + _SEGV_ACCERR = C.SEGV_ACCERR + + _ITIMER_REAL = C.ITIMER_REAL + _ITIMER_VIRTUAL = C.ITIMER_VIRTUAL + _ITIMER_PROF = C.ITIMER_PROF + + _O_RDONLY = C.O_RDONLY + _O_WRONLY = C.O_WRONLY + _O_NONBLOCK = C.O_NONBLOCK + _O_CREAT = C.O_CREAT + _O_TRUNC = 
C.O_TRUNC + + _SS_DISABLE = C.SS_DISABLE + _SI_USER = C.SI_USER + _SIG_BLOCK = C.SIG_BLOCK + _SIG_UNBLOCK = C.SIG_UNBLOCK + _SIG_SETMASK = C.SIG_SETMASK + + _SA_SIGINFO = C.SA_SIGINFO + _SA_RESTART = C.SA_RESTART + _SA_ONSTACK = C.SA_ONSTACK + + _PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED + + __SC_PAGE_SIZE = C._SC_PAGE_SIZE + __SC_NPROCESSORS_ONLN = C._SC_NPROCESSORS_ONLN + + _F_SETFL = C.F_SETFL + _F_GETFD = C.F_GETFD + _F_GETFL = C.F_GETFL +) + +type sigset C.sigset_t +type siginfo C.siginfo_t +type timespec C.struct_timespec +type timestruc C.struct_timestruc_t +type timeval C.struct_timeval +type itimerval C.struct_itimerval + +type stackt C.stack_t +type sigcontext C.struct_sigcontext +type ucontext C.ucontext_t +type _Ctype_struct___extctx uint64 // ucontext uses a pointer to this structure but it shouldn't be used +type jmpbuf C.struct___jmpbuf +type context64 C.struct___context64 +type sigactiont C.struct_sigaction +type tstate C.struct_tstate +type rusage C.struct_rusage + +type pthread C.pthread_t +type pthread_attr C.pthread_attr_t + +type semt C.sem_t diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_aix_ppc64.go b/platform/dbops/binaries/go/go/src/runtime/defs_aix_ppc64.go new file mode 100644 index 0000000000000000000000000000000000000000..8e85096939cb767d0ee775f785395ff5e1639d80 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_aix_ppc64.go @@ -0,0 +1,212 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix + +package runtime + +const ( + _EPERM = 0x1 + _ENOENT = 0x2 + _EINTR = 0x4 + _EAGAIN = 0xb + _ENOMEM = 0xc + _EACCES = 0xd + _EFAULT = 0xe + _EINVAL = 0x16 + _ETIMEDOUT = 0x4e + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x10 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x100 + _MADV_DONTNEED = 0x4 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGBUS = 0xa + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGUSR1 = 0x1e + _SIGSEGV = 0xb + _SIGUSR2 = 0x1f + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGCHLD = 0x14 + _SIGCONT = 0x13 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGURG = 0x10 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x22 + _SIGPROF = 0x20 + _SIGWINCH = 0x1c + _SIGIO = 0x17 + _SIGPWR = 0x1d + _SIGSYS = 0xc + _SIGTERM = 0xf + _SIGEMT = 0x7 + _SIGWAITING = 0x27 + + _FPE_INTDIV = 0x14 + _FPE_INTOVF = 0x15 + _FPE_FLTDIV = 0x16 + _FPE_FLTOVF = 0x17 + _FPE_FLTUND = 0x18 + _FPE_FLTRES = 0x19 + _FPE_FLTINV = 0x1a + _FPE_FLTSUB = 0x1b + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x32 + _SEGV_ACCERR = 0x33 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _O_RDONLY = 0x0 + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x100 + _O_TRUNC = 0x200 + + _SS_DISABLE = 0x2 + _SI_USER = 0x0 + _SIG_BLOCK = 0x0 + _SIG_UNBLOCK = 0x1 + _SIG_SETMASK = 0x2 + + _SA_SIGINFO = 0x100 + _SA_RESTART = 0x8 + _SA_ONSTACK = 0x1 + + _PTHREAD_CREATE_DETACHED = 0x1 + + __SC_PAGE_SIZE = 0x30 + __SC_NPROCESSORS_ONLN = 0x48 + + _F_SETFL = 0x4 + _F_GETFD = 0x1 + _F_GETFL = 0x3 +) + +type sigset [4]uint64 + +var sigset_all = sigset{^uint64(0), ^uint64(0), ^uint64(0), ^uint64(0)} + +type siginfo struct { + si_signo int32 + si_errno int32 + si_code int32 + si_pid int32 + si_uid uint32 + si_status int32 + si_addr uintptr + si_band int64 + si_value [2]int32 //
[8]byte + __si_flags int32 + __pad [3]int32 +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int32 + pad_cgo_0 [4]byte +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 + __pad [4]int32 + pas_cgo_0 [4]byte +} + +type sigcontext struct { + sc_onstack int32 + pad_cgo_0 [4]byte + sc_mask sigset + sc_uerror int32 + sc_jmpbuf context64 +} + +type ucontext struct { + __sc_onstack int32 + pad_cgo_0 [4]byte + uc_sigmask sigset + __sc_error int32 + pad_cgo_1 [4]byte + uc_mcontext context64 + uc_link *ucontext + uc_stack stackt + __extctx uintptr // pointer to struct __extctx but we don't use it + __extctx_magic int32 + __pad int32 +} + +type context64 struct { + gpr [32]uint64 + msr uint64 + iar uint64 + lr uint64 + ctr uint64 + cr uint32 + xer uint32 + fpscr uint32 + fpscrx uint32 + except [1]uint64 + fpr [32]float64 + fpeu uint8 + fpinfo uint8 + fpscr24_31 uint8 + pad [1]uint8 + excp_type int32 +} + +type sigactiont struct { + sa_handler uintptr // a union of two pointer + sa_mask sigset + sa_flags int32 + pad_cgo_0 [4]byte +} + +type pthread uint32 +type pthread_attr *byte + +type semt int32 diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_arm_linux.go b/platform/dbops/binaries/go/go/src/runtime/defs_arm_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..805735bd0eef8c68f705bd9ecdaa5bf6c6b94455 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_arm_linux.go @@ -0,0 +1,124 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* +Input to cgo. 
+On a Debian Lenny arm linux distribution: + +cgo -cdefs defs_arm.c >arm/defs.h +*/ + +package runtime + +/* +#cgo CFLAGS: -I/usr/src/linux-headers-2.6.26-2-versatile/include + +#define __ARCH_SI_UID_T int +#include +#include +#include +#include +#include +#include + +struct xsiginfo { + int si_signo; + int si_errno; + int si_code; + char _sifields[4]; +}; + +#undef sa_handler +#undef sa_flags +#undef sa_restorer +#undef sa_mask + +struct xsigaction { + void (*sa_handler)(void); + unsigned long sa_flags; + void (*sa_restorer)(void); + unsigned int sa_mask; // mask last for extensibility +}; +*/ +import "C" + +const ( + PROT_NONE = C.PROT_NONE + PROT_READ = C.PROT_READ + PROT_WRITE = C.PROT_WRITE + PROT_EXEC = C.PROT_EXEC + + MAP_ANON = C.MAP_ANONYMOUS + MAP_PRIVATE = C.MAP_PRIVATE + MAP_FIXED = C.MAP_FIXED + + MADV_DONTNEED = C.MADV_DONTNEED + + SA_RESTART = C.SA_RESTART + SA_ONSTACK = C.SA_ONSTACK + SA_RESTORER = C.SA_RESTORER + SA_SIGINFO = C.SA_SIGINFO + + SIGHUP = C.SIGHUP + SIGINT = C.SIGINT + SIGQUIT = C.SIGQUIT + SIGILL = C.SIGILL + SIGTRAP = C.SIGTRAP + SIGABRT = C.SIGABRT + SIGBUS = C.SIGBUS + SIGFPE = C.SIGFPE + SIGKILL = C.SIGKILL + SIGUSR1 = C.SIGUSR1 + SIGSEGV = C.SIGSEGV + SIGUSR2 = C.SIGUSR2 + SIGPIPE = C.SIGPIPE + SIGALRM = C.SIGALRM + SIGSTKFLT = C.SIGSTKFLT + SIGCHLD = C.SIGCHLD + SIGCONT = C.SIGCONT + SIGSTOP = C.SIGSTOP + SIGTSTP = C.SIGTSTP + SIGTTIN = C.SIGTTIN + SIGTTOU = C.SIGTTOU + SIGURG = C.SIGURG + SIGXCPU = C.SIGXCPU + SIGXFSZ = C.SIGXFSZ + SIGVTALRM = C.SIGVTALRM + SIGPROF = C.SIGPROF + SIGWINCH = C.SIGWINCH + SIGIO = C.SIGIO + SIGPWR = C.SIGPWR + SIGSYS = C.SIGSYS + + FPE_INTDIV = C.FPE_INTDIV & 0xFFFF + FPE_INTOVF = C.FPE_INTOVF & 0xFFFF + FPE_FLTDIV = C.FPE_FLTDIV & 0xFFFF + FPE_FLTOVF = C.FPE_FLTOVF & 0xFFFF + FPE_FLTUND = C.FPE_FLTUND & 0xFFFF + FPE_FLTRES = C.FPE_FLTRES & 0xFFFF + FPE_FLTINV = C.FPE_FLTINV & 0xFFFF + FPE_FLTSUB = C.FPE_FLTSUB & 0xFFFF + + BUS_ADRALN = C.BUS_ADRALN & 0xFFFF + BUS_ADRERR = C.BUS_ADRERR & 0xFFFF + BUS_OBJERR = C.BUS_OBJERR & 0xFFFF + + SEGV_MAPERR = C.SEGV_MAPERR & 0xFFFF + SEGV_ACCERR = C.SEGV_ACCERR & 0xFFFF + + ITIMER_REAL = C.ITIMER_REAL + ITIMER_PROF = C.ITIMER_PROF + ITIMER_VIRTUAL = C.ITIMER_VIRTUAL +) + +type Timespec C.struct_timespec +type StackT C.stack_t +type Sigcontext C.struct_sigcontext +type Ucontext C.struct_ucontext +type Timeval C.struct_timeval +type Itimerval C.struct_itimerval +type Siginfo C.struct_xsiginfo +type Sigaction C.struct_xsigaction diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_darwin.go b/platform/dbops/binaries/go/go/src/runtime/defs_darwin.go new file mode 100644 index 0000000000000000000000000000000000000000..e37443307f3776d7acfd6edf0e251efe0241456b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_darwin.go @@ -0,0 +1,176 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* +Input to cgo. 
+ +GOARCH=amd64 go tool cgo -cdefs defs_darwin.go >defs_darwin_amd64.h +*/ + +package runtime + +/* +#define __DARWIN_UNIX03 0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + EINTR = C.EINTR + EFAULT = C.EFAULT + EAGAIN = C.EAGAIN + ETIMEDOUT = C.ETIMEDOUT + + PROT_NONE = C.PROT_NONE + PROT_READ = C.PROT_READ + PROT_WRITE = C.PROT_WRITE + PROT_EXEC = C.PROT_EXEC + + MAP_ANON = C.MAP_ANON + MAP_PRIVATE = C.MAP_PRIVATE + MAP_FIXED = C.MAP_FIXED + + MADV_DONTNEED = C.MADV_DONTNEED + MADV_FREE = C.MADV_FREE + MADV_FREE_REUSABLE = C.MADV_FREE_REUSABLE + MADV_FREE_REUSE = C.MADV_FREE_REUSE + + SA_SIGINFO = C.SA_SIGINFO + SA_RESTART = C.SA_RESTART + SA_ONSTACK = C.SA_ONSTACK + SA_USERTRAMP = C.SA_USERTRAMP + SA_64REGSET = C.SA_64REGSET + + SIGHUP = C.SIGHUP + SIGINT = C.SIGINT + SIGQUIT = C.SIGQUIT + SIGILL = C.SIGILL + SIGTRAP = C.SIGTRAP + SIGABRT = C.SIGABRT + SIGEMT = C.SIGEMT + SIGFPE = C.SIGFPE + SIGKILL = C.SIGKILL + SIGBUS = C.SIGBUS + SIGSEGV = C.SIGSEGV + SIGSYS = C.SIGSYS + SIGPIPE = C.SIGPIPE + SIGALRM = C.SIGALRM + SIGTERM = C.SIGTERM + SIGURG = C.SIGURG + SIGSTOP = C.SIGSTOP + SIGTSTP = C.SIGTSTP + SIGCONT = C.SIGCONT + SIGCHLD = C.SIGCHLD + SIGTTIN = C.SIGTTIN + SIGTTOU = C.SIGTTOU + SIGIO = C.SIGIO + SIGXCPU = C.SIGXCPU + SIGXFSZ = C.SIGXFSZ + SIGVTALRM = C.SIGVTALRM + SIGPROF = C.SIGPROF + SIGWINCH = C.SIGWINCH + SIGINFO = C.SIGINFO + SIGUSR1 = C.SIGUSR1 + SIGUSR2 = C.SIGUSR2 + + FPE_INTDIV = C.FPE_INTDIV + FPE_INTOVF = C.FPE_INTOVF + FPE_FLTDIV = C.FPE_FLTDIV + FPE_FLTOVF = C.FPE_FLTOVF + FPE_FLTUND = C.FPE_FLTUND + FPE_FLTRES = C.FPE_FLTRES + FPE_FLTINV = C.FPE_FLTINV + FPE_FLTSUB = C.FPE_FLTSUB + + BUS_ADRALN = C.BUS_ADRALN + BUS_ADRERR = C.BUS_ADRERR + BUS_OBJERR = C.BUS_OBJERR + + SEGV_MAPERR = C.SEGV_MAPERR + SEGV_ACCERR = C.SEGV_ACCERR + + ITIMER_REAL = C.ITIMER_REAL + ITIMER_VIRTUAL = C.ITIMER_VIRTUAL + ITIMER_PROF = C.ITIMER_PROF + + EV_ADD = C.EV_ADD + EV_DELETE = C.EV_DELETE + EV_CLEAR = C.EV_CLEAR + EV_RECEIPT = C.EV_RECEIPT + EV_ERROR = C.EV_ERROR + EV_EOF = C.EV_EOF + EVFILT_READ = C.EVFILT_READ + EVFILT_WRITE = C.EVFILT_WRITE + + PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED + + F_GETFL = C.F_GETFL + F_SETFL = C.F_SETFL + + O_WRONLY = C.O_WRONLY + O_NONBLOCK = C.O_NONBLOCK + O_CREAT = C.O_CREAT + O_TRUNC = C.O_TRUNC + + VM_REGION_BASIC_INFO_COUNT_64 = C.VM_REGION_BASIC_INFO_COUNT_64 + VM_REGION_BASIC_INFO_64 = C.VM_REGION_BASIC_INFO_64 +) + +type StackT C.struct_sigaltstack +type Sighandler C.union___sigaction_u + +type Sigaction C.struct___sigaction // used in syscalls +type Usigaction C.struct_sigaction // used by sigaction second argument +type Sigset C.sigset_t +type Sigval C.union_sigval +type Siginfo C.siginfo_t +type Timeval C.struct_timeval +type Itimerval C.struct_itimerval +type Timespec C.struct_timespec + +type FPControl C.struct_fp_control +type FPStatus C.struct_fp_status +type RegMMST C.struct_mmst_reg +type RegXMM C.struct_xmm_reg + +type Regs64 C.struct_x86_thread_state64 +type FloatState64 C.struct_x86_float_state64 +type ExceptionState64 C.struct_x86_exception_state64 +type Mcontext64 C.struct_mcontext64 + +type Regs32 C.struct_i386_thread_state +type FloatState32 C.struct_i386_float_state +type ExceptionState32 C.struct_i386_exception_state +type Mcontext32 C.struct_mcontext32 + +type Ucontext C.struct_ucontext + +type Kevent C.struct_kevent + +type Pthread C.pthread_t +type PthreadAttr C.pthread_attr_t +type PthreadMutex C.pthread_mutex_t +type PthreadMutexAttr 
C.pthread_mutexattr_t +type PthreadCond C.pthread_cond_t +type PthreadCondAttr C.pthread_condattr_t + +type MachTimebaseInfo C.mach_timebase_info_data_t + +type MachPort C.mach_port_t +type MachVMMapRead C.vm_map_read_t +type MachVMAddress C.mach_vm_address_t +type MachVMSize C.mach_vm_size_t +type MachVMRegionFlavour C.vm_region_flavor_t +type MachVMRegionInfo C.vm_region_info_t +type MachMsgTypeNumber C.mach_msg_type_number_t diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_darwin_amd64.go b/platform/dbops/binaries/go/go/src/runtime/defs_darwin_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..f998b0be9118f979e1fa4766298445818ab0c51d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_darwin_amd64.go @@ -0,0 +1,384 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_darwin.go + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x5 + _MADV_FREE_REUSABLE = 0x7 + _MADV_FREE_REUSE = 0x8 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + _SA_USERTRAMP = 0x100 + _SA_64REGSET = 0x200 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x7 + _FPE_INTOVF = 0x8 + _FPE_FLTDIV = 0x1 + _FPE_FLTOVF = 0x2 + _FPE_FLTUND = 0x3 + _FPE_FLTRES = 0x4 + _FPE_FLTINV = 0x5 + _FPE_FLTSUB = 0x6 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_RECEIPT = 0x40 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 + + _PTHREAD_CREATE_DETACHED = 0x2 + + _F_GETFL = 0x3 + _F_SETFL = 0x4 + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + + _VM_REGION_BASIC_INFO_COUNT_64 = 0x9 + _VM_REGION_BASIC_INFO_64 = 0x9 +) + +type stackt struct { + ss_sp *byte + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte +} + +type sigactiont struct { + __sigaction_u [8]byte + sa_tramp unsafe.Pointer + sa_mask uint32 + sa_flags int32 +} + +type usigactiont struct { + __sigaction_u [8]byte + sa_mask uint32 + sa_flags int32 +} + +type siginfo struct { + si_signo int32 + si_errno int32 + si_code int32 + si_pid int32 + si_uid uint32 + si_status int32 + si_addr uint64 + si_value [8]byte + si_band int64 + __pad [7]uint64 +} + +type timeval struct { + tv_sec int64 + tv_usec int32 + pad_cgo_0 [4]byte +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type fpcontrol struct { + pad_cgo_0 [2]byte +} + +type fpstatus struct { + 
pad_cgo_0 [2]byte +} + +type regmmst struct { + mmst_reg [10]int8 + mmst_rsrv [6]int8 +} + +type regxmm struct { + xmm_reg [16]int8 +} + +type regs64 struct { + rax uint64 + rbx uint64 + rcx uint64 + rdx uint64 + rdi uint64 + rsi uint64 + rbp uint64 + rsp uint64 + r8 uint64 + r9 uint64 + r10 uint64 + r11 uint64 + r12 uint64 + r13 uint64 + r14 uint64 + r15 uint64 + rip uint64 + rflags uint64 + cs uint64 + fs uint64 + gs uint64 +} + +type floatstate64 struct { + fpu_reserved [2]int32 + fpu_fcw fpcontrol + fpu_fsw fpstatus + fpu_ftw uint8 + fpu_rsrv1 uint8 + fpu_fop uint16 + fpu_ip uint32 + fpu_cs uint16 + fpu_rsrv2 uint16 + fpu_dp uint32 + fpu_ds uint16 + fpu_rsrv3 uint16 + fpu_mxcsr uint32 + fpu_mxcsrmask uint32 + fpu_stmm0 regmmst + fpu_stmm1 regmmst + fpu_stmm2 regmmst + fpu_stmm3 regmmst + fpu_stmm4 regmmst + fpu_stmm5 regmmst + fpu_stmm6 regmmst + fpu_stmm7 regmmst + fpu_xmm0 regxmm + fpu_xmm1 regxmm + fpu_xmm2 regxmm + fpu_xmm3 regxmm + fpu_xmm4 regxmm + fpu_xmm5 regxmm + fpu_xmm6 regxmm + fpu_xmm7 regxmm + fpu_xmm8 regxmm + fpu_xmm9 regxmm + fpu_xmm10 regxmm + fpu_xmm11 regxmm + fpu_xmm12 regxmm + fpu_xmm13 regxmm + fpu_xmm14 regxmm + fpu_xmm15 regxmm + fpu_rsrv4 [96]int8 + fpu_reserved1 int32 +} + +type exceptionstate64 struct { + trapno uint16 + cpu uint16 + err uint32 + faultvaddr uint64 +} + +type mcontext64 struct { + es exceptionstate64 + ss regs64 + fs floatstate64 + pad_cgo_0 [4]byte +} + +type regs32 struct { + eax uint32 + ebx uint32 + ecx uint32 + edx uint32 + edi uint32 + esi uint32 + ebp uint32 + esp uint32 + ss uint32 + eflags uint32 + eip uint32 + cs uint32 + ds uint32 + es uint32 + fs uint32 + gs uint32 +} + +type floatstate32 struct { + fpu_reserved [2]int32 + fpu_fcw fpcontrol + fpu_fsw fpstatus + fpu_ftw uint8 + fpu_rsrv1 uint8 + fpu_fop uint16 + fpu_ip uint32 + fpu_cs uint16 + fpu_rsrv2 uint16 + fpu_dp uint32 + fpu_ds uint16 + fpu_rsrv3 uint16 + fpu_mxcsr uint32 + fpu_mxcsrmask uint32 + fpu_stmm0 regmmst + fpu_stmm1 regmmst + fpu_stmm2 regmmst + fpu_stmm3 regmmst + fpu_stmm4 regmmst + fpu_stmm5 regmmst + fpu_stmm6 regmmst + fpu_stmm7 regmmst + fpu_xmm0 regxmm + fpu_xmm1 regxmm + fpu_xmm2 regxmm + fpu_xmm3 regxmm + fpu_xmm4 regxmm + fpu_xmm5 regxmm + fpu_xmm6 regxmm + fpu_xmm7 regxmm + fpu_rsrv4 [224]int8 + fpu_reserved1 int32 +} + +type exceptionstate32 struct { + trapno uint16 + cpu uint16 + err uint32 + faultvaddr uint32 +} + +type mcontext32 struct { + es exceptionstate32 + ss regs32 + fs floatstate32 +} + +type ucontext struct { + uc_onstack int32 + uc_sigmask uint32 + uc_stack stackt + uc_link *ucontext + uc_mcsize uint64 + uc_mcontext *mcontext64 +} + +type keventt struct { + ident uint64 + filter int16 + flags uint16 + fflags uint32 + data int64 + udata *byte +} + +type pthread uintptr +type pthreadattr struct { + X__sig int64 + X__opaque [56]int8 +} +type pthreadmutex struct { + X__sig int64 + X__opaque [56]int8 +} +type pthreadmutexattr struct { + X__sig int64 + X__opaque [8]int8 +} +type pthreadcond struct { + X__sig int64 + X__opaque [40]int8 +} +type pthreadcondattr struct { + X__sig int64 + X__opaque [8]int8 +} + +type machTimebaseInfo struct { + numer uint32 + denom uint32 +} + +type machPort uint32 +type machVMMapRead uint32 +type machVMAddress uint64 +type machVMSize uint64 +type machVMRegionFlavour int32 +type machVMRegionInfo *int32 +type machMsgTypeNumber uint32 diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_darwin_arm64.go b/platform/dbops/binaries/go/go/src/runtime/defs_darwin_arm64.go new file mode 100644 index 
0000000000000000000000000000000000000000..e07b08e0eec92060e9584b6ad2d9cbd60f724de5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_darwin_arm64.go @@ -0,0 +1,251 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_darwin.go + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x5 + _MADV_FREE_REUSABLE = 0x7 + _MADV_FREE_REUSE = 0x8 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + _SA_USERTRAMP = 0x100 + _SA_64REGSET = 0x200 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x7 + _FPE_INTOVF = 0x8 + _FPE_FLTDIV = 0x1 + _FPE_FLTOVF = 0x2 + _FPE_FLTUND = 0x3 + _FPE_FLTRES = 0x4 + _FPE_FLTINV = 0x5 + _FPE_FLTSUB = 0x6 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_RECEIPT = 0x40 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 + + _PTHREAD_CREATE_DETACHED = 0x2 + + _PTHREAD_KEYS_MAX = 512 + + _F_GETFL = 0x3 + _F_SETFL = 0x4 + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + + _VM_REGION_BASIC_INFO_COUNT_64 = 0x9 + _VM_REGION_BASIC_INFO_64 = 0x9 +) + +type stackt struct { + ss_sp *byte + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte +} + +type sigactiont struct { + __sigaction_u [8]byte + sa_tramp unsafe.Pointer + sa_mask uint32 + sa_flags int32 +} + +type usigactiont struct { + __sigaction_u [8]byte + sa_mask uint32 + sa_flags int32 +} + +type siginfo struct { + si_signo int32 + si_errno int32 + si_code int32 + si_pid int32 + si_uid uint32 + si_status int32 + si_addr *byte + si_value [8]byte + si_band int64 + __pad [7]uint64 +} + +type timeval struct { + tv_sec int64 + tv_usec int32 + pad_cgo_0 [4]byte +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type exceptionstate64 struct { + far uint64 // virtual fault addr + esr uint32 // exception syndrome + exc uint32 // number of arm exception taken +} + +type regs64 struct { + x [29]uint64 // registers x0 to x28 + fp uint64 // frame register, x29 + lr uint64 // link register, x30 + sp uint64 // stack pointer, x31 + pc uint64 // program counter + cpsr uint32 // current program status register + __pad uint32 +} + +type neonstate64 struct { + v [64]uint64 // actually [32]uint128 + fpsr uint32 + fpcr uint32 +} + +type mcontext64 struct { + es exceptionstate64 + ss regs64 + ns neonstate64 +} + +type ucontext struct { + uc_onstack int32 + uc_sigmask 
uint32 + uc_stack stackt + uc_link *ucontext + uc_mcsize uint64 + uc_mcontext *mcontext64 +} + +type keventt struct { + ident uint64 + filter int16 + flags uint16 + fflags uint32 + data int64 + udata *byte +} + +type pthread uintptr +type pthreadattr struct { + X__sig int64 + X__opaque [56]int8 +} +type pthreadmutex struct { + X__sig int64 + X__opaque [56]int8 +} +type pthreadmutexattr struct { + X__sig int64 + X__opaque [8]int8 +} +type pthreadcond struct { + X__sig int64 + X__opaque [40]int8 +} +type pthreadcondattr struct { + X__sig int64 + X__opaque [8]int8 +} + +type machTimebaseInfo struct { + numer uint32 + denom uint32 +} + +type pthreadkey uint64 + +type machPort uint32 +type machVMMapRead uint32 +type machVMAddress uint64 +type machVMSize uint64 +type machVMRegionFlavour int32 +type machVMRegionInfo *int32 +type machMsgTypeNumber uint32 diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_dragonfly.go b/platform/dbops/binaries/go/go/src/runtime/defs_dragonfly.go new file mode 100644 index 0000000000000000000000000000000000000000..0463f1f116aeac1f4d9530bc43377be59accf015 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_dragonfly.go @@ -0,0 +1,133 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* +Input to cgo. + +GOARCH=amd64 go tool cgo -cdefs defs_dragonfly.go >defs_dragonfly_amd64.h +*/ + +package runtime + +/* +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + EINTR = C.EINTR + EFAULT = C.EFAULT + EBUSY = C.EBUSY + EAGAIN = C.EAGAIN + ETIMEDOUT = C.ETIMEDOUT + + O_WRONLY = C.O_WRONLY + O_NONBLOCK = C.O_NONBLOCK + O_CREAT = C.O_CREAT + O_TRUNC = C.O_TRUNC + O_CLOEXEC = C.O_CLOEXEC + + PROT_NONE = C.PROT_NONE + PROT_READ = C.PROT_READ + PROT_WRITE = C.PROT_WRITE + PROT_EXEC = C.PROT_EXEC + + MAP_ANON = C.MAP_ANON + MAP_PRIVATE = C.MAP_PRIVATE + MAP_FIXED = C.MAP_FIXED + + MADV_DONTNEED = C.MADV_DONTNEED + MADV_FREE = C.MADV_FREE + + SA_SIGINFO = C.SA_SIGINFO + SA_RESTART = C.SA_RESTART + SA_ONSTACK = C.SA_ONSTACK + + SIGHUP = C.SIGHUP + SIGINT = C.SIGINT + SIGQUIT = C.SIGQUIT + SIGILL = C.SIGILL + SIGTRAP = C.SIGTRAP + SIGABRT = C.SIGABRT + SIGEMT = C.SIGEMT + SIGFPE = C.SIGFPE + SIGKILL = C.SIGKILL + SIGBUS = C.SIGBUS + SIGSEGV = C.SIGSEGV + SIGSYS = C.SIGSYS + SIGPIPE = C.SIGPIPE + SIGALRM = C.SIGALRM + SIGTERM = C.SIGTERM + SIGURG = C.SIGURG + SIGSTOP = C.SIGSTOP + SIGTSTP = C.SIGTSTP + SIGCONT = C.SIGCONT + SIGCHLD = C.SIGCHLD + SIGTTIN = C.SIGTTIN + SIGTTOU = C.SIGTTOU + SIGIO = C.SIGIO + SIGXCPU = C.SIGXCPU + SIGXFSZ = C.SIGXFSZ + SIGVTALRM = C.SIGVTALRM + SIGPROF = C.SIGPROF + SIGWINCH = C.SIGWINCH + SIGINFO = C.SIGINFO + SIGUSR1 = C.SIGUSR1 + SIGUSR2 = C.SIGUSR2 + + FPE_INTDIV = C.FPE_INTDIV + FPE_INTOVF = C.FPE_INTOVF + FPE_FLTDIV = C.FPE_FLTDIV + FPE_FLTOVF = C.FPE_FLTOVF + FPE_FLTUND = C.FPE_FLTUND + FPE_FLTRES = C.FPE_FLTRES + FPE_FLTINV = C.FPE_FLTINV + FPE_FLTSUB = C.FPE_FLTSUB + + BUS_ADRALN = C.BUS_ADRALN + BUS_ADRERR = C.BUS_ADRERR + BUS_OBJERR = C.BUS_OBJERR + + SEGV_MAPERR = C.SEGV_MAPERR + SEGV_ACCERR = C.SEGV_ACCERR + + ITIMER_REAL = C.ITIMER_REAL + ITIMER_VIRTUAL = C.ITIMER_VIRTUAL + ITIMER_PROF = C.ITIMER_PROF + + EV_ADD = C.EV_ADD + EV_DELETE = C.EV_DELETE + EV_CLEAR = C.EV_CLEAR + EV_ERROR = C.EV_ERROR + EV_EOF = C.EV_EOF + EVFILT_READ = C.EVFILT_READ + EVFILT_WRITE = C.EVFILT_WRITE +) + +type Rtprio 
C.struct_rtprio +type Lwpparams C.struct_lwp_params +type Sigset C.struct___sigset +type StackT C.stack_t + +type Siginfo C.siginfo_t + +type Mcontext C.mcontext_t +type Ucontext C.ucontext_t + +type Timespec C.struct_timespec +type Timeval C.struct_timeval +type Itimerval C.struct_itimerval + +type Kevent C.struct_kevent diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_dragonfly_amd64.go b/platform/dbops/binaries/go/go/src/runtime/defs_dragonfly_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..41bfb085d1ac66f985fc82aaf129f57404279997 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_dragonfly_amd64.go @@ -0,0 +1,212 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_dragonfly.go + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EBUSY = 0x10 + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x20000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x5 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x2 + _FPE_INTOVF = 0x1 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 +) + +type rtprio struct { + _type uint16 + prio uint16 +} + +type lwpparams struct { + start_func uintptr + arg unsafe.Pointer + stack uintptr + tid1 unsafe.Pointer // *int32 + tid2 unsafe.Pointer // *int32 +} + +type sigset struct { + __bits [4]uint32 +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte +} + +type siginfo struct { + si_signo int32 + si_errno int32 + si_code int32 + si_pid int32 + si_uid uint32 + si_status int32 + si_addr uint64 + si_value [8]byte + si_band int64 + __spare__ [7]int32 + pad_cgo_0 [4]byte +} + +type mcontext struct { + mc_onstack uint64 + mc_rdi uint64 + mc_rsi uint64 + mc_rdx uint64 + mc_rcx uint64 + mc_r8 uint64 + mc_r9 uint64 + mc_rax uint64 + mc_rbx uint64 + mc_rbp uint64 + mc_r10 uint64 + mc_r11 uint64 + mc_r12 uint64 + mc_r13 uint64 + mc_r14 uint64 + mc_r15 uint64 + mc_xflags uint64 + mc_trapno uint64 + mc_addr uint64 + mc_flags uint64 + mc_err uint64 + mc_rip uint64 + mc_cs uint64 + mc_rflags uint64 + mc_rsp uint64 + mc_ss uint64 + mc_len uint32 + mc_fpformat uint32 + mc_ownedfp uint32 + mc_reserved uint32 + mc_unused [8]uint32 + mc_fpregs [256]int32 +} + +type ucontext struct { + uc_sigmask sigset + pad_cgo_0 [48]byte + uc_mcontext mcontext + uc_link *ucontext + uc_stack stackt + 
__spare__ [8]int32 +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type keventt struct { + ident uint64 + filter int16 + flags uint16 + fflags uint32 + data int64 + udata *byte +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_freebsd.go b/platform/dbops/binaries/go/go/src/runtime/defs_freebsd.go new file mode 100644 index 0000000000000000000000000000000000000000..d86ae9133afade6055c44e26f5be2cb880ff8a2e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_freebsd.go @@ -0,0 +1,174 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* +Input to cgo. + +GOARCH=amd64 go tool cgo -cdefs defs_freebsd.go >defs_freebsd_amd64.h +GOARCH=386 go tool cgo -cdefs defs_freebsd.go >defs_freebsd_386.h +GOARCH=arm go tool cgo -cdefs defs_freebsd.go >defs_freebsd_arm.h +*/ + +package runtime + +/* +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +*/ +import "C" + +// Local consts. +const ( + _NBBY = C.NBBY // Number of bits in a byte. + _CTL_MAXNAME = C.CTL_MAXNAME // Largest number of components supported. + _CPU_LEVEL_WHICH = C.CPU_LEVEL_WHICH // Actual mask/id for which. + _CPU_WHICH_PID = C.CPU_WHICH_PID // Specifies a process id. +) + +const ( + EINTR = C.EINTR + EFAULT = C.EFAULT + EAGAIN = C.EAGAIN + ETIMEDOUT = C.ETIMEDOUT + + O_WRONLY = C.O_WRONLY + O_NONBLOCK = C.O_NONBLOCK + O_CREAT = C.O_CREAT + O_TRUNC = C.O_TRUNC + O_CLOEXEC = C.O_CLOEXEC + + PROT_NONE = C.PROT_NONE + PROT_READ = C.PROT_READ + PROT_WRITE = C.PROT_WRITE + PROT_EXEC = C.PROT_EXEC + + MAP_ANON = C.MAP_ANON + MAP_SHARED = C.MAP_SHARED + MAP_PRIVATE = C.MAP_PRIVATE + MAP_FIXED = C.MAP_FIXED + + MADV_DONTNEED = C.MADV_DONTNEED + MADV_FREE = C.MADV_FREE + + SA_SIGINFO = C.SA_SIGINFO + SA_RESTART = C.SA_RESTART + SA_ONSTACK = C.SA_ONSTACK + + CLOCK_MONOTONIC = C.CLOCK_MONOTONIC + CLOCK_REALTIME = C.CLOCK_REALTIME + + UMTX_OP_WAIT_UINT = C.UMTX_OP_WAIT_UINT + UMTX_OP_WAIT_UINT_PRIVATE = C.UMTX_OP_WAIT_UINT_PRIVATE + UMTX_OP_WAKE = C.UMTX_OP_WAKE + UMTX_OP_WAKE_PRIVATE = C.UMTX_OP_WAKE_PRIVATE + + SIGHUP = C.SIGHUP + SIGINT = C.SIGINT + SIGQUIT = C.SIGQUIT + SIGILL = C.SIGILL + SIGTRAP = C.SIGTRAP + SIGABRT = C.SIGABRT + SIGEMT = C.SIGEMT + SIGFPE = C.SIGFPE + SIGKILL = C.SIGKILL + SIGBUS = C.SIGBUS + SIGSEGV = C.SIGSEGV + SIGSYS = C.SIGSYS + SIGPIPE = C.SIGPIPE + SIGALRM = C.SIGALRM + SIGTERM = C.SIGTERM + SIGURG = C.SIGURG + SIGSTOP = C.SIGSTOP + SIGTSTP = C.SIGTSTP + SIGCONT = C.SIGCONT + SIGCHLD = C.SIGCHLD + SIGTTIN = C.SIGTTIN + SIGTTOU = C.SIGTTOU + SIGIO = C.SIGIO + SIGXCPU = C.SIGXCPU + SIGXFSZ = C.SIGXFSZ + SIGVTALRM = C.SIGVTALRM + SIGPROF = C.SIGPROF + SIGWINCH = C.SIGWINCH + SIGINFO = C.SIGINFO + SIGUSR1 = C.SIGUSR1 + SIGUSR2 = C.SIGUSR2 + + FPE_INTDIV = C.FPE_INTDIV + FPE_INTOVF = C.FPE_INTOVF + FPE_FLTDIV = C.FPE_FLTDIV + FPE_FLTOVF = C.FPE_FLTOVF + FPE_FLTUND = C.FPE_FLTUND + FPE_FLTRES = C.FPE_FLTRES + FPE_FLTINV = C.FPE_FLTINV + FPE_FLTSUB = C.FPE_FLTSUB + + BUS_ADRALN = C.BUS_ADRALN + BUS_ADRERR = 
C.BUS_ADRERR + BUS_OBJERR = C.BUS_OBJERR + + SEGV_MAPERR = C.SEGV_MAPERR + SEGV_ACCERR = C.SEGV_ACCERR + + ITIMER_REAL = C.ITIMER_REAL + ITIMER_VIRTUAL = C.ITIMER_VIRTUAL + ITIMER_PROF = C.ITIMER_PROF + + EV_ADD = C.EV_ADD + EV_DELETE = C.EV_DELETE + EV_CLEAR = C.EV_CLEAR + EV_RECEIPT = C.EV_RECEIPT + EV_ERROR = C.EV_ERROR + EV_EOF = C.EV_EOF + EVFILT_READ = C.EVFILT_READ + EVFILT_WRITE = C.EVFILT_WRITE +) + +type Rtprio C.struct_rtprio +type ThrParam C.struct_thr_param +type Sigset C.struct___sigset +type StackT C.stack_t + +type Siginfo C.siginfo_t + +type Mcontext C.mcontext_t +type Ucontext C.ucontext_t + +type Timespec C.struct_timespec +type Timeval C.struct_timeval +type Itimerval C.struct_itimerval + +type Umtx_time C.struct__umtx_time + +type KeventT C.struct_kevent + +type bintime C.struct_bintime +type vdsoTimehands C.struct_vdso_timehands +type vdsoTimekeep C.struct_vdso_timekeep + +const ( + _VDSO_TK_VER_CURR = C.VDSO_TK_VER_CURR + + vdsoTimehandsSize = C.sizeof_struct_vdso_timehands + vdsoTimekeepSize = C.sizeof_struct_vdso_timekeep +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_386.go b/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_386.go new file mode 100644 index 0000000000000000000000000000000000000000..ee8274188ae69e0b74e63f06d4090b186d9b3477 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_386.go @@ -0,0 +1,270 @@ +// Code generated by cgo, then manually converted into appropriate naming and code +// for the Go runtime. +// go tool cgo -godefs defs_freebsd.go + +package runtime + +import "unsafe" + +const ( + _NBBY = 0x8 + _CTL_MAXNAME = 0x18 + _CPU_LEVEL_WHICH = 0x3 + _CPU_WHICH_PID = 0x2 +) + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x100000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_SHARED = 0x1 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x5 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _CLOCK_MONOTONIC = 0x4 + _CLOCK_REALTIME = 0x0 + + _UMTX_OP_WAIT_UINT = 0xb + _UMTX_OP_WAIT_UINT_PRIVATE = 0xf + _UMTX_OP_WAKE = 0x3 + _UMTX_OP_WAKE_PRIVATE = 0x10 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x2 + _FPE_INTOVF = 0x1 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_RECEIPT = 0x40 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 +) + +type rtprio struct { + _type uint16 + prio uint16 +} + +type thrparam struct { + start_func uintptr + arg unsafe.Pointer + stack_base uintptr + stack_size uintptr + tls_base unsafe.Pointer + 
tls_size uintptr + child_tid unsafe.Pointer // *int32 + parent_tid *int32 + flags int32 + rtp *rtprio + spare [3]uintptr +} + +type thread int32 // long + +type sigset struct { + __bits [4]uint32 +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 +} + +type siginfo struct { + si_signo int32 + si_errno int32 + si_code int32 + si_pid int32 + si_uid uint32 + si_status int32 + si_addr uintptr + si_value [4]byte + _reason [32]byte +} + +type mcontext struct { + mc_onstack uint32 + mc_gs uint32 + mc_fs uint32 + mc_es uint32 + mc_ds uint32 + mc_edi uint32 + mc_esi uint32 + mc_ebp uint32 + mc_isp uint32 + mc_ebx uint32 + mc_edx uint32 + mc_ecx uint32 + mc_eax uint32 + mc_trapno uint32 + mc_err uint32 + mc_eip uint32 + mc_cs uint32 + mc_eflags uint32 + mc_esp uint32 + mc_ss uint32 + mc_len uint32 + mc_fpformat uint32 + mc_ownedfp uint32 + mc_flags uint32 + mc_fpstate [128]uint32 + mc_fsbase uint32 + mc_gsbase uint32 + mc_xfpustate uint32 + mc_xfpustate_len uint32 + mc_spare2 [4]uint32 +} + +type ucontext struct { + uc_sigmask sigset + uc_mcontext mcontext + uc_link *ucontext + uc_stack stackt + uc_flags int32 + __spare__ [4]int32 + pad_cgo_0 [12]byte +} + +type timespec struct { + tv_sec int32 + tv_nsec int32 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = timediv(ns, 1e9, &ts.tv_nsec) +} + +type timeval struct { + tv_sec int32 + tv_usec int32 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type umtx_time struct { + _timeout timespec + _flags uint32 + _clockid uint32 +} + +type keventt struct { + ident uint32 + filter int16 + flags uint16 + fflags uint32 + data int64 + udata *byte + ext [4]uint64 +} + +type bintime struct { + sec int32 + frac uint64 +} + +type vdsoTimehands struct { + algo uint32 + gen uint32 + scale uint64 + offset_count uint32 + counter_mask uint32 + offset bintime + boottime bintime + x86_shift uint32 + x86_hpet_idx uint32 + res [6]uint32 +} + +type vdsoTimekeep struct { + ver uint32 + enabled uint32 + current uint32 +} + +const ( + _VDSO_TK_VER_CURR = 0x1 + + vdsoTimehandsSize = 0x50 + vdsoTimekeepSize = 0xc +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_amd64.go b/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..9003f9201565a14ca9871be08b3406edb557a3e9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_amd64.go @@ -0,0 +1,282 @@ +// Code generated by cgo, then manually converted into appropriate naming and code +// for the Go runtime. 
+// go tool cgo -godefs defs_freebsd.go + +package runtime + +import "unsafe" + +const ( + _NBBY = 0x8 + _CTL_MAXNAME = 0x18 + _CPU_LEVEL_WHICH = 0x3 + _CPU_WHICH_PID = 0x2 +) + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x100000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_SHARED = 0x1 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x5 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _CLOCK_MONOTONIC = 0x4 + _CLOCK_REALTIME = 0x0 + + _UMTX_OP_WAIT_UINT = 0xb + _UMTX_OP_WAIT_UINT_PRIVATE = 0xf + _UMTX_OP_WAKE = 0x3 + _UMTX_OP_WAKE_PRIVATE = 0x10 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x2 + _FPE_INTOVF = 0x1 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_RECEIPT = 0x40 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 +) + +type rtprio struct { + _type uint16 + prio uint16 +} + +type thrparam struct { + start_func uintptr + arg unsafe.Pointer + stack_base uintptr + stack_size uintptr + tls_base unsafe.Pointer + tls_size uintptr + child_tid unsafe.Pointer // *int64 + parent_tid *int64 + flags int32 + pad_cgo_0 [4]byte + rtp *rtprio + spare [3]uintptr +} + +type thread int64 // long + +type sigset struct { + __bits [4]uint32 +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte +} + +type siginfo struct { + si_signo int32 + si_errno int32 + si_code int32 + si_pid int32 + si_uid uint32 + si_status int32 + si_addr uint64 + si_value [8]byte + _reason [40]byte +} + +type mcontext struct { + mc_onstack uint64 + mc_rdi uint64 + mc_rsi uint64 + mc_rdx uint64 + mc_rcx uint64 + mc_r8 uint64 + mc_r9 uint64 + mc_rax uint64 + mc_rbx uint64 + mc_rbp uint64 + mc_r10 uint64 + mc_r11 uint64 + mc_r12 uint64 + mc_r13 uint64 + mc_r14 uint64 + mc_r15 uint64 + mc_trapno uint32 + mc_fs uint16 + mc_gs uint16 + mc_addr uint64 + mc_flags uint32 + mc_es uint16 + mc_ds uint16 + mc_err uint64 + mc_rip uint64 + mc_cs uint64 + mc_rflags uint64 + mc_rsp uint64 + mc_ss uint64 + mc_len uint64 + mc_fpformat uint64 + mc_ownedfp uint64 + mc_fpstate [64]uint64 + mc_fsbase uint64 + mc_gsbase uint64 + mc_xfpustate uint64 + mc_xfpustate_len uint64 + mc_spare [4]uint64 +} + +type ucontext struct { + uc_sigmask sigset + uc_mcontext mcontext + uc_link *ucontext + uc_stack stackt + uc_flags int32 + __spare__ [4]int32 + pad_cgo_0 [12]byte +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + 
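+
+// setNsecExample is an illustrative sketch (not part of the generated defs;
+// the function name is a placeholder): it shows the intended use of setNsec
+// above, splitting a nanosecond count into the seconds/nanoseconds pair that
+// kernel interfaces such as the umtx timed-wait path expect. On 64-bit
+// targets the split is plain 64-bit division, as in setNsec; the 32-bit ports
+// in this change (defs_freebsd_386.go, defs_freebsd_arm.go) call the
+// runtime's timediv helper instead, which forms the same quotient by
+// shift-and-subtract so no 64-bit hardware divide is needed.
+func setNsecExample() timespec {
+	var ts timespec
+	ts.setNsec(2500000000) // 2.5s -> tv_sec == 2, tv_nsec == 500000000
+	return ts
+}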
+type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type umtx_time struct { + _timeout timespec + _flags uint32 + _clockid uint32 +} + +type keventt struct { + ident uint64 + filter int16 + flags uint16 + fflags uint32 + data int64 + udata *byte + ext [4]uint64 +} + +type bintime struct { + sec int64 + frac uint64 +} + +type vdsoTimehands struct { + algo uint32 + gen uint32 + scale uint64 + offset_count uint32 + counter_mask uint32 + offset bintime + boottime bintime + x86_shift uint32 + x86_hpet_idx uint32 + res [6]uint32 +} + +type vdsoTimekeep struct { + ver uint32 + enabled uint32 + current uint32 + pad_cgo_0 [4]byte +} + +const ( + _VDSO_TK_VER_CURR = 0x1 + + vdsoTimehandsSize = 0x58 + vdsoTimekeepSize = 0x10 +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_arm.go b/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_arm.go new file mode 100644 index 0000000000000000000000000000000000000000..68cc1b9545b7991eba8808c0e28d0a70a9794a22 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_arm.go @@ -0,0 +1,245 @@ +// Code generated by cgo, then manually converted into appropriate naming and code +// for the Go runtime. +// go tool cgo -godefs defs_freebsd.go + +package runtime + +import "unsafe" + +const ( + _NBBY = 0x8 + _CTL_MAXNAME = 0x18 + _CPU_LEVEL_WHICH = 0x3 + _CPU_WHICH_PID = 0x2 +) + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x100000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_SHARED = 0x1 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x5 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _CLOCK_MONOTONIC = 0x4 + _CLOCK_REALTIME = 0x0 + + _UMTX_OP_WAIT_UINT = 0xb + _UMTX_OP_WAIT_UINT_PRIVATE = 0xf + _UMTX_OP_WAKE = 0x3 + _UMTX_OP_WAKE_PRIVATE = 0x10 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x2 + _FPE_INTOVF = 0x1 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_RECEIPT = 0x40 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 +) + +type rtprio struct { + _type uint16 + prio uint16 +} + +type thrparam struct { + start_func uintptr + arg unsafe.Pointer + stack_base uintptr + stack_size uintptr + tls_base unsafe.Pointer + tls_size uintptr + child_tid unsafe.Pointer // *int32 + parent_tid *int32 + flags int32 + rtp *rtprio + spare [3]uintptr +} + +type thread int32 // long + +type sigset struct { + __bits 
[4]uint32 +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 +} + +type siginfo struct { + si_signo int32 + si_errno int32 + si_code int32 + si_pid int32 + si_uid uint32 + si_status int32 + si_addr uintptr + si_value [4]byte + _reason [32]byte +} + +type mcontext struct { + __gregs [17]uint32 + __fpu [140]byte +} + +type ucontext struct { + uc_sigmask sigset + uc_mcontext mcontext + uc_link *ucontext + uc_stack stackt + uc_flags int32 + __spare__ [4]int32 +} + +type timespec struct { + tv_sec int64 + tv_nsec int32 + pad_cgo_0 [4]byte +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = int64(timediv(ns, 1e9, &ts.tv_nsec)) +} + +type timeval struct { + tv_sec int64 + tv_usec int32 + pad_cgo_0 [4]byte +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type umtx_time struct { + _timeout timespec + _flags uint32 + _clockid uint32 +} + +type keventt struct { + ident uint32 + filter int16 + flags uint16 + fflags uint32 + pad_cgo_0 [4]byte + data int64 + udata *byte + pad_cgo_1 [4]byte + ext [4]uint64 +} + +type bintime struct { + sec int64 + frac uint64 +} + +type vdsoTimehands struct { + algo uint32 + gen uint32 + scale uint64 + offset_count uint32 + counter_mask uint32 + offset bintime + boottime bintime + physical uint32 + res [7]uint32 +} + +type vdsoTimekeep struct { + ver uint32 + enabled uint32 + current uint32 + pad_cgo_0 [4]byte +} + +const ( + _VDSO_TK_VER_CURR = 0x1 + + vdsoTimehandsSize = 0x58 + vdsoTimekeepSize = 0x10 +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_arm64.go b/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..1d6723621ae72bb1a09e6842074f7459e794d4db --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_arm64.go @@ -0,0 +1,265 @@ +// Code generated by cgo, then manually converted into appropriate naming and code +// for the Go runtime. 
+// go tool cgo -godefs defs_freebsd.go + +package runtime + +import "unsafe" + +const ( + _NBBY = 0x8 + _CTL_MAXNAME = 0x18 + _CPU_LEVEL_WHICH = 0x3 + _CPU_WHICH_PID = 0x2 +) + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x100000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_SHARED = 0x1 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x5 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _CLOCK_MONOTONIC = 0x4 + _CLOCK_REALTIME = 0x0 + + _UMTX_OP_WAIT_UINT = 0xb + _UMTX_OP_WAIT_UINT_PRIVATE = 0xf + _UMTX_OP_WAKE = 0x3 + _UMTX_OP_WAKE_PRIVATE = 0x10 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x2 + _FPE_INTOVF = 0x1 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_RECEIPT = 0x40 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 +) + +type rtprio struct { + _type uint16 + prio uint16 +} + +type thrparam struct { + start_func uintptr + arg unsafe.Pointer + stack_base uintptr + stack_size uintptr + tls_base unsafe.Pointer + tls_size uintptr + child_tid unsafe.Pointer // *int64 + parent_tid *int64 + flags int32 + pad_cgo_0 [4]byte + rtp *rtprio + spare [3]uintptr +} + +type thread int64 // long + +type sigset struct { + __bits [4]uint32 +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte +} + +type siginfo struct { + si_signo int32 + si_errno int32 + si_code int32 + si_pid int32 + si_uid uint32 + si_status int32 + si_addr uint64 + si_value [8]byte + _reason [40]byte +} + +type gpregs struct { + gp_x [30]uint64 + gp_lr uint64 + gp_sp uint64 + gp_elr uint64 + gp_spsr uint32 + gp_pad int32 +} + +type fpregs struct { + fp_q [64]uint64 // actually [32]uint128 + fp_sr uint32 + fp_cr uint32 + fp_flags int32 + fp_pad int32 +} + +type mcontext struct { + mc_gpregs gpregs + mc_fpregs fpregs + mc_flags int32 + mc_pad int32 + mc_spare [8]uint64 +} + +type ucontext struct { + uc_sigmask sigset + uc_mcontext mcontext + uc_link *ucontext + uc_stack stackt + uc_flags int32 + __spare__ [4]int32 + pad_cgo_0 [12]byte +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type umtx_time struct { + _timeout timespec + _flags uint32 + _clockid uint32 +} + +type keventt struct { + ident 
uint64 + filter int16 + flags uint16 + fflags uint32 + data int64 + udata *byte + ext [4]uint64 +} + +type bintime struct { + sec int64 + frac uint64 +} + +type vdsoTimehands struct { + algo uint32 + gen uint32 + scale uint64 + offset_count uint32 + counter_mask uint32 + offset bintime + boottime bintime + physical uint32 + res [7]uint32 +} + +type vdsoTimekeep struct { + ver uint32 + enabled uint32 + current uint32 + pad_cgo_0 [4]byte +} + +const ( + _VDSO_TK_VER_CURR = 0x1 + + vdsoTimehandsSize = 0x58 + vdsoTimekeepSize = 0x10 +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_riscv64.go b/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_riscv64.go new file mode 100644 index 0000000000000000000000000000000000000000..b977bde5515e42e9ff18b555bfff1b05a938eddd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_freebsd_riscv64.go @@ -0,0 +1,266 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_freebsd.go + +package runtime + +import "unsafe" + +const ( + _NBBY = 0x8 + _CTL_MAXNAME = 0x18 + _CPU_LEVEL_WHICH = 0x3 + _CPU_WHICH_PID = 0x2 +) + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x100000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_SHARED = 0x1 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x5 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _CLOCK_MONOTONIC = 0x4 + _CLOCK_REALTIME = 0x0 + + _UMTX_OP_WAIT_UINT = 0xb + _UMTX_OP_WAIT_UINT_PRIVATE = 0xf + _UMTX_OP_WAKE = 0x3 + _UMTX_OP_WAKE_PRIVATE = 0x10 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x2 + _FPE_INTOVF = 0x1 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_RECEIPT = 0x40 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 +) + +type rtprio struct { + _type uint16 + prio uint16 +} + +type thrparam struct { + start_func uintptr + arg unsafe.Pointer + stack_base uintptr + stack_size uintptr + tls_base unsafe.Pointer + tls_size uintptr + child_tid unsafe.Pointer // *int64 + parent_tid *int64 + flags int32 + pad_cgo_0 [4]byte + rtp *rtprio + spare [3]uintptr +} + +type thread int64 // long + +type sigset struct { + __bits [4]uint32 +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte +} + +type siginfo struct { + si_signo int32 + si_errno int32 + si_code int32 + si_pid int32 + si_uid uint32 + si_status int32 + si_addr uint64 + si_value [8]byte + _reason [40]byte +} + +type gpregs struct { + gp_ra uint64 + gp_sp uint64 + gp_gp uint64 + gp_tp uint64 + 
gp_t [7]uint64 + gp_s [12]uint64 + gp_a [8]uint64 + gp_sepc uint64 + gp_sstatus uint64 +} + +type fpregs struct { + fp_x [64]uint64 // actually __uint64_t fp_x[32][2] + fp_fcsr uint64 + fp_flags int32 + pad int32 +} + +type mcontext struct { + mc_gpregs gpregs + mc_fpregs fpregs + mc_flags int32 + mc_pad int32 + mc_spare [8]uint64 +} + +type ucontext struct { + uc_sigmask sigset + uc_mcontext mcontext + uc_link *ucontext + uc_stack stackt + uc_flags int32 + __spare__ [4]int32 + pad_cgo_0 [12]byte +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type umtx_time struct { + _timeout timespec + _flags uint32 + _clockid uint32 +} + +type keventt struct { + ident uint64 + filter int16 + flags uint16 + fflags uint32 + data int64 + udata *byte + ext [4]uint64 +} + +type bintime struct { + sec int64 + frac uint64 +} + +type vdsoTimehands struct { + algo uint32 + gen uint32 + scale uint64 + offset_count uint32 + counter_mask uint32 + offset bintime + boottime bintime + physical uint32 + res [7]uint32 +} + +type vdsoTimekeep struct { + ver uint32 + enabled uint32 + current uint32 + pad_cgo_0 [4]byte +} + +const ( + _VDSO_TK_VER_CURR = 0x1 + + vdsoTimehandsSize = 0x58 + vdsoTimekeepSize = 0x10 +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_illumos_amd64.go b/platform/dbops/binaries/go/go/src/runtime/defs_illumos_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..9c5413bae38c78a523e8e558a444e71beabee554 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_illumos_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +const ( + _RCTL_LOCAL_DENY = 0x2 + + _RCTL_LOCAL_MAXIMAL = 0x80000000 + + _RCTL_FIRST = 0x0 + _RCTL_NEXT = 0x1 +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_linux.go b/platform/dbops/binaries/go/go/src/runtime/defs_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..296fcb4bfd6a7ad656365388b78e7c1aa8b5fe20 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_linux.go @@ -0,0 +1,127 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* +Input to cgo -cdefs + +GOARCH=amd64 go tool cgo -cdefs defs_linux.go defs1_linux.go >defs_linux_amd64.h +*/ + +package runtime + +/* +// Linux glibc and Linux kernel define different and conflicting +// definitions for struct sigaction, struct timespec, etc. +// We want the kernel ones, which are in the asm/* headers. +// But then we'd get conflicts when we include the system +// headers for things like ucontext_t, so that happens in +// a separate file, defs1.go. 
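+//
+// For reference: unlike -godefs (which emits Go source), -cdefs emits a C
+// header. A sketch of the shape of its output for one mapped type; the
+// names and layout here are illustrative, not the verbatim
+// defs_linux_amd64.h:
+//
+//	typedef struct Timespec Timespec;
+//	struct Timespec {
+//		int64	tv_sec;
+//		int64	tv_nsec;
+//	};
+//
+// Headers generated this way were later converted by hand into the
+// per-GOARCH defs_linux_*.go files in this directory.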
+ +#define _SYS_TYPES_H // avoid inclusion of sys/types.h +#include +#define size_t __kernel_size_t +#include +#include +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + EINTR = C.EINTR + EAGAIN = C.EAGAIN + ENOMEM = C.ENOMEM + + PROT_NONE = C.PROT_NONE + PROT_READ = C.PROT_READ + PROT_WRITE = C.PROT_WRITE + PROT_EXEC = C.PROT_EXEC + + MAP_ANON = C.MAP_ANONYMOUS + MAP_PRIVATE = C.MAP_PRIVATE + MAP_FIXED = C.MAP_FIXED + + MADV_DONTNEED = C.MADV_DONTNEED + MADV_FREE = C.MADV_FREE + MADV_HUGEPAGE = C.MADV_HUGEPAGE + MADV_NOHUGEPAGE = C.MADV_NOHUGEPAGE + + SA_RESTART = C.SA_RESTART + SA_ONSTACK = C.SA_ONSTACK + SA_SIGINFO = C.SA_SIGINFO + + SI_KERNEL = C.SI_KERNEL + SI_TIMER = C.SI_TIMER + + SIGHUP = C.SIGHUP + SIGINT = C.SIGINT + SIGQUIT = C.SIGQUIT + SIGILL = C.SIGILL + SIGTRAP = C.SIGTRAP + SIGABRT = C.SIGABRT + SIGBUS = C.SIGBUS + SIGFPE = C.SIGFPE + SIGKILL = C.SIGKILL + SIGUSR1 = C.SIGUSR1 + SIGSEGV = C.SIGSEGV + SIGUSR2 = C.SIGUSR2 + SIGPIPE = C.SIGPIPE + SIGALRM = C.SIGALRM + SIGSTKFLT = C.SIGSTKFLT + SIGCHLD = C.SIGCHLD + SIGCONT = C.SIGCONT + SIGSTOP = C.SIGSTOP + SIGTSTP = C.SIGTSTP + SIGTTIN = C.SIGTTIN + SIGTTOU = C.SIGTTOU + SIGURG = C.SIGURG + SIGXCPU = C.SIGXCPU + SIGXFSZ = C.SIGXFSZ + SIGVTALRM = C.SIGVTALRM + SIGPROF = C.SIGPROF + SIGWINCH = C.SIGWINCH + SIGIO = C.SIGIO + SIGPWR = C.SIGPWR + SIGSYS = C.SIGSYS + + SIGRTMIN = C.SIGRTMIN + + FPE_INTDIV = C.FPE_INTDIV + FPE_INTOVF = C.FPE_INTOVF + FPE_FLTDIV = C.FPE_FLTDIV + FPE_FLTOVF = C.FPE_FLTOVF + FPE_FLTUND = C.FPE_FLTUND + FPE_FLTRES = C.FPE_FLTRES + FPE_FLTINV = C.FPE_FLTINV + FPE_FLTSUB = C.FPE_FLTSUB + + BUS_ADRALN = C.BUS_ADRALN + BUS_ADRERR = C.BUS_ADRERR + BUS_OBJERR = C.BUS_OBJERR + + SEGV_MAPERR = C.SEGV_MAPERR + SEGV_ACCERR = C.SEGV_ACCERR + + ITIMER_REAL = C.ITIMER_REAL + ITIMER_VIRTUAL = C.ITIMER_VIRTUAL + ITIMER_PROF = C.ITIMER_PROF + + CLOCK_THREAD_CPUTIME_ID = C.CLOCK_THREAD_CPUTIME_ID + + SIGEV_THREAD_ID = C.SIGEV_THREAD_ID +) + +type Sigset C.sigset_t +type Timespec C.struct_timespec +type Timeval C.struct_timeval +type Sigaction C.struct_sigaction +type Siginfo C.siginfo_t +type Itimerspec C.struct_itimerspec +type Itimerval C.struct_itimerval +type Sigevent C.struct_sigevent diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_linux_386.go b/platform/dbops/binaries/go/go/src/runtime/defs_linux_386.go new file mode 100644 index 0000000000000000000000000000000000000000..5fef55610f39d2965933fd8d677fce56af357720 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_linux_386.go @@ -0,0 +1,253 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs2_linux.go + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EAGAIN = 0xb + _ENOMEM = 0xc + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x20 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 + _MADV_HUGEPAGE = 0xe + _MADV_NOHUGEPAGE = 0xf + _MADV_COLLAPSE = 0x19 + + _SA_RESTART = 0x10000000 + _SA_ONSTACK = 0x8000000 + _SA_RESTORER = 0x4000000 + _SA_SIGINFO = 0x4 + + _SI_KERNEL = 0x80 + _SI_TIMER = -0x2 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGBUS = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGUSR1 = 0xa + _SIGSEGV = 0xb + _SIGUSR2 = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGSTKFLT = 0x10 + _SIGCHLD = 0x11 + _SIGCONT = 0x12 + _SIGSTOP = 0x13 + _SIGTSTP = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGURG = 0x17 + _SIGXCPU = 0x18 + 
_SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGIO = 0x1d + _SIGPWR = 0x1e + _SIGSYS = 0x1f + + _SIGRTMIN = 0x20 + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _CLOCK_THREAD_CPUTIME_ID = 0x3 + + _SIGEV_THREAD_ID = 0x4 + + _O_RDONLY = 0x0 + _O_WRONLY = 0x1 + _O_CREAT = 0x40 + _O_TRUNC = 0x200 + _O_NONBLOCK = 0x800 + _O_CLOEXEC = 0x80000 + + _AF_UNIX = 0x1 + _SOCK_DGRAM = 0x2 +) + +type fpreg struct { + significand [4]uint16 + exponent uint16 +} + +type fpxreg struct { + significand [4]uint16 + exponent uint16 + padding [3]uint16 +} + +type xmmreg struct { + element [4]uint32 +} + +type fpstate struct { + cw uint32 + sw uint32 + tag uint32 + ipoff uint32 + cssel uint32 + dataoff uint32 + datasel uint32 + _st [8]fpreg + status uint16 + magic uint16 + _fxsr_env [6]uint32 + mxcsr uint32 + reserved uint32 + _fxsr_st [8]fpxreg + _xmm [8]xmmreg + padding1 [44]uint32 + anon0 [48]byte +} + +type timespec struct { + tv_sec int32 + tv_nsec int32 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = timediv(ns, 1e9, &ts.tv_nsec) +} + +type timeval struct { + tv_sec int32 + tv_usec int32 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type sigactiont struct { + sa_handler uintptr + sa_flags uint32 + sa_restorer uintptr + sa_mask uint64 +} + +type siginfoFields struct { + si_signo int32 + si_errno int32 + si_code int32 + // below here is a union; si_addr is the only field we use + si_addr uint32 +} + +type siginfo struct { + siginfoFields + + // Pad struct to the max size in the kernel. + _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte +} + +type stackt struct { + ss_sp *byte + ss_flags int32 + ss_size uintptr +} + +type sigcontext struct { + gs uint16 + __gsh uint16 + fs uint16 + __fsh uint16 + es uint16 + __esh uint16 + ds uint16 + __dsh uint16 + edi uint32 + esi uint32 + ebp uint32 + esp uint32 + ebx uint32 + edx uint32 + ecx uint32 + eax uint32 + trapno uint32 + err uint32 + eip uint32 + cs uint16 + __csh uint16 + eflags uint32 + esp_at_signal uint32 + ss uint16 + __ssh uint16 + fpstate *fpstate + oldmask uint32 + cr2 uint32 +} + +type ucontext struct { + uc_flags uint32 + uc_link *ucontext + uc_stack stackt + uc_mcontext sigcontext + uc_sigmask uint32 +} + +type itimerspec struct { + it_interval timespec + it_value timespec +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type sigeventFields struct { + value uintptr + signo int32 + notify int32 + // below here is a union; sigev_notify_thread_id is the only field we use + sigev_notify_thread_id int32 +} + +type sigevent struct { + sigeventFields + + // Pad struct to the max size in the kernel. 
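+ // In the kernel, sigevent ends in a union (thread ID, notify-thread
+ // data, padding); sigeventFields above names only the members the
+ // runtime touches. The anonymous array below grows the struct to the
+ // kernel's full size (SIGEV_MAX_SIZE, 64 bytes), so a kernel write to
+ // any union member stays within the Go allocation.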
+ _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte +} + +type sockaddr_un struct { + family uint16 + path [108]byte +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_linux_amd64.go b/platform/dbops/binaries/go/go/src/runtime/defs_linux_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..dce7799b6adc9cb2cdc1e7001775109a72943eb0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_linux_amd64.go @@ -0,0 +1,289 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_linux.go defs1_linux.go + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EAGAIN = 0xb + _ENOMEM = 0xc + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x20 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 + _MADV_HUGEPAGE = 0xe + _MADV_NOHUGEPAGE = 0xf + _MADV_COLLAPSE = 0x19 + + _SA_RESTART = 0x10000000 + _SA_ONSTACK = 0x8000000 + _SA_RESTORER = 0x4000000 + _SA_SIGINFO = 0x4 + + _SI_KERNEL = 0x80 + _SI_TIMER = -0x2 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGBUS = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGUSR1 = 0xa + _SIGSEGV = 0xb + _SIGUSR2 = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGSTKFLT = 0x10 + _SIGCHLD = 0x11 + _SIGCONT = 0x12 + _SIGSTOP = 0x13 + _SIGTSTP = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGURG = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGIO = 0x1d + _SIGPWR = 0x1e + _SIGSYS = 0x1f + + _SIGRTMIN = 0x20 + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _CLOCK_THREAD_CPUTIME_ID = 0x3 + + _SIGEV_THREAD_ID = 0x4 + + _AF_UNIX = 0x1 + _SOCK_DGRAM = 0x2 +) + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type sigactiont struct { + sa_handler uintptr + sa_flags uint64 + sa_restorer uintptr + sa_mask uint64 +} + +type siginfoFields struct { + si_signo int32 + si_errno int32 + si_code int32 + // below here is a union; si_addr is the only field we use + si_addr uint64 +} + +type siginfo struct { + siginfoFields + + // Pad struct to the max size in the kernel. + _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte +} + +type itimerspec struct { + it_interval timespec + it_value timespec +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type sigeventFields struct { + value uintptr + signo int32 + notify int32 + // below here is a union; sigev_notify_thread_id is the only field we use + sigev_notify_thread_id int32 +} + +type sigevent struct { + sigeventFields + + // Pad struct to the max size in the kernel. 
+ _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte +} + +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_linux.go defs1_linux.go + +const ( + _O_RDONLY = 0x0 + _O_WRONLY = 0x1 + _O_CREAT = 0x40 + _O_TRUNC = 0x200 + _O_NONBLOCK = 0x800 + _O_CLOEXEC = 0x80000 +) + +type usigset struct { + __val [16]uint64 +} + +type fpxreg struct { + significand [4]uint16 + exponent uint16 + padding [3]uint16 +} + +type xmmreg struct { + element [4]uint32 +} + +type fpstate struct { + cwd uint16 + swd uint16 + ftw uint16 + fop uint16 + rip uint64 + rdp uint64 + mxcsr uint32 + mxcr_mask uint32 + _st [8]fpxreg + _xmm [16]xmmreg + padding [24]uint32 +} + +type fpxreg1 struct { + significand [4]uint16 + exponent uint16 + padding [3]uint16 +} + +type xmmreg1 struct { + element [4]uint32 +} + +type fpstate1 struct { + cwd uint16 + swd uint16 + ftw uint16 + fop uint16 + rip uint64 + rdp uint64 + mxcsr uint32 + mxcr_mask uint32 + _st [8]fpxreg1 + _xmm [16]xmmreg1 + padding [24]uint32 +} + +type fpreg1 struct { + significand [4]uint16 + exponent uint16 +} + +type stackt struct { + ss_sp *byte + ss_flags int32 + pad_cgo_0 [4]byte + ss_size uintptr +} + +type mcontext struct { + gregs [23]uint64 + fpregs *fpstate + __reserved1 [8]uint64 +} + +type ucontext struct { + uc_flags uint64 + uc_link *ucontext + uc_stack stackt + uc_mcontext mcontext + uc_sigmask usigset + __fpregs_mem fpstate +} + +type sigcontext struct { + r8 uint64 + r9 uint64 + r10 uint64 + r11 uint64 + r12 uint64 + r13 uint64 + r14 uint64 + r15 uint64 + rdi uint64 + rsi uint64 + rbp uint64 + rbx uint64 + rdx uint64 + rax uint64 + rcx uint64 + rsp uint64 + rip uint64 + eflags uint64 + cs uint16 + gs uint16 + fs uint16 + __pad0 uint16 + err uint64 + trapno uint64 + oldmask uint64 + cr2 uint64 + fpstate *fpstate1 + __reserved1 [8]uint64 +} + +type sockaddr_un struct { + family uint16 + path [108]byte +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_linux_arm.go b/platform/dbops/binaries/go/go/src/runtime/defs_linux_arm.go new file mode 100644 index 0000000000000000000000000000000000000000..71cf8c6d50e7609bb5fb492197afe3821e88c294 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_linux_arm.go @@ -0,0 +1,207 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime + +import "unsafe" + +// Constants +const ( + _EINTR = 0x4 + _ENOMEM = 0xc + _EAGAIN = 0xb + + _PROT_NONE = 0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x20 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 + _MADV_HUGEPAGE = 0xe + _MADV_NOHUGEPAGE = 0xf + _MADV_COLLAPSE = 0x19 + + _SA_RESTART = 0x10000000 + _SA_ONSTACK = 0x8000000 + _SA_RESTORER = 0 // unused on ARM + _SA_SIGINFO = 0x4 + _SI_KERNEL = 0x80 + _SI_TIMER = -0x2 + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGBUS = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGUSR1 = 0xa + _SIGSEGV = 0xb + _SIGUSR2 = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGSTKFLT = 0x10 + _SIGCHLD = 0x11 + _SIGCONT = 0x12 + _SIGSTOP = 0x13 + _SIGTSTP = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGURG = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGIO = 0x1d + _SIGPWR = 0x1e + _SIGSYS = 0x1f + _SIGRTMIN = 0x20 + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + _ITIMER_REAL = 0 + _ITIMER_PROF = 0x2 + _ITIMER_VIRTUAL = 0x1 + _O_RDONLY = 0 + _O_WRONLY = 0x1 + _O_CREAT = 0x40 + _O_TRUNC = 0x200 + _O_NONBLOCK = 0x800 + _O_CLOEXEC = 0x80000 + + _CLOCK_THREAD_CPUTIME_ID = 0x3 + + _SIGEV_THREAD_ID = 0x4 + + _AF_UNIX = 0x1 + _SOCK_DGRAM = 0x2 +) + +type timespec struct { + tv_sec int32 + tv_nsec int32 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = timediv(ns, 1e9, &ts.tv_nsec) +} + +type stackt struct { + ss_sp *byte + ss_flags int32 + ss_size uintptr +} + +type sigcontext struct { + trap_no uint32 + error_code uint32 + oldmask uint32 + r0 uint32 + r1 uint32 + r2 uint32 + r3 uint32 + r4 uint32 + r5 uint32 + r6 uint32 + r7 uint32 + r8 uint32 + r9 uint32 + r10 uint32 + fp uint32 + ip uint32 + sp uint32 + lr uint32 + pc uint32 + cpsr uint32 + fault_address uint32 +} + +type ucontext struct { + uc_flags uint32 + uc_link *ucontext + uc_stack stackt + uc_mcontext sigcontext + uc_sigmask uint32 + __unused [31]int32 + uc_regspace [128]uint32 +} + +type timeval struct { + tv_sec int32 + tv_usec int32 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type itimerspec struct { + it_interval timespec + it_value timespec +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type sigeventFields struct { + value uintptr + signo int32 + notify int32 + // below here is a union; sigev_notify_thread_id is the only field we use + sigev_notify_thread_id int32 +} + +type sigevent struct { + sigeventFields + + // Pad struct to the max size in the kernel. + _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte +} + +type siginfoFields struct { + si_signo int32 + si_errno int32 + si_code int32 + // below here is a union; si_addr is the only field we use + si_addr uint32 +} + +type siginfo struct { + siginfoFields + + // Pad struct to the max size in the kernel. 
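+ // Same idiom as sigevent above: the kernel siginfo ends in a large
+ // union, of which only si_addr is declared, and _si_max_size (the
+ // kernel's SI_MAX_SIZE, 128 bytes) fixes the total size.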
+ _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte +} + +type sigactiont struct { + sa_handler uintptr + sa_flags uint32 + sa_restorer uintptr + sa_mask uint64 +} + +type sockaddr_un struct { + family uint16 + path [108]byte +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_linux_arm64.go b/platform/dbops/binaries/go/go/src/runtime/defs_linux_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..606cd70494e37ad9b23f81b2e8e3b286267f5269 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_linux_arm64.go @@ -0,0 +1,211 @@ +// Created by cgo -cdefs and converted (by hand) to Go +// ../cmd/cgo/cgo -cdefs defs_linux.go defs1_linux.go defs2_linux.go + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EAGAIN = 0xb + _ENOMEM = 0xc + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x20 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 + _MADV_HUGEPAGE = 0xe + _MADV_NOHUGEPAGE = 0xf + _MADV_COLLAPSE = 0x19 + + _SA_RESTART = 0x10000000 + _SA_ONSTACK = 0x8000000 + _SA_RESTORER = 0x0 // Only used on intel + _SA_SIGINFO = 0x4 + + _SI_KERNEL = 0x80 + _SI_TIMER = -0x2 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGBUS = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGUSR1 = 0xa + _SIGSEGV = 0xb + _SIGUSR2 = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGSTKFLT = 0x10 + _SIGCHLD = 0x11 + _SIGCONT = 0x12 + _SIGSTOP = 0x13 + _SIGTSTP = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGURG = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGIO = 0x1d + _SIGPWR = 0x1e + _SIGSYS = 0x1f + + _SIGRTMIN = 0x20 + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _CLOCK_THREAD_CPUTIME_ID = 0x3 + + _SIGEV_THREAD_ID = 0x4 + + _AF_UNIX = 0x1 + _SOCK_DGRAM = 0x2 +) + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type sigactiont struct { + sa_handler uintptr + sa_flags uint64 + sa_restorer uintptr + sa_mask uint64 +} + +type siginfoFields struct { + si_signo int32 + si_errno int32 + si_code int32 + // below here is a union; si_addr is the only field we use + si_addr uint64 +} + +type siginfo struct { + siginfoFields + + // Pad struct to the max size in the kernel. + _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte +} + +type itimerspec struct { + it_interval timespec + it_value timespec +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type sigeventFields struct { + value uintptr + signo int32 + notify int32 + // below here is a union; sigev_notify_thread_id is the only field we use + sigev_notify_thread_id int32 +} + +type sigevent struct { + sigeventFields + + // Pad struct to the max size in the kernel. 
+ _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte +} + +// Created by cgo -cdefs and then converted to Go by hand +// ../cmd/cgo/cgo -cdefs defs_linux.go defs1_linux.go defs2_linux.go + +const ( + _O_RDONLY = 0x0 + _O_WRONLY = 0x1 + _O_CREAT = 0x40 + _O_TRUNC = 0x200 + _O_NONBLOCK = 0x800 + _O_CLOEXEC = 0x80000 +) + +type usigset struct { + __val [16]uint64 +} + +type stackt struct { + ss_sp *byte + ss_flags int32 + pad_cgo_0 [4]byte + ss_size uintptr +} + +type sigcontext struct { + fault_address uint64 + /* AArch64 registers */ + regs [31]uint64 + sp uint64 + pc uint64 + pstate uint64 + _pad [8]byte // __attribute__((__aligned__(16))) + __reserved [4096]byte +} + +type sockaddr_un struct { + family uint16 + path [108]byte +} + +type ucontext struct { + uc_flags uint64 + uc_link *ucontext + uc_stack stackt + uc_sigmask uint64 + _pad [(1024 - 64) / 8]byte + _pad2 [8]byte // sigcontext must be aligned to 16-byte + uc_mcontext sigcontext +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_linux_loong64.go b/platform/dbops/binaries/go/go/src/runtime/defs_linux_loong64.go new file mode 100644 index 0000000000000000000000000000000000000000..692d8c78e9a4d2d3f02d7748b56cb30baac04f8a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_linux_loong64.go @@ -0,0 +1,198 @@ +// Generated using cgo, then manually converted into appropriate naming and code +// for the Go runtime. +// go tool cgo -godefs defs_linux.go defs1_linux.go defs2_linux.go + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EAGAIN = 0xb + _ENOMEM = 0xc + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x20 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 + _MADV_HUGEPAGE = 0xe + _MADV_NOHUGEPAGE = 0xf + _MADV_COLLAPSE = 0x19 + + _SA_RESTART = 0x10000000 + _SA_ONSTACK = 0x8000000 + _SA_SIGINFO = 0x4 + _SA_RESTORER = 0x0 + + _SI_KERNEL = 0x80 + _SI_TIMER = -0x2 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGBUS = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGUSR1 = 0xa + _SIGSEGV = 0xb + _SIGUSR2 = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGSTKFLT = 0x10 + _SIGCHLD = 0x11 + _SIGCONT = 0x12 + _SIGSTOP = 0x13 + _SIGTSTP = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGURG = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGIO = 0x1d + _SIGPWR = 0x1e + _SIGSYS = 0x1f + + _SIGRTMIN = 0x20 + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _CLOCK_THREAD_CPUTIME_ID = 0x3 + + _SIGEV_THREAD_ID = 0x4 +) + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type itimerspec struct { + it_interval timespec + it_value timespec +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type sigeventFields struct { + value uintptr + signo int32 + notify int32 + // below here is a union; sigev_notify_thread_id is the only field we use + sigev_notify_thread_id int32 +} 
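+
+// An illustrative sketch of how these fields are used, modeled on the
+// runtime's per-thread profiling timer setup in os_linux.go (not code
+// from this file):
+//
+//	var sevp sigevent
+//	sevp.notify = _SIGEV_THREAD_ID
+//	sevp.signo = _SIGPROF
+//	sevp.sigev_notify_thread_id = int32(mp.procid)
+//	timer_create(_CLOCK_THREAD_CPUTIME_ID, &sevp, &timerid)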
+ +type sigevent struct { + sigeventFields + // Pad struct to the max size in the kernel. + _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte +} + +const ( + _O_RDONLY = 0x0 + _O_WRONLY = 0x1 + _O_CREAT = 0x40 + _O_TRUNC = 0x200 + _O_NONBLOCK = 0x800 + _O_CLOEXEC = 0x80000 +) + +type sigactiont struct { + sa_handler uintptr + sa_flags uint64 + sa_mask uint64 + // Linux on loong64 does not have the sa_restorer field, but the setsig + // function references it (for x86). Not much harm to include it at the end. + sa_restorer uintptr +} + +type siginfoFields struct { + si_signo int32 + si_errno int32 + si_code int32 + __pad0 [1]int32 + // below here is a union; si_addr is the only field we use + si_addr uint64 +} + +type siginfo struct { + siginfoFields + // Pad struct to the max size in the kernel. + _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte +} + +type usigset struct { + val [16]uint64 +} + +type stackt struct { + ss_sp *byte + ss_flags int32 + pad_cgo_0 [4]byte + ss_size uintptr +} + +type sigcontext struct { + sc_pc uint64 + sc_regs [32]uint64 + sc_flags uint32 + sc_extcontext [0]uint64 +} + +type ucontext struct { + uc_flags uint64 + uc_link *ucontext + uc_stack stackt + uc_sigmask usigset + uc_x_unused [0]uint8 + uc_pad_cgo_0 [8]byte + uc_mcontext sigcontext +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_linux_mips64x.go b/platform/dbops/binaries/go/go/src/runtime/defs_linux_mips64x.go new file mode 100644 index 0000000000000000000000000000000000000000..8a0af41234f22629343f24ebb0958f58d7078d33 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_linux_mips64x.go @@ -0,0 +1,211 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build (mips64 || mips64le) && linux + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EAGAIN = 0xb + _ENOMEM = 0xc + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x800 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 + _MADV_HUGEPAGE = 0xe + _MADV_NOHUGEPAGE = 0xf + _MADV_COLLAPSE = 0x19 + + _SA_RESTART = 0x10000000 + _SA_ONSTACK = 0x8000000 + _SA_SIGINFO = 0x8 + + _SI_KERNEL = 0x80 + _SI_TIMER = -0x2 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGUSR1 = 0x10 + _SIGUSR2 = 0x11 + _SIGCHLD = 0x12 + _SIGPWR = 0x13 + _SIGWINCH = 0x14 + _SIGURG = 0x15 + _SIGIO = 0x16 + _SIGSTOP = 0x17 + _SIGTSTP = 0x18 + _SIGCONT = 0x19 + _SIGTTIN = 0x1a + _SIGTTOU = 0x1b + _SIGVTALRM = 0x1c + _SIGPROF = 0x1d + _SIGXCPU = 0x1e + _SIGXFSZ = 0x1f + + _SIGRTMIN = 0x20 + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _CLOCK_THREAD_CPUTIME_ID = 0x3 + + _SIGEV_THREAD_ID = 0x4 +) + +//struct Sigset { +// uint64 sig[1]; +//}; +//typedef uint64 Sigset; + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type sigactiont struct { + sa_flags uint32 + sa_handler uintptr + sa_mask [2]uint64 + // linux header does not have sa_restorer field, + // but it is used in setsig(). it is no harm to put it here + sa_restorer uintptr +} + +type siginfoFields struct { + si_signo int32 + si_code int32 + si_errno int32 + __pad0 [1]int32 + // below here is a union; si_addr is the only field we use + si_addr uint64 +} + +type siginfo struct { + siginfoFields + + // Pad struct to the max size in the kernel. + _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte +} + +type itimerspec struct { + it_interval timespec + it_value timespec +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type sigeventFields struct { + value uintptr + signo int32 + notify int32 + // below here is a union; sigev_notify_thread_id is the only field we use + sigev_notify_thread_id int32 +} + +type sigevent struct { + sigeventFields + + // Pad struct to the max size in the kernel. 
+ _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte +} + +const ( + _O_RDONLY = 0x0 + _O_WRONLY = 0x1 + _O_CREAT = 0x100 + _O_TRUNC = 0x200 + _O_NONBLOCK = 0x80 + _O_CLOEXEC = 0x80000 + _SA_RESTORER = 0 +) + +type stackt struct { + ss_sp *byte + ss_size uintptr + ss_flags int32 +} + +type sigcontext struct { + sc_regs [32]uint64 + sc_fpregs [32]uint64 + sc_mdhi uint64 + sc_hi1 uint64 + sc_hi2 uint64 + sc_hi3 uint64 + sc_mdlo uint64 + sc_lo1 uint64 + sc_lo2 uint64 + sc_lo3 uint64 + sc_pc uint64 + sc_fpc_csr uint32 + sc_used_math uint32 + sc_dsp uint32 + sc_reserved uint32 +} + +type ucontext struct { + uc_flags uint64 + uc_link *ucontext + uc_stack stackt + uc_mcontext sigcontext + uc_sigmask uint64 +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_linux_mipsx.go b/platform/dbops/binaries/go/go/src/runtime/defs_linux_mipsx.go new file mode 100644 index 0000000000000000000000000000000000000000..8322beab2b1c20e536b425182fe8630c059078bb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_linux_mipsx.go @@ -0,0 +1,209 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (mips || mipsle) && linux + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EAGAIN = 0xb + _ENOMEM = 0xc + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x800 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 + _MADV_HUGEPAGE = 0xe + _MADV_NOHUGEPAGE = 0xf + _MADV_COLLAPSE = 0x19 + + _SA_RESTART = 0x10000000 + _SA_ONSTACK = 0x8000000 + _SA_SIGINFO = 0x8 + + _SI_KERNEL = 0x80 + _SI_TIMER = -0x2 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGUSR1 = 0x10 + _SIGUSR2 = 0x11 + _SIGCHLD = 0x12 + _SIGPWR = 0x13 + _SIGWINCH = 0x14 + _SIGURG = 0x15 + _SIGIO = 0x16 + _SIGSTOP = 0x17 + _SIGTSTP = 0x18 + _SIGCONT = 0x19 + _SIGTTIN = 0x1a + _SIGTTOU = 0x1b + _SIGVTALRM = 0x1c + _SIGPROF = 0x1d + _SIGXCPU = 0x1e + _SIGXFSZ = 0x1f + + _SIGRTMIN = 0x20 + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _CLOCK_THREAD_CPUTIME_ID = 0x3 + + _SIGEV_THREAD_ID = 0x4 +) + +type timespec struct { + tv_sec int32 + tv_nsec int32 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = timediv(ns, 1e9, &ts.tv_nsec) +} + +type timeval struct { + tv_sec int32 + tv_usec int32 +} + +//go:nosplit +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type sigactiont struct { + sa_flags uint32 + sa_handler uintptr + sa_mask [4]uint32 + // linux header does not have sa_restorer field, + // but it is used in setsig(). it is no harm to put it here + sa_restorer uintptr +} + +type siginfoFields struct { + si_signo int32 + si_code int32 + si_errno int32 + // below here is a union; si_addr is the only field we use + si_addr uint32 +} + +type siginfo struct { + siginfoFields + + // Pad struct to the max size in the kernel. 
+ _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte +} + +type itimerspec struct { + it_interval timespec + it_value timespec +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type sigeventFields struct { + value uintptr + signo int32 + notify int32 + // below here is a union; sigev_notify_thread_id is the only field we use + sigev_notify_thread_id int32 +} + +type sigevent struct { + sigeventFields + + // Pad struct to the max size in the kernel. + _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte +} + +const ( + _O_RDONLY = 0x0 + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x80 + _O_CREAT = 0x100 + _O_TRUNC = 0x200 + _O_CLOEXEC = 0x80000 + _SA_RESTORER = 0 +) + +type stackt struct { + ss_sp *byte + ss_size uintptr + ss_flags int32 +} + +type sigcontext struct { + sc_regmask uint32 + sc_status uint32 + sc_pc uint64 + sc_regs [32]uint64 + sc_fpregs [32]uint64 + sc_acx uint32 + sc_fpc_csr uint32 + sc_fpc_eir uint32 + sc_used_math uint32 + sc_dsp uint32 + sc_mdhi uint64 + sc_mdlo uint64 + sc_hi1 uint32 + sc_lo1 uint32 + sc_hi2 uint32 + sc_lo2 uint32 + sc_hi3 uint32 + sc_lo3 uint32 +} + +type ucontext struct { + uc_flags uint32 + uc_link *ucontext + uc_stack stackt + Pad_cgo_0 [4]byte + uc_mcontext sigcontext + uc_sigmask [4]uint32 +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_linux_ppc64.go b/platform/dbops/binaries/go/go/src/runtime/defs_linux_ppc64.go new file mode 100644 index 0000000000000000000000000000000000000000..f87924affe65ef190e95da2cf7bf768cb2c58c1e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_linux_ppc64.go @@ -0,0 +1,225 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_linux.go defs3_linux.go + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EAGAIN = 0xb + _ENOMEM = 0xc + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x20 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 + _MADV_HUGEPAGE = 0xe + _MADV_NOHUGEPAGE = 0xf + _MADV_COLLAPSE = 0x19 + + _SA_RESTART = 0x10000000 + _SA_ONSTACK = 0x8000000 + _SA_SIGINFO = 0x4 + + _SI_KERNEL = 0x80 + _SI_TIMER = -0x2 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGBUS = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGUSR1 = 0xa + _SIGSEGV = 0xb + _SIGUSR2 = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGSTKFLT = 0x10 + _SIGCHLD = 0x11 + _SIGCONT = 0x12 + _SIGSTOP = 0x13 + _SIGTSTP = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGURG = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGIO = 0x1d + _SIGPWR = 0x1e + _SIGSYS = 0x1f + + _SIGRTMIN = 0x20 + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _CLOCK_THREAD_CPUTIME_ID = 0x3 + + _SIGEV_THREAD_ID = 0x4 +) + +//struct Sigset { +// uint64 sig[1]; +//}; +//typedef uint64 Sigset; + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type sigactiont struct { + sa_handler uintptr + 
sa_flags uint64 + sa_restorer uintptr + sa_mask uint64 +} + +type siginfoFields struct { + si_signo int32 + si_errno int32 + si_code int32 + // below here is a union; si_addr is the only field we use + si_addr uint64 +} + +type siginfo struct { + siginfoFields + + // Pad struct to the max size in the kernel. + _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte +} + +type itimerspec struct { + it_interval timespec + it_value timespec +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type sigeventFields struct { + value uintptr + signo int32 + notify int32 + // below here is a union; sigev_notify_thread_id is the only field we use + sigev_notify_thread_id int32 +} + +type sigevent struct { + sigeventFields + + // Pad struct to the max size in the kernel. + _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte +} + +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_linux.go defs3_linux.go + +const ( + _O_RDONLY = 0x0 + _O_WRONLY = 0x1 + _O_CREAT = 0x40 + _O_TRUNC = 0x200 + _O_NONBLOCK = 0x800 + _O_CLOEXEC = 0x80000 + _SA_RESTORER = 0 +) + +type ptregs struct { + gpr [32]uint64 + nip uint64 + msr uint64 + orig_gpr3 uint64 + ctr uint64 + link uint64 + xer uint64 + ccr uint64 + softe uint64 + trap uint64 + dar uint64 + dsisr uint64 + result uint64 +} + +type vreg struct { + u [4]uint32 +} + +type stackt struct { + ss_sp *byte + ss_flags int32 + pad_cgo_0 [4]byte + ss_size uintptr +} + +type sigcontext struct { + _unused [4]uint64 + signal int32 + _pad0 int32 + handler uint64 + oldmask uint64 + regs *ptregs + gp_regs [48]uint64 + fp_regs [33]float64 + v_regs *vreg + vmx_reserve [101]int64 +} + +type ucontext struct { + uc_flags uint64 + uc_link *ucontext + uc_stack stackt + uc_sigmask uint64 + __unused [15]uint64 + uc_mcontext sigcontext +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_linux_ppc64le.go b/platform/dbops/binaries/go/go/src/runtime/defs_linux_ppc64le.go new file mode 100644 index 0000000000000000000000000000000000000000..f87924affe65ef190e95da2cf7bf768cb2c58c1e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_linux_ppc64le.go @@ -0,0 +1,225 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_linux.go defs3_linux.go + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EAGAIN = 0xb + _ENOMEM = 0xc + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x20 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 + _MADV_HUGEPAGE = 0xe + _MADV_NOHUGEPAGE = 0xf + _MADV_COLLAPSE = 0x19 + + _SA_RESTART = 0x10000000 + _SA_ONSTACK = 0x8000000 + _SA_SIGINFO = 0x4 + + _SI_KERNEL = 0x80 + _SI_TIMER = -0x2 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGBUS = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGUSR1 = 0xa + _SIGSEGV = 0xb + _SIGUSR2 = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGSTKFLT = 0x10 + _SIGCHLD = 0x11 + _SIGCONT = 0x12 + _SIGSTOP = 0x13 + _SIGTSTP = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGURG = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGIO = 0x1d + _SIGPWR = 0x1e + _SIGSYS = 0x1f + + _SIGRTMIN = 0x20 + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + 
_ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _CLOCK_THREAD_CPUTIME_ID = 0x3 + + _SIGEV_THREAD_ID = 0x4 +) + +//struct Sigset { +// uint64 sig[1]; +//}; +//typedef uint64 Sigset; + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type sigactiont struct { + sa_handler uintptr + sa_flags uint64 + sa_restorer uintptr + sa_mask uint64 +} + +type siginfoFields struct { + si_signo int32 + si_errno int32 + si_code int32 + // below here is a union; si_addr is the only field we use + si_addr uint64 +} + +type siginfo struct { + siginfoFields + + // Pad struct to the max size in the kernel. + _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte +} + +type itimerspec struct { + it_interval timespec + it_value timespec +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type sigeventFields struct { + value uintptr + signo int32 + notify int32 + // below here is a union; sigev_notify_thread_id is the only field we use + sigev_notify_thread_id int32 +} + +type sigevent struct { + sigeventFields + + // Pad struct to the max size in the kernel. + _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte +} + +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_linux.go defs3_linux.go + +const ( + _O_RDONLY = 0x0 + _O_WRONLY = 0x1 + _O_CREAT = 0x40 + _O_TRUNC = 0x200 + _O_NONBLOCK = 0x800 + _O_CLOEXEC = 0x80000 + _SA_RESTORER = 0 +) + +type ptregs struct { + gpr [32]uint64 + nip uint64 + msr uint64 + orig_gpr3 uint64 + ctr uint64 + link uint64 + xer uint64 + ccr uint64 + softe uint64 + trap uint64 + dar uint64 + dsisr uint64 + result uint64 +} + +type vreg struct { + u [4]uint32 +} + +type stackt struct { + ss_sp *byte + ss_flags int32 + pad_cgo_0 [4]byte + ss_size uintptr +} + +type sigcontext struct { + _unused [4]uint64 + signal int32 + _pad0 int32 + handler uint64 + oldmask uint64 + regs *ptregs + gp_regs [48]uint64 + fp_regs [33]float64 + v_regs *vreg + vmx_reserve [101]int64 +} + +type ucontext struct { + uc_flags uint64 + uc_link *ucontext + uc_stack stackt + uc_sigmask uint64 + __unused [15]uint64 + uc_mcontext sigcontext +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_linux_riscv64.go b/platform/dbops/binaries/go/go/src/runtime/defs_linux_riscv64.go new file mode 100644 index 0000000000000000000000000000000000000000..29b1ef2a50a94c9f6dd6be96b4e08ed03c4d4dad --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_linux_riscv64.go @@ -0,0 +1,235 @@ +// Generated using cgo, then manually converted into appropriate naming and code +// for the Go runtime. 
+// go tool cgo -godefs defs_linux.go defs1_linux.go defs2_linux.go + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EAGAIN = 0xb + _ENOMEM = 0xc + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x20 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 + _MADV_HUGEPAGE = 0xe + _MADV_NOHUGEPAGE = 0xf + _MADV_COLLAPSE = 0x19 + + _SA_RESTART = 0x10000000 + _SA_ONSTACK = 0x8000000 + _SA_RESTORER = 0x0 + _SA_SIGINFO = 0x4 + + _SI_KERNEL = 0x80 + _SI_TIMER = -0x2 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGBUS = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGUSR1 = 0xa + _SIGSEGV = 0xb + _SIGUSR2 = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGSTKFLT = 0x10 + _SIGCHLD = 0x11 + _SIGCONT = 0x12 + _SIGSTOP = 0x13 + _SIGTSTP = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGURG = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGIO = 0x1d + _SIGPWR = 0x1e + _SIGSYS = 0x1f + + _SIGRTMIN = 0x20 + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _CLOCK_THREAD_CPUTIME_ID = 0x3 + + _SIGEV_THREAD_ID = 0x4 +) + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type sigactiont struct { + sa_handler uintptr + sa_flags uint64 + sa_mask uint64 + // Linux on riscv64 does not have the sa_restorer field, but the setsig + // function references it (for x86). Not much harm to include it at the end. + sa_restorer uintptr +} + +type siginfoFields struct { + si_signo int32 + si_errno int32 + si_code int32 + // below here is a union; si_addr is the only field we use + si_addr uint64 +} + +type siginfo struct { + siginfoFields + + // Pad struct to the max size in the kernel. + _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte +} + +type itimerspec struct { + it_interval timespec + it_value timespec +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type sigeventFields struct { + value uintptr + signo int32 + notify int32 + // below here is a union; sigev_notify_thread_id is the only field we use + sigev_notify_thread_id int32 +} + +type sigevent struct { + sigeventFields + + // Pad struct to the max size in the kernel. 
+ _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte +} + +const ( + _O_RDONLY = 0x0 + _O_WRONLY = 0x1 + _O_CREAT = 0x40 + _O_TRUNC = 0x200 + _O_NONBLOCK = 0x800 + _O_CLOEXEC = 0x80000 +) + +type user_regs_struct struct { + pc uint64 + ra uint64 + sp uint64 + gp uint64 + tp uint64 + t0 uint64 + t1 uint64 + t2 uint64 + s0 uint64 + s1 uint64 + a0 uint64 + a1 uint64 + a2 uint64 + a3 uint64 + a4 uint64 + a5 uint64 + a6 uint64 + a7 uint64 + s2 uint64 + s3 uint64 + s4 uint64 + s5 uint64 + s6 uint64 + s7 uint64 + s8 uint64 + s9 uint64 + s10 uint64 + s11 uint64 + t3 uint64 + t4 uint64 + t5 uint64 + t6 uint64 +} + +type user_fpregs_struct struct { + f [528]byte +} + +type usigset struct { + us_x__val [16]uint64 +} + +type sigcontext struct { + sc_regs user_regs_struct + sc_fpregs user_fpregs_struct +} + +type stackt struct { + ss_sp *byte + ss_flags int32 + ss_size uintptr +} + +type ucontext struct { + uc_flags uint64 + uc_link *ucontext + uc_stack stackt + uc_sigmask usigset + uc_x__unused [0]uint8 + uc_pad_cgo_0 [8]byte + uc_mcontext sigcontext +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_linux_s390x.go b/platform/dbops/binaries/go/go/src/runtime/defs_linux_s390x.go new file mode 100644 index 0000000000000000000000000000000000000000..b0280213b3d12f6207ad44b83dd88edded9bd066 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_linux_s390x.go @@ -0,0 +1,192 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EAGAIN = 0xb + _ENOMEM = 0xc + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x20 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 + _MADV_HUGEPAGE = 0xe + _MADV_NOHUGEPAGE = 0xf + _MADV_COLLAPSE = 0x19 + + _SA_RESTART = 0x10000000 + _SA_ONSTACK = 0x8000000 + _SA_SIGINFO = 0x4 + + _SI_KERNEL = 0x80 + _SI_TIMER = -0x2 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGBUS = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGUSR1 = 0xa + _SIGSEGV = 0xb + _SIGUSR2 = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGSTKFLT = 0x10 + _SIGCHLD = 0x11 + _SIGCONT = 0x12 + _SIGSTOP = 0x13 + _SIGTSTP = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGURG = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGIO = 0x1d + _SIGPWR = 0x1e + _SIGSYS = 0x1f + + _SIGRTMIN = 0x20 + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _CLOCK_THREAD_CPUTIME_ID = 0x3 + + _SIGEV_THREAD_ID = 0x4 +) + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type sigactiont struct { + sa_handler uintptr + sa_flags uint64 + sa_restorer uintptr + sa_mask uint64 +} + +type siginfoFields struct { + si_signo int32 + si_errno int32 + si_code int32 + // below here is a union; si_addr is the only field we use + si_addr uint64 
+} + +type siginfo struct { + siginfoFields + + // Pad struct to the max size in the kernel. + _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte +} + +type itimerspec struct { + it_interval timespec + it_value timespec +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type sigeventFields struct { + value uintptr + signo int32 + notify int32 + // below here is a union; sigev_notify_thread_id is the only field we use + sigev_notify_thread_id int32 +} + +type sigevent struct { + sigeventFields + + // Pad struct to the max size in the kernel. + _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte +} + +const ( + _O_RDONLY = 0x0 + _O_WRONLY = 0x1 + _O_CREAT = 0x40 + _O_TRUNC = 0x200 + _O_NONBLOCK = 0x800 + _O_CLOEXEC = 0x80000 + _SA_RESTORER = 0 +) + +type stackt struct { + ss_sp *byte + ss_flags int32 + ss_size uintptr +} + +type sigcontext struct { + psw_mask uint64 + psw_addr uint64 + gregs [16]uint64 + aregs [16]uint32 + fpc uint32 + fpregs [16]uint64 +} + +type ucontext struct { + uc_flags uint64 + uc_link *ucontext + uc_stack stackt + uc_mcontext sigcontext + uc_sigmask uint64 +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_netbsd.go b/platform/dbops/binaries/go/go/src/runtime/defs_netbsd.go new file mode 100644 index 0000000000000000000000000000000000000000..43923e3075b7aa774ea32a5993b65c5c6d6c2559 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_netbsd.go @@ -0,0 +1,133 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* +Input to cgo. + +GOARCH=amd64 go tool cgo -cdefs defs_netbsd.go defs_netbsd_amd64.go >defs_netbsd_amd64.h +GOARCH=386 go tool cgo -cdefs defs_netbsd.go defs_netbsd_386.go >defs_netbsd_386.h +GOARCH=arm go tool cgo -cdefs defs_netbsd.go defs_netbsd_arm.go >defs_netbsd_arm.h +*/ + +// +godefs map __fpregset_t [644]byte + +package runtime + +/* +#include +#include +#include +#include +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + EINTR = C.EINTR + EFAULT = C.EFAULT + EAGAIN = C.EAGAIN + + O_WRONLY = C.O_WRONLY + O_NONBLOCK = C.O_NONBLOCK + O_CREAT = C.O_CREAT + O_TRUNC = C.O_TRUNC + O_CLOEXEC = C.O_CLOEXEC + + PROT_NONE = C.PROT_NONE + PROT_READ = C.PROT_READ + PROT_WRITE = C.PROT_WRITE + PROT_EXEC = C.PROT_EXEC + + MAP_ANON = C.MAP_ANON + MAP_PRIVATE = C.MAP_PRIVATE + MAP_FIXED = C.MAP_FIXED + + MADV_DONTNEED = C.MADV_DONTNEED + MADV_FREE = C.MADV_FREE + + SA_SIGINFO = C.SA_SIGINFO + SA_RESTART = C.SA_RESTART + SA_ONSTACK = C.SA_ONSTACK + + SIGHUP = C.SIGHUP + SIGINT = C.SIGINT + SIGQUIT = C.SIGQUIT + SIGILL = C.SIGILL + SIGTRAP = C.SIGTRAP + SIGABRT = C.SIGABRT + SIGEMT = C.SIGEMT + SIGFPE = C.SIGFPE + SIGKILL = C.SIGKILL + SIGBUS = C.SIGBUS + SIGSEGV = C.SIGSEGV + SIGSYS = C.SIGSYS + SIGPIPE = C.SIGPIPE + SIGALRM = C.SIGALRM + SIGTERM = C.SIGTERM + SIGURG = C.SIGURG + SIGSTOP = C.SIGSTOP + SIGTSTP = C.SIGTSTP + SIGCONT = C.SIGCONT + SIGCHLD = C.SIGCHLD + SIGTTIN = C.SIGTTIN + SIGTTOU = C.SIGTTOU + SIGIO = C.SIGIO + SIGXCPU = C.SIGXCPU + SIGXFSZ = C.SIGXFSZ + SIGVTALRM = C.SIGVTALRM + SIGPROF = C.SIGPROF + SIGWINCH = C.SIGWINCH + SIGINFO = C.SIGINFO + SIGUSR1 = C.SIGUSR1 + SIGUSR2 = C.SIGUSR2 + + FPE_INTDIV = C.FPE_INTDIV + FPE_INTOVF = C.FPE_INTOVF + FPE_FLTDIV = C.FPE_FLTDIV + FPE_FLTOVF = C.FPE_FLTOVF + FPE_FLTUND = C.FPE_FLTUND + FPE_FLTRES = C.FPE_FLTRES + FPE_FLTINV = C.FPE_FLTINV + FPE_FLTSUB = C.FPE_FLTSUB + + BUS_ADRALN = 
C.BUS_ADRALN + BUS_ADRERR = C.BUS_ADRERR + BUS_OBJERR = C.BUS_OBJERR + + SEGV_MAPERR = C.SEGV_MAPERR + SEGV_ACCERR = C.SEGV_ACCERR + + ITIMER_REAL = C.ITIMER_REAL + ITIMER_VIRTUAL = C.ITIMER_VIRTUAL + ITIMER_PROF = C.ITIMER_PROF + + EV_ADD = C.EV_ADD + EV_DELETE = C.EV_DELETE + EV_CLEAR = C.EV_CLEAR + EV_RECEIPT = 0 + EV_ERROR = C.EV_ERROR + EV_EOF = C.EV_EOF + EVFILT_READ = C.EVFILT_READ + EVFILT_WRITE = C.EVFILT_WRITE +) + +type Sigset C.sigset_t +type Siginfo C.struct__ksiginfo + +type StackT C.stack_t + +type Timespec C.struct_timespec +type Timeval C.struct_timeval +type Itimerval C.struct_itimerval + +type McontextT C.mcontext_t +type UcontextT C.ucontext_t + +type Kevent C.struct_kevent diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_netbsd_386.go b/platform/dbops/binaries/go/go/src/runtime/defs_netbsd_386.go new file mode 100644 index 0000000000000000000000000000000000000000..2943ea3f13eebf78e4039b0c6b3812531abacede --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_netbsd_386.go @@ -0,0 +1,41 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* +Input to cgo. + +GOARCH=386 go tool cgo -cdefs defs_netbsd.go defs_netbsd_386.go >defs_netbsd_386.h +*/ + +package runtime + +/* +#include +#include +*/ +import "C" + +const ( + REG_GS = C._REG_GS + REG_FS = C._REG_FS + REG_ES = C._REG_ES + REG_DS = C._REG_DS + REG_EDI = C._REG_EDI + REG_ESI = C._REG_ESI + REG_EBP = C._REG_EBP + REG_ESP = C._REG_ESP + REG_EBX = C._REG_EBX + REG_EDX = C._REG_EDX + REG_ECX = C._REG_ECX + REG_EAX = C._REG_EAX + REG_TRAPNO = C._REG_TRAPNO + REG_ERR = C._REG_ERR + REG_EIP = C._REG_EIP + REG_CS = C._REG_CS + REG_EFL = C._REG_EFL + REG_UESP = C._REG_UESP + REG_SS = C._REG_SS +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_netbsd_amd64.go b/platform/dbops/binaries/go/go/src/runtime/defs_netbsd_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..33d80ff53c0ccc5a0bccd99171f9d98b3d4cb078 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_netbsd_amd64.go @@ -0,0 +1,48 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* +Input to cgo. + +GOARCH=amd64 go tool cgo -cdefs defs_netbsd.go defs_netbsd_amd64.go >defs_netbsd_amd64.h +*/ + +package runtime + +/* +#include +#include +*/ +import "C" + +const ( + REG_RDI = C._REG_RDI + REG_RSI = C._REG_RSI + REG_RDX = C._REG_RDX + REG_RCX = C._REG_RCX + REG_R8 = C._REG_R8 + REG_R9 = C._REG_R9 + REG_R10 = C._REG_R10 + REG_R11 = C._REG_R11 + REG_R12 = C._REG_R12 + REG_R13 = C._REG_R13 + REG_R14 = C._REG_R14 + REG_R15 = C._REG_R15 + REG_RBP = C._REG_RBP + REG_RBX = C._REG_RBX + REG_RAX = C._REG_RAX + REG_GS = C._REG_GS + REG_FS = C._REG_FS + REG_ES = C._REG_ES + REG_DS = C._REG_DS + REG_TRAPNO = C._REG_TRAPNO + REG_ERR = C._REG_ERR + REG_RIP = C._REG_RIP + REG_CS = C._REG_CS + REG_RFLAGS = C._REG_RFLAGS + REG_RSP = C._REG_RSP + REG_SS = C._REG_SS +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_netbsd_arm.go b/platform/dbops/binaries/go/go/src/runtime/defs_netbsd_arm.go new file mode 100644 index 0000000000000000000000000000000000000000..74b37527df66a6a0912008e43ed8861f64cf2bc5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_netbsd_arm.go @@ -0,0 +1,39 @@ +// Copyright 2013 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* +Input to cgo. + +GOARCH=arm go tool cgo -cdefs defs_netbsd.go defs_netbsd_arm.go >defs_netbsd_arm.h +*/ + +package runtime + +/* +#include +#include +*/ +import "C" + +const ( + REG_R0 = C._REG_R0 + REG_R1 = C._REG_R1 + REG_R2 = C._REG_R2 + REG_R3 = C._REG_R3 + REG_R4 = C._REG_R4 + REG_R5 = C._REG_R5 + REG_R6 = C._REG_R6 + REG_R7 = C._REG_R7 + REG_R8 = C._REG_R8 + REG_R9 = C._REG_R9 + REG_R10 = C._REG_R10 + REG_R11 = C._REG_R11 + REG_R12 = C._REG_R12 + REG_R13 = C._REG_R13 + REG_R14 = C._REG_R14 + REG_R15 = C._REG_R15 + REG_CPSR = C._REG_CPSR +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_openbsd.go b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd.go new file mode 100644 index 0000000000000000000000000000000000000000..d93c087a8194b1ab9ecc74e39a941ec449900abb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd.go @@ -0,0 +1,142 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* +Input to cgo. + +GOARCH=amd64 go tool cgo -godefs defs_openbsd.go +GOARCH=386 go tool cgo -godefs defs_openbsd.go +GOARCH=arm go tool cgo -godefs defs_openbsd.go +GOARCH=arm64 go tool cgo -godefs defs_openbsd.go +GOARCH=mips64 go tool cgo -godefs defs_openbsd.go +*/ + +package runtime + +/* +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + EINTR = C.EINTR + EFAULT = C.EFAULT + EAGAIN = C.EAGAIN + ETIMEDOUT = C.ETIMEDOUT + + O_NONBLOCK = C.O_NONBLOCK + O_CLOEXEC = C.O_CLOEXEC + + PROT_NONE = C.PROT_NONE + PROT_READ = C.PROT_READ + PROT_WRITE = C.PROT_WRITE + PROT_EXEC = C.PROT_EXEC + + MAP_ANON = C.MAP_ANON + MAP_PRIVATE = C.MAP_PRIVATE + MAP_FIXED = C.MAP_FIXED + MAP_STACK = C.MAP_STACK + + MADV_DONTNEED = C.MADV_DONTNEED + MADV_FREE = C.MADV_FREE + + SA_SIGINFO = C.SA_SIGINFO + SA_RESTART = C.SA_RESTART + SA_ONSTACK = C.SA_ONSTACK + + PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED + + SIGHUP = C.SIGHUP + SIGINT = C.SIGINT + SIGQUIT = C.SIGQUIT + SIGILL = C.SIGILL + SIGTRAP = C.SIGTRAP + SIGABRT = C.SIGABRT + SIGEMT = C.SIGEMT + SIGFPE = C.SIGFPE + SIGKILL = C.SIGKILL + SIGBUS = C.SIGBUS + SIGSEGV = C.SIGSEGV + SIGSYS = C.SIGSYS + SIGPIPE = C.SIGPIPE + SIGALRM = C.SIGALRM + SIGTERM = C.SIGTERM + SIGURG = C.SIGURG + SIGSTOP = C.SIGSTOP + SIGTSTP = C.SIGTSTP + SIGCONT = C.SIGCONT + SIGCHLD = C.SIGCHLD + SIGTTIN = C.SIGTTIN + SIGTTOU = C.SIGTTOU + SIGIO = C.SIGIO + SIGXCPU = C.SIGXCPU + SIGXFSZ = C.SIGXFSZ + SIGVTALRM = C.SIGVTALRM + SIGPROF = C.SIGPROF + SIGWINCH = C.SIGWINCH + SIGINFO = C.SIGINFO + SIGUSR1 = C.SIGUSR1 + SIGUSR2 = C.SIGUSR2 + + FPE_INTDIV = C.FPE_INTDIV + FPE_INTOVF = C.FPE_INTOVF + FPE_FLTDIV = C.FPE_FLTDIV + FPE_FLTOVF = C.FPE_FLTOVF + FPE_FLTUND = C.FPE_FLTUND + FPE_FLTRES = C.FPE_FLTRES + FPE_FLTINV = C.FPE_FLTINV + FPE_FLTSUB = C.FPE_FLTSUB + + BUS_ADRALN = C.BUS_ADRALN + BUS_ADRERR = C.BUS_ADRERR + BUS_OBJERR = C.BUS_OBJERR + + SEGV_MAPERR = C.SEGV_MAPERR + SEGV_ACCERR = C.SEGV_ACCERR + + ITIMER_REAL = C.ITIMER_REAL + ITIMER_VIRTUAL = C.ITIMER_VIRTUAL + ITIMER_PROF = C.ITIMER_PROF + + EV_ADD = C.EV_ADD + EV_DELETE = C.EV_DELETE + EV_CLEAR = C.EV_CLEAR + EV_ERROR = C.EV_ERROR + EV_EOF = C.EV_EOF + EVFILT_READ = C.EVFILT_READ + EVFILT_WRITE = C.EVFILT_WRITE +) + +type TforkT 
C.struct___tfork + +type Sigcontext C.struct_sigcontext +type Siginfo C.siginfo_t +type Sigset C.sigset_t +type Sigval C.union_sigval + +type StackT C.stack_t + +type Timespec C.struct_timespec +type Timeval C.struct_timeval +type Itimerval C.struct_itimerval + +type KeventT C.struct_kevent + +type Pthread C.pthread_t +type PthreadAttr C.pthread_attr_t +type PthreadCond C.pthread_cond_t +type PthreadCondAttr C.pthread_condattr_t +type PthreadMutex C.pthread_mutex_t +type PthreadMutexAttr C.pthread_mutexattr_t diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_386.go b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_386.go new file mode 100644 index 0000000000000000000000000000000000000000..996745f6f8ed8516e0a2d669b49dfd98b2af2657 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_386.go @@ -0,0 +1,181 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_openbsd.go + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x10000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + _MAP_STACK = 0x4000 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x6 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _PTHREAD_CREATE_DETACHED = 0x1 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 +) + +type tforkt struct { + tf_tcb unsafe.Pointer + tf_tid *int32 + tf_stack uintptr +} + +type sigcontext struct { + sc_gs uint32 + sc_fs uint32 + sc_es uint32 + sc_ds uint32 + sc_edi uint32 + sc_esi uint32 + sc_ebp uint32 + sc_ebx uint32 + sc_edx uint32 + sc_ecx uint32 + sc_eax uint32 + sc_eip uint32 + sc_cs uint32 + sc_eflags uint32 + sc_esp uint32 + sc_ss uint32 + __sc_unused uint32 + sc_mask uint32 + sc_trapno uint32 + sc_err uint32 + sc_fpstate unsafe.Pointer +} + +type siginfo struct { + si_signo int32 + si_code int32 + si_errno int32 + _data [116]byte +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 +} + +type timespec struct { + tv_sec int64 + tv_nsec int32 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = int64(timediv(ns, 1e9, &ts.tv_nsec)) +} + +type timeval struct { + tv_sec int64 + tv_usec int32 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type keventt struct { + ident uint32 + filter int16 + 
flags uint16 + fflags uint32 + data int64 + udata *byte +} + +type pthread uintptr +type pthreadattr uintptr +type pthreadcond uintptr +type pthreadcondattr uintptr +type pthreadmutex uintptr +type pthreadmutexattr uintptr diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_amd64.go b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..739853bc035fe338b27c91842893d21fc128c623 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_amd64.go @@ -0,0 +1,192 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_openbsd.go + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x10000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + _MAP_STACK = 0x4000 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x6 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _PTHREAD_CREATE_DETACHED = 0x1 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 +) + +type tforkt struct { + tf_tcb unsafe.Pointer + tf_tid *int32 + tf_stack uintptr +} + +type sigcontext struct { + sc_rdi uint64 + sc_rsi uint64 + sc_rdx uint64 + sc_rcx uint64 + sc_r8 uint64 + sc_r9 uint64 + sc_r10 uint64 + sc_r11 uint64 + sc_r12 uint64 + sc_r13 uint64 + sc_r14 uint64 + sc_r15 uint64 + sc_rbp uint64 + sc_rbx uint64 + sc_rax uint64 + sc_gs uint64 + sc_fs uint64 + sc_es uint64 + sc_ds uint64 + sc_trapno uint64 + sc_err uint64 + sc_rip uint64 + sc_cs uint64 + sc_rflags uint64 + sc_rsp uint64 + sc_ss uint64 + sc_fpstate unsafe.Pointer + __sc_unused int32 + sc_mask int32 +} + +type siginfo struct { + si_signo int32 + si_code int32 + si_errno int32 + pad_cgo_0 [4]byte + _data [120]byte +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type keventt struct { + ident uint64 + filter int16 + flags uint16 + fflags uint32 + data int64 + udata *byte +} + +type pthread uintptr +type pthreadattr uintptr +type 
pthreadcond uintptr +type pthreadcondattr uintptr +type pthreadmutex uintptr +type pthreadmutexattr uintptr diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_arm.go b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_arm.go new file mode 100644 index 0000000000000000000000000000000000000000..cdda6b4ad1a850d1050ac7e9cb8e3408e397f77c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_arm.go @@ -0,0 +1,189 @@ +// created by cgo -cdefs and then converted to Go +// cgo -cdefs defs_openbsd.go + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x10000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + _MAP_STACK = 0x4000 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x6 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _PTHREAD_CREATE_DETACHED = 0x1 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 +) + +type tforkt struct { + tf_tcb unsafe.Pointer + tf_tid *int32 + tf_stack uintptr +} + +type sigcontext struct { + __sc_unused int32 + sc_mask int32 + + sc_spsr uint32 + sc_r0 uint32 + sc_r1 uint32 + sc_r2 uint32 + sc_r3 uint32 + sc_r4 uint32 + sc_r5 uint32 + sc_r6 uint32 + sc_r7 uint32 + sc_r8 uint32 + sc_r9 uint32 + sc_r10 uint32 + sc_r11 uint32 + sc_r12 uint32 + sc_usr_sp uint32 + sc_usr_lr uint32 + sc_svc_lr uint32 + sc_pc uint32 + sc_fpused uint32 + sc_fpscr uint32 + sc_fpreg [32]uint64 +} + +type siginfo struct { + si_signo int32 + si_code int32 + si_errno int32 + pad_cgo_0 [4]byte + _data [120]byte +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 +} + +type timespec struct { + tv_sec int64 + tv_nsec int32 + pad_cgo_0 [4]byte +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = int64(timediv(ns, 1e9, &ts.tv_nsec)) +} + +type timeval struct { + tv_sec int64 + tv_usec int32 + pad_cgo_0 [4]byte +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type keventt struct { + ident uint32 + filter int16 + flags uint16 + fflags uint32 + pad_cgo_0 [4]byte + data int64 + udata *byte + pad_cgo_1 [4]byte +} + +type pthread uintptr +type pthreadattr uintptr +type pthreadcond uintptr +type pthreadcondattr uintptr +type pthreadmutex uintptr +type pthreadmutexattr uintptr diff --git 
a/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_arm64.go b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..67f35b49fb4f306e3b17f6e1a68424a0f6666e14 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_arm64.go @@ -0,0 +1,172 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x10000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + _MAP_STACK = 0x4000 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x6 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _PTHREAD_CREATE_DETACHED = 0x1 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 +) + +type tforkt struct { + tf_tcb unsafe.Pointer + tf_tid *int32 + tf_stack uintptr +} + +type sigcontext struct { + __sc_unused int32 + sc_mask int32 + sc_sp uintptr + sc_lr uintptr + sc_elr uintptr + sc_spsr uintptr + sc_x [30]uintptr + sc_cookie int64 +} + +type siginfo struct { + si_signo int32 + si_code int32 + si_errno int32 + pad_cgo_0 [4]byte + _data [120]byte +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type keventt struct { + ident uint64 + filter int16 + flags uint16 + fflags uint32 + data int64 + udata *byte +} + +type pthread uintptr +type pthreadattr uintptr +type pthreadcond uintptr +type pthreadcondattr uintptr +type pthreadmutex uintptr +type pthreadmutexattr uintptr diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_mips64.go b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_mips64.go new file mode 100644 index 0000000000000000000000000000000000000000..7958044d0422d87cc3fb07f9caba4f49232920a2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_mips64.go @@ -0,0 +1,171 @@ +// Copyright 2020 The Go 
Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Generated from: +// +// GOARCH=mips64 go tool cgo -godefs defs_openbsd.go +// +// Then converted to the form used by the runtime. + +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x10000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + _MAP_STACK = 0x4000 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x6 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 +) + +type tforkt struct { + tf_tcb unsafe.Pointer + tf_tid *int32 + tf_stack uintptr +} + +type sigcontext struct { + sc_cookie uint64 + sc_mask uint64 + sc_pc uint64 + sc_regs [32]uint64 + mullo uint64 + mulhi uint64 + sc_fpregs [33]uint64 + sc_fpused uint64 + sc_fpc_eir uint64 + _xxx [8]int64 +} + +type siginfo struct { + si_signo int32 + si_code int32 + si_errno int32 + pad_cgo_0 [4]byte + _data [120]byte +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type keventt struct { + ident uint64 + filter int16 + flags uint16 + fflags uint32 + data int64 + udata *byte +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_ppc64.go b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_ppc64.go new file mode 100644 index 0000000000000000000000000000000000000000..83f300cbaab85b19e194dba6b57b2bd2d6368113 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_ppc64.go @@ -0,0 +1,184 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Generated from: +// +// GOARCH=ppc64 go tool cgo -godefs defs_openbsd.go +// +// Then converted to the form used by the runtime. 
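
Every port in this series defines the same setNsec split: whole seconds go to tv_sec and the sub-second remainder to tv_nsec, with the 32-bit-tv_nsec ports (openbsd/386 and openbsd/arm above) routing through the runtime's timediv helper instead of plain / and %. A minimal standalone sketch of that contract follows; the timediv body here is a simplified stand-in and only its signature matches the runtime's helper, which avoids 64-bit division so it stays cheap on 32-bit targets.

package main

import "fmt"

// timediv stand-in: returns ns/div and stores ns%div through rem.
func timediv(ns int64, div int32, rem *int32) int32 {
	*rem = int32(ns % int64(div))
	return int32(ns / int64(div))
}

func main() {
	var nsec int32
	sec := timediv(2500000000, 1e9, &nsec) // 2.5 seconds in nanoseconds
	fmt.Println(sec, nsec)                 // prints: 2 500000000
}
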
+ +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x10000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + _MAP_STACK = 0x4000 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x6 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _PTHREAD_CREATE_DETACHED = 0x1 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 +) + +type tforkt struct { + tf_tcb unsafe.Pointer + tf_tid *int32 + tf_stack uintptr +} + +type sigcontext struct { + sc_cookie uint64 + sc_mask int32 + sc_reg [32]uint64 + sc_lr uint64 + sc_cr uint64 + sc_xer uint64 + sc_ctr uint64 + sc_pc uint64 + sc_ps uint64 + sc_vrsave uint64 + pad_cgo_0 [8]byte + sc_vsx [64][16]uint8 + sc_fpscr uint64 + sc_vscr uint64 +} + +type siginfo struct { + si_signo int32 + si_code int32 + si_errno int32 + pad_cgo_0 [4]byte + _data [120]byte +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type keventt struct { + ident uint64 + filter int16 + flags uint16 + fflags uint32 + data int64 + udata *byte +} + +type pthread uintptr +type pthreadattr uintptr +type pthreadcond uintptr +type pthreadcondattr uintptr +type pthreadmutex uintptr +type pthreadmutexattr uintptr diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_riscv64.go b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_riscv64.go new file mode 100644 index 0000000000000000000000000000000000000000..2a044d5eb05fcf959d1bf279a631f1d7eb068023 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_openbsd_riscv64.go @@ -0,0 +1,177 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
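
The _EV_*/_EVFILT_* constants and keventt layouts repeated through these files are what the kqueue-based netpoller fills in. A hedged fragment of a read-readiness registration built from them (64-bit ident layout as in the nearby files; the kevent syscall wrapper and error handling are omitted, and readEvent is an illustrative name, not a runtime function):

// readEvent builds an edge-triggered read registration for fd, using the
// flag combination the netpoller uses: add the event, clear state on
// delivery.
func readEvent(fd int32) keventt {
	return keventt{
		ident:  uint64(fd),
		filter: _EVFILT_READ,        // -0x1
		flags:  _EV_ADD | _EV_CLEAR, // 0x1 | 0x20
	}
}
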
+ +package runtime + +import "unsafe" + +const ( + _EINTR = 0x4 + _EFAULT = 0xe + _EAGAIN = 0x23 + _ETIMEDOUT = 0x3c + + _O_WRONLY = 0x1 + _O_NONBLOCK = 0x4 + _O_CREAT = 0x200 + _O_TRUNC = 0x400 + _O_CLOEXEC = 0x10000 + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANON = 0x1000 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x10 + _MAP_STACK = 0x4000 + + _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x6 + + _SA_SIGINFO = 0x40 + _SA_RESTART = 0x2 + _SA_ONSTACK = 0x1 + + _PTHREAD_CREATE_DETACHED = 0x1 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGEMT = 0x7 + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGBUS = 0xa + _SIGSEGV = 0xb + _SIGSYS = 0xc + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGTERM = 0xf + _SIGURG = 0x10 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGCONT = 0x13 + _SIGCHLD = 0x14 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGIO = 0x17 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x1a + _SIGPROF = 0x1b + _SIGWINCH = 0x1c + _SIGINFO = 0x1d + _SIGUSR1 = 0x1e + _SIGUSR2 = 0x1f + + _FPE_INTDIV = 0x1 + _FPE_INTOVF = 0x2 + _FPE_FLTDIV = 0x3 + _FPE_FLTOVF = 0x4 + _FPE_FLTUND = 0x5 + _FPE_FLTRES = 0x6 + _FPE_FLTINV = 0x7 + _FPE_FLTSUB = 0x8 + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x1 + _SEGV_ACCERR = 0x2 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _EV_ADD = 0x1 + _EV_DELETE = 0x2 + _EV_CLEAR = 0x20 + _EV_ERROR = 0x4000 + _EV_EOF = 0x8000 + _EVFILT_READ = -0x1 + _EVFILT_WRITE = -0x2 +) + +type tforkt struct { + tf_tcb unsafe.Pointer + tf_tid *int32 + tf_stack uintptr +} + +type sigcontext struct { + __sc_unused int32 + sc_mask int32 + sc_ra uintptr + sc_sp uintptr + sc_gp uintptr + sc_tp uintptr + sc_t [7]uintptr + sc_s [12]uintptr + sc_a [8]uintptr + sc_sepc uintptr + sc_f [32]uintptr + sc_fcsr uintptr + sc_cookie int64 +} + +type siginfo struct { + si_signo int32 + si_code int32 + si_errno int32 + pad_cgo_0 [4]byte + _data [120]byte +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} + +//go:nosplit +func (ts *timespec) setNsec(ns int64) { + ts.tv_sec = ns / 1e9 + ts.tv_nsec = ns % 1e9 +} + +type timeval struct { + tv_sec int64 + tv_usec int64 +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = int64(x) +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type keventt struct { + ident uint64 + filter int16 + flags uint16 + fflags uint32 + data int64 + udata *byte +} + +type pthread uintptr +type pthreadattr uintptr +type pthreadcond uintptr +type pthreadcondattr uintptr +type pthreadmutex uintptr +type pthreadmutexattr uintptr diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_plan9_386.go b/platform/dbops/binaries/go/go/src/runtime/defs_plan9_386.go new file mode 100644 index 0000000000000000000000000000000000000000..428044df6871d71823a5f18edd745e1fb749eda9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_plan9_386.go @@ -0,0 +1,64 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +const _PAGESIZE = 0x1000 + +type ureg struct { + di uint32 /* general registers */ + si uint32 /* ... */ + bp uint32 /* ... */ + nsp uint32 + bx uint32 /* ... */ + dx uint32 /* ... */ + cx uint32 /* ... */ + ax uint32 /* ... */ + gs uint32 /* data segments */ + fs uint32 /* ... 
*/ + es uint32 /* ... */ + ds uint32 /* ... */ + trap uint32 /* trap _type */ + ecode uint32 /* error code (or zero) */ + pc uint32 /* pc */ + cs uint32 /* old context */ + flags uint32 /* old flags */ + sp uint32 + ss uint32 /* old stack segment */ +} + +type sigctxt struct { + u *ureg +} + +//go:nosplit +//go:nowritebarrierrec +func (c *sigctxt) pc() uintptr { return uintptr(c.u.pc) } + +func (c *sigctxt) sp() uintptr { return uintptr(c.u.sp) } +func (c *sigctxt) lr() uintptr { return uintptr(0) } + +func (c *sigctxt) setpc(x uintptr) { c.u.pc = uint32(x) } +func (c *sigctxt) setsp(x uintptr) { c.u.sp = uint32(x) } +func (c *sigctxt) setlr(x uintptr) {} + +func (c *sigctxt) savelr(x uintptr) {} + +func dumpregs(u *ureg) { + print("ax ", hex(u.ax), "\n") + print("bx ", hex(u.bx), "\n") + print("cx ", hex(u.cx), "\n") + print("dx ", hex(u.dx), "\n") + print("di ", hex(u.di), "\n") + print("si ", hex(u.si), "\n") + print("bp ", hex(u.bp), "\n") + print("sp ", hex(u.sp), "\n") + print("pc ", hex(u.pc), "\n") + print("flags ", hex(u.flags), "\n") + print("cs ", hex(u.cs), "\n") + print("fs ", hex(u.fs), "\n") + print("gs ", hex(u.gs), "\n") +} + +func sigpanictramp() diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_plan9_amd64.go b/platform/dbops/binaries/go/go/src/runtime/defs_plan9_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..15a27fc7db9e5110991e4dcf282ed590b2171d9a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_plan9_amd64.go @@ -0,0 +1,81 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +const _PAGESIZE = 0x1000 + +type ureg struct { + ax uint64 + bx uint64 + cx uint64 + dx uint64 + si uint64 + di uint64 + bp uint64 + r8 uint64 + r9 uint64 + r10 uint64 + r11 uint64 + r12 uint64 + r13 uint64 + r14 uint64 + r15 uint64 + + ds uint16 + es uint16 + fs uint16 + gs uint16 + + _type uint64 + error uint64 /* error code (or zero) */ + ip uint64 /* pc */ + cs uint64 /* old context */ + flags uint64 /* old flags */ + sp uint64 /* sp */ + ss uint64 /* old stack segment */ +} + +type sigctxt struct { + u *ureg +} + +//go:nosplit +//go:nowritebarrierrec +func (c *sigctxt) pc() uintptr { return uintptr(c.u.ip) } + +func (c *sigctxt) sp() uintptr { return uintptr(c.u.sp) } +func (c *sigctxt) lr() uintptr { return uintptr(0) } + +func (c *sigctxt) setpc(x uintptr) { c.u.ip = uint64(x) } +func (c *sigctxt) setsp(x uintptr) { c.u.sp = uint64(x) } +func (c *sigctxt) setlr(x uintptr) {} + +func (c *sigctxt) savelr(x uintptr) {} + +func dumpregs(u *ureg) { + print("ax ", hex(u.ax), "\n") + print("bx ", hex(u.bx), "\n") + print("cx ", hex(u.cx), "\n") + print("dx ", hex(u.dx), "\n") + print("di ", hex(u.di), "\n") + print("si ", hex(u.si), "\n") + print("bp ", hex(u.bp), "\n") + print("sp ", hex(u.sp), "\n") + print("r8 ", hex(u.r8), "\n") + print("r9 ", hex(u.r9), "\n") + print("r10 ", hex(u.r10), "\n") + print("r11 ", hex(u.r11), "\n") + print("r12 ", hex(u.r12), "\n") + print("r13 ", hex(u.r13), "\n") + print("r14 ", hex(u.r14), "\n") + print("r15 ", hex(u.r15), "\n") + print("ip ", hex(u.ip), "\n") + print("flags ", hex(u.flags), "\n") + print("cs ", hex(u.cs), "\n") + print("fs ", hex(u.fs), "\n") + print("gs ", hex(u.gs), "\n") +} + +func sigpanictramp() diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_plan9_arm.go b/platform/dbops/binaries/go/go/src/runtime/defs_plan9_arm.go new file mode 100644 
index 0000000000000000000000000000000000000000..1adc16e47f9be0e0bf8bdcb0f46171fa5f6a4361 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_plan9_arm.go @@ -0,0 +1,66 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +const _PAGESIZE = 0x1000 + +type ureg struct { + r0 uint32 /* general registers */ + r1 uint32 /* ... */ + r2 uint32 /* ... */ + r3 uint32 /* ... */ + r4 uint32 /* ... */ + r5 uint32 /* ... */ + r6 uint32 /* ... */ + r7 uint32 /* ... */ + r8 uint32 /* ... */ + r9 uint32 /* ... */ + r10 uint32 /* ... */ + r11 uint32 /* ... */ + r12 uint32 /* ... */ + sp uint32 + link uint32 /* ... */ + trap uint32 /* trap type */ + psr uint32 + pc uint32 /* interrupted addr */ +} + +type sigctxt struct { + u *ureg +} + +//go:nosplit +//go:nowritebarrierrec +func (c *sigctxt) pc() uintptr { return uintptr(c.u.pc) } + +func (c *sigctxt) sp() uintptr { return uintptr(c.u.sp) } +func (c *sigctxt) lr() uintptr { return uintptr(c.u.link) } + +func (c *sigctxt) setpc(x uintptr) { c.u.pc = uint32(x) } +func (c *sigctxt) setsp(x uintptr) { c.u.sp = uint32(x) } +func (c *sigctxt) setlr(x uintptr) { c.u.link = uint32(x) } +func (c *sigctxt) savelr(x uintptr) { c.u.r0 = uint32(x) } + +func dumpregs(u *ureg) { + print("r0 ", hex(u.r0), "\n") + print("r1 ", hex(u.r1), "\n") + print("r2 ", hex(u.r2), "\n") + print("r3 ", hex(u.r3), "\n") + print("r4 ", hex(u.r4), "\n") + print("r5 ", hex(u.r5), "\n") + print("r6 ", hex(u.r6), "\n") + print("r7 ", hex(u.r7), "\n") + print("r8 ", hex(u.r8), "\n") + print("r9 ", hex(u.r9), "\n") + print("r10 ", hex(u.r10), "\n") + print("r11 ", hex(u.r11), "\n") + print("r12 ", hex(u.r12), "\n") + print("sp ", hex(u.sp), "\n") + print("link ", hex(u.link), "\n") + print("pc ", hex(u.pc), "\n") + print("psr ", hex(u.psr), "\n") +} + +func sigpanictramp() diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_solaris.go b/platform/dbops/binaries/go/go/src/runtime/defs_solaris.go new file mode 100644 index 0000000000000000000000000000000000000000..54c400834020a3dca7c8f8a76cb24ca232c4dbf4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_solaris.go @@ -0,0 +1,160 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* +Input to cgo. 
+ +GOARCH=amd64 go tool cgo -cdefs defs_solaris.go >defs_solaris_amd64.h +*/ + +package runtime + +/* +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + EINTR = C.EINTR + EBADF = C.EBADF + EFAULT = C.EFAULT + EAGAIN = C.EAGAIN + EBUSY = C.EBUSY + ETIME = C.ETIME + ETIMEDOUT = C.ETIMEDOUT + EWOULDBLOCK = C.EWOULDBLOCK + EINPROGRESS = C.EINPROGRESS + + PROT_NONE = C.PROT_NONE + PROT_READ = C.PROT_READ + PROT_WRITE = C.PROT_WRITE + PROT_EXEC = C.PROT_EXEC + + MAP_ANON = C.MAP_ANON + MAP_PRIVATE = C.MAP_PRIVATE + MAP_FIXED = C.MAP_FIXED + + MADV_DONTNEED = C.MADV_DONTNEED + MADV_FREE = C.MADV_FREE + + SA_SIGINFO = C.SA_SIGINFO + SA_RESTART = C.SA_RESTART + SA_ONSTACK = C.SA_ONSTACK + + SIGHUP = C.SIGHUP + SIGINT = C.SIGINT + SIGQUIT = C.SIGQUIT + SIGILL = C.SIGILL + SIGTRAP = C.SIGTRAP + SIGABRT = C.SIGABRT + SIGEMT = C.SIGEMT + SIGFPE = C.SIGFPE + SIGKILL = C.SIGKILL + SIGBUS = C.SIGBUS + SIGSEGV = C.SIGSEGV + SIGSYS = C.SIGSYS + SIGPIPE = C.SIGPIPE + SIGALRM = C.SIGALRM + SIGTERM = C.SIGTERM + SIGURG = C.SIGURG + SIGSTOP = C.SIGSTOP + SIGTSTP = C.SIGTSTP + SIGCONT = C.SIGCONT + SIGCHLD = C.SIGCHLD + SIGTTIN = C.SIGTTIN + SIGTTOU = C.SIGTTOU + SIGIO = C.SIGIO + SIGXCPU = C.SIGXCPU + SIGXFSZ = C.SIGXFSZ + SIGVTALRM = C.SIGVTALRM + SIGPROF = C.SIGPROF + SIGWINCH = C.SIGWINCH + SIGUSR1 = C.SIGUSR1 + SIGUSR2 = C.SIGUSR2 + + FPE_INTDIV = C.FPE_INTDIV + FPE_INTOVF = C.FPE_INTOVF + FPE_FLTDIV = C.FPE_FLTDIV + FPE_FLTOVF = C.FPE_FLTOVF + FPE_FLTUND = C.FPE_FLTUND + FPE_FLTRES = C.FPE_FLTRES + FPE_FLTINV = C.FPE_FLTINV + FPE_FLTSUB = C.FPE_FLTSUB + + BUS_ADRALN = C.BUS_ADRALN + BUS_ADRERR = C.BUS_ADRERR + BUS_OBJERR = C.BUS_OBJERR + + SEGV_MAPERR = C.SEGV_MAPERR + SEGV_ACCERR = C.SEGV_ACCERR + + ITIMER_REAL = C.ITIMER_REAL + ITIMER_VIRTUAL = C.ITIMER_VIRTUAL + ITIMER_PROF = C.ITIMER_PROF + + _SC_NPROCESSORS_ONLN = C._SC_NPROCESSORS_ONLN + + PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED + + FORK_NOSIGCHLD = C.FORK_NOSIGCHLD + FORK_WAITPID = C.FORK_WAITPID + + MAXHOSTNAMELEN = C.MAXHOSTNAMELEN + + O_WRONLY = C.O_WRONLY + O_NONBLOCK = C.O_NONBLOCK + O_CREAT = C.O_CREAT + O_TRUNC = C.O_TRUNC + O_CLOEXEC = C.O_CLOEXEC + + POLLIN = C.POLLIN + POLLOUT = C.POLLOUT + POLLHUP = C.POLLHUP + POLLERR = C.POLLERR + + PORT_SOURCE_FD = C.PORT_SOURCE_FD + PORT_SOURCE_ALERT = C.PORT_SOURCE_ALERT + PORT_ALERT_UPDATE = C.PORT_ALERT_UPDATE +) + +type SemT C.sem_t + +type Sigset C.sigset_t +type StackT C.stack_t + +type Siginfo C.siginfo_t +type Sigaction C.struct_sigaction + +type Fpregset C.fpregset_t +type Mcontext C.mcontext_t +type Ucontext C.ucontext_t + +type Timespec C.struct_timespec +type Timeval C.struct_timeval +type Itimerval C.struct_itimerval + +type PortEvent C.port_event_t +type Pthread C.pthread_t +type PthreadAttr C.pthread_attr_t + +// depends on Timespec, must appear below +type Stat C.struct_stat diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_solaris_amd64.go b/platform/dbops/binaries/go/go/src/runtime/defs_solaris_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..56e4b38c5e43842747ee534feee62acd349e27db --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_solaris_amd64.go @@ -0,0 +1,48 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +/* +Input to cgo. 
+ +GOARCH=amd64 go tool cgo -cdefs defs_solaris.go defs_solaris_amd64.go >defs_solaris_amd64.h +*/ + +package runtime + +/* +#include +#include +*/ +import "C" + +const ( + REG_RDI = C.REG_RDI + REG_RSI = C.REG_RSI + REG_RDX = C.REG_RDX + REG_RCX = C.REG_RCX + REG_R8 = C.REG_R8 + REG_R9 = C.REG_R9 + REG_R10 = C.REG_R10 + REG_R11 = C.REG_R11 + REG_R12 = C.REG_R12 + REG_R13 = C.REG_R13 + REG_R14 = C.REG_R14 + REG_R15 = C.REG_R15 + REG_RBP = C.REG_RBP + REG_RBX = C.REG_RBX + REG_RAX = C.REG_RAX + REG_GS = C.REG_GS + REG_FS = C.REG_FS + REG_ES = C.REG_ES + REG_DS = C.REG_DS + REG_TRAPNO = C.REG_TRAPNO + REG_ERR = C.REG_ERR + REG_RIP = C.REG_RIP + REG_CS = C.REG_CS + REG_RFLAGS = C.REG_RFL + REG_RSP = C.REG_RSP + REG_SS = C.REG_SS +) diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_windows.go b/platform/dbops/binaries/go/go/src/runtime/defs_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..2dbe1446898e59ee9c012953cfcd8a60c60268b7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_windows.go @@ -0,0 +1,91 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Windows architecture-independent definitions. + +package runtime + +const ( + _PROT_NONE = 0 + _PROT_READ = 1 + _PROT_WRITE = 2 + _PROT_EXEC = 4 + + _MAP_ANON = 1 + _MAP_PRIVATE = 2 + + _DUPLICATE_SAME_ACCESS = 0x2 + _THREAD_PRIORITY_HIGHEST = 0x2 + + _SIGINT = 0x2 + _SIGTERM = 0xF + _CTRL_C_EVENT = 0x0 + _CTRL_BREAK_EVENT = 0x1 + _CTRL_CLOSE_EVENT = 0x2 + _CTRL_LOGOFF_EVENT = 0x5 + _CTRL_SHUTDOWN_EVENT = 0x6 + + _EXCEPTION_ACCESS_VIOLATION = 0xc0000005 + _EXCEPTION_IN_PAGE_ERROR = 0xc0000006 + _EXCEPTION_BREAKPOINT = 0x80000003 + _EXCEPTION_ILLEGAL_INSTRUCTION = 0xc000001d + _EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d + _EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e + _EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f + _EXCEPTION_FLT_OVERFLOW = 0xc0000091 + _EXCEPTION_FLT_UNDERFLOW = 0xc0000093 + _EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094 + _EXCEPTION_INT_OVERFLOW = 0xc0000095 + + _INFINITE = 0xffffffff + _WAIT_TIMEOUT = 0x102 + + _EXCEPTION_CONTINUE_EXECUTION = -0x1 + _EXCEPTION_CONTINUE_SEARCH = 0x0 + _EXCEPTION_CONTINUE_SEARCH_SEH = 0x1 +) + +type systeminfo struct { + anon0 [4]byte + dwpagesize uint32 + lpminimumapplicationaddress *byte + lpmaximumapplicationaddress *byte + dwactiveprocessormask uintptr + dwnumberofprocessors uint32 + dwprocessortype uint32 + dwallocationgranularity uint32 + wprocessorlevel uint16 + wprocessorrevision uint16 +} + +type exceptionpointers struct { + record *exceptionrecord + context *context +} + +type exceptionrecord struct { + exceptioncode uint32 + exceptionflags uint32 + exceptionrecord *exceptionrecord + exceptionaddress uintptr + numberparameters uint32 + exceptioninformation [15]uintptr +} + +type overlapped struct { + internal uintptr + internalhigh uintptr + anon0 [8]byte + hevent *byte +} + +type memoryBasicInformation struct { + baseAddress uintptr + allocationBase uintptr + allocationProtect uint32 + regionSize uintptr + state uint32 + protect uint32 + type_ uint32 +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_windows_386.go b/platform/dbops/binaries/go/go/src/runtime/defs_windows_386.go new file mode 100644 index 0000000000000000000000000000000000000000..8cf2bfc307f0def172c5214052b9778187bdbd4c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_windows_386.go @@ -0,0 +1,88 @@ +// Copyright 2009 The 
Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +const _CONTEXT_CONTROL = 0x10001 + +type floatingsavearea struct { + controlword uint32 + statusword uint32 + tagword uint32 + erroroffset uint32 + errorselector uint32 + dataoffset uint32 + dataselector uint32 + registerarea [80]uint8 + cr0npxstate uint32 +} + +type context struct { + contextflags uint32 + dr0 uint32 + dr1 uint32 + dr2 uint32 + dr3 uint32 + dr6 uint32 + dr7 uint32 + floatsave floatingsavearea + seggs uint32 + segfs uint32 + seges uint32 + segds uint32 + edi uint32 + esi uint32 + ebx uint32 + edx uint32 + ecx uint32 + eax uint32 + ebp uint32 + eip uint32 + segcs uint32 + eflags uint32 + esp uint32 + segss uint32 + extendedregisters [512]uint8 +} + +func (c *context) ip() uintptr { return uintptr(c.eip) } +func (c *context) sp() uintptr { return uintptr(c.esp) } + +// 386 does not have link register, so this returns 0. +func (c *context) lr() uintptr { return 0 } +func (c *context) set_lr(x uintptr) {} + +func (c *context) set_ip(x uintptr) { c.eip = uint32(x) } +func (c *context) set_sp(x uintptr) { c.esp = uint32(x) } + +// 386 does not have frame pointer register. +func (c *context) set_fp(x uintptr) {} + +func prepareContextForSigResume(c *context) { + c.edx = c.esp + c.ecx = c.eip +} + +func dumpregs(r *context) { + print("eax ", hex(r.eax), "\n") + print("ebx ", hex(r.ebx), "\n") + print("ecx ", hex(r.ecx), "\n") + print("edx ", hex(r.edx), "\n") + print("edi ", hex(r.edi), "\n") + print("esi ", hex(r.esi), "\n") + print("ebp ", hex(r.ebp), "\n") + print("esp ", hex(r.esp), "\n") + print("eip ", hex(r.eip), "\n") + print("eflags ", hex(r.eflags), "\n") + print("cs ", hex(r.segcs), "\n") + print("fs ", hex(r.segfs), "\n") + print("gs ", hex(r.seggs), "\n") +} + +// _DISPATCHER_CONTEXT is not defined on 386. +type _DISPATCHER_CONTEXT struct{} + +func (c *_DISPATCHER_CONTEXT) ctx() *context { + return nil +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_windows_amd64.go b/platform/dbops/binaries/go/go/src/runtime/defs_windows_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..9dbfb40e63ee4065b1ed7d796997f670930ccba2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_windows_amd64.go @@ -0,0 +1,116 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
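
On Windows, contextflags is an input as well as an output: before GetThreadContext the caller must say which register groups it wants, which is what the per-arch _CONTEXT_CONTROL values here exist for. A hedged sketch of that calling pattern (captureControlRegs is an illustrative name, and the commented line stands in for the runtime's actual stdcall plumbing):

// captureControlRegs asks the kernel for just the control registers of
// thread, enough for c.ip() and c.sp() to be meaningful afterwards.
func captureControlRegs(thread uintptr, c *context) {
	c.contextflags = _CONTEXT_CONTROL
	// stdcall2(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
}
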
+ +package runtime + +const _CONTEXT_CONTROL = 0x100001 + +type m128a struct { + low uint64 + high int64 +} + +type context struct { + p1home uint64 + p2home uint64 + p3home uint64 + p4home uint64 + p5home uint64 + p6home uint64 + contextflags uint32 + mxcsr uint32 + segcs uint16 + segds uint16 + seges uint16 + segfs uint16 + seggs uint16 + segss uint16 + eflags uint32 + dr0 uint64 + dr1 uint64 + dr2 uint64 + dr3 uint64 + dr6 uint64 + dr7 uint64 + rax uint64 + rcx uint64 + rdx uint64 + rbx uint64 + rsp uint64 + rbp uint64 + rsi uint64 + rdi uint64 + r8 uint64 + r9 uint64 + r10 uint64 + r11 uint64 + r12 uint64 + r13 uint64 + r14 uint64 + r15 uint64 + rip uint64 + anon0 [512]byte + vectorregister [26]m128a + vectorcontrol uint64 + debugcontrol uint64 + lastbranchtorip uint64 + lastbranchfromrip uint64 + lastexceptiontorip uint64 + lastexceptionfromrip uint64 +} + +func (c *context) ip() uintptr { return uintptr(c.rip) } +func (c *context) sp() uintptr { return uintptr(c.rsp) } + +// AMD64 does not have link register, so this returns 0. +func (c *context) lr() uintptr { return 0 } +func (c *context) set_lr(x uintptr) {} + +func (c *context) set_ip(x uintptr) { c.rip = uint64(x) } +func (c *context) set_sp(x uintptr) { c.rsp = uint64(x) } +func (c *context) set_fp(x uintptr) { c.rbp = uint64(x) } + +func prepareContextForSigResume(c *context) { + c.r8 = c.rsp + c.r9 = c.rip +} + +func dumpregs(r *context) { + print("rax ", hex(r.rax), "\n") + print("rbx ", hex(r.rbx), "\n") + print("rcx ", hex(r.rcx), "\n") + print("rdx ", hex(r.rdx), "\n") + print("rdi ", hex(r.rdi), "\n") + print("rsi ", hex(r.rsi), "\n") + print("rbp ", hex(r.rbp), "\n") + print("rsp ", hex(r.rsp), "\n") + print("r8 ", hex(r.r8), "\n") + print("r9 ", hex(r.r9), "\n") + print("r10 ", hex(r.r10), "\n") + print("r11 ", hex(r.r11), "\n") + print("r12 ", hex(r.r12), "\n") + print("r13 ", hex(r.r13), "\n") + print("r14 ", hex(r.r14), "\n") + print("r15 ", hex(r.r15), "\n") + print("rip ", hex(r.rip), "\n") + print("rflags ", hex(r.eflags), "\n") + print("cs ", hex(r.segcs), "\n") + print("fs ", hex(r.segfs), "\n") + print("gs ", hex(r.seggs), "\n") +} + +type _DISPATCHER_CONTEXT struct { + controlPc uint64 + imageBase uint64 + functionEntry uintptr + establisherFrame uint64 + targetIp uint64 + context *context + languageHandler uintptr + handlerData uintptr +} + +func (c *_DISPATCHER_CONTEXT) ctx() *context { + return c.context +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_windows_arm.go b/platform/dbops/binaries/go/go/src/runtime/defs_windows_arm.go new file mode 100644 index 0000000000000000000000000000000000000000..861a88430ea3b87da655560ac7f950f23a05cc95 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_windows_arm.go @@ -0,0 +1,106 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// NOTE(rsc): _CONTEXT_CONTROL is actually 0x200001 and should include PC, SP, and LR. +// However, empirically, LR doesn't come along on Windows 10 +// unless you also set _CONTEXT_INTEGER (0x200002). +// Without LR, we skip over the next-to-bottom function in profiles +// when the bottom function is frameless. +// So we set both here, to make a working _CONTEXT_CONTROL. 
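
The constant below is easier to audit decomposed. Per the winnt.h convention, each architecture carries a tag that is OR'd with per-group bits; the tag and bit values in this sketch are an assumption taken from winnt.h rather than from this file, and the names are illustrative:

const (
	contextARMTag  = 0x200000 // CONTEXT_ARM architecture tag (winnt.h, assumed)
	contextControl = 0x1      // PC, SP, LR, CPSR
	contextInteger = 0x2      // R0-R12
	// contextARMTag | contextControl | contextInteger == 0x200003.
	// The arm64 file below plays the same game with the 0x400000 tag,
	// which is how it arrives at 0x400003.
)
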
+const _CONTEXT_CONTROL = 0x200003 + +type neon128 struct { + low uint64 + high int64 +} + +type context struct { + contextflags uint32 + r0 uint32 + r1 uint32 + r2 uint32 + r3 uint32 + r4 uint32 + r5 uint32 + r6 uint32 + r7 uint32 + r8 uint32 + r9 uint32 + r10 uint32 + r11 uint32 + r12 uint32 + + spr uint32 + lrr uint32 + pc uint32 + cpsr uint32 + + fpscr uint32 + padding uint32 + + floatNeon [16]neon128 + + bvr [8]uint32 + bcr [8]uint32 + wvr [1]uint32 + wcr [1]uint32 + padding2 [2]uint32 +} + +func (c *context) ip() uintptr { return uintptr(c.pc) } +func (c *context) sp() uintptr { return uintptr(c.spr) } +func (c *context) lr() uintptr { return uintptr(c.lrr) } + +func (c *context) set_ip(x uintptr) { c.pc = uint32(x) } +func (c *context) set_sp(x uintptr) { c.spr = uint32(x) } +func (c *context) set_lr(x uintptr) { c.lrr = uint32(x) } + +// arm does not have frame pointer register. +func (c *context) set_fp(x uintptr) {} + +func prepareContextForSigResume(c *context) { + c.r0 = c.spr + c.r1 = c.pc +} + +func dumpregs(r *context) { + print("r0 ", hex(r.r0), "\n") + print("r1 ", hex(r.r1), "\n") + print("r2 ", hex(r.r2), "\n") + print("r3 ", hex(r.r3), "\n") + print("r4 ", hex(r.r4), "\n") + print("r5 ", hex(r.r5), "\n") + print("r6 ", hex(r.r6), "\n") + print("r7 ", hex(r.r7), "\n") + print("r8 ", hex(r.r8), "\n") + print("r9 ", hex(r.r9), "\n") + print("r10 ", hex(r.r10), "\n") + print("r11 ", hex(r.r11), "\n") + print("r12 ", hex(r.r12), "\n") + print("sp ", hex(r.spr), "\n") + print("lr ", hex(r.lrr), "\n") + print("pc ", hex(r.pc), "\n") + print("cpsr ", hex(r.cpsr), "\n") +} + +func stackcheck() { + // TODO: not implemented on ARM +} + +type _DISPATCHER_CONTEXT struct { + controlPc uint32 + imageBase uint32 + functionEntry uintptr + establisherFrame uint32 + targetIp uint32 + context *context + languageHandler uintptr + handlerData uintptr +} + +func (c *_DISPATCHER_CONTEXT) ctx() *context { + return c.context +} diff --git a/platform/dbops/binaries/go/go/src/runtime/defs_windows_arm64.go b/platform/dbops/binaries/go/go/src/runtime/defs_windows_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..70e28d2ae2875ebe039983c524a58532d1ab014a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/defs_windows_arm64.go @@ -0,0 +1,104 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// NOTE(rsc): _CONTEXT_CONTROL is actually 0x400001 and should include PC, SP, and LR. +// However, empirically, LR doesn't come along on Windows 10 +// unless you also set _CONTEXT_INTEGER (0x400002). +// Without LR, we skip over the next-to-bottom function in profiles +// when the bottom function is frameless. +// So we set both here, to make a working _CONTEXT_CONTROL. 
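
(The decomposition sketched above applies here too, with the 0x400000 arm64 tag.) Separately, each prepareContextForSigResume in these Windows files follows one scheme: the interrupted sp and pc are parked in two scratch registers (EDX/ECX on 386, R8/R9 on amd64, R0/R1 on arm, X0/X1 on arm64) so a matching assembly stub can recover them after the handler redirects execution. A hedged sketch of the handler side; redirectToStub is an illustrative name and the real sequence lives in the runtime's Windows signal handling:

// redirectToStub parks the faulting sp/pc in scratch registers, then
// points the context at stub; when the kernel resumes the thread, stub
// runs with the parked values still in those registers.
func redirectToStub(c *context, stub uintptr) {
	prepareContextForSigResume(c)
	c.set_ip(stub)
}
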
+const _CONTEXT_CONTROL = 0x400003 + +type neon128 struct { + low uint64 + high int64 +} + +// See https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-arm64_nt_context +type context struct { + contextflags uint32 + cpsr uint32 + x [31]uint64 // fp is x[29], lr is x[30] + xsp uint64 + pc uint64 + v [32]neon128 + fpcr uint32 + fpsr uint32 + bcr [8]uint32 + bvr [8]uint64 + wcr [2]uint32 + wvr [2]uint64 +} + +func (c *context) ip() uintptr { return uintptr(c.pc) } +func (c *context) sp() uintptr { return uintptr(c.xsp) } +func (c *context) lr() uintptr { return uintptr(c.x[30]) } + +func (c *context) set_ip(x uintptr) { c.pc = uint64(x) } +func (c *context) set_sp(x uintptr) { c.xsp = uint64(x) } +func (c *context) set_lr(x uintptr) { c.x[30] = uint64(x) } +func (c *context) set_fp(x uintptr) { c.x[29] = uint64(x) } + +func prepareContextForSigResume(c *context) { + c.x[0] = c.xsp + c.x[1] = c.pc +} + +func dumpregs(r *context) { + print("r0 ", hex(r.x[0]), "\n") + print("r1 ", hex(r.x[1]), "\n") + print("r2 ", hex(r.x[2]), "\n") + print("r3 ", hex(r.x[3]), "\n") + print("r4 ", hex(r.x[4]), "\n") + print("r5 ", hex(r.x[5]), "\n") + print("r6 ", hex(r.x[6]), "\n") + print("r7 ", hex(r.x[7]), "\n") + print("r8 ", hex(r.x[8]), "\n") + print("r9 ", hex(r.x[9]), "\n") + print("r10 ", hex(r.x[10]), "\n") + print("r11 ", hex(r.x[11]), "\n") + print("r12 ", hex(r.x[12]), "\n") + print("r13 ", hex(r.x[13]), "\n") + print("r14 ", hex(r.x[14]), "\n") + print("r15 ", hex(r.x[15]), "\n") + print("r16 ", hex(r.x[16]), "\n") + print("r17 ", hex(r.x[17]), "\n") + print("r18 ", hex(r.x[18]), "\n") + print("r19 ", hex(r.x[19]), "\n") + print("r20 ", hex(r.x[20]), "\n") + print("r21 ", hex(r.x[21]), "\n") + print("r22 ", hex(r.x[22]), "\n") + print("r23 ", hex(r.x[23]), "\n") + print("r24 ", hex(r.x[24]), "\n") + print("r25 ", hex(r.x[25]), "\n") + print("r26 ", hex(r.x[26]), "\n") + print("r27 ", hex(r.x[27]), "\n") + print("r28 ", hex(r.x[28]), "\n") + print("r29 ", hex(r.x[29]), "\n") + print("lr ", hex(r.x[30]), "\n") + print("sp ", hex(r.xsp), "\n") + print("pc ", hex(r.pc), "\n") + print("cpsr ", hex(r.cpsr), "\n") +} + +func stackcheck() { + // TODO: not implemented on ARM +} + +type _DISPATCHER_CONTEXT struct { + controlPc uint64 + imageBase uint64 + functionEntry uintptr + establisherFrame uint64 + targetIp uint64 + context *context + languageHandler uintptr + handlerData uintptr +} + +func (c *_DISPATCHER_CONTEXT) ctx() *context { + return c.context +} diff --git a/platform/dbops/binaries/go/go/src/runtime/duff_386.s b/platform/dbops/binaries/go/go/src/runtime/duff_386.s new file mode 100644 index 0000000000000000000000000000000000000000..ab01430dd21fb7debaded9c3dd035564a41ffc9e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/duff_386.s @@ -0,0 +1,779 @@ +// Code generated by mkduff.go; DO NOT EDIT. +// Run go generate from src/runtime to update. +// See mkduff.go for comments. 
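
duffzero below is Duff's device in its simplest form: a straight run of STOSL instructions (each stores the four zero bytes the compiler has placed in AX at DI and advances DI by 4) ending in a single RET. The compiler never calls the top of the routine; it jumps into the run at an offset chosen so that exactly the needed number of stores execute, with no loop counter. A Go model of what entering at a given offset computes (illustrative only; the real dispatch is a compile-time jump-target calculation):

// duffzeroModel: entering the STOSL run with n stores left before RET
// zeroes exactly 4*n bytes starting at dst, with no loop overhead.
func duffzeroModel(dst []uint32, n int) {
	for i := 0; i < n; i++ { // one iteration per STOSL that executes
		dst[i] = 0
	}
}
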
+ +#include "textflag.h" + +TEXT runtime·duffzero(SB), NOSPLIT, $0-0 + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + STOSL + RET + +TEXT runtime·duffcopy(SB), NOSPLIT, $0-0 + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL 
$4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL 
(SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + MOVL (SI), CX + ADDL $4, SI + MOVL CX, (DI) + ADDL $4, DI + + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/duff_amd64.s b/platform/dbops/binaries/go/go/src/runtime/duff_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..69e9980a30e7acc943634772c8bfb172cecd37b8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/duff_amd64.s @@ -0,0 +1,427 @@ +// Code generated by mkduff.go; DO NOT EDIT. +// Run go generate from src/runtime to update. +// See mkduff.go for comments. 
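In the amd64 version below, the zero source is X15, which Go's internal ABI keeps zeroed across Go code, so duffzero needs no setup by the caller: each unrolled block is four 16-byte MOVUPS stores followed by a LEAQ that advances DI by 64 bytes, and duffcopy moves 16 bytes per step through X0. Entry granularity is therefore one 64-byte block for zeroing and one 16-byte chunk for copying. A sketch of the block arithmetic; the constants are read off the routine below but should be treated as illustrative, since the real entry offsets come from the assembled code:

	package main

	import "fmt"

	const (
		bytesPerBlock = 64 // 4 MOVUPS x 16 bytes per unrolled block
		totalBlocks   = 16 // blocks in the duffzero body below
	)

	// blocksToSkip: how many leading blocks a jump into duffzero
	// bypasses so the remaining blocks zero exactly size bytes
	// (size is assumed to be a multiple of bytesPerBlock here).
	func blocksToSkip(size int) int {
		return totalBlocks - size/bytesPerBlock
	}

	func main() {
		fmt.Println(blocksToSkip(128)) // 14: only the last 2 blocks run
	}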
+ +#include "textflag.h" + +TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0 + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) + LEAQ 64(DI),DI + + RET + +TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0 + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, 
SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + MOVUPS (SI), X0 + ADDQ $16, SI + MOVUPS X0, (DI) + ADDQ $16, DI + + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/duff_arm.s b/platform/dbops/binaries/go/go/src/runtime/duff_arm.s new file mode 100644 index 0000000000000000000000000000000000000000..ba8235b7409a7b3a27ea8c74fdb4700025196385 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/duff_arm.s @@ -0,0 +1,523 @@ +// Code generated by mkduff.go; DO NOT EDIT. +// Run go generate from src/runtime to update. +// See mkduff.go for comments. 
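The arm version below leans on post-increment addressing: each MOVW.P R0, 4(R1) stores the zero register and advances the pointer in a single 4-byte instruction, so duffzero costs one instruction per word and a jump target exists at every word boundary. duffcopy streams each word through R0 with a paired post-increment load from R1 (source) and store to R2 (destination). The arm64 file later in this diff applies the same trick at doubleword-pair granularity with STP.P and LDP.P, zeroing or moving 16 bytes per instruction.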
+ +#include "textflag.h" + +TEXT runtime·duffzero(SB), NOSPLIT, $0-0 + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + MOVW.P R0, 4(R1) + RET + +TEXT runtime·duffcopy(SB), NOSPLIT, $0-0 + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + 
MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 
4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + MOVW.P 4(R1), R0 + MOVW.P R0, 4(R2) + + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/duff_arm64.s b/platform/dbops/binaries/go/go/src/runtime/duff_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..33c4905078d242502ac00743a8b705f3ce16e23b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/duff_arm64.s @@ -0,0 +1,267 @@ +// Code generated by mkduff.go; DO NOT EDIT. +// Run go generate from src/runtime to update. +// See mkduff.go for comments. + +#include "textflag.h" + +TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0 + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP.P (ZR, ZR), 16(R20) + STP (ZR, ZR), (R20) + RET + +TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0 + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P 
(R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + LDP.P 16(R20), (R26, R27) + STP.P (R26, R27), 16(R21) + + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/duff_loong64.s b/platform/dbops/binaries/go/go/src/runtime/duff_loong64.s new file mode 100644 index 0000000000000000000000000000000000000000..b05502d91db2ef5a0bd7fa845f22427b8eed871d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/duff_loong64.s @@ -0,0 +1,907 @@ +// Code generated by mkduff.go; DO NOT EDIT. +// Run go generate from src/runtime to update. +// See mkduff.go for comments. 
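LoongArch has no post-increment addressing mode, so in the loong64 version below every doubleword costs two fixed-width 4-byte instructions: a MOVV store (or load) plus an explicit ADDV $8 pointer bump, with R20/R21 as the pointers and R30 as the copy scratch register; the riscv64 file at the end of this diff follows the same two-instruction pattern. Under that layout the jump-target arithmetic is simple. A hedged sketch, with assumed names and no account taken of any prologue:

	package main

	import "fmt"

	// duffzeroEntry sketches the byte offset of the jump target for
	// zeroing n of maxWords doublewords, given two 4-byte
	// instructions (MOVV + ADDV) per word. Illustrative only; the
	// compiler derives real offsets from the assembled routine.
	func duffzeroEntry(maxWords, n int) int {
		const instrBytes = 4 // fixed-width LoongArch encoding
		return (maxWords - n) * 2 * instrBytes
	}

	func main() {
		fmt.Println(duffzeroEntry(128, 16)) // 896
	}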
+ +#include "textflag.h" + +TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + 
ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + RET + +TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0 + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + 
ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + 
MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + MOVV (R20), R30 + ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 + + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/duff_mips64x.s b/platform/dbops/binaries/go/go/src/runtime/duff_mips64x.s new file mode 100644 index 0000000000000000000000000000000000000000..3a8524c78bfe1ac0265796f22276a79b703b9168 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/duff_mips64x.s @@ -0,0 +1,909 @@ +// Code generated by mkduff.go; DO NOT EDIT. +// Run go generate from src/runtime to update. +// See mkduff.go for comments. 
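The mips64x file below is shared between big- and little-endian targets via its //go:build mips64 || mips64le constraint. Its pointer conventions are asymmetric: duffzero stores to 8(R1) and then bumps R1, so the incoming pointer sits one doubleword below the first slot to be cleared, while duffcopy addresses (R1) and (R2) directly and bumps both pointers after each transfer, using R23 as the scratch register.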
+ +//go:build mips64 || mips64le + +#include "textflag.h" + +TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + 
MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + MOVV R0, 8(R1) + ADDV $8, R1 + RET + +TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0 + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, 
R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 
+ + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + MOVV (R1), R23 + ADDV $8, R1 + MOVV R23, (R2) + ADDV $8, R2 + + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/duff_ppc64x.s b/platform/dbops/binaries/go/go/src/runtime/duff_ppc64x.s new file mode 100644 index 0000000000000000000000000000000000000000..a3caaa881728a05d33c2cf64e23fdeea67823e34 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/duff_ppc64x.s @@ -0,0 +1,397 @@ +// Code generated by mkduff.go; DO NOT EDIT. +// Run go generate from src/runtime to update. +// See mkduff.go for comments. 
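On ppc64x the update-form instructions fold the pointer bump into the memory access: MOVDU R0, 8(R20) stores to R20+8 and leaves the incremented address in R20, so zeroing is again one instruction per doubleword and, as in the mips64x duffzero, the incoming pointer sits one doubleword below the first slot. duffcopy pairs a load-with-update from R20 with a store-with-update to R21 through the scratch register R5.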
+ +//go:build ppc64 || ppc64le + +#include "textflag.h" + +TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0 + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + MOVDU R0, 8(R20) + RET + +TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0 + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + 
MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + 
MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + MOVDU 8(R20), R5 + MOVDU R5, 8(R21) + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/duff_riscv64.s b/platform/dbops/binaries/go/go/src/runtime/duff_riscv64.s new file mode 100644 index 0000000000000000000000000000000000000000..ec447677ad0c17645e7f0099ed7228876526a771 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/duff_riscv64.s @@ -0,0 +1,907 @@ +// Code generated by mkduff.go; DO NOT EDIT. +// Run go generate from src/runtime to update. +// See mkduff.go for comments. + +#include "textflag.h" + +TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, 
X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + MOV ZERO, (X25) + ADD $8, X25 + RET + +TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0 + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + 
MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV 
X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + MOV (X24), X31 + ADD $8, X24 + MOV X31, (X25) + ADD $8, X25 + + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/duff_s390x.s b/platform/dbops/binaries/go/go/src/runtime/duff_s390x.s new file mode 100644 index 0000000000000000000000000000000000000000..95d492a879460027e3ad2c63ab9dfb9509292603 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/duff_s390x.s @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "go_tls.h" +#include "textflag.h" + +// s390x can copy/zero 1-256 bytes with a single instruction, +// so there's no need for these, except to satisfy the prototypes +// in stubs.go. 
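+// The stub bodies below store to address 2 (R0 is read as zero in address +// arithmetic), so an accidental call should fault immediately rather than +// silently corrupt memory.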
+ +TEXT runtime·duffzero(SB),NOSPLIT|NOFRAME,$0-0 + MOVD $0, 2(R0) + RET + +TEXT runtime·duffcopy(SB),NOSPLIT|NOFRAME,$0-0 + MOVD $0, 2(R0) + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/ehooks_test.go b/platform/dbops/binaries/go/go/src/runtime/ehooks_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ee286ecb9a76adcef6a6340adf8a0e19cec3a3fc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/ehooks_test.go @@ -0,0 +1,91 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "internal/platform" + "internal/testenv" + "os/exec" + "runtime" + "strings" + "testing" +) + +func TestExitHooks(t *testing.T) { + bmodes := []string{""} + if testing.Short() { + t.Skip("skipping due to -short") + } + // Note the HasCGO() test below; this is to prevent the test + // running if CGO_ENABLED=0 is in effect. + haverace := platform.RaceDetectorSupported(runtime.GOOS, runtime.GOARCH) + if haverace && testenv.HasCGO() { + bmodes = append(bmodes, "-race") + } + for _, bmode := range bmodes { + scenarios := []struct { + mode string + expected string + musthave string + }{ + { + mode: "simple", + expected: "bar foo", + musthave: "", + }, + { + mode: "goodexit", + expected: "orange apple", + musthave: "", + }, + { + mode: "badexit", + expected: "blub blix", + musthave: "", + }, + { + mode: "panics", + expected: "", + musthave: "fatal error: internal error: exit hook invoked panic", + }, + { + mode: "callsexit", + expected: "", + musthave: "fatal error: internal error: exit hook invoked exit", + }, + } + + exe, err := buildTestProg(t, "testexithooks", bmode) + if err != nil { + t.Fatal(err) + } + + bt := "" + if bmode != "" { + bt = " bmode: " + bmode + } + for _, s := range scenarios { + cmd := exec.Command(exe, []string{"-mode", s.mode}...) + out, _ := cmd.CombinedOutput() + outs := strings.ReplaceAll(string(out), "\n", " ") + outs = strings.TrimSpace(outs) + if s.expected != "" { + if s.expected != outs { + t.Logf("raw output: %q", outs) + t.Errorf("failed%s mode %s: wanted %q got %q", bt, + s.mode, s.expected, outs) + } + } else if s.musthave != "" { + if !strings.Contains(outs, s.musthave) { + t.Logf("raw output: %q", outs) + t.Errorf("failed mode %s: output does not contain %q", + s.mode, s.musthave) + } + } else { + panic("badly written scenario") + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/env_plan9.go b/platform/dbops/binaries/go/go/src/runtime/env_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..d206c5dbba4b1fcf1e3745f78cd9f215aac17993 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/env_plan9.go @@ -0,0 +1,126 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +const ( + // Plan 9 environment device + envDir = "/env/" + // size of buffer to read from a directory + dirBufSize = 4096 + // size of buffer to read an environment variable (may grow) + envBufSize = 128 + // offset of the name field in a 9P directory entry - see syscall.UnmarshalDir() + nameOffset = 39 +) + +// goenvs caches the Plan 9 environment variables at start of execution into +// string array envs, to supply the initial contents for os.Environ. 
+// Subsequent calls to os.Setenv will change this cache, without writing back +// to the (possibly shared) Plan 9 environment, so that Setenv and Getenv +// conform to the same Posix semantics as on other operating systems. +// For Plan 9 shared environment semantics, instead of Getenv(key) and +// Setenv(key, value), one can use os.ReadFile("/env/" + key) and +// os.WriteFile("/env/" + key, value, 0666) respectively. +// +//go:nosplit +func goenvs() { + buf := make([]byte, envBufSize) + copy(buf, envDir) + dirfd := open(&buf[0], _OREAD, 0) + if dirfd < 0 { + return + } + defer closefd(dirfd) + dofiles(dirfd, func(name []byte) { + name = append(name, 0) + buf = buf[:len(envDir)] + copy(buf, envDir) + buf = append(buf, name...) + fd := open(&buf[0], _OREAD, 0) + if fd < 0 { + return + } + defer closefd(fd) + n := len(buf) + r := 0 + for { + r = int(pread(fd, unsafe.Pointer(&buf[0]), int32(n), 0)) + if r < n { + break + } + n = int(seek(fd, 0, 2)) + 1 + if len(buf) < n { + buf = make([]byte, n) + } + } + if r <= 0 { + r = 0 + } else if buf[r-1] == 0 { + r-- + } + name[len(name)-1] = '=' + env := make([]byte, len(name)+r) + copy(env, name) + copy(env[len(name):], buf[:r]) + envs = append(envs, string(env)) + }) +} + +// dofiles reads the directory opened with file descriptor fd, applying function f +// to each filename in it. +// +//go:nosplit +func dofiles(dirfd int32, f func([]byte)) { + dirbuf := new([dirBufSize]byte) + + var off int64 = 0 + for { + n := pread(dirfd, unsafe.Pointer(&dirbuf[0]), int32(dirBufSize), off) + if n <= 0 { + return + } + for b := dirbuf[:n]; len(b) > 0; { + var name []byte + name, b = gdirname(b) + if name == nil { + return + } + f(name) + } + off += int64(n) + } +} + +// gdirname returns the first filename from a buffer of directory entries, +// and a slice containing the remaining directory entries. +// If the buffer doesn't start with a valid directory entry, the returned name is nil. +// +//go:nosplit +func gdirname(buf []byte) (name []byte, rest []byte) { + if 2+nameOffset+2 > len(buf) { + return + } + entryLen, buf := gbit16(buf) + if entryLen > len(buf) { + return + } + n, b := gbit16(buf[nameOffset:]) + if n > len(b) { + return + } + name = b[:n] + rest = buf[entryLen:] + return +} + +// gbit16 reads a 16-bit little-endian binary number from b and returns it +// with the remaining slice of b. +// +//go:nosplit +func gbit16(b []byte) (int, []byte) { + return int(b[0]) | int(b[1])<<8, b[2:] +} diff --git a/platform/dbops/binaries/go/go/src/runtime/env_posix.go b/platform/dbops/binaries/go/go/src/runtime/env_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..0eb4f0d7a3b8b25c3201d66376ed7f5564ee5456 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/env_posix.go @@ -0,0 +1,70 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func gogetenv(key string) string { + env := environ() + if env == nil { + throw("getenv before env init") + } + for _, s := range env { + if len(s) > len(key) && s[len(key)] == '=' && envKeyEqual(s[:len(key)], key) { + return s[len(key)+1:] + } + } + return "" +} + +// envKeyEqual reports whether a == b, with ASCII-only case insensitivity +// on Windows. The two strings must have the same length. 
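+// For example, on Windows envKeyEqual("Path", "PATH") reports true, so the +// linear scan in gogetenv treats both spellings as the same key; on other +// platforms the comparison is exact.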
+func envKeyEqual(a, b string) bool { + if GOOS == "windows" { // case insensitive + for i := 0; i < len(a); i++ { + ca, cb := a[i], b[i] + if ca == cb || lowerASCII(ca) == lowerASCII(cb) { + continue + } + return false + } + return true + } + return a == b +} + +func lowerASCII(c byte) byte { + if 'A' <= c && c <= 'Z' { + return c + ('a' - 'A') + } + return c +} + +var _cgo_setenv unsafe.Pointer // pointer to C function +var _cgo_unsetenv unsafe.Pointer // pointer to C function + +// Update the C environment if cgo is loaded. +func setenv_c(k string, v string) { + if _cgo_setenv == nil { + return + } + arg := [2]unsafe.Pointer{cstring(k), cstring(v)} + asmcgocall(_cgo_setenv, unsafe.Pointer(&arg)) +} + +// Update the C environment if cgo is loaded. +func unsetenv_c(k string) { + if _cgo_unsetenv == nil { + return + } + arg := [1]unsafe.Pointer{cstring(k)} + asmcgocall(_cgo_unsetenv, unsafe.Pointer(&arg)) +} + +func cstring(s string) unsafe.Pointer { + p := make([]byte, len(s)+1) + copy(p, s) + return unsafe.Pointer(&p[0]) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/env_test.go b/platform/dbops/binaries/go/go/src/runtime/env_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c009d0f31e1758ce725124fad1b5e58387ccb392 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/env_test.go @@ -0,0 +1,43 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "runtime" + "syscall" + "testing" +) + +func TestFixedGOROOT(t *testing.T) { + // Restore both the real GOROOT environment variable, and runtime's copies: + if orig, ok := syscall.Getenv("GOROOT"); ok { + defer syscall.Setenv("GOROOT", orig) + } else { + defer syscall.Unsetenv("GOROOT") + } + envs := runtime.Envs() + oldenvs := append([]string{}, envs...) + defer runtime.SetEnvs(oldenvs) + + // attempt to reuse existing envs backing array. + want := runtime.GOROOT() + runtime.SetEnvs(append(envs[:0], "GOROOT="+want)) + + if got := runtime.GOROOT(); got != want { + t.Errorf(`initial runtime.GOROOT()=%q, want %q`, got, want) + } + if err := syscall.Setenv("GOROOT", "/os"); err != nil { + t.Fatal(err) + } + if got := runtime.GOROOT(); got != want { + t.Errorf(`after setenv runtime.GOROOT()=%q, want %q`, got, want) + } + if err := syscall.Unsetenv("GOROOT"); err != nil { + t.Fatal(err) + } + if got := runtime.GOROOT(); got != want { + t.Errorf(`after unsetenv runtime.GOROOT()=%q, want %q`, got, want) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/error.go b/platform/dbops/binaries/go/go/src/runtime/error.go new file mode 100644 index 0000000000000000000000000000000000000000..b507f25e185d661789d3531ec64c9a5659f7ce47 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/error.go @@ -0,0 +1,330 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "internal/bytealg" + +// The Error interface identifies a run time error. +type Error interface { + error + + // RuntimeError is a no-op function but + // serves to distinguish types that are run time + // errors from ordinary errors: a type is a + // run time error if it has a RuntimeError method. + RuntimeError() +} + +// A TypeAssertionError explains a failed type assertion. 
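+// For example, the failing assertion +// +//	var i any = "hello" +//	n := i.(int) +// +// panics with "interface conversion: interface {} is string, not int".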
+type TypeAssertionError struct { + _interface *_type + concrete *_type + asserted *_type + missingMethod string // one method needed by Interface, missing from Concrete +} + +func (*TypeAssertionError) RuntimeError() {} + +func (e *TypeAssertionError) Error() string { + inter := "interface" + if e._interface != nil { + inter = toRType(e._interface).string() + } + as := toRType(e.asserted).string() + if e.concrete == nil { + return "interface conversion: " + inter + " is nil, not " + as + } + cs := toRType(e.concrete).string() + if e.missingMethod == "" { + msg := "interface conversion: " + inter + " is " + cs + ", not " + as + if cs == as { + // provide slightly clearer error message + if toRType(e.concrete).pkgpath() != toRType(e.asserted).pkgpath() { + msg += " (types from different packages)" + } else { + msg += " (types from different scopes)" + } + } + return msg + } + return "interface conversion: " + cs + " is not " + as + + ": missing method " + e.missingMethod +} + +// itoa converts val to a decimal representation. The result is +// written somewhere within buf and the location of the result is returned. +// buf must be at least 20 bytes. +// +//go:nosplit +func itoa(buf []byte, val uint64) []byte { + i := len(buf) - 1 + for val >= 10 { + buf[i] = byte(val%10 + '0') + i-- + val /= 10 + } + buf[i] = byte(val + '0') + return buf[i:] +} + +// An errorString represents a runtime error described by a single string. +type errorString string + +func (e errorString) RuntimeError() {} + +func (e errorString) Error() string { + return "runtime error: " + string(e) +} + +type errorAddressString struct { + msg string // error message + addr uintptr // memory address where the error occurred +} + +func (e errorAddressString) RuntimeError() {} + +func (e errorAddressString) Error() string { + return "runtime error: " + e.msg +} + +// Addr returns the memory address where a fault occurred. +// The address provided is best-effort. +// The veracity of the result may depend on the platform. +// Errors providing this method will only be returned as +// a result of using [runtime/debug.SetPanicOnFault]. +func (e errorAddressString) Addr() uintptr { + return e.addr +} + +// plainError represents a runtime error described by a string without +// the prefix "runtime error: " after invoking errorString.Error(). +// See Issue #14965. +type plainError string + +func (e plainError) RuntimeError() {} + +func (e plainError) Error() string { + return string(e) +} + +// A boundsError represents an indexing or slicing operation gone wrong. +type boundsError struct { + x int64 + y int + // Values in an index or slice expression can be signed or unsigned. + // That means we'd need 65 bits to encode all possible indexes, from -2^63 to 2^64-1. + // Instead, we keep track of whether x should be interpreted as signed or unsigned. + // y is known to be nonnegative and to fit in an int.
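+ // For example, an index expression s[i] with i == -1 is reported with + // signed true and prints as "index out of range [-1]", while an index + // taken from a large unsigned value is reported with signed false so it + // prints as the huge positive number it was.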
+ signed bool + code boundsErrorCode +} + +type boundsErrorCode uint8 + +const ( + boundsIndex boundsErrorCode = iota // s[x], 0 <= x < len(s) failed + + boundsSliceAlen // s[?:x], 0 <= x <= len(s) failed + boundsSliceAcap // s[?:x], 0 <= x <= cap(s) failed + boundsSliceB // s[x:y], 0 <= x <= y failed (but boundsSliceA didn't happen) + + boundsSlice3Alen // s[?:?:x], 0 <= x <= len(s) failed + boundsSlice3Acap // s[?:?:x], 0 <= x <= cap(s) failed + boundsSlice3B // s[?:x:y], 0 <= x <= y failed (but boundsSlice3A didn't happen) + boundsSlice3C // s[x:y:?], 0 <= x <= y failed (but boundsSlice3A/B didn't happen) + + boundsConvert // (*[x]T)(s), 0 <= x <= len(s) failed + // Note: in the above, len(s) and cap(s) are stored in y +) + +// boundsErrorFmts provide error text for various out-of-bounds panics. +// Note: if you change these strings, you should adjust the size of the buffer +// in boundsError.Error below as well. +var boundsErrorFmts = [...]string{ + boundsIndex: "index out of range [%x] with length %y", + boundsSliceAlen: "slice bounds out of range [:%x] with length %y", + boundsSliceAcap: "slice bounds out of range [:%x] with capacity %y", + boundsSliceB: "slice bounds out of range [%x:%y]", + boundsSlice3Alen: "slice bounds out of range [::%x] with length %y", + boundsSlice3Acap: "slice bounds out of range [::%x] with capacity %y", + boundsSlice3B: "slice bounds out of range [:%x:%y]", + boundsSlice3C: "slice bounds out of range [%x:%y:]", + boundsConvert: "cannot convert slice with length %y to array or pointer to array with length %x", +} + +// boundsNegErrorFmts are overriding formats if x is negative. In this case there's no need to report y. +var boundsNegErrorFmts = [...]string{ + boundsIndex: "index out of range [%x]", + boundsSliceAlen: "slice bounds out of range [:%x]", + boundsSliceAcap: "slice bounds out of range [:%x]", + boundsSliceB: "slice bounds out of range [%x:]", + boundsSlice3Alen: "slice bounds out of range [::%x]", + boundsSlice3Acap: "slice bounds out of range [::%x]", + boundsSlice3B: "slice bounds out of range [:%x:]", + boundsSlice3C: "slice bounds out of range [%x::]", +} + +func (e boundsError) RuntimeError() {} + +func appendIntStr(b []byte, v int64, signed bool) []byte { + if signed && v < 0 { + b = append(b, '-') + v = -v + } + var buf [20]byte + b = append(b, itoa(buf[:], uint64(v))...) + return b +} + +func (e boundsError) Error() string { + fmt := boundsErrorFmts[e.code] + if e.signed && e.x < 0 { + fmt = boundsNegErrorFmts[e.code] + } + // max message length is 99: "runtime error: slice bounds out of range [::%x] with capacity %y" + // x can be at most 20 characters. y can be at most 19. + b := make([]byte, 0, 100) + b = append(b, "runtime error: "...) + for i := 0; i < len(fmt); i++ { + c := fmt[i] + if c != '%' { + b = append(b, c) + continue + } + i++ + switch fmt[i] { + case 'x': + b = appendIntStr(b, e.x, e.signed) + case 'y': + b = appendIntStr(b, int64(e.y), true) + } + } + return string(b) +} + +type stringer interface { + String() string +} + +// printany prints an argument passed to panic. +// If panic is called with a value that has a String or Error method, +// it has already been converted into a string by preprintpanics. 
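+// For example, printany(42) hits the int case and prints "42", while a +// value of a named type falls through to printanycustomtype, which prints +// the type name and value, e.g. "main.myInt(42)".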
+func printany(i any) { + switch v := i.(type) { + case nil: + print("nil") + case bool: + print(v) + case int: + print(v) + case int8: + print(v) + case int16: + print(v) + case int32: + print(v) + case int64: + print(v) + case uint: + print(v) + case uint8: + print(v) + case uint16: + print(v) + case uint32: + print(v) + case uint64: + print(v) + case uintptr: + print(v) + case float32: + print(v) + case float64: + print(v) + case complex64: + print(v) + case complex128: + print(v) + case string: + print(v) + default: + printanycustomtype(i) + } +} + +func printanycustomtype(i any) { + eface := efaceOf(&i) + typestring := toRType(eface._type).string() + + switch eface._type.Kind_ { + case kindString: + print(typestring, `("`, *(*string)(eface.data), `")`) + case kindBool: + print(typestring, "(", *(*bool)(eface.data), ")") + case kindInt: + print(typestring, "(", *(*int)(eface.data), ")") + case kindInt8: + print(typestring, "(", *(*int8)(eface.data), ")") + case kindInt16: + print(typestring, "(", *(*int16)(eface.data), ")") + case kindInt32: + print(typestring, "(", *(*int32)(eface.data), ")") + case kindInt64: + print(typestring, "(", *(*int64)(eface.data), ")") + case kindUint: + print(typestring, "(", *(*uint)(eface.data), ")") + case kindUint8: + print(typestring, "(", *(*uint8)(eface.data), ")") + case kindUint16: + print(typestring, "(", *(*uint16)(eface.data), ")") + case kindUint32: + print(typestring, "(", *(*uint32)(eface.data), ")") + case kindUint64: + print(typestring, "(", *(*uint64)(eface.data), ")") + case kindUintptr: + print(typestring, "(", *(*uintptr)(eface.data), ")") + case kindFloat32: + print(typestring, "(", *(*float32)(eface.data), ")") + case kindFloat64: + print(typestring, "(", *(*float64)(eface.data), ")") + case kindComplex64: + print(typestring, *(*complex64)(eface.data)) + case kindComplex128: + print(typestring, *(*complex128)(eface.data)) + default: + print("(", typestring, ") ", eface.data) + } +} + +// panicwrap generates a panic for a call to a wrapped value method +// with a nil pointer receiver. +// +// It is called from the generated wrapper code. +func panicwrap() { + pc := getcallerpc() + name := funcNameForPrint(funcname(findfunc(pc))) + // name is something like "main.(*T).F". + // We want to extract pkg ("main"), typ ("T"), and meth ("F"). + // Do it by finding the parens. + i := bytealg.IndexByteString(name, '(') + if i < 0 { + throw("panicwrap: no ( in " + name) + } + pkg := name[:i-1] + if i+2 >= len(name) || name[i-1:i+2] != ".(*" { + throw("panicwrap: unexpected string after package name: " + name) + } + name = name[i+2:] + i = bytealg.IndexByteString(name, ')') + if i < 0 { + throw("panicwrap: no ) in " + name) + } + if i+2 >= len(name) || name[i:i+2] != ")." { + throw("panicwrap: unexpected string after type name: " + name) + } + typ := name[:i] + meth := name[i+2:] + panic(plainError("value method " + pkg + "." + typ + "." + meth + " called using nil *" + typ + " pointer")) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/example_test.go b/platform/dbops/binaries/go/go/src/runtime/example_test.go new file mode 100644 index 0000000000000000000000000000000000000000..dcb8f7798e290581eab6858bbcb348966b0cd810 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/example_test.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime_test + +import ( + "fmt" + "runtime" + "strings" +) + +func ExampleFrames() { + c := func() { + // Ask runtime.Callers for up to 10 PCs, including runtime.Callers itself. + pc := make([]uintptr, 10) + n := runtime.Callers(0, pc) + if n == 0 { + // No PCs available. This can happen if the first argument to + // runtime.Callers is large. + // + // Return now to avoid processing the zero Frame that would + // otherwise be returned by frames.Next below. + return + } + + pc = pc[:n] // pass only valid pcs to runtime.CallersFrames + frames := runtime.CallersFrames(pc) + + // Loop to get frames. + // A fixed number of PCs can expand to an indefinite number of Frames. + for { + frame, more := frames.Next() + + // Process this frame. + // + // To keep this example's output stable + // even if there are changes in the testing package, + // stop unwinding when we leave package runtime. + if !strings.Contains(frame.File, "runtime/") { + break + } + fmt.Printf("- more:%v | %s\n", more, frame.Function) + + // Check whether there are more frames to process after this one. + if !more { + break + } + } + } + + b := func() { c() } + a := func() { b() } + + a() + // Output: + // - more:true | runtime.Callers + // - more:true | runtime_test.ExampleFrames.func1 + // - more:true | runtime_test.ExampleFrames.func2 + // - more:true | runtime_test.ExampleFrames.func3 + // - more:true | runtime_test.ExampleFrames +} diff --git a/platform/dbops/binaries/go/go/src/runtime/exithook.go b/platform/dbops/binaries/go/go/src/runtime/exithook.go new file mode 100644 index 0000000000000000000000000000000000000000..65b426b383f8d6ee37538abf01a394a2243a8d70 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/exithook.go @@ -0,0 +1,69 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// addExitHook registers the specified function 'f' to be run at +// program termination (e.g. when someone invokes os.Exit(), or when +// main.main returns). Hooks are run in reverse order of registration: +// first hook added is the last one run. +// +// CAREFUL: the expectation is that addExitHook should only be called +// from a safe context (e.g. not an error/panic path or signal +// handler, preemption enabled, allocation allowed, write barriers +// allowed, etc), and that the exit function 'f' will be invoked under +// similar circumstances. That is to say, we are expecting that 'f' +// uses normal / high-level Go code as opposed to one of the more +// restricted dialects used for the trickier parts of the runtime. +func addExitHook(f func(), runOnNonZeroExit bool) { + exitHooks.hooks = append(exitHooks.hooks, exitHook{f: f, runOnNonZeroExit: runOnNonZeroExit}) +} + +// exitHook stores a function to be run on program exit, registered +// by the utility runtime.addExitHook. +type exitHook struct { + f func() // func to run + runOnNonZeroExit bool // whether to run on non-zero exit code +} + +// exitHooks stores state related to hook functions registered to +// run when program execution terminates. +var exitHooks struct { + hooks []exitHook + runningExitHooks bool +} + +// runExitHooks runs any registered exit hook functions (funcs +// previously registered using runtime.addExitHook). Here 'exitCode' +// is the status code being passed to os.Exit, or zero if the program +// is terminating normally without calling os.Exit.
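+// +// For example, registering a hook that prints "foo" and then one that +// prints "bar" makes a normal exit run them in reverse order: +// +//	addExitHook(func() { print("foo ") }, false) +//	addExitHook(func() { print("bar ") }, false) +//	// output at exit: "bar foo"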
+func runExitHooks(exitCode int) { + if exitHooks.runningExitHooks { + throw("internal error: exit hook invoked exit") + } + exitHooks.runningExitHooks = true + + runExitHook := func(f func()) (caughtPanic bool) { + defer func() { + if x := recover(); x != nil { + caughtPanic = true + } + }() + f() + return + } + + finishPageTrace() + for i := range exitHooks.hooks { + h := exitHooks.hooks[len(exitHooks.hooks)-i-1] + if exitCode != 0 && !h.runOnNonZeroExit { + continue + } + if caughtPanic := runExitHook(h.f); caughtPanic { + throw("internal error: exit hook invoked panic") + } + } + exitHooks.hooks = nil + exitHooks.runningExitHooks = false +} diff --git a/platform/dbops/binaries/go/go/src/runtime/export_aix_test.go b/platform/dbops/binaries/go/go/src/runtime/export_aix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..48455333c146d6e430bf3cd7af287b0411b28669 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_aix_test.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +var SetNonblock = setNonblock diff --git a/platform/dbops/binaries/go/go/src/runtime/export_arm_test.go b/platform/dbops/binaries/go/go/src/runtime/export_arm_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b8a89fc0d2217760cd81a5f5f3687dfa54e83082 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_arm_test.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Export guts for testing. + +package runtime + +var Usplit = usplit diff --git a/platform/dbops/binaries/go/go/src/runtime/export_darwin_test.go b/platform/dbops/binaries/go/go/src/runtime/export_darwin_test.go new file mode 100644 index 0000000000000000000000000000000000000000..48455333c146d6e430bf3cd7af287b0411b28669 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_darwin_test.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +var SetNonblock = setNonblock diff --git a/platform/dbops/binaries/go/go/src/runtime/export_debug_amd64_test.go b/platform/dbops/binaries/go/go/src/runtime/export_debug_amd64_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f9908cd494595cebaebaf430bce6f6aa7a3027b1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_debug_amd64_test.go @@ -0,0 +1,132 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && linux + +package runtime + +import ( + "internal/abi" + "internal/goarch" + "unsafe" +) + +type sigContext struct { + savedRegs sigcontext + // sigcontext.fpstate is a pointer, so we need to save + // its value with a fpstate1 structure.
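+ // savedFP is copied back into the kernel-provided fpstate buffer in + // place by restoreSigContext, since savedRegs holds only the pointer.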
+ savedFP fpstate1 +} + +func sigctxtSetContextRegister(ctxt *sigctxt, x uint64) { + ctxt.regs().rdx = x +} + +func sigctxtAtTrapInstruction(ctxt *sigctxt) bool { + return *(*byte)(unsafe.Pointer(uintptr(ctxt.rip() - 1))) == 0xcc // INT 3 +} + +func sigctxtStatus(ctxt *sigctxt) uint64 { + return ctxt.r12() +} + +func (h *debugCallHandler) saveSigContext(ctxt *sigctxt) { + // Push current PC on the stack. + rsp := ctxt.rsp() - goarch.PtrSize + *(*uint64)(unsafe.Pointer(uintptr(rsp))) = ctxt.rip() + ctxt.set_rsp(rsp) + // Write the argument frame size. + *(*uintptr)(unsafe.Pointer(uintptr(rsp - 16))) = h.argSize + // Save current registers. + h.sigCtxt.savedRegs = *ctxt.regs() + h.sigCtxt.savedFP = *h.sigCtxt.savedRegs.fpstate + h.sigCtxt.savedRegs.fpstate = nil +} + +// case 0 +func (h *debugCallHandler) debugCallRun(ctxt *sigctxt) { + rsp := ctxt.rsp() + memmove(unsafe.Pointer(uintptr(rsp)), h.argp, h.argSize) + if h.regArgs != nil { + storeRegArgs(ctxt.regs(), h.regArgs) + } + // Push return PC. + rsp -= goarch.PtrSize + ctxt.set_rsp(rsp) + // The signal PC is the next PC of the trap instruction. + *(*uint64)(unsafe.Pointer(uintptr(rsp))) = ctxt.rip() + // Set PC to call and context register. + ctxt.set_rip(uint64(h.fv.fn)) + sigctxtSetContextRegister(ctxt, uint64(uintptr(unsafe.Pointer(h.fv)))) +} + +// case 1 +func (h *debugCallHandler) debugCallReturn(ctxt *sigctxt) { + rsp := ctxt.rsp() + memmove(h.argp, unsafe.Pointer(uintptr(rsp)), h.argSize) + if h.regArgs != nil { + loadRegArgs(h.regArgs, ctxt.regs()) + } +} + +// case 2 +func (h *debugCallHandler) debugCallPanicOut(ctxt *sigctxt) { + rsp := ctxt.rsp() + memmove(unsafe.Pointer(&h.panic), unsafe.Pointer(uintptr(rsp)), 2*goarch.PtrSize) +} + +// case 8 +func (h *debugCallHandler) debugCallUnsafe(ctxt *sigctxt) { + rsp := ctxt.rsp() + reason := *(*string)(unsafe.Pointer(uintptr(rsp))) + h.err = plainError(reason) +} + +// case 16 +func (h *debugCallHandler) restoreSigContext(ctxt *sigctxt) { + // Restore all registers except RIP and RSP. + rip, rsp := ctxt.rip(), ctxt.rsp() + fp := ctxt.regs().fpstate + *ctxt.regs() = h.sigCtxt.savedRegs + ctxt.regs().fpstate = fp + *fp = h.sigCtxt.savedFP + ctxt.set_rip(rip) + ctxt.set_rsp(rsp) +} + +// storeRegArgs sets up argument registers in the signal +// context state from an abi.RegArgs. +// +// Both src and dst must be non-nil. 
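+// +// The assignments below follow Go's internal ABI register order on amd64: +// integer arguments in RAX, RBX, RCX, RDI, RSI and R8-R11, floating-point +// arguments in the XMM registers, each saved as two 32-bit halves.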
+func storeRegArgs(dst *sigcontext, src *abi.RegArgs) { + dst.rax = uint64(src.Ints[0]) + dst.rbx = uint64(src.Ints[1]) + dst.rcx = uint64(src.Ints[2]) + dst.rdi = uint64(src.Ints[3]) + dst.rsi = uint64(src.Ints[4]) + dst.r8 = uint64(src.Ints[5]) + dst.r9 = uint64(src.Ints[6]) + dst.r10 = uint64(src.Ints[7]) + dst.r11 = uint64(src.Ints[8]) + for i := range src.Floats { + dst.fpstate._xmm[i].element[0] = uint32(src.Floats[i] >> 0) + dst.fpstate._xmm[i].element[1] = uint32(src.Floats[i] >> 32) + } +} + +func loadRegArgs(dst *abi.RegArgs, src *sigcontext) { + dst.Ints[0] = uintptr(src.rax) + dst.Ints[1] = uintptr(src.rbx) + dst.Ints[2] = uintptr(src.rcx) + dst.Ints[3] = uintptr(src.rdi) + dst.Ints[4] = uintptr(src.rsi) + dst.Ints[5] = uintptr(src.r8) + dst.Ints[6] = uintptr(src.r9) + dst.Ints[7] = uintptr(src.r10) + dst.Ints[8] = uintptr(src.r11) + for i := range dst.Floats { + dst.Floats[i] = uint64(src.fpstate._xmm[i].element[0]) << 0 + dst.Floats[i] |= uint64(src.fpstate._xmm[i].element[1]) << 32 + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/export_debug_arm64_test.go b/platform/dbops/binaries/go/go/src/runtime/export_debug_arm64_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ee902414afe92afc27eb8ae77f887975d997f884 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_debug_arm64_test.go @@ -0,0 +1,135 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 && linux + +package runtime + +import ( + "internal/abi" + "internal/goarch" + "unsafe" +) + +type sigContext struct { + savedRegs sigcontext +} + +func sigctxtSetContextRegister(ctxt *sigctxt, x uint64) { + ctxt.regs().regs[26] = x +} + +func sigctxtAtTrapInstruction(ctxt *sigctxt) bool { + return *(*uint32)(unsafe.Pointer(ctxt.sigpc())) == 0xd4200000 // BRK 0 +} + +func sigctxtStatus(ctxt *sigctxt) uint64 { + return ctxt.r20() +} + +func (h *debugCallHandler) saveSigContext(ctxt *sigctxt) { + sp := ctxt.sp() + sp -= 2 * goarch.PtrSize + ctxt.set_sp(sp) + *(*uint64)(unsafe.Pointer(uintptr(sp))) = ctxt.lr() // save the current lr + ctxt.set_lr(ctxt.pc()) // set new lr to the current pc + // Write the argument frame size. + *(*uintptr)(unsafe.Pointer(uintptr(sp - 16))) = h.argSize + // Save current registers. + h.sigCtxt.savedRegs = *ctxt.regs() +} + +// case 0 +func (h *debugCallHandler) debugCallRun(ctxt *sigctxt) { + sp := ctxt.sp() + memmove(unsafe.Pointer(uintptr(sp)+8), h.argp, h.argSize) + if h.regArgs != nil { + storeRegArgs(ctxt.regs(), h.regArgs) + } + // Push return PC, which should be the signal PC+4, because + // the signal PC is the PC of the trap instruction itself. + ctxt.set_lr(ctxt.pc() + 4) + // Set PC to call and context register. 
+ ctxt.set_pc(uint64(h.fv.fn)) + sigctxtSetContextRegister(ctxt, uint64(uintptr(unsafe.Pointer(h.fv)))) +} + +// case 1 +func (h *debugCallHandler) debugCallReturn(ctxt *sigctxt) { + sp := ctxt.sp() + memmove(h.argp, unsafe.Pointer(uintptr(sp)+8), h.argSize) + if h.regArgs != nil { + loadRegArgs(h.regArgs, ctxt.regs()) + } + // Restore the old lr from *sp + olr := *(*uint64)(unsafe.Pointer(uintptr(sp))) + ctxt.set_lr(olr) + pc := ctxt.pc() + ctxt.set_pc(pc + 4) // step to next instruction +} + +// case 2 +func (h *debugCallHandler) debugCallPanicOut(ctxt *sigctxt) { + sp := ctxt.sp() + memmove(unsafe.Pointer(&h.panic), unsafe.Pointer(uintptr(sp)+8), 2*goarch.PtrSize) + ctxt.set_pc(ctxt.pc() + 4) +} + +// case 8 +func (h *debugCallHandler) debugCallUnsafe(ctxt *sigctxt) { + sp := ctxt.sp() + reason := *(*string)(unsafe.Pointer(uintptr(sp) + 8)) + h.err = plainError(reason) + ctxt.set_pc(ctxt.pc() + 4) +} + +// case 16 +func (h *debugCallHandler) restoreSigContext(ctxt *sigctxt) { + // Restore all registers except for pc and sp + pc, sp := ctxt.pc(), ctxt.sp() + *ctxt.regs() = h.sigCtxt.savedRegs + ctxt.set_pc(pc + 4) + ctxt.set_sp(sp) +} + +// storeRegArgs sets up argument registers in the signal +// context state from an abi.RegArgs. +// +// Both src and dst must be non-nil. +func storeRegArgs(dst *sigcontext, src *abi.RegArgs) { + for i, r := range src.Ints { + dst.regs[i] = uint64(r) + } + for i, r := range src.Floats { + *(fpRegAddr(dst, i)) = r + } +} + +func loadRegArgs(dst *abi.RegArgs, src *sigcontext) { + for i := range dst.Ints { + dst.Ints[i] = uintptr(src.regs[i]) + } + for i := range dst.Floats { + dst.Floats[i] = *(fpRegAddr(src, i)) + } +} + +// fpRegAddr returns the address of the ith fp-simd register in sigcontext. +func fpRegAddr(dst *sigcontext, i int) *uint64 { + /* FP-SIMD registers are saved in sigcontext.__reserved, which is organized in + the following C structs: + struct fpsimd_context { + struct _aarch64_ctx head; + __u32 fpsr; + __u32 fpcr; + __uint128_t vregs[32]; + }; + struct _aarch64_ctx { + __u32 magic; + __u32 size; + }; + So the offset of the ith FP_SIMD register is 16+i*128. + */ + return (*uint64)(unsafe.Pointer(&dst.__reserved[16+i*128])) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/export_debug_ppc64le_test.go b/platform/dbops/binaries/go/go/src/runtime/export_debug_ppc64le_test.go new file mode 100644 index 0000000000000000000000000000000000000000..dd5dce56495945851df8234c5791127019119e7e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_debug_ppc64le_test.go @@ -0,0 +1,131 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64le && linux + +package runtime + +import ( + "internal/abi" + "internal/goarch" + "math" + "unsafe" +) + +type sigContext struct { + savedRegs sigcontext +} + +func sigctxtSetContextRegister(ctxt *sigctxt, x uint64) { + ctxt.regs().gpr[11] = x +} + +func sigctxtAtTrapInstruction(ctxt *sigctxt) bool { + return *(*uint32)(unsafe.Pointer(ctxt.sigpc())) == 0x7fe00008 // Trap +} + +func sigctxtStatus(ctxt *sigctxt) uint64 { + return ctxt.r20() +} + +func (h *debugCallHandler) saveSigContext(ctxt *sigctxt) { + sp := ctxt.sp() + sp -= 4 * goarch.PtrSize + ctxt.set_sp(sp) + *(*uint64)(unsafe.Pointer(uintptr(sp))) = ctxt.link() // save the current lr + ctxt.set_link(ctxt.pc()) // set new lr to the current pc + // Write the argument frame size.
+ *(*uintptr)(unsafe.Pointer(uintptr(sp - 32))) = h.argSize + // Save current registers. + h.sigCtxt.savedRegs = *ctxt.cregs() +} + +// case 0 +func (h *debugCallHandler) debugCallRun(ctxt *sigctxt) { + sp := ctxt.sp() + memmove(unsafe.Pointer(uintptr(sp)+32), h.argp, h.argSize) + if h.regArgs != nil { + storeRegArgs(ctxt.cregs(), h.regArgs) + } + // Push return PC, which should be the signal PC+4, because + // the signal PC is the PC of the trap instruction itself. + ctxt.set_link(ctxt.pc() + 4) + // Set PC to call and context register. + ctxt.set_pc(uint64(h.fv.fn)) + sigctxtSetContextRegister(ctxt, uint64(uintptr(unsafe.Pointer(h.fv)))) +} + +// case 1 +func (h *debugCallHandler) debugCallReturn(ctxt *sigctxt) { + sp := ctxt.sp() + memmove(h.argp, unsafe.Pointer(uintptr(sp)+32), h.argSize) + if h.regArgs != nil { + loadRegArgs(h.regArgs, ctxt.cregs()) + } + // Restore the old lr from *sp + olr := *(*uint64)(unsafe.Pointer(uintptr(sp))) + ctxt.set_link(olr) + pc := ctxt.pc() + ctxt.set_pc(pc + 4) // step to next instruction +} + +// case 2 +func (h *debugCallHandler) debugCallPanicOut(ctxt *sigctxt) { + sp := ctxt.sp() + memmove(unsafe.Pointer(&h.panic), unsafe.Pointer(uintptr(sp)+32), 2*goarch.PtrSize) + ctxt.set_pc(ctxt.pc() + 4) +} + +// case 8 +func (h *debugCallHandler) debugCallUnsafe(ctxt *sigctxt) { + sp := ctxt.sp() + reason := *(*string)(unsafe.Pointer(uintptr(sp) + 40)) + h.err = plainError(reason) + ctxt.set_pc(ctxt.pc() + 4) +} + +// case 16 +func (h *debugCallHandler) restoreSigContext(ctxt *sigctxt) { + // Restore all registers except for pc and sp + pc, sp := ctxt.pc(), ctxt.sp() + *ctxt.cregs() = h.sigCtxt.savedRegs + ctxt.set_pc(pc + 4) + ctxt.set_sp(sp) +} + +// storeRegArgs sets up argument registers in the signal +// context state from an abi.RegArgs. +// +// Both src and dst must be non-nil. +func storeRegArgs(dst *sigcontext, src *abi.RegArgs) { + // Gprs R3..R10, R14..R17 are used to pass int arguments in registers on PPC64 + for i := 0; i < 12; i++ { + if i > 7 { + dst.gp_regs[i+6] = uint64(src.Ints[i]) + } else { + dst.gp_regs[i+3] = uint64(src.Ints[i]) + } + } + // Fprs F1..F13 are used to pass float arguments in registers on PPC64 + for i := 0; i < 12; i++ { + dst.fp_regs[i+1] = math.Float64frombits(src.Floats[i]) + } + +} + +func loadRegArgs(dst *abi.RegArgs, src *sigcontext) { + // Gprs R3..R10, R14..R17 are used to pass int arguments in registers on PPC64 + for i := range [12]int{} { + if i > 7 { + dst.Ints[i] = uintptr(src.gp_regs[i+6]) + } else { + dst.Ints[i] = uintptr(src.gp_regs[i+3]) + } + } + // Fprs F1..F13 are used to pass float arguments in registers on PPC64 + for i := range [12]int{} { + dst.Floats[i] = math.Float64bits(src.fp_regs[i+1]) + } + +} diff --git a/platform/dbops/binaries/go/go/src/runtime/export_debug_test.go b/platform/dbops/binaries/go/go/src/runtime/export_debug_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7ee73ef07ca0e6090ae8471bcdc93bd92cfb52f3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_debug_test.go @@ -0,0 +1,182 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (amd64 || arm64 || ppc64le) && linux + +package runtime + +import ( + "internal/abi" + "unsafe" +) + +// InjectDebugCall injects a debugger call to fn into g. regArgs must +// contain any arguments to fn that are passed in registers, according +// to the internal Go ABI. 
It may be nil if no arguments are passed in +// registers to fn. args must be a pointer to a valid call frame (including +// arguments and return space) for fn, or nil. tkill must be a function that +// will send SIGTRAP to thread ID tid. gp must be locked to its OS thread and +// running. +// +// On success, InjectDebugCall returns the panic value of fn or nil. +// If fn did not panic, its results will be available in args. +func InjectDebugCall(gp *g, fn any, regArgs *abi.RegArgs, stackArgs any, tkill func(tid int) error, returnOnUnsafePoint bool) (any, error) { + if gp.lockedm == 0 { + return nil, plainError("goroutine not locked to thread") + } + + tid := int(gp.lockedm.ptr().procid) + if tid == 0 { + return nil, plainError("missing tid") + } + + f := efaceOf(&fn) + if f._type == nil || f._type.Kind_&kindMask != kindFunc { + return nil, plainError("fn must be a function") + } + fv := (*funcval)(f.data) + + a := efaceOf(&stackArgs) + if a._type != nil && a._type.Kind_&kindMask != kindPtr { + return nil, plainError("args must be a pointer or nil") + } + argp := a.data + var argSize uintptr + if argp != nil { + argSize = (*ptrtype)(unsafe.Pointer(a._type)).Elem.Size_ + } + + h := new(debugCallHandler) + h.gp = gp + // gp may not be running right now, but we can still get the M + // it will run on since it's locked. + h.mp = gp.lockedm.ptr() + h.fv, h.regArgs, h.argp, h.argSize = fv, regArgs, argp, argSize + h.handleF = h.handle // Avoid allocating closure during signal + + defer func() { testSigtrap = nil }() + for i := 0; ; i++ { + testSigtrap = h.inject + noteclear(&h.done) + h.err = "" + + if err := tkill(tid); err != nil { + return nil, err + } + // Wait for completion. + notetsleepg(&h.done, -1) + if h.err != "" { + switch h.err { + case "call not at safe point": + if returnOnUnsafePoint { + // This is for TestDebugCallUnsafePoint. + return nil, h.err + } + fallthrough + case "retry _Grunnable", "executing on Go runtime stack", "call from within the Go runtime": + // These are transient states. Try to get out of them. + if i < 100 { + usleep(100) + Gosched() + continue + } + } + return nil, h.err + } + return h.panic, nil + } +} + +type debugCallHandler struct { + gp *g + mp *m + fv *funcval + regArgs *abi.RegArgs + argp unsafe.Pointer + argSize uintptr + panic any + + handleF func(info *siginfo, ctxt *sigctxt, gp2 *g) bool + + err plainError + done note + sigCtxt sigContext +} + +func (h *debugCallHandler) inject(info *siginfo, ctxt *sigctxt, gp2 *g) bool { + // TODO(49370): This code is riddled with write barriers, but called from + // a signal handler. Add the go:nowritebarrierrec annotation and restructure + // this to avoid write barriers. + + switch h.gp.atomicstatus.Load() { + case _Grunning: + if getg().m != h.mp { + println("trap on wrong M", getg().m, h.mp) + return false + } + // Save the signal context + h.saveSigContext(ctxt) + // Set PC to debugCallV2. + ctxt.setsigpc(uint64(abi.FuncPCABIInternal(debugCallV2))) + // Call injected. Switch to the debugCall protocol. + testSigtrap = h.handleF + case _Grunnable: + // Ask InjectDebugCall to pause for a bit and then try + // again to interrupt this goroutine. + h.err = plainError("retry _Grunnable") + notewakeup(&h.done) + default: + h.err = plainError("goroutine in unexpected state at call inject") + notewakeup(&h.done) + } + // Resume execution. 
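+ // Returning true reports the SIGTRAP as expected and already handled, so + // the runtime's signal handler does not treat it as a fatal trap.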
+ return true +} + +func (h *debugCallHandler) handle(info *siginfo, ctxt *sigctxt, gp2 *g) bool { + // TODO(49370): This code is riddled with write barriers, but called from + // a signal handler. Add the go:nowritebarrierrec annotation and restructure + // this to avoid write barriers. + + // Double-check m. + if getg().m != h.mp { + println("trap on wrong M", getg().m, h.mp) + return false + } + f := findfunc(ctxt.sigpc()) + if !(hasPrefix(funcname(f), "runtime.debugCall") || hasPrefix(funcname(f), "debugCall")) { + println("trap in unknown function", funcname(f)) + return false + } + if !sigctxtAtTrapInstruction(ctxt) { + println("trap at non-INT3 instruction pc =", hex(ctxt.sigpc())) + return false + } + + switch status := sigctxtStatus(ctxt); status { + case 0: + // Frame is ready. Copy the arguments to the frame and to registers. + // Call the debug function. + h.debugCallRun(ctxt) + case 1: + // Function returned. Copy frame and result registers back out. + h.debugCallReturn(ctxt) + case 2: + // Function panicked. Copy panic out. + h.debugCallPanicOut(ctxt) + case 8: + // Call isn't safe. Get the reason. + h.debugCallUnsafe(ctxt) + // Don't wake h.done. We need to transition to status 16 first. + case 16: + h.restoreSigContext(ctxt) + // Done + notewakeup(&h.done) + default: + h.err = plainError("unexpected debugCallV2 status") + notewakeup(&h.done) + } + // Resume execution. + return true +} diff --git a/platform/dbops/binaries/go/go/src/runtime/export_debuglog_test.go b/platform/dbops/binaries/go/go/src/runtime/export_debuglog_test.go new file mode 100644 index 0000000000000000000000000000000000000000..04ac79f35791c7d47884bb1e048ab51e5f64a299 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_debuglog_test.go @@ -0,0 +1,46 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Export debuglog guts for testing. + +package runtime + +const DlogEnabled = dlogEnabled + +const DebugLogBytes = debugLogBytes + +const DebugLogStringLimit = debugLogStringLimit + +var Dlog = dlog + +func (l *dlogger) End() { l.end() } +func (l *dlogger) B(x bool) *dlogger { return l.b(x) } +func (l *dlogger) I(x int) *dlogger { return l.i(x) } +func (l *dlogger) I16(x int16) *dlogger { return l.i16(x) } +func (l *dlogger) U64(x uint64) *dlogger { return l.u64(x) } +func (l *dlogger) Hex(x uint64) *dlogger { return l.hex(x) } +func (l *dlogger) P(x any) *dlogger { return l.p(x) } +func (l *dlogger) S(x string) *dlogger { return l.s(x) } +func (l *dlogger) PC(x uintptr) *dlogger { return l.pc(x) } + +func DumpDebugLog() string { + gp := getg() + gp.writebuf = make([]byte, 0, 1<<20) + printDebugLog() + buf := gp.writebuf + gp.writebuf = nil + + return string(buf) +} + +func ResetDebugLog() { + stw := stopTheWorld(stwForTestResetDebugLog) + for l := allDloggers; l != nil; l = l.allLink { + l.w.write = 0 + l.w.tick, l.w.nano = 0, 0 + l.w.r.begin, l.w.r.end = 0, 0 + l.w.r.tick, l.w.r.nano = 0, 0 + } + startTheWorld(stw) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/export_linux_test.go b/platform/dbops/binaries/go/go/src/runtime/export_linux_test.go new file mode 100644 index 0000000000000000000000000000000000000000..52afd28666e9af6c7d50a97aa7880bfea3a685bf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_linux_test.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Export guts for testing. + +package runtime + +const SiginfoMaxSize = _si_max_size +const SigeventMaxSize = _sigev_max_size + +var NewOSProc0 = newosproc0 +var Mincore = mincore + +type Siginfo siginfo +type Sigevent sigevent diff --git a/platform/dbops/binaries/go/go/src/runtime/export_mmap_test.go b/platform/dbops/binaries/go/go/src/runtime/export_mmap_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f73fcbde9bdf9572aefdcc836248862874b53473 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_mmap_test.go @@ -0,0 +1,21 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +// Export guts for testing. + +package runtime + +var Mmap = mmap +var Munmap = munmap + +const ENOMEM = _ENOMEM +const MAP_ANON = _MAP_ANON +const MAP_PRIVATE = _MAP_PRIVATE +const MAP_FIXED = _MAP_FIXED + +func GetPhysPageSize() uintptr { + return physPageSize +} diff --git a/platform/dbops/binaries/go/go/src/runtime/export_pipe2_test.go b/platform/dbops/binaries/go/go/src/runtime/export_pipe2_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8d49009b43b280588894a8fa02927fb670105a44 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_pipe2_test.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris + +package runtime + +func Pipe() (r, w int32, errno int32) { + return pipe2(0) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/export_pipe_test.go b/platform/dbops/binaries/go/go/src/runtime/export_pipe_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0583039982ebbc909af45356c4770f8311a1d5f2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_pipe_test.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || darwin + +package runtime + +var Pipe = pipe diff --git a/platform/dbops/binaries/go/go/src/runtime/export_test.go b/platform/dbops/binaries/go/go/src/runtime/export_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e25f748ed4506d8da8d5daf46d4b4f7afcdb422e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_test.go @@ -0,0 +1,2001 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Export guts for testing. 
+ +package runtime + +import ( + "internal/abi" + "internal/goarch" + "internal/goexperiment" + "internal/goos" + "runtime/internal/atomic" + "runtime/internal/sys" + "unsafe" +) + +var Fadd64 = fadd64 +var Fsub64 = fsub64 +var Fmul64 = fmul64 +var Fdiv64 = fdiv64 +var F64to32 = f64to32 +var F32to64 = f32to64 +var Fcmp64 = fcmp64 +var Fintto64 = fintto64 +var F64toint = f64toint + +var Entersyscall = entersyscall +var Exitsyscall = exitsyscall +var LockedOSThread = lockedOSThread +var Xadduintptr = atomic.Xadduintptr + +var ReadRandomFailed = &readRandomFailed + +var Fastlog2 = fastlog2 + +var Atoi = atoi +var Atoi32 = atoi32 +var ParseByteCount = parseByteCount + +var Nanotime = nanotime +var NetpollBreak = netpollBreak +var Usleep = usleep + +var PhysPageSize = physPageSize +var PhysHugePageSize = physHugePageSize + +var NetpollGenericInit = netpollGenericInit + +var Memmove = memmove +var MemclrNoHeapPointers = memclrNoHeapPointers + +var CgoCheckPointer = cgoCheckPointer + +const CrashStackImplemented = crashStackImplemented + +const TracebackInnerFrames = tracebackInnerFrames +const TracebackOuterFrames = tracebackOuterFrames + +var MapKeys = keys +var MapValues = values + +var LockPartialOrder = lockPartialOrder + +type LockRank lockRank + +func (l LockRank) String() string { + return lockRank(l).String() +} + +const PreemptMSupported = preemptMSupported + +type LFNode struct { + Next uint64 + Pushcnt uintptr +} + +func LFStackPush(head *uint64, node *LFNode) { + (*lfstack)(head).push((*lfnode)(unsafe.Pointer(node))) +} + +func LFStackPop(head *uint64) *LFNode { + return (*LFNode)((*lfstack)(head).pop()) +} +func LFNodeValidate(node *LFNode) { + lfnodeValidate((*lfnode)(unsafe.Pointer(node))) +} + +func Netpoll(delta int64) { + systemstack(func() { + netpoll(delta) + }) +} + +func GCMask(x any) (ret []byte) { + systemstack(func() { + ret = getgcmask(x) + }) + return +} + +func RunSchedLocalQueueTest() { + pp := new(p) + gs := make([]g, len(pp.runq)) + Escape(gs) // Ensure gs doesn't move, since we use guintptrs + for i := 0; i < len(pp.runq); i++ { + if g, _ := runqget(pp); g != nil { + throw("runq is not empty initially") + } + for j := 0; j < i; j++ { + runqput(pp, &gs[i], false) + } + for j := 0; j < i; j++ { + if g, _ := runqget(pp); g != &gs[i] { + print("bad element at iter ", i, "/", j, "\n") + throw("bad element") + } + } + if g, _ := runqget(pp); g != nil { + throw("runq is not empty afterwards") + } + } +} + +func RunSchedLocalQueueStealTest() { + p1 := new(p) + p2 := new(p) + gs := make([]g, len(p1.runq)) + Escape(gs) // Ensure gs doesn't move, since we use guintptrs + for i := 0; i < len(p1.runq); i++ { + for j := 0; j < i; j++ { + gs[j].sig = 0 + runqput(p1, &gs[j], false) + } + gp := runqsteal(p2, p1, true) + s := 0 + if gp != nil { + s++ + gp.sig++ + } + for { + gp, _ = runqget(p2) + if gp == nil { + break + } + s++ + gp.sig++ + } + for { + gp, _ = runqget(p1) + if gp == nil { + break + } + gp.sig++ + } + for j := 0; j < i; j++ { + if gs[j].sig != 1 { + print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n") + throw("bad element") + } + } + if s != i/2 && s != i/2+1 { + print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n") + throw("bad steal") + } + } +} + +func RunSchedLocalQueueEmptyTest(iters int) { + // Test that runq is not spuriously reported as empty. + // Runq emptiness affects scheduling decisions and spurious emptiness + // can lead to underutilization (both runnable Gs and idle Ps coexist + // for arbitrary long time). 
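+	// The two goroutines below rendezvous on the ready counter, then race
+	// runqput and runqget against runqempty, cycling through every
+	// combination of the runnext hint via next0 and next1.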
+ done := make(chan bool, 1) + p := new(p) + gs := make([]g, 2) + Escape(gs) // Ensure gs doesn't move, since we use guintptrs + ready := new(uint32) + for i := 0; i < iters; i++ { + *ready = 0 + next0 := (i & 1) == 0 + next1 := (i & 2) == 0 + runqput(p, &gs[0], next0) + go func() { + for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; { + } + if runqempty(p) { + println("next:", next0, next1) + throw("queue is empty") + } + done <- true + }() + for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; { + } + runqput(p, &gs[1], next1) + runqget(p) + <-done + runqget(p) + } +} + +var ( + StringHash = stringHash + BytesHash = bytesHash + Int32Hash = int32Hash + Int64Hash = int64Hash + MemHash = memhash + MemHash32 = memhash32 + MemHash64 = memhash64 + EfaceHash = efaceHash + IfaceHash = ifaceHash +) + +var UseAeshash = &useAeshash + +func MemclrBytes(b []byte) { + s := (*slice)(unsafe.Pointer(&b)) + memclrNoHeapPointers(s.array, uintptr(s.len)) +} + +const HashLoad = hashLoad + +// entry point for testing +func GostringW(w []uint16) (s string) { + systemstack(func() { + s = gostringw(&w[0]) + }) + return +} + +var Open = open +var Close = closefd +var Read = read +var Write = write + +func Envs() []string { return envs } +func SetEnvs(e []string) { envs = e } + +// For benchmarking. + +// blockWrapper is a wrapper type that ensures a T is placed within a +// large object. This is necessary for safely benchmarking things +// that manipulate the heap bitmap, like heapBitsSetType. +// +// More specifically, allocating threads assume they're the sole writers +// to their span's heap bits, which allows those writes to be non-atomic. +// The heap bitmap is written byte-wise, so if one tried to call heapBitsSetType +// on an existing object in a small object span, we might corrupt that +// span's bitmap with a concurrent byte write to the heap bitmap. Large +// object spans contain exactly one object, so we can be sure no other P +// is going to be allocating from it concurrently, hence this wrapper type +// which ensures we have a T in a large object span. +type blockWrapper[T any] struct { + value T + _ [_MaxSmallSize]byte // Ensure we're a large object. +} + +func BenchSetType[T any](n int, resetTimer func()) { + x := new(blockWrapper[T]) + + // Escape x to ensure it is allocated on the heap, as we are + // working on the heap bits here. + Escape(x) + + // Grab the type. + var i any = *new(T) + e := *efaceOf(&i) + t := e._type + + // Benchmark setting the type bits for just the internal T of the block. + benchSetType(n, resetTimer, 1, unsafe.Pointer(&x.value), t) +} + +const maxArrayBlockWrapperLen = 32 + +// arrayBlockWrapper is like blockWrapper, but the interior value is intended +// to be used as a backing store for a slice. +type arrayBlockWrapper[T any] struct { + value [maxArrayBlockWrapperLen]T + _ [_MaxSmallSize]byte // Ensure we're a large object. +} + +// arrayLargeBlockWrapper is like arrayBlockWrapper, but the interior array +// accommodates many more elements. +type arrayLargeBlockWrapper[T any] struct { + value [1024]T + _ [_MaxSmallSize]byte // Ensure we're a large object. +} + +func BenchSetTypeSlice[T any](n int, resetTimer func(), len int) { + // We have two separate cases here because we want to avoid + // tests on big types but relatively small slices to avoid generating + // an allocation that's really big. This will likely force a GC which will + // skew the test results. 
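+	// Slices of up to maxArrayBlockWrapperLen elements use the small
+	// wrapper type; longer slices use the 1024-element wrapper.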
+ var y unsafe.Pointer + if len <= maxArrayBlockWrapperLen { + x := new(arrayBlockWrapper[T]) + // Escape x to ensure it is allocated on the heap, as we are + // working on the heap bits here. + Escape(x) + y = unsafe.Pointer(&x.value[0]) + } else { + x := new(arrayLargeBlockWrapper[T]) + Escape(x) + y = unsafe.Pointer(&x.value[0]) + } + + // Grab the type. + var i any = *new(T) + e := *efaceOf(&i) + t := e._type + + // Benchmark setting the type for a slice created from the array + // of T within the arrayBlock. + benchSetType(n, resetTimer, len, y, t) +} + +// benchSetType is the implementation of the BenchSetType* functions. +// x must be len consecutive Ts allocated within a large object span (to +// avoid a race on the heap bitmap). +// +// Note: this function cannot be generic. It would get its type from one of +// its callers (BenchSetType or BenchSetTypeSlice) whose type parameters are +// set by a call in the runtime_test package. That means this function and its +// callers will get instantiated in the package that provides the type argument, +// i.e. runtime_test. However, we call a function on the system stack. In race +// mode the runtime package is usually left uninstrumented because e.g. g0 has +// no valid racectx, but if we're instantiated in the runtime_test package, +// we might accidentally cause runtime code to be incorrectly instrumented. +func benchSetType(n int, resetTimer func(), len int, x unsafe.Pointer, t *_type) { + // This benchmark doesn't work with the allocheaders experiment. It sets up + // an elaborate scenario to be able to benchmark the function safely, but doing + // this work for the allocheaders' version of the function would be complex. + // Just fail instead and rely on the test code making sure we never get here. + if goexperiment.AllocHeaders { + panic("called benchSetType with allocheaders experiment enabled") + } + + // Compute the input sizes. + size := t.Size() * uintptr(len) + + // Validate this function's invariant. + s := spanOfHeap(uintptr(x)) + if s == nil { + panic("no heap span for input") + } + if s.spanclass.sizeclass() != 0 { + panic("span is not a large object span") + } + + // Round up the size to the size class to make the benchmark a little more + // realistic. However, validate it, to make sure this is safe. + allocSize := roundupsize(size, t.PtrBytes == 0) + if s.npages*pageSize < allocSize { + panic("backing span not large enough for benchmark") + } + + // Benchmark heapBitsSetType by calling it in a loop. This is safe because + // x is in a large object span. + resetTimer() + systemstack(func() { + for i := 0; i < n; i++ { + heapBitsSetType(uintptr(x), allocSize, size, t) + } + }) + + // Make sure x doesn't get freed, since we're taking a uintptr. + KeepAlive(x) +} + +const PtrSize = goarch.PtrSize + +var ForceGCPeriod = &forcegcperiod + +// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises +// the "environment" traceback level, so later calls to +// debug.SetTraceback (e.g., from testing timeouts) can't lower it. 
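+//
+// For example, a hypothetical test binary that always wants full
+// tracebacks, regardless of later debug.SetTraceback calls, might do:
+//
+//	func TestMain(m *testing.M) {
+//		runtime.SetTracebackEnv("system")
+//		os.Exit(m.Run())
+//	}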
+func SetTracebackEnv(level string) { + setTraceback(level) + traceback_env = traceback_cache +} + +var ReadUnaligned32 = readUnaligned32 +var ReadUnaligned64 = readUnaligned64 + +func CountPagesInUse() (pagesInUse, counted uintptr) { + stw := stopTheWorld(stwForTestCountPagesInUse) + + pagesInUse = mheap_.pagesInUse.Load() + + for _, s := range mheap_.allspans { + if s.state.get() == mSpanInUse { + counted += s.npages + } + } + + startTheWorld(stw) + + return +} + +func Fastrand() uint32 { return uint32(rand()) } +func Fastrand64() uint64 { return rand() } +func Fastrandn(n uint32) uint32 { return randn(n) } + +type ProfBuf profBuf + +func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf { + return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags)) +} + +func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) { + (*profBuf)(p).write(tag, now, hdr, stk) +} + +const ( + ProfBufBlocking = profBufBlocking + ProfBufNonBlocking = profBufNonBlocking +) + +func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) { + return (*profBuf)(p).read(mode) +} + +func (p *ProfBuf) Close() { + (*profBuf)(p).close() +} + +func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) { + stw := stopTheWorld(stwForTestReadMetricsSlow) + + // Initialize the metrics beforehand because this could + // allocate and skew the stats. + metricsLock() + initMetrics() + + systemstack(func() { + // Donate the racectx to g0. readMetricsLocked calls into the race detector + // via map access. + getg().racectx = getg().m.curg.racectx + + // Read the metrics once before in case it allocates and skews the metrics. + // readMetricsLocked is designed to only allocate the first time it is called + // with a given slice of samples. In effect, this extra read tests that this + // remains true, since otherwise the second readMetricsLocked below could + // allocate before it returns. + readMetricsLocked(samplesp, len, cap) + + // Read memstats first. It's going to flush + // the mcaches which readMetrics does not do, so + // going the other way around may result in + // inconsistent statistics. + readmemstats_m(memStats) + + // Read metrics again. We need to be sure we're on the + // system stack with readmemstats_m so that we don't call into + // the stack allocator and adjust metrics between there and here. + readMetricsLocked(samplesp, len, cap) + + // Undo the donation. + getg().racectx = 0 + }) + metricsUnlock() + + startTheWorld(stw) +} + +var DoubleCheckReadMemStats = &doubleCheckReadMemStats + +// ReadMemStatsSlow returns both the runtime-computed MemStats and +// MemStats accumulated by scanning the heap. +func ReadMemStatsSlow() (base, slow MemStats) { + stw := stopTheWorld(stwForTestReadMemStatsSlow) + + // Run on the system stack to avoid stack growth allocation. + systemstack(func() { + // Make sure stats don't change. + getg().m.mallocing++ + + readmemstats_m(&base) + + // Initialize slow from base and zero the fields we're + // recomputing. + slow = base + slow.Alloc = 0 + slow.TotalAlloc = 0 + slow.Mallocs = 0 + slow.Frees = 0 + slow.HeapReleased = 0 + var bySize [_NumSizeClasses]struct { + Mallocs, Frees uint64 + } + + // Add up current allocations in spans. 
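+		// A span with size class 0 is a large-object span holding
+		// exactly one object, so it contributes a single Malloc of
+		// elemsize bytes; small-object spans are counted via allocCount.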
+ for _, s := range mheap_.allspans { + if s.state.get() != mSpanInUse { + continue + } + if s.isUnusedUserArenaChunk() { + continue + } + if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 { + slow.Mallocs++ + slow.Alloc += uint64(s.elemsize) + } else { + slow.Mallocs += uint64(s.allocCount) + slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize) + bySize[sizeclass].Mallocs += uint64(s.allocCount) + } + } + + // Add in frees by just reading the stats for those directly. + var m heapStatsDelta + memstats.heapStats.unsafeRead(&m) + + // Collect per-sizeclass free stats. + var smallFree uint64 + for i := 0; i < _NumSizeClasses; i++ { + slow.Frees += m.smallFreeCount[i] + bySize[i].Frees += m.smallFreeCount[i] + bySize[i].Mallocs += m.smallFreeCount[i] + smallFree += m.smallFreeCount[i] * uint64(class_to_size[i]) + } + slow.Frees += m.tinyAllocCount + m.largeFreeCount + slow.Mallocs += slow.Frees + + slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree + + for i := range slow.BySize { + slow.BySize[i].Mallocs = bySize[i].Mallocs + slow.BySize[i].Frees = bySize[i].Frees + } + + for i := mheap_.pages.start; i < mheap_.pages.end; i++ { + chunk := mheap_.pages.tryChunkOf(i) + if chunk == nil { + continue + } + pg := chunk.scavenged.popcntRange(0, pallocChunkPages) + slow.HeapReleased += uint64(pg) * pageSize + } + for _, p := range allp { + pg := sys.OnesCount64(p.pcache.scav) + slow.HeapReleased += uint64(pg) * pageSize + } + + getg().m.mallocing-- + }) + + startTheWorld(stw) + return +} + +// ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the current goroutine +// and verifies that unwinding the new stack doesn't crash, even if the old +// stack has been freed or reused (simulated via poisoning). +func ShrinkStackAndVerifyFramePointers() { + before := stackPoisonCopy + defer func() { stackPoisonCopy = before }() + stackPoisonCopy = 1 + + gp := getg() + systemstack(func() { + shrinkstack(gp) + }) + // If our new stack contains frame pointers into the old stack, this will + // crash because the old stack has been poisoned. + FPCallers(make([]uintptr, 1024)) +} + +// BlockOnSystemStack switches to the system stack, prints "x\n" to +// stderr, and blocks in a stack containing +// "runtime.blockOnSystemStackInternal". 
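+//
+// The goroutine blocks by acquiring the same runtime lock twice; the
+// second acquisition can never succeed, so it parks forever on the
+// system stack.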
+func BlockOnSystemStack() {
+	systemstack(blockOnSystemStackInternal)
+}
+
+func blockOnSystemStackInternal() {
+	print("x\n")
+	lock(&deadlock)
+	lock(&deadlock)
+}
+
+type RWMutex struct {
+	rw rwmutex
+}
+
+func (rw *RWMutex) Init() {
+	rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
+}
+
+func (rw *RWMutex) RLock() {
+	rw.rw.rlock()
+}
+
+func (rw *RWMutex) RUnlock() {
+	rw.rw.runlock()
+}
+
+func (rw *RWMutex) Lock() {
+	rw.rw.lock()
+}
+
+func (rw *RWMutex) Unlock() {
+	rw.rw.unlock()
+}
+
+const RuntimeHmapSize = unsafe.Sizeof(hmap{})
+
+func MapBucketsCount(m map[int]int) int {
+	h := *(**hmap)(unsafe.Pointer(&m))
+	return 1 << h.B
+}
+
+func MapBucketsPointerIsNil(m map[int]int) bool {
+	h := *(**hmap)(unsafe.Pointer(&m))
+	return h.buckets == nil
+}
+
+func OverLoadFactor(count int, B uint8) bool {
+	return overLoadFactor(count, B)
+}
+
+func LockOSCounts() (external, internal uint32) {
+	gp := getg()
+	if gp.m.lockedExt+gp.m.lockedInt == 0 {
+		if gp.lockedm != 0 {
+			panic("lockedm on non-locked goroutine")
+		}
+	} else {
+		if gp.lockedm == 0 {
+			panic("nil lockedm on locked goroutine")
+		}
+	}
+	return gp.m.lockedExt, gp.m.lockedInt
+}
+
+//go:noinline
+func TracebackSystemstack(stk []uintptr, i int) int {
+	if i == 0 {
+		pc, sp := getcallerpc(), getcallersp()
+		var u unwinder
+		u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
+		return tracebackPCs(&u, 0, stk)
+	}
+	n := 0
+	systemstack(func() {
+		n = TracebackSystemstack(stk, i-1)
+	})
+	return n
+}
+
+func KeepNArenaHints(n int) {
+	hint := mheap_.arenaHints
+	for i := 1; i < n; i++ {
+		hint = hint.next
+		if hint == nil {
+			return
+		}
+	}
+	hint.next = nil
+}
+
+// MapNextArenaHint reserves a page at the next arena growth hint,
+// preventing the arena from growing there, and returns the range of
+// addresses that are no longer viable.
+//
+// This may fail to reserve memory. If it fails, it still returns the
+// address range it attempted to reserve.
+func MapNextArenaHint() (start, end uintptr, ok bool) {
+	hint := mheap_.arenaHints
+	addr := hint.addr
+	if hint.down {
+		start, end = addr-heapArenaBytes, addr
+		addr -= physPageSize
+	} else {
+		start, end = addr, addr+heapArenaBytes
+	}
+	got := sysReserve(unsafe.Pointer(addr), physPageSize)
+	ok = (addr == uintptr(got))
+	if !ok {
+		// We were unable to get the requested reservation.
+		// Release what we did get and fail.
+		sysFreeOS(got, physPageSize)
+	}
+	return
+}
+
+func GetNextArenaHint() uintptr {
+	return mheap_.arenaHints.addr
+}
+
+type G = g
+
+type Sudog = sudog
+
+func Getg() *G {
+	return getg()
+}
+
+func Goid() uint64 {
+	return getg().goid
+}
+
+func GIsWaitingOnMutex(gp *G) bool {
+	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
+}
+
+var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
+
+//go:noinline
+func PanicForTesting(b []byte, i int) byte {
+	return unexportedPanicForTesting(b, i)
+}
+
+//go:noinline
+func unexportedPanicForTesting(b []byte, i int) byte {
+	return b[i]
+}
+
+func G0StackOverflow() {
+	systemstack(func() {
+		g0 := getg()
+		sp := getcallersp()
+		// The stack bounds for the g0 stack are not always precise.
+		// Use an artificially small stack, to trigger a stack overflow
+		// without actually running out of the system stack (which may
+		// seg fault).
+		g0.stack.lo = sp - 4096 - stackSystem
+		g0.stackguard0 = g0.stack.lo + stackGuard
+		g0.stackguard1 = g0.stackguard0
+
+		stackOverflow(nil)
+	})
+}
+
+func stackOverflow(x *byte) {
+	var buf [256]byte
+	stackOverflow(&buf[0])
+}
+
+func MapTombstoneCheck(m map[int]int) {
+	// Make sure emptyOne and emptyRest are distributed correctly.
+	// We should have a series of filled and emptyOne cells, followed by
+	// a series of emptyRest cells.
+	h := *(**hmap)(unsafe.Pointer(&m))
+	i := any(m)
+	t := *(**maptype)(unsafe.Pointer(&i))
+
+	for x := 0; x < 1<<h.B; x++ {
+		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
+		n := 0
+		for b := b0; b != nil; b = b.overflow(t) {
+			for i := 0; i < bucketCnt; i++ {
+				if b.tophash[i] != emptyRest {
+					n++
+				}
+			}
+		}
+		k := 0
+		for b := b0; b != nil; b = b.overflow(t) {
+			for i := 0; i < bucketCnt; i++ {
+				if k < n && b.tophash[i] == emptyRest {
+					panic("early emptyRest")
+				}
+				if k >= n && b.tophash[i] != emptyRest {
+					panic("late non-emptyRest")
+				}
+				if k == n-1 && b.tophash[i] == emptyOne {
+					panic("last non-emptyRest entry is emptyOne")
+				}
+				k++
+			}
+		}
+	}
+}
+
+func RunGetgThreadSwitchTest() {
+	// Test that getg works correctly with thread switch.
+	// With gccgo, if we generate getg inlined, the backend
+	// may cache the address of the TLS variable, which
+	// will become invalid after a thread switch. This test
+	// checks that the bad caching doesn't happen.
+
+	ch := make(chan int)
+	go func(ch chan int) {
+		ch <- 5
+		LockOSThread()
+	}(ch)
+
+	g1 := getg()
+
+	// Block on a receive. This is likely to get us a thread
+	// switch. If we yield to the sender goroutine, it will
+	// lock the thread, forcing us to resume on a different
+	// thread.
+	<-ch
+
+	g2 := getg()
+	if g1 != g2 {
+		panic("g1 != g2")
+	}
+
+	// Also test getg after some control flow, as the
+	// backend is sensitive to control flow.
+	g3 := getg()
+	if g1 != g3 {
+		panic("g1 != g3")
+	}
+}
+
+const (
+	PageSize         = pageSize
+	PallocChunkPages = pallocChunkPages
+	PageAlloc64Bit   = pageAlloc64Bit
+	PallocSumBytes   = pallocSumBytes
+)
+
+// Expose pallocSum for testing.
+type PallocSum pallocSum
+
+func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
+func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
+func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
+func (m PallocSum) End() uint                      { return pallocSum(m).end() }
+
+// Expose pallocBits for testing.
+type PallocBits pallocBits
+
+func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
+	return (*pallocBits)(b).find(npages, searchIdx)
+}
+func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
+func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
+func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
+func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
+
+// SummarizeSlow is a slow but more obviously correct implementation
+// of (*pallocBits).summarize. Used for testing.
+func SummarizeSlow(b *PallocBits) PallocSum {
+	var start, most, end uint
+
+	const N = uint(len(b)) * 64
+	for start < N && (*pageBits)(b).get(start) == 0 {
+		start++
+	}
+	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
+		end++
+	}
+	run := uint(0)
+	for i := uint(0); i < N; i++ {
+		if (*pageBits)(b).get(i) == 0 {
+			run++
+		} else {
+			run = 0
+		}
+		most = max(most, run)
+	}
+	return PackPallocSum(start, most, end)
+}
+
+// Expose non-trivial helpers for testing.
+func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
+
+// Given two PallocBits, returns a set of bit ranges where
+// they differ.
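+// For example, if a has bits 0 through 3 set and b has only bits 0 and 1
+// set, the result is [{2, 2}]: a single 2-bit run starting at bit 2.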
+func DiffPallocBits(a, b *PallocBits) []BitRange {
+	ba := (*pageBits)(a)
+	bb := (*pageBits)(b)
+
+	var d []BitRange
+	base, size := uint(0), uint(0)
+	for i := uint(0); i < uint(len(ba))*64; i++ {
+		if ba.get(i) != bb.get(i) {
+			if size == 0 {
+				base = i
+			}
+			size++
+		} else {
+			if size != 0 {
+				d = append(d, BitRange{base, size})
+			}
+			size = 0
+		}
+	}
+	if size != 0 {
+		d = append(d, BitRange{base, size})
+	}
+	return d
+}
+
+// StringifyPallocBits gets the bits in the bit range r from b,
+// and returns a string containing the bits as ASCII 0 and 1
+// characters.
+func StringifyPallocBits(b *PallocBits, r BitRange) string {
+	str := ""
+	for j := r.I; j < r.I+r.N; j++ {
+		if (*pageBits)(b).get(j) != 0 {
+			str += "1"
+		} else {
+			str += "0"
+		}
+	}
+	return str
+}
+
+// Expose pallocData for testing.
+type PallocData pallocData
+
+func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
+	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
+}
+func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
+func (d *PallocData) ScavengedSetRange(i, n uint) {
+	(*pallocData)(d).scavenged.setRange(i, n)
+}
+func (d *PallocData) PallocBits() *PallocBits {
+	return (*PallocBits)(&(*pallocData)(d).pallocBits)
+}
+func (d *PallocData) Scavenged() *PallocBits {
+	return (*PallocBits)(&(*pallocData)(d).scavenged)
+}
+
+// Expose fillAligned for testing.
+func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
+
+// Expose pageCache for testing.
+type PageCache pageCache
+
+const PageCachePages = pageCachePages
+
+func NewPageCache(base uintptr, cache, scav uint64) PageCache {
+	return PageCache(pageCache{base: base, cache: cache, scav: scav})
+}
+func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
+func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
+func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
+func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
+func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
+	return (*pageCache)(c).alloc(npages)
+}
+func (c *PageCache) Flush(s *PageAlloc) {
+	cp := (*pageCache)(c)
+	sp := (*pageAlloc)(s)
+
+	systemstack(func() {
+		// None of the tests need any higher-level locking, so we just
+		// take the lock internally.
+		lock(sp.mheapLock)
+		cp.flush(sp)
+		unlock(sp.mheapLock)
+	})
+}
+
+// Expose chunk index type.
+type ChunkIdx chunkIdx
+
+// Expose pageAlloc for testing. Note that because pageAlloc is
+// not in the heap, neither is PageAlloc.
+type PageAlloc pageAlloc
+
+func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
+	pp := (*pageAlloc)(p)
+
+	var addr, scav uintptr
+	systemstack(func() {
+		// None of the tests need any higher-level locking, so we just
+		// take the lock internally.
+		lock(pp.mheapLock)
+		addr, scav = pp.alloc(npages)
+		unlock(pp.mheapLock)
+	})
+	return addr, scav
+}
+func (p *PageAlloc) AllocToCache() PageCache {
+	pp := (*pageAlloc)(p)
+
+	var c PageCache
+	systemstack(func() {
+		// None of the tests need any higher-level locking, so we just
+		// take the lock internally.
+		lock(pp.mheapLock)
+		c = PageCache(pp.allocToCache())
+		unlock(pp.mheapLock)
+	})
+	return c
+}
+func (p *PageAlloc) Free(base, npages uintptr) {
+	pp := (*pageAlloc)(p)
+
+	systemstack(func() {
+		// None of the tests need any higher-level locking, so we just
+		// take the lock internally.
+		lock(pp.mheapLock)
+		pp.free(base, npages)
+		unlock(pp.mheapLock)
+	})
+}
+func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
+	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
+}
+func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
+	pp := (*pageAlloc)(p)
+	systemstack(func() {
+		r = pp.scavenge(nbytes, nil, true)
+	})
+	return
+}
+func (p *PageAlloc) InUse() []AddrRange {
+	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
+	for _, r := range p.inUse.ranges {
+		ranges = append(ranges, AddrRange{r})
+	}
+	return ranges
+}
+
+// Returns nil if the PallocData's L2 is missing.
+func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
+	ci := chunkIdx(i)
+	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
+}
+
+// AddrRange is a wrapper around addrRange for testing.
+type AddrRange struct {
+	addrRange
+}
+
+// MakeAddrRange creates a new address range.
+func MakeAddrRange(base, limit uintptr) AddrRange {
+	return AddrRange{makeAddrRange(base, limit)}
+}
+
+// Base returns the virtual base address of the address range.
+func (a AddrRange) Base() uintptr {
+	return a.addrRange.base.addr()
+}
+
+// Limit returns the virtual address of the limit of the address range.
+func (a AddrRange) Limit() uintptr {
+	return a.addrRange.limit.addr()
+}
+
+// Equals returns true if the two address ranges are exactly equal.
+func (a AddrRange) Equals(b AddrRange) bool {
+	return a == b
+}
+
+// Size returns the size in bytes of the address range.
+func (a AddrRange) Size() uintptr {
+	return a.addrRange.size()
+}
+
+// testSysStat is the sysStat passed to test versions of various
+// runtime structures. We do actually have to keep track of this
+// because otherwise memstats.mappedReady won't actually line up
+// with other stats in the runtime during tests.
+var testSysStat = &memstats.other_sys
+
+// AddrRanges is a wrapper around addrRanges for testing.
+type AddrRanges struct {
+	addrRanges
+	mutable bool
+}
+
+// NewAddrRanges creates a new empty addrRanges.
+//
+// Note that this initializes addrRanges just like in the
+// runtime, so its memory is persistentalloc'd. Call this
+// function sparingly since the memory it allocates is
+// leaked.
+//
+// This AddrRanges is mutable, so we can test methods like
+// Add.
+func NewAddrRanges() AddrRanges {
+	r := addrRanges{}
+	r.init(testSysStat)
+	return AddrRanges{r, true}
+}
+
+// MakeAddrRanges creates a new addrRanges populated with
+// the ranges in a.
+//
+// The returned AddrRanges is immutable, so methods like
+// Add will fail.
+func MakeAddrRanges(a ...AddrRange) AddrRanges {
+	// Methods that manipulate the backing store of addrRanges.ranges should
+	// not be used on the result from this function (e.g. add) since they may
+	// trigger reallocation. That would normally be fine, except the new
+	// backing store won't come from the heap, but from persistentalloc, so
+	// we'll leak some memory implicitly.
+	ranges := make([]addrRange, 0, len(a))
+	total := uintptr(0)
+	for _, r := range a {
+		ranges = append(ranges, r.addrRange)
+		total += r.Size()
+	}
+	return AddrRanges{addrRanges{
+		ranges:     ranges,
+		totalBytes: total,
+		sysStat:    testSysStat,
+	}, false}
+}
+
+// Ranges returns a copy of the ranges described by the
+// addrRanges.
+func (a *AddrRanges) Ranges() []AddrRange {
+	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
+	for _, r := range a.addrRanges.ranges {
+		result = append(result, AddrRange{r})
+	}
+	return result
+}
+
+// FindSucc returns the successor to base.
See addrRanges.findSucc +// for more details. +func (a *AddrRanges) FindSucc(base uintptr) int { + return a.findSucc(base) +} + +// Add adds a new AddrRange to the AddrRanges. +// +// The AddrRange must be mutable (i.e. created by NewAddrRanges), +// otherwise this method will throw. +func (a *AddrRanges) Add(r AddrRange) { + if !a.mutable { + throw("attempt to mutate immutable AddrRanges") + } + a.add(r.addrRange) +} + +// TotalBytes returns the totalBytes field of the addrRanges. +func (a *AddrRanges) TotalBytes() uintptr { + return a.addrRanges.totalBytes +} + +// BitRange represents a range over a bitmap. +type BitRange struct { + I, N uint // bit index and length in bits +} + +// NewPageAlloc creates a new page allocator for testing and +// initializes it with the scav and chunks maps. Each key in these maps +// represents a chunk index and each value is a series of bit ranges to +// set within each bitmap's chunk. +// +// The initialization of the pageAlloc preserves the invariant that if a +// scavenged bit is set the alloc bit is necessarily unset, so some +// of the bits described by scav may be cleared in the final bitmap if +// ranges in chunks overlap with them. +// +// scav is optional, and if nil, the scavenged bitmap will be cleared +// (as opposed to all 1s, which it usually is). Furthermore, every +// chunk index in scav must appear in chunks; ones that do not are +// ignored. +func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc { + p := new(pageAlloc) + + // We've got an entry, so initialize the pageAlloc. + p.init(new(mutex), testSysStat, true) + lockInit(p.mheapLock, lockRankMheap) + for i, init := range chunks { + addr := chunkBase(chunkIdx(i)) + + // Mark the chunk's existence in the pageAlloc. + systemstack(func() { + lock(p.mheapLock) + p.grow(addr, pallocChunkBytes) + unlock(p.mheapLock) + }) + + // Initialize the bitmap and update pageAlloc metadata. + ci := chunkIndex(addr) + chunk := p.chunkOf(ci) + + // Clear all the scavenged bits which grow set. + chunk.scavenged.clearRange(0, pallocChunkPages) + + // Simulate the allocation and subsequent free of all pages in + // the chunk for the scavenge index. This sets the state equivalent + // with all pages within the index being free. + p.scav.index.alloc(ci, pallocChunkPages) + p.scav.index.free(ci, 0, pallocChunkPages) + + // Apply scavenge state if applicable. + if scav != nil { + if scvg, ok := scav[i]; ok { + for _, s := range scvg { + // Ignore the case of s.N == 0. setRange doesn't handle + // it and it's a no-op anyway. + if s.N != 0 { + chunk.scavenged.setRange(s.I, s.N) + } + } + } + } + + // Apply alloc state. + for _, s := range init { + // Ignore the case of s.N == 0. allocRange doesn't handle + // it and it's a no-op anyway. + if s.N != 0 { + chunk.allocRange(s.I, s.N) + + // Make sure the scavenge index is updated. + p.scav.index.alloc(ci, s.N) + } + } + + // Update heap metadata for the allocRange calls above. + systemstack(func() { + lock(p.mheapLock) + p.update(addr, pallocChunkPages, false, false) + unlock(p.mheapLock) + }) + } + + return (*PageAlloc)(p) +} + +// FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this +// is called the pageAlloc may no longer be used. The object itself will be +// collected by the garbage collector once it is no longer live. +func FreePageAlloc(pp *PageAlloc) { + p := (*pageAlloc)(pp) + + // Free all the mapped space for the summary levels. 
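+	// On 64-bit platforms each summary level was reserved separately, so
+	// the levels are freed one at a time; on 32-bit they share a single
+	// contiguous reservation, which is freed in one call below.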
+ if pageAlloc64Bit != 0 { + for l := 0; l < summaryLevels; l++ { + sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes) + } + } else { + resSize := uintptr(0) + for _, s := range p.summary { + resSize += uintptr(cap(s)) * pallocSumBytes + } + sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize)) + } + + // Free extra data structures. + sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{})) + + // Subtract back out whatever we mapped for the summaries. + // sysUsed adds to p.sysStat and memstats.mappedReady no matter what + // (and in anger should actually be accounted for), and there's no other + // way to figure out how much we actually mapped. + gcController.mappedReady.Add(-int64(p.summaryMappedReady)) + testSysStat.add(-int64(p.summaryMappedReady)) + + // Free the mapped space for chunks. + for i := range p.chunks { + if x := p.chunks[i]; x != nil { + p.chunks[i] = nil + // This memory comes from sysAlloc and will always be page-aligned. + sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat) + } + } +} + +// BaseChunkIdx is a convenient chunkIdx value which works on both +// 64 bit and 32 bit platforms, allowing the tests to share code +// between the two. +// +// This should not be higher than 0x100*pallocChunkBytes to support +// mips and mipsle, which only have 31-bit address spaces. +var BaseChunkIdx = func() ChunkIdx { + var prefix uintptr + if pageAlloc64Bit != 0 { + prefix = 0xc000 + } else { + prefix = 0x100 + } + baseAddr := prefix * pallocChunkBytes + if goos.IsAix != 0 { + baseAddr += arenaBaseOffset + } + return ChunkIdx(chunkIndex(baseAddr)) +}() + +// PageBase returns an address given a chunk index and a page index +// relative to that chunk. +func PageBase(c ChunkIdx, pageIdx uint) uintptr { + return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize +} + +type BitsMismatch struct { + Base uintptr + Got, Want uint64 +} + +func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) { + ok = true + + // Run on the system stack to avoid stack growth allocation. + systemstack(func() { + getg().m.mallocing++ + + // Lock so that we can safely access the bitmap. + lock(&mheap_.lock) + chunkLoop: + for i := mheap_.pages.start; i < mheap_.pages.end; i++ { + chunk := mheap_.pages.tryChunkOf(i) + if chunk == nil { + continue + } + for j := 0; j < pallocChunkPages/64; j++ { + // Run over each 64-bit bitmap section and ensure + // scavenged is being cleared properly on allocation. + // If a used bit and scavenged bit are both set, that's + // an error, and could indicate a larger problem, or + // an accounting problem. + want := chunk.scavenged[j] &^ chunk.pallocBits[j] + got := chunk.scavenged[j] + if want != got { + ok = false + if n >= len(mismatches) { + break chunkLoop + } + mismatches[n] = BitsMismatch{ + Base: chunkBase(i) + uintptr(j)*64*pageSize, + Got: got, + Want: want, + } + n++ + } + } + } + unlock(&mheap_.lock) + + getg().m.mallocing-- + }) + return +} + +func PageCachePagesLeaked() (leaked uintptr) { + stw := stopTheWorld(stwForTestPageCachePagesLeaked) + + // Walk over destroyed Ps and look for unflushed caches. + deadp := allp[len(allp):cap(allp)] + for _, p := range deadp { + // Since we're going past len(allp) we may see nil Ps. + // Just ignore them. 
+ if p != nil { + leaked += uintptr(sys.OnesCount64(p.pcache.cache)) + } + } + + startTheWorld(stw) + return +} + +type Mutex = mutex + +var Lock = lock +var Unlock = unlock + +var MutexContended = mutexContended + +func SemRootLock(addr *uint32) *mutex { + root := semtable.rootFor(addr) + return &root.lock +} + +var Semacquire = semacquire +var Semrelease1 = semrelease1 + +func SemNwait(addr *uint32) uint32 { + root := semtable.rootFor(addr) + return root.nwait.Load() +} + +const SemTableSize = semTabSize + +// SemTable is a wrapper around semTable exported for testing. +type SemTable struct { + semTable +} + +// Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr. +func (t *SemTable) Enqueue(addr *uint32) { + s := acquireSudog() + s.releasetime = 0 + s.acquiretime = 0 + s.ticket = 0 + t.semTable.rootFor(addr).queue(addr, s, false) +} + +// Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr. +// +// Returns true if there actually was a waiter to be dequeued. +func (t *SemTable) Dequeue(addr *uint32) bool { + s, _, _ := t.semTable.rootFor(addr).dequeue(addr) + if s != nil { + releaseSudog(s) + return true + } + return false +} + +// mspan wrapper for testing. +type MSpan mspan + +// Allocate an mspan for testing. +func AllocMSpan() *MSpan { + var s *mspan + systemstack(func() { + lock(&mheap_.lock) + s = (*mspan)(mheap_.spanalloc.alloc()) + unlock(&mheap_.lock) + }) + return (*MSpan)(s) +} + +// Free an allocated mspan. +func FreeMSpan(s *MSpan) { + systemstack(func() { + lock(&mheap_.lock) + mheap_.spanalloc.free(unsafe.Pointer(s)) + unlock(&mheap_.lock) + }) +} + +func MSpanCountAlloc(ms *MSpan, bits []byte) int { + s := (*mspan)(ms) + s.nelems = uint16(len(bits) * 8) + s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0])) + result := s.countAlloc() + s.gcmarkBits = nil + return result +} + +const ( + TimeHistSubBucketBits = timeHistSubBucketBits + TimeHistNumSubBuckets = timeHistNumSubBuckets + TimeHistNumBuckets = timeHistNumBuckets + TimeHistMinBucketBits = timeHistMinBucketBits + TimeHistMaxBucketBits = timeHistMaxBucketBits +) + +type TimeHistogram timeHistogram + +// Counts returns the counts for the given bucket, subBucket indices. +// Returns true if the bucket was valid, otherwise returns the counts +// for the overflow bucket if bucket > 0 or the underflow bucket if +// bucket < 0, and false. +func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) { + t := (*timeHistogram)(th) + if bucket < 0 { + return t.underflow.Load(), false + } + i := bucket*TimeHistNumSubBuckets + subBucket + if i >= len(t.counts) { + return t.overflow.Load(), false + } + return t.counts[i].Load(), true +} + +func (th *TimeHistogram) Record(duration int64) { + (*timeHistogram)(th).record(duration) +} + +var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets + +func SetIntArgRegs(a int) int { + lock(&finlock) + old := intArgRegs + if a >= 0 { + intArgRegs = a + } + unlock(&finlock) + return old +} + +func FinalizerGAsleep() bool { + return fingStatus.Load()&fingWait != 0 +} + +// For GCTestMoveStackOnNextCall, it's important not to introduce an +// extra layer of call, since then there's a return before the "real" +// next call. +var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall + +// For GCTestIsReachable, it's important that we do this as a call so +// escape analysis can see through it. +func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) { + return gcTestIsReachable(ptrs...) 
+} + +// For GCTestPointerClass, it's important that we do this as a call so +// escape analysis can see through it. +// +// This is nosplit because gcTestPointerClass is. +// +//go:nosplit +func GCTestPointerClass(p unsafe.Pointer) string { + return gcTestPointerClass(p) +} + +const Raceenabled = raceenabled + +const ( + GCBackgroundUtilization = gcBackgroundUtilization + GCGoalUtilization = gcGoalUtilization + DefaultHeapMinimum = defaultHeapMinimum + MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent + MemoryLimitMinHeapGoalHeadroom = memoryLimitMinHeapGoalHeadroom +) + +type GCController struct { + gcControllerState +} + +func NewGCController(gcPercent int, memoryLimit int64) *GCController { + // Force the controller to escape. We're going to + // do 64-bit atomics on it, and if it gets stack-allocated + // on a 32-bit architecture, it may get allocated unaligned + // space. + g := Escape(new(GCController)) + g.gcControllerState.test = true // Mark it as a test copy. + g.init(int32(gcPercent), memoryLimit) + return g +} + +func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) { + trigger, _ := c.trigger() + if c.heapMarked > trigger { + trigger = c.heapMarked + } + c.maxStackScan.Store(stackSize) + c.globalsScan.Store(globalsSize) + c.heapLive.Store(trigger) + c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac)) + c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap}) +} + +func (c *GCController) AssistWorkPerByte() float64 { + return c.assistWorkPerByte.Load() +} + +func (c *GCController) HeapGoal() uint64 { + return c.heapGoal() +} + +func (c *GCController) HeapLive() uint64 { + return c.heapLive.Load() +} + +func (c *GCController) HeapMarked() uint64 { + return c.heapMarked +} + +func (c *GCController) Triggered() uint64 { + return c.triggered +} + +type GCControllerReviseDelta struct { + HeapLive int64 + HeapScan int64 + HeapScanWork int64 + StackScanWork int64 + GlobalsScanWork int64 +} + +func (c *GCController) Revise(d GCControllerReviseDelta) { + c.heapLive.Add(d.HeapLive) + c.heapScan.Add(d.HeapScan) + c.heapScanWork.Add(d.HeapScanWork) + c.stackScanWork.Add(d.StackScanWork) + c.globalsScanWork.Add(d.GlobalsScanWork) + c.revise() +} + +func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) { + c.assistTime.Store(assistTime) + c.endCycle(elapsed, gomaxprocs, false) + c.resetLive(bytesMarked) + c.commit(false) +} + +func (c *GCController) AddIdleMarkWorker() bool { + return c.addIdleMarkWorker() +} + +func (c *GCController) NeedIdleMarkWorker() bool { + return c.needIdleMarkWorker() +} + +func (c *GCController) RemoveIdleMarkWorker() { + c.removeIdleMarkWorker() +} + +func (c *GCController) SetMaxIdleMarkWorkers(max int32) { + c.setMaxIdleMarkWorkers(max) +} + +var alwaysFalse bool +var escapeSink any + +func Escape[T any](x T) T { + if alwaysFalse { + escapeSink = x + } + return x +} + +// Acquirem blocks preemption. 
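+// While held, the calling goroutine cannot be preempted or moved to
+// another thread; every Acquirem must be paired with a Releasem.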
+func Acquirem() { + acquirem() +} + +func Releasem() { + releasem(getg().m) +} + +var Timediv = timediv + +type PIController struct { + piController +} + +func NewPIController(kp, ti, tt, min, max float64) *PIController { + return &PIController{piController{ + kp: kp, + ti: ti, + tt: tt, + min: min, + max: max, + }} +} + +func (c *PIController) Next(input, setpoint, period float64) (float64, bool) { + return c.piController.next(input, setpoint, period) +} + +const ( + CapacityPerProc = capacityPerProc + GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod +) + +type GCCPULimiter struct { + limiter gcCPULimiterState +} + +func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter { + // Force the controller to escape. We're going to + // do 64-bit atomics on it, and if it gets stack-allocated + // on a 32-bit architecture, it may get allocated unaligned + // space. + l := Escape(new(GCCPULimiter)) + l.limiter.test = true + l.limiter.resetCapacity(now, gomaxprocs) + return l +} + +func (l *GCCPULimiter) Fill() uint64 { + return l.limiter.bucket.fill +} + +func (l *GCCPULimiter) Capacity() uint64 { + return l.limiter.bucket.capacity +} + +func (l *GCCPULimiter) Overflow() uint64 { + return l.limiter.overflow +} + +func (l *GCCPULimiter) Limiting() bool { + return l.limiter.limiting() +} + +func (l *GCCPULimiter) NeedUpdate(now int64) bool { + return l.limiter.needUpdate(now) +} + +func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) { + l.limiter.startGCTransition(enableGC, now) +} + +func (l *GCCPULimiter) FinishGCTransition(now int64) { + l.limiter.finishGCTransition(now) +} + +func (l *GCCPULimiter) Update(now int64) { + l.limiter.update(now) +} + +func (l *GCCPULimiter) AddAssistTime(t int64) { + l.limiter.addAssistTime(t) +} + +func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) { + l.limiter.resetCapacity(now, nprocs) +} + +const ScavengePercent = scavengePercent + +type Scavenger struct { + Sleep func(int64) int64 + Scavenge func(uintptr) (uintptr, int64) + ShouldStop func() bool + GoMaxProcs func() int32 + + released atomic.Uintptr + scavenger scavengerState + stop chan<- struct{} + done <-chan struct{} +} + +func (s *Scavenger) Start() { + if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil { + panic("must populate all stubs") + } + + // Install hooks. + s.scavenger.sleepStub = s.Sleep + s.scavenger.scavenge = s.Scavenge + s.scavenger.shouldStop = s.ShouldStop + s.scavenger.gomaxprocs = s.GoMaxProcs + + // Start up scavenger goroutine, and wait for it to be ready. + stop := make(chan struct{}) + s.stop = stop + done := make(chan struct{}) + s.done = done + go func() { + // This should match bgscavenge, loosely. + s.scavenger.init() + s.scavenger.park() + for { + select { + case <-stop: + close(done) + return + default: + } + released, workTime := s.scavenger.run() + if released == 0 { + s.scavenger.park() + continue + } + s.released.Add(released) + s.scavenger.sleep(workTime) + } + }() + if !s.BlockUntilParked(1e9 /* 1 second */) { + panic("timed out waiting for scavenger to get ready") + } +} + +// BlockUntilParked blocks until the scavenger parks, or until +// timeout is exceeded. Returns true if the scavenger parked. +// +// Note that in testing, parked means something slightly different. +// In anger, the scavenger parks to sleep, too, but in testing, +// it only parks when it actually has no work to do. +func (s *Scavenger) BlockUntilParked(timeout int64) bool { + // Just spin, waiting for it to park. 
+ // + // The actual parking process is racy with respect to + // wakeups, which is fine, but for testing we need something + // a bit more robust. + start := nanotime() + for nanotime()-start < timeout { + lock(&s.scavenger.lock) + parked := s.scavenger.parked + unlock(&s.scavenger.lock) + if parked { + return true + } + Gosched() + } + return false +} + +// Released returns how many bytes the scavenger released. +func (s *Scavenger) Released() uintptr { + return s.released.Load() +} + +// Wake wakes up a parked scavenger to keep running. +func (s *Scavenger) Wake() { + s.scavenger.wake() +} + +// Stop cleans up the scavenger's resources. The scavenger +// must be parked for this to work. +func (s *Scavenger) Stop() { + lock(&s.scavenger.lock) + parked := s.scavenger.parked + unlock(&s.scavenger.lock) + if !parked { + panic("tried to clean up scavenger that is not parked") + } + close(s.stop) + s.Wake() + <-s.done +} + +type ScavengeIndex struct { + i scavengeIndex +} + +func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex { + s := new(ScavengeIndex) + // This is a bit lazy but we easily guarantee we'll be able + // to reference all the relevant chunks. The worst-case + // memory usage here is 512 MiB, but tests generally use + // small offsets from BaseChunkIdx, which results in ~100s + // of KiB in memory use. + // + // This may still be worth making better, at least by sharing + // this fairly large array across calls with a sync.Pool or + // something. Currently, when the tests are run serially, + // it takes around 0.5s. Not all that much, but if we have + // a lot of tests like this it could add up. + s.i.chunks = make([]atomicScavChunkData, max) + s.i.min.Store(uintptr(min)) + s.i.max.Store(uintptr(max)) + s.i.minHeapIdx.Store(uintptr(min)) + s.i.test = true + return s +} + +func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) { + ci, off := s.i.find(force) + return ChunkIdx(ci), off +} + +func (s *ScavengeIndex) AllocRange(base, limit uintptr) { + sc, ec := chunkIndex(base), chunkIndex(limit-1) + si, ei := chunkPageIndex(base), chunkPageIndex(limit-1) + + if sc == ec { + // The range doesn't cross any chunk boundaries. + s.i.alloc(sc, ei+1-si) + } else { + // The range crosses at least one chunk boundary. + s.i.alloc(sc, pallocChunkPages-si) + for c := sc + 1; c < ec; c++ { + s.i.alloc(c, pallocChunkPages) + } + s.i.alloc(ec, ei+1) + } +} + +func (s *ScavengeIndex) FreeRange(base, limit uintptr) { + sc, ec := chunkIndex(base), chunkIndex(limit-1) + si, ei := chunkPageIndex(base), chunkPageIndex(limit-1) + + if sc == ec { + // The range doesn't cross any chunk boundaries. + s.i.free(sc, si, ei+1-si) + } else { + // The range crosses at least one chunk boundary. 
+ s.i.free(sc, si, pallocChunkPages-si) + for c := sc + 1; c < ec; c++ { + s.i.free(c, 0, pallocChunkPages) + } + s.i.free(ec, 0, ei+1) + } +} + +func (s *ScavengeIndex) ResetSearchAddrs() { + for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} { + addr, marked := a.Load() + if marked { + a.StoreUnmark(addr, addr) + } + a.Clear() + } + s.i.freeHWM = minOffAddr +} + +func (s *ScavengeIndex) NextGen() { + s.i.nextGen() +} + +func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) { + s.i.setEmpty(chunkIdx(ci)) +} + +func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool { + sc0 := scavChunkData{ + gen: gen, + inUse: inUse, + lastInUse: lastInUse, + scavChunkFlags: scavChunkFlags(flags), + } + scp := sc0.pack() + sc1 := unpackScavChunkData(scp) + return sc0 == sc1 +} + +const GTrackingPeriod = gTrackingPeriod + +var ZeroBase = unsafe.Pointer(&zerobase) + +const UserArenaChunkBytes = userArenaChunkBytes + +type UserArena struct { + arena *userArena +} + +func NewUserArena() *UserArena { + return &UserArena{newUserArena()} +} + +func (a *UserArena) New(out *any) { + i := efaceOf(out) + typ := i._type + if typ.Kind_&kindMask != kindPtr { + panic("new result of non-ptr type") + } + typ = (*ptrtype)(unsafe.Pointer(typ)).Elem + i.data = a.arena.new(typ) +} + +func (a *UserArena) Slice(sl any, cap int) { + a.arena.slice(sl, cap) +} + +func (a *UserArena) Free() { + a.arena.free() +} + +func GlobalWaitingArenaChunks() int { + n := 0 + systemstack(func() { + lock(&mheap_.lock) + for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next { + n++ + } + unlock(&mheap_.lock) + }) + return n +} + +func UserArenaClone[T any](s T) T { + return arena_heapify(s).(T) +} + +var AlignUp = alignUp + +func BlockUntilEmptyFinalizerQueue(timeout int64) bool { + return blockUntilEmptyFinalizerQueue(timeout) +} + +func FrameStartLine(f *Frame) int { + return f.startLine +} + +// PersistentAlloc allocates some memory that lives outside the Go heap. +// This memory will never be freed; use sparingly. +func PersistentAlloc(n uintptr) unsafe.Pointer { + return persistentalloc(n, 0, &memstats.other_sys) +} + +// FPCallers works like Callers and uses frame pointer unwinding to populate +// pcBuf with the return addresses of the physical frames on the stack. +func FPCallers(pcBuf []uintptr) int { + return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf) +} + +const FramePointerEnabled = framepointer_enabled + +var ( + IsPinned = isPinned + GetPinCounter = pinnerGetPinCounter +) + +func SetPinnerLeakPanic(f func()) { + pinnerLeakPanic = f +} +func GetPinnerLeakPanic() func() { + return pinnerLeakPanic +} + +var testUintptr uintptr + +func MyGenericFunc[T any]() { + systemstack(func() { + testUintptr = 4 + }) +} + +func UnsafePoint(pc uintptr) bool { + fi := findfunc(pc) + v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc) + switch v { + case abi.UnsafePointUnsafe: + return true + case abi.UnsafePointSafe: + return false + case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry: + // These are all interruptible, they just encode a nonstandard + // way of recovering when interrupted. 
+ return false + default: + var buf [20]byte + panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v)))) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/export_unix_test.go b/platform/dbops/binaries/go/go/src/runtime/export_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..56ff7716208c9a6875a98de6fb3de6447b8429ca --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_unix_test.go @@ -0,0 +1,99 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package runtime + +import "unsafe" + +var NonblockingPipe = nonblockingPipe +var Fcntl = fcntl +var Closeonexec = closeonexec + +func sigismember(mask *sigset, i int) bool { + clear := *mask + sigdelset(&clear, i) + return clear != *mask +} + +func Sigisblocked(i int) bool { + var sigmask sigset + sigprocmask(_SIG_SETMASK, nil, &sigmask) + return sigismember(&sigmask, i) +} + +type M = m + +var waitForSigusr1 struct { + rdpipe int32 + wrpipe int32 + mID int64 +} + +// WaitForSigusr1 blocks until a SIGUSR1 is received. It calls ready +// when it is set up to receive SIGUSR1. The ready function should +// cause a SIGUSR1 to be sent. The r and w arguments are a pipe that +// the signal handler can use to report when the signal is received. +// +// Once SIGUSR1 is received, it returns the ID of the current M and +// the ID of the M the SIGUSR1 was received on. If the caller writes +// a non-zero byte to w, WaitForSigusr1 returns immediately with -1, -1. +func WaitForSigusr1(r, w int32, ready func(mp *M)) (int64, int64) { + lockOSThread() + // Make sure we can receive SIGUSR1. + unblocksig(_SIGUSR1) + + waitForSigusr1.rdpipe = r + waitForSigusr1.wrpipe = w + + mp := getg().m + testSigusr1 = waitForSigusr1Callback + ready(mp) + + // Wait for the signal. We use a pipe rather than a note + // because write is always async-signal-safe. + entersyscallblock() + var b byte + read(waitForSigusr1.rdpipe, noescape(unsafe.Pointer(&b)), 1) + exitsyscall() + + gotM := waitForSigusr1.mID + testSigusr1 = nil + + unlockOSThread() + + if b != 0 { + // timeout signal from caller + return -1, -1 + } + return mp.id, gotM +} + +// waitForSigusr1Callback is called from the signal handler during +// WaitForSigusr1. It must not have write barriers because there may +// not be a P. +// +//go:nowritebarrierrec +func waitForSigusr1Callback(gp *g) bool { + if gp == nil || gp.m == nil { + waitForSigusr1.mID = -1 + } else { + waitForSigusr1.mID = gp.m.id + } + b := byte(0) + write(uintptr(waitForSigusr1.wrpipe), noescape(unsafe.Pointer(&b)), 1) + return true +} + +// SendSigusr1 sends SIGUSR1 to mp. +func SendSigusr1(mp *M) { + signalM(mp, _SIGUSR1) +} + +const ( + O_WRONLY = _O_WRONLY + O_CREAT = _O_CREAT + O_TRUNC = _O_TRUNC +) diff --git a/platform/dbops/binaries/go/go/src/runtime/export_windows_test.go b/platform/dbops/binaries/go/go/src/runtime/export_windows_test.go new file mode 100644 index 0000000000000000000000000000000000000000..cf0db576b80babeb0f92da1d892bd0e5f102f6b0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/export_windows_test.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Export guts for testing. 
+ +package runtime + +import "unsafe" + +const MaxArgs = maxArgs + +var ( + OsYield = osyield + TimeBeginPeriodRetValue = &timeBeginPeriodRetValue +) + +func NumberOfProcessors() int32 { + var info systeminfo + stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info))) + return int32(info.dwnumberofprocessors) +} + +type ContextStub struct { + context +} + +func (c ContextStub) GetPC() uintptr { + return c.ip() +} + +func NewContextStub() *ContextStub { + var ctx context + ctx.set_ip(getcallerpc()) + ctx.set_sp(getcallersp()) + fp := getfp() + // getfp is not implemented on windows/386 and windows/arm, + // in which case it returns 0. + if fp != 0 { + ctx.set_fp(*(*uintptr)(unsafe.Pointer(fp))) + } + return &ContextStub{ctx} +} diff --git a/platform/dbops/binaries/go/go/src/runtime/extern.go b/platform/dbops/binaries/go/go/src/runtime/extern.go new file mode 100644 index 0000000000000000000000000000000000000000..e42122fd3a1cd1dadb6a8dbb6a7b10532b7e22ea --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/extern.go @@ -0,0 +1,365 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package runtime contains operations that interact with Go's runtime system, +such as functions to control goroutines. It also includes the low-level type information +used by the reflect package; see [reflect]'s documentation for the programmable +interface to the run-time type system. + +# Environment Variables + +The following environment variables ($name or %name%, depending on the host +operating system) control the run-time behavior of Go programs. The meanings +and use may change from release to release. + +The GOGC variable sets the initial garbage collection target percentage. +A collection is triggered when the ratio of freshly allocated data to live data +remaining after the previous collection reaches this percentage. The default +is GOGC=100. Setting GOGC=off disables the garbage collector entirely. +[runtime/debug.SetGCPercent] allows changing this percentage at run time. + +The GOMEMLIMIT variable sets a soft memory limit for the runtime. This memory limit +includes the Go heap and all other memory managed by the runtime, and excludes +external memory sources such as mappings of the binary itself, memory managed in +other languages, and memory held by the operating system on behalf of the Go +program. GOMEMLIMIT is a numeric value in bytes with an optional unit suffix. +The supported suffixes include B, KiB, MiB, GiB, and TiB. These suffixes +represent quantities of bytes as defined by the IEC 80000-13 standard. That is, +they are based on powers of two: KiB means 2^10 bytes, MiB means 2^20 bytes, +and so on. The default setting is [math.MaxInt64], which effectively disables the +memory limit. [runtime/debug.SetMemoryLimit] allows changing this limit at run +time. + +The GODEBUG variable controls debugging variables within the runtime. +It is a comma-separated list of name=val pairs setting these named variables: + + allocfreetrace: setting allocfreetrace=1 causes every allocation to be + profiled and a stack trace printed on each object's allocation and free. + + clobberfree: setting clobberfree=1 causes the garbage collector to + clobber the memory content of an object with bad content when it frees + the object. + + cpu.*: cpu.all=off disables the use of all optional instruction set extensions. 
+ cpu.extension=off disables use of instructions from the specified instruction set extension. + extension is the lower case name for the instruction set extension such as sse41 or avx + as listed in internal/cpu package. As an example cpu.avx=off disables runtime detection + and thereby use of AVX instructions. + + cgocheck: setting cgocheck=0 disables all checks for packages + using cgo to incorrectly pass Go pointers to non-Go code. + Setting cgocheck=1 (the default) enables relatively cheap + checks that may miss some errors. A more complete, but slow, + cgocheck mode can be enabled using GOEXPERIMENT (which + requires a rebuild), see https://pkg.go.dev/internal/goexperiment for details. + + disablethp: setting disablethp=1 on Linux disables transparent huge pages for the heap. + It has no effect on other platforms. disablethp is meant for compatibility with versions + of Go before 1.21, which stopped working around a Linux kernel default that can result + in significant memory overuse. See https://go.dev/issue/64332. This setting will be + removed in a future release, so operators should tweak their Linux configuration to suit + their needs before then. See https://go.dev/doc/gc-guide#Linux_transparent_huge_pages. + + dontfreezetheworld: by default, the start of a fatal panic or throw + "freezes the world", preempting all threads to stop all running + goroutines, which makes it possible to traceback all goroutines, and + keeps their state close to the point of panic. Setting + dontfreezetheworld=1 disables this preemption, allowing goroutines to + continue executing during panic processing. Note that goroutines that + naturally enter the scheduler will still stop. This can be useful when + debugging the runtime scheduler, as freezetheworld perturbs scheduler + state and thus may hide problems. + + efence: setting efence=1 causes the allocator to run in a mode + where each object is allocated on a unique page and addresses are + never recycled. + + gccheckmark: setting gccheckmark=1 enables verification of the + garbage collector's concurrent mark phase by performing a + second mark pass while the world is stopped. If the second + pass finds a reachable object that was not found by concurrent + mark, the garbage collector will panic. + + gcpacertrace: setting gcpacertrace=1 causes the garbage collector to + print information about the internal state of the concurrent pacer. + + gcshrinkstackoff: setting gcshrinkstackoff=1 disables moving goroutines + onto smaller stacks. In this mode, a goroutine's stack can only grow. + + gcstoptheworld: setting gcstoptheworld=1 disables concurrent garbage collection, + making every garbage collection a stop-the-world event. Setting gcstoptheworld=2 + also disables concurrent sweeping after the garbage collection finishes. + + gctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard + error at each collection, summarizing the amount of memory collected and the + length of the pause. The format of this line is subject to change. Included in + the explanation below is also the relevant runtime/metrics metric for each field. 
+ Currently, it is: + gc # @#s #%: #+#+# ms clock, #+#/#/#+# ms cpu, #->#-># MB, # MB goal, # MB stacks, # MB globals, # P + where the fields are as follows: + gc # the GC number, incremented at each GC + @#s time in seconds since program start + #% percentage of time spent in GC since program start + #+...+# wall-clock/CPU times for the phases of the GC + #->#-># MB heap size at GC start, at GC end, and live heap, or /gc/scan/heap:bytes + # MB goal goal heap size, or /gc/heap/goal:bytes + # MB stacks estimated scannable stack size, or /gc/scan/stack:bytes + # MB globals scannable global size, or /gc/scan/globals:bytes + # P number of processors used, or /sched/gomaxprocs:threads + The phases are stop-the-world (STW) sweep termination, concurrent + mark and scan, and STW mark termination. The CPU times + for mark/scan are broken down into assist time (GC performed in + line with allocation), background GC time, and idle GC time. + If the line ends with "(forced)", this GC was forced by a + runtime.GC() call. + + harddecommit: setting harddecommit=1 causes memory that is returned to the OS to + also have protections removed on it. This is the only mode of operation on Windows, + but is helpful in debugging scavenger-related issues on other platforms. Currently, + only supported on Linux. + + inittrace: setting inittrace=1 causes the runtime to emit a single line to standard + error for each package with init work, summarizing the execution time and memory + allocation. No information is printed for inits executed as part of plugin loading + and for packages without both user-defined and compiler-generated init work. + The format of this line is subject to change. Currently, it is: + init # @#ms, # ms clock, # bytes, # allocs + where the fields are as follows: + init # the package name + @# ms time in milliseconds when the init started since program start + # clock wall-clock time for package initialization work + # bytes memory allocated on the heap + # allocs number of heap allocations + + madvdontneed: setting madvdontneed=0 will use MADV_FREE + instead of MADV_DONTNEED on Linux when returning memory to the + kernel. This is more efficient, but means RSS numbers will + drop only when the OS is under memory pressure. On the BSDs and + Illumos/Solaris, setting madvdontneed=1 will use MADV_DONTNEED instead + of MADV_FREE. This is less efficient, but causes RSS numbers to drop + more quickly. + + memprofilerate: setting memprofilerate=X will update the value of runtime.MemProfileRate. + When set to 0, memory profiling is disabled. Refer to the description of + MemProfileRate for the default value. + + pagetrace: setting pagetrace=/path/to/file will write out a trace of page events + that can be viewed, analyzed, and visualized using the x/debug/cmd/pagetrace tool. + Build your program with GOEXPERIMENT=pagetrace to enable this functionality. Do not + enable this functionality if your program is a setuid binary as it introduces a security + risk in that scenario. Currently not supported on Windows, plan9 or js/wasm. Setting this + option for some applications can produce large traces, so use with care. + + panicnil: setting panicnil=1 disables the runtime error when calling panic with a nil + interface value or an untyped nil. + + runtimecontentionstacks: setting runtimecontentionstacks=1 enables inclusion of call stacks + related to contention on runtime-internal locks in the "mutex" profile, subject to the + MutexProfileFraction setting.
When runtimecontentionstacks=0, contention on + runtime-internal locks will report as "runtime._LostContendedRuntimeLock". When + runtimecontentionstacks=1, the call stacks will correspond to the unlock call that released + the lock. But instead of the value corresponding to the amount of contention that call + stack caused, it corresponds to the amount of time the caller of unlock had to wait in its + original call to lock. A future release is expected to align those and remove this setting. + + invalidptr: invalidptr=1 (the default) causes the garbage collector and stack + copier to crash the program if an invalid pointer value (for example, 1) + is found in a pointer-typed location. Setting invalidptr=0 disables this check. + This should only be used as a temporary workaround to diagnose buggy code. + The real fix is to not store integers in pointer-typed locations. + + sbrk: setting sbrk=1 replaces the memory allocator and garbage collector + with a trivial allocator that obtains memory from the operating system and + never reclaims any memory. + + scavtrace: setting scavtrace=1 causes the runtime to emit a single line to standard + error, roughly once per GC cycle, summarizing the amount of work done by the + scavenger as well as the total amount of memory returned to the operating system + and an estimate of physical memory utilization. The format of this line is subject + to change, but currently it is: + scav # KiB work (bg), # KiB work (eager), # KiB total, #% util + where the fields are as follows: + # KiB work (bg) the amount of memory returned to the OS in the background since + the last line + # KiB work (eager) the amount of memory returned to the OS eagerly since the last line + # KiB total the amount of address space currently returned to the OS + #% util the fraction of all unscavenged heap memory which is in use + If the line ends with "(forced)", then scavenging was forced by a + debug.FreeOSMemory() call. + + scheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit + detailed multiline info every X milliseconds, describing the state of the scheduler, + processors, threads and goroutines. + + schedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard + error every X milliseconds, summarizing the scheduler state. + + tracebackancestors: setting tracebackancestors=N extends tracebacks with the stacks at + which goroutines were created, where N limits the number of ancestor goroutines to + report. This also extends the information returned by runtime.Stack. Ancestors' goroutine + IDs will refer to the ID of the goroutine at the time of creation; it's possible for this + ID to be reused for another goroutine. Setting N to 0 will report no ancestry information. + + tracefpunwindoff: setting tracefpunwindoff=1 forces the execution tracer to + use the runtime's default stack unwinder instead of frame pointer unwinding. + This increases tracer overhead, but could be helpful as a workaround or for + debugging unexpected regressions caused by frame pointer unwinding. + + traceadvanceperiod: the approximate period in nanoseconds between trace generations. Only + applies if a program is built with GOEXPERIMENT=exectracer2. Used primarily for testing + and debugging the execution tracer. + + asyncpreemptoff: asyncpreemptoff=1 disables signal-based + asynchronous goroutine preemption. This makes some loops + non-preemptible for long periods, which may delay GC and + goroutine scheduling.
This is useful for debugging GC issues + because it also disables the conservative stack scanning used + for asynchronously preempted goroutines. + +The [net] and [net/http] packages also refer to debugging variables in GODEBUG. +See the documentation for those packages for details. + +The GOMAXPROCS variable limits the number of operating system threads that +can execute user-level Go code simultaneously. There is no limit to the number of threads +that can be blocked in system calls on behalf of Go code; those do not count against +the GOMAXPROCS limit. This package's [GOMAXPROCS] function queries and changes +the limit. + +The GORACE variable configures the race detector, for programs built using -race. +See the [Race Detector article] for details. + +The GOTRACEBACK variable controls the amount of output generated when a Go +program fails due to an unrecovered panic or an unexpected runtime condition. +By default, a failure prints a stack trace for the current goroutine, +eliding functions internal to the run-time system, and then exits with exit code 2. +The failure prints stack traces for all goroutines if there is no current goroutine +or the failure is internal to the run-time. +GOTRACEBACK=none omits the goroutine stack traces entirely. +GOTRACEBACK=single (the default) behaves as described above. +GOTRACEBACK=all adds stack traces for all user-created goroutines. +GOTRACEBACK=system is like “all” but adds stack frames for run-time functions +and shows goroutines created internally by the run-time. +GOTRACEBACK=crash is like “system” but crashes in an operating system-specific +manner instead of exiting. For example, on Unix systems, the crash raises +SIGABRT to trigger a core dump. +GOTRACEBACK=wer is like “crash” but doesn't disable Windows Error Reporting (WER). +For historical reasons, the GOTRACEBACK settings 0, 1, and 2 are synonyms for +none, all, and system, respectively. +The [runtime/debug.SetTraceback] function allows increasing the +amount of output at run time, but it cannot reduce the amount below that +specified by the environment variable. + +The GOARCH, GOOS, GOPATH, and GOROOT environment variables complete +the set of Go environment variables. They influence the building of Go programs +(see [cmd/go] and [go/build]). +GOARCH, GOOS, and GOROOT are recorded at compile time and made available by +constants or functions in this package, but they do not influence the execution +of the run-time system. + +# Security + +On Unix platforms, Go's runtime system behaves slightly differently when a +binary is setuid/setgid or executed with setuid/setgid-like properties, in order +to prevent dangerous behaviors. On Linux this is determined by checking for the +AT_SECURE flag in the auxiliary vector, on the BSDs and Solaris/Illumos it is +determined by checking the issetugid syscall, and on AIX it is determined by +checking if the uid/gid match the effective uid/gid. + +When the runtime determines the binary is setuid/setgid-like, it does three main +things: + - The standard input/output file descriptors (0, 1, 2) are checked to be open. + If any of them are closed, they are opened pointing at /dev/null. + - The value of the GOTRACEBACK environment variable is set to 'none'. + - When a signal is received that terminates the program, or the program + encounters an unrecoverable panic that would otherwise override the value + of GOTRACEBACK, the goroutine stack, registers, and other memory related + information are omitted. 
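Several of the environment variables documented above also have programmatic counterparts in runtime/debug and package runtime, which is handy when a knob must change while the process runs. A small sketch follows; the values are arbitrary examples, not recommendations.

```go
package main

import (
	"runtime"
	"runtime/debug"
)

func main() {
	// GOGC: SetGCPercent returns the previous value, so it can be restored.
	old := debug.SetGCPercent(200)
	defer debug.SetGCPercent(old)

	// GOMEMLIMIT: a 1 GiB soft limit; the default is math.MaxInt64,
	// which effectively disables the limit.
	prev := debug.SetMemoryLimit(1 << 30)
	defer debug.SetMemoryLimit(prev)

	// GOTRACEBACK: SetTraceback can raise, but never lower, the level
	// chosen by the environment variable.
	debug.SetTraceback("system")

	// GOMAXPROCS: an argument < 1 queries without changing the limit.
	_ = runtime.GOMAXPROCS(0)
}
```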
+ +[Race Detector article]: https://go.dev/doc/articles/race_detector +*/ +package runtime + +import ( + "internal/goarch" + "internal/goos" +) + +// Caller reports file and line number information about function invocations on +// the calling goroutine's stack. The argument skip is the number of stack frames +// to ascend, with 0 identifying the caller of Caller. (For historical reasons the +// meaning of skip differs between Caller and [Callers].) The return values report the +// program counter, file name, and line number within the file of the corresponding +// call. The boolean ok is false if it was not possible to recover the information. +func Caller(skip int) (pc uintptr, file string, line int, ok bool) { + rpc := make([]uintptr, 1) + n := callers(skip+1, rpc[:]) + if n < 1 { + return + } + frame, _ := CallersFrames(rpc).Next() + return frame.PC, frame.File, frame.Line, frame.PC != 0 +} + +// Callers fills the slice pc with the return program counters of function invocations +// on the calling goroutine's stack. The argument skip is the number of stack frames +// to skip before recording in pc, with 0 identifying the frame for Callers itself and +// 1 identifying the caller of Callers. +// It returns the number of entries written to pc. +// +// To translate these PCs into symbolic information such as function +// names and line numbers, use [CallersFrames]. CallersFrames accounts +// for inlined functions and adjusts the return program counters into +// call program counters. Iterating over the returned slice of PCs +// directly is discouraged, as is using [FuncForPC] on any of the +// returned PCs, since these cannot account for inlining or return +// program counter adjustment. +func Callers(skip int, pc []uintptr) int { + // runtime.callers uses pc.array==nil as a signal + // to print a stack trace. Pick off 0-length pc here + // so that we don't let a nil pc slice get to it. + if len(pc) == 0 { + return 0 + } + return callers(skip, pc) +} + +var defaultGOROOT string // set by cmd/link + +// GOROOT returns the root of the Go tree. It uses the +// GOROOT environment variable, if set at process start, +// or else the root used during the Go build. +func GOROOT() string { + s := gogetenv("GOROOT") + if s != "" { + return s + } + return defaultGOROOT +} + +// buildVersion is the Go tree's version string at build time. +// +// If any GOEXPERIMENTs are set to non-default values, it will include +// "X:<goexperiments>". +// +// This is set by the linker. +// +// This is accessed by "go version <binary>". +var buildVersion string + +// Version returns the Go tree's version string. +// It is either the commit hash and date at the time of the build or, +// when possible, a release tag like "go1.3". +func Version() string { + return buildVersion +} + +// GOOS is the running program's operating system target: +// one of darwin, freebsd, linux, and so on. +// To view possible combinations of GOOS and GOARCH, run "go tool dist list". +const GOOS string = goos.GOOS + +// GOARCH is the running program's architecture target: +// one of 386, amd64, arm, s390x, and so on. +const GOARCH string = goarch.GOARCH diff --git a/platform/dbops/binaries/go/go/src/runtime/fastlog2.go b/platform/dbops/binaries/go/go/src/runtime/fastlog2.go new file mode 100644 index 0000000000000000000000000000000000000000..1f251bfaab2d59057bd0abd557c76c9b87777c9e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/fastlog2.go @@ -0,0 +1,27 @@ +// Copyright 2015 The Go Authors. All rights reserved.
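As a quick aside before the fastlog2 files: the Caller and Callers entry points documented above pair naturally with CallersFrames. A self-contained sketch of both call styles (illustrative only; output paths vary by machine):

```go
package main

import (
	"fmt"
	"runtime"
)

func where() {
	// Caller: one frame; skip==0 reports the call site of Caller itself.
	pc, file, line, ok := runtime.Caller(0)
	if ok {
		fmt.Printf("here: %s:%d (pc=%#x)\n", file, line, pc)
	}

	// Callers + CallersFrames: the full stack, with inlining accounted for.
	pcs := make([]uintptr, 16)
	n := runtime.Callers(0, pcs)
	frames := runtime.CallersFrames(pcs[:n])
	for {
		f, more := frames.Next()
		fmt.Printf("%s\n\t%s:%d\n", f.Function, f.File, f.Line)
		if !more {
			break
		}
	}
}

func main() { where() }
```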
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// fastlog2 implements a fast approximation to the base 2 log of a +// float64. This is used to compute a geometric distribution for heap +// sampling, without introducing dependencies into package math. This +// uses a very rough approximation using the float64 exponent and the +// first 25 bits of the mantissa. The top 5 bits of the mantissa are +// used to load limits from a table of constants and the rest are used +// to scale linearly between them. +func fastlog2(x float64) float64 { + const fastlogScaleBits = 20 + const fastlogScaleRatio = 1.0 / (1 << fastlogScaleBits) + + xBits := float64bits(x) + // Extract the exponent from the IEEE float64, and index a constant + // table with the top 5 bits of the mantissa. + xExp := int64((xBits>>52)&0x7FF) - 1023 + xManIndex := (xBits >> (52 - fastlogNumBits)) % (1 << fastlogNumBits) + xManScale := (xBits >> (52 - fastlogNumBits - fastlogScaleBits)) % (1 << fastlogScaleBits) + + low, high := fastlog2Table[xManIndex], fastlog2Table[xManIndex+1] + return float64(xExp) + low + (high-low)*float64(xManScale)*fastlogScaleRatio +} diff --git a/platform/dbops/binaries/go/go/src/runtime/fastlog2_test.go b/platform/dbops/binaries/go/go/src/runtime/fastlog2_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ae0f40b2bb7772a8eae364e02fb4660194928092 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/fastlog2_test.go @@ -0,0 +1,34 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "math" + "runtime" + "testing" +) + +func TestFastLog2(t *testing.T) { + // Compute the Euclidean distance between math.Log2 and the FastLog2 + // implementation over the range of interest for heap sampling. + const randomBitCount = 26 + var e float64 + + inc := 1 + if testing.Short() { + // Check 1K total values, down from 64M. + inc = 1 << 16 + } + for i := 1; i < 1<<randomBitCount; i += inc { + l, fl := math.Log2(float64(i)), runtime.Fastlog2(float64(i)) + d := l - fl + e += d * d + } + e = math.Sqrt(e / (1 << randomBitCount)) + + if e > 1.0 { + t.Fatalf("imprecision on fastlog2 implementation, want <=1.0, got %f", e) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/fastlog2table.go b/platform/dbops/binaries/go/go/src/runtime/fastlog2table.go new file mode 100644 index 0000000000000000000000000000000000000000..6ba4a7d3f24ccfb9c9da6b18a583c9006722fe8f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/fastlog2table.go @@ -0,0 +1,43 @@ +// Code generated by mkfastlog2table.go; DO NOT EDIT. +// Run go generate from src/runtime to update. +// See mkfastlog2table.go for comments.
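The approximation above leans on the identity log2(x) = e + log2(m) for x = m * 2^e with m in [1, 2). A standalone sketch of that decomposition follows, using math.Float64bits in place of the runtime's internal float64bits and deferring to math.Log2 where the runtime would consult fastlog2Table; it is a hypothetical helper for illustration, not the runtime's code.

```go
package main

import (
	"fmt"
	"math"
)

// roughLog2 splits a positive, normal float64 into exponent and
// mantissa, exactly the decomposition fastlog2 relies on.
func roughLog2(x float64) float64 {
	bits := math.Float64bits(x)
	exp := int64((bits>>52)&0x7FF) - 1023          // unbiased exponent
	frac := 1 + float64(bits&(1<<52-1))/(1<<52)    // mantissa in [1, 2)
	// The runtime replaces this math.Log2 call with a table lookup
	// plus linear interpolation to avoid depending on package math.
	return float64(exp) + math.Log2(frac)
}

func main() {
	for _, x := range []float64{1, 2.5, 1000, 1e-3} {
		fmt.Printf("x=%g rough=%f exact=%f\n", x, roughLog2(x), math.Log2(x))
	}
}
```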
+ +package runtime + +const fastlogNumBits = 5 + +var fastlog2Table = [1<<fastlogNumBits + 1]float64{ ... } diff --git a/platform/dbops/binaries/go/go/src/runtime/fds_unix.go b/platform/dbops/binaries/go/go/src/runtime/fds_unix.go new file mode 100644 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/fds_unix.go +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package runtime + +func checkfds() { + if islibrary || isarchive { + // If the program is actually a library, presumably being consumed by + // another program, we don't want to mess around with the file descriptors. + return + } + + const ( + // F_GETFD, EBADF, O_RDWR are standard across all unixes we support, so + // we define them here rather than in each of the OS specific files. + F_GETFD = 1 + EBADF = 9 + O_RDWR = 2 + ) + + devNull := []byte("/dev/null\x00") + for i := 0; i < 3; i++ { + ret, errno := fcntl(int32(i), F_GETFD, 0) + if ret >= 0 { + continue + } + + if errno != EBADF { + print("runtime: unexpected error while checking standard file descriptor ", i, ", errno=", errno, "\n") + throw("cannot open standard fds") + } + + if ret := open(&devNull[0], O_RDWR, 0); ret < 0 { + print("runtime: standard file descriptor ", i, " closed, unable to open /dev/null, errno=", errno, "\n") + throw("cannot open standard fds") + } else if ret != int32(i) { + print("runtime: opened unexpected file descriptor ", ret, " when attempting to open ", i, "\n") + throw("cannot open standard fds") + } + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/float.go b/platform/dbops/binaries/go/go/src/runtime/float.go new file mode 100644 index 0000000000000000000000000000000000000000..9f281c404505aef53ea063f6cc5a5a1bc37fcc3c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/float.go @@ -0,0 +1,54 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +var inf = float64frombits(0x7FF0000000000000) + +// isNaN reports whether f is an IEEE 754 “not-a-number” value. +func isNaN(f float64) (is bool) { + // IEEE 754 says that only NaNs satisfy f != f. + return f != f +} + +// isFinite reports whether f is neither NaN nor an infinity. +func isFinite(f float64) bool { + return !isNaN(f - f) +} + +// isInf reports whether f is an infinity. +func isInf(f float64) bool { + return !isNaN(f) && !isFinite(f) +} + +// abs returns the absolute value of x. +// +// Special cases are: +// +// abs(±Inf) = +Inf +// abs(NaN) = NaN +func abs(x float64) float64 { + const sign = 1 << 63 + return float64frombits(float64bits(x) &^ sign) +} + +// copysign returns a value with the magnitude +// of x and the sign of y. +func copysign(x, y float64) float64 { + const sign = 1 << 63 + return float64frombits(float64bits(x)&^sign | float64bits(y)&sign) +} + +// float64bits returns the IEEE 754 binary representation of f. +func float64bits(f float64) uint64 { + return *(*uint64)(unsafe.Pointer(&f)) +} + +// float64frombits returns the floating point number corresponding to +// the IEEE 754 binary representation b. +func float64frombits(b uint64) float64 { + return *(*float64)(unsafe.Pointer(&b)) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/float_test.go b/platform/dbops/binaries/go/go/src/runtime/float_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b2aa43da5970856262d54c6f0ba22618bf7927a8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/float_test.go @@ -0,0 +1,25 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "testing" +) + +func TestIssue48807(t *testing.T) { + for _, i := range []uint64{ + 0x8234508000000001, // from issue48807 + 1<<56 + 1<<32 + 1, + } { + got := float32(i) + dontwant := float32(float64(i)) + if got == dontwant { + // The test cases above should be uint64s such that + // this equality doesn't hold. These examples trigger + // the case where using an intermediate float64 doesn't work.
+ t.Errorf("direct float32 conversion doesn't work: arg=%x got=%x dontwant=%x", i, got, dontwant) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/funcdata.h b/platform/dbops/binaries/go/go/src/runtime/funcdata.h new file mode 100644 index 0000000000000000000000000000000000000000..4bbc58ea48823ee02a9f4ed96473205626cab9e0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/funcdata.h @@ -0,0 +1,56 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file defines the IDs for PCDATA and FUNCDATA instructions +// in Go binaries. It is included by assembly sources, so it must +// be written using #defines. +// +// These must agree with internal/abi/symtab.go. + +#define PCDATA_UnsafePoint 0 +#define PCDATA_StackMapIndex 1 +#define PCDATA_InlTreeIndex 2 +#define PCDATA_ArgLiveIndex 3 + +#define FUNCDATA_ArgsPointerMaps 0 /* garbage collector blocks */ +#define FUNCDATA_LocalsPointerMaps 1 +#define FUNCDATA_StackObjects 2 +#define FUNCDATA_InlTree 3 +#define FUNCDATA_OpenCodedDeferInfo 4 /* info for func with open-coded defers */ +#define FUNCDATA_ArgInfo 5 +#define FUNCDATA_ArgLiveInfo 6 +#define FUNCDATA_WrapInfo 7 + +// Pseudo-assembly statements. + +// GO_ARGS, GO_RESULTS_INITIALIZED, and NO_LOCAL_POINTERS are macros +// that communicate to the runtime information about the location and liveness +// of pointers in an assembly function's arguments, results, and stack frame. +// This communication is only required in assembly functions that make calls +// to other functions that might be preempted or grow the stack. +// NOSPLIT functions that make no calls do not need to use these macros. + +// GO_ARGS indicates that the Go prototype for this assembly function +// defines the pointer map for the function's arguments. +// GO_ARGS should be the first instruction in a function that uses it. +// It can be omitted if there are no arguments at all. +// GO_ARGS is inserted implicitly by the assembler for any function +// whose package-qualified symbol name belongs to the current package; +// it is therefore usually not necessary to write explicitly. +#define GO_ARGS FUNCDATA $FUNCDATA_ArgsPointerMaps, go_args_stackmap(SB) + +// GO_RESULTS_INITIALIZED indicates that the assembly function +// has initialized the stack space for its results and that those results +// should be considered live for the remainder of the function. +#define GO_RESULTS_INITIALIZED PCDATA $PCDATA_StackMapIndex, $1 + +// NO_LOCAL_POINTERS indicates that the assembly function stores +// no pointers to heap objects in its local stack variables. +#define NO_LOCAL_POINTERS FUNCDATA $FUNCDATA_LocalsPointerMaps, no_pointers_stackmap(SB) + +// ArgsSizeUnknown is set in Func.argsize to mark all functions +// whose argument size is unknown (C vararg functions, and +// assembly code without an explicit specification). +// This value is generated by the compiler, assembler, or linker. +#define ArgsSizeUnknown 0x80000000 diff --git a/platform/dbops/binaries/go/go/src/runtime/gc_test.go b/platform/dbops/binaries/go/go/src/runtime/gc_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c6759a172c8647415811ce2934efb795ff4984fe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/gc_test.go @@ -0,0 +1,947 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "fmt" + "internal/goexperiment" + "math/rand" + "os" + "reflect" + "runtime" + "runtime/debug" + "sort" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + "unsafe" +) + +func TestGcSys(t *testing.T) { + t.Skip("skipping known-flaky test; golang.org/issue/37331") + if os.Getenv("GOGC") == "off" { + t.Skip("skipping test; GOGC=off in environment") + } + got := runTestProg(t, "testprog", "GCSys") + want := "OK\n" + if got != want { + t.Fatalf("expected %q, but got %q", want, got) + } +} + +func TestGcDeepNesting(t *testing.T) { + type T [2][2][2][2][2][2][2][2][2][2]*int + a := new(T) + + // Prevent the compiler from applying escape analysis. + // This makes sure new(T) is allocated on the heap, not on the stack. + t.Logf("%p", a) + + a[0][0][0][0][0][0][0][0][0][0] = new(int) + *a[0][0][0][0][0][0][0][0][0][0] = 13 + runtime.GC() + if *a[0][0][0][0][0][0][0][0][0][0] != 13 { + t.Fail() + } +} + +func TestGcMapIndirection(t *testing.T) { + defer debug.SetGCPercent(debug.SetGCPercent(1)) + runtime.GC() + type T struct { + a [256]int + } + m := make(map[T]T) + for i := 0; i < 2000; i++ { + var a T + a.a[0] = i + m[a] = T{} + } +} + +func TestGcArraySlice(t *testing.T) { + type X struct { + buf [1]byte + nextbuf []byte + next *X + } + var head *X + for i := 0; i < 10; i++ { + p := &X{} + p.buf[0] = 42 + p.next = head + if head != nil { + p.nextbuf = head.buf[:] + } + head = p + runtime.GC() + } + for p := head; p != nil; p = p.next { + if p.buf[0] != 42 { + t.Fatal("corrupted heap") + } + } +} + +func TestGcRescan(t *testing.T) { + type X struct { + c chan error + nextx *X + } + type Y struct { + X + nexty *Y + p *int + } + var head *Y + for i := 0; i < 10; i++ { + p := &Y{} + p.c = make(chan error) + if head != nil { + p.nextx = &head.X + } + p.nexty = head + p.p = new(int) + *p.p = 42 + head = p + runtime.GC() + } + for p := head; p != nil; p = p.nexty { + if *p.p != 42 { + t.Fatal("corrupted heap") + } + } +} + +func TestGcLastTime(t *testing.T) { + ms := new(runtime.MemStats) + t0 := time.Now().UnixNano() + runtime.GC() + t1 := time.Now().UnixNano() + runtime.ReadMemStats(ms) + last := int64(ms.LastGC) + if t0 > last || last > t1 { + t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1) + } + pause := ms.PauseNs[(ms.NumGC+255)%256] + // Due to timer granularity, pause can actually be 0 on Windows + // or on virtualized environments. + if pause == 0 { + t.Logf("last GC pause was 0") + } else if pause > 10e9 { + t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause) + } +} + +var hugeSink any + +func TestHugeGCInfo(t *testing.T) { + // The test ensures that the compiler can chew these huge types even on the weakest machines. + // The types are not allocated at runtime. + if hugeSink != nil { + // 400MB on 32 bits, 4TB on 64 bits. + const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40 + hugeSink = new([n]*byte) + hugeSink = new([n]uintptr) + hugeSink = new(struct { + x float64 + y [n]*byte + z []string + }) + hugeSink = new(struct { + x float64 + y [n]uintptr + z []string + }) + } +} + +func TestPeriodicGC(t *testing.T) { + if runtime.GOARCH == "wasm" { + t.Skip("no sysmon on wasm yet") + } + + // Make sure we're not in the middle of a GC. + runtime.GC() + + var ms1, ms2 runtime.MemStats + runtime.ReadMemStats(&ms1) + + // Make periodic GC run continuously.
+ orig := *runtime.ForceGCPeriod + *runtime.ForceGCPeriod = 0 + + // Let some periodic GCs happen. In a heavily loaded system, + // it's possible these will be delayed, so this is designed to + // succeed quickly if things are working, but to give it some + // slack if things are slow. + var numGCs uint32 + const want = 2 + for i := 0; i < 200 && numGCs < want; i++ { + time.Sleep(5 * time.Millisecond) + + // Test that periodic GC actually happened. + runtime.ReadMemStats(&ms2) + numGCs = ms2.NumGC - ms1.NumGC + } + *runtime.ForceGCPeriod = orig + + if numGCs < want { + t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs) + } +} + +func TestGcZombieReporting(t *testing.T) { + // This test is somewhat sensitive to how the allocator works. + // Pointers in the zombies slice may cross spans, so we + // add invalidptr=0 to avoid the badPointer check. + // See issue https://golang.org/issues/49613/ + got := runTestProg(t, "testprog", "GCZombie", "GODEBUG=invalidptr=0") + want := "found pointer to free object" + if !strings.Contains(got, want) { + t.Fatalf("expected %q in output, but got %q", want, got) + } +} + +func TestGCTestMoveStackOnNextCall(t *testing.T) { + t.Parallel() + var onStack int + // GCTestMoveStackOnNextCall can fail in rare cases if there's + // a preemption. This won't happen many times in quick + // succession, so just retry a few times. + for retry := 0; retry < 5; retry++ { + runtime.GCTestMoveStackOnNextCall() + if moveStackCheck(t, &onStack, uintptr(unsafe.Pointer(&onStack))) { + // Passed. + return + } + } + t.Fatal("stack did not move") +} + +// This must not be inlined because the point is to force a stack +// growth check and move the stack. +// +//go:noinline +func moveStackCheck(t *testing.T, new *int, old uintptr) bool { + // new should have been updated by the stack move; + // old should not have. + + // Capture new's value before doing anything that could + // further move the stack. + new2 := uintptr(unsafe.Pointer(new)) + + t.Logf("old stack pointer %x, new stack pointer %x", old, new2) + if new2 == old { + // Check that we didn't screw up the test's escape analysis. + if cls := runtime.GCTestPointerClass(unsafe.Pointer(new)); cls != "stack" { + t.Fatalf("test bug: new (%#x) should be a stack pointer, not %s", new2, cls) + } + // This was a real failure. + return false + } + return true +} + +func TestGCTestMoveStackRepeatedly(t *testing.T) { + // Move the stack repeatedly to make sure we're not doubling + // it each time. + for i := 0; i < 100; i++ { + runtime.GCTestMoveStackOnNextCall() + moveStack1(false) + } +} + +//go:noinline +func moveStack1(x bool) { + // Make sure this function doesn't get auto-nosplit. + if x { + println("x") + } +} + +func TestGCTestIsReachable(t *testing.T) { + var all, half []unsafe.Pointer + var want uint64 + for i := 0; i < 16; i++ { + // The tiny allocator muddies things, so we use a + // scannable type. + p := unsafe.Pointer(new(*int)) + all = append(all, p) + if i%2 == 0 { + half = append(half, p) + want |= 1 << i + } + } + + got := runtime.GCTestIsReachable(all...) + if want != got { + t.Fatalf("did not get expected reachable set; want %b, got %b", want, got) + } + runtime.KeepAlive(half) +} + +var pointerClassBSS *int +var pointerClassData = 42 + +func TestGCTestPointerClass(t *testing.T) { + t.Parallel() + check := func(p unsafe.Pointer, want string) { + t.Helper() + got := runtime.GCTestPointerClass(p) + if got != want { + // Convert the pointer to a uintptr to avoid + // escaping it.
+ t.Errorf("for %#x, want class %s, got %s", uintptr(p), want, got) + } + } + var onStack int + var notOnStack int + check(unsafe.Pointer(&onStack), "stack") + check(unsafe.Pointer(runtime.Escape(¬OnStack)), "heap") + check(unsafe.Pointer(&pointerClassBSS), "bss") + check(unsafe.Pointer(&pointerClassData), "data") + check(nil, "other") +} + +func BenchmarkSetTypePtr(b *testing.B) { + benchSetType[*byte](b) +} + +func BenchmarkSetTypePtr8(b *testing.B) { + benchSetType[[8]*byte](b) +} + +func BenchmarkSetTypePtr16(b *testing.B) { + benchSetType[[16]*byte](b) +} + +func BenchmarkSetTypePtr32(b *testing.B) { + benchSetType[[32]*byte](b) +} + +func BenchmarkSetTypePtr64(b *testing.B) { + benchSetType[[64]*byte](b) +} + +func BenchmarkSetTypePtr126(b *testing.B) { + benchSetType[[126]*byte](b) +} + +func BenchmarkSetTypePtr128(b *testing.B) { + benchSetType[[128]*byte](b) +} + +func BenchmarkSetTypePtrSlice(b *testing.B) { + benchSetTypeSlice[*byte](b, 1<<10) +} + +type Node1 struct { + Value [1]uintptr + Left, Right *byte +} + +func BenchmarkSetTypeNode1(b *testing.B) { + benchSetType[Node1](b) +} + +func BenchmarkSetTypeNode1Slice(b *testing.B) { + benchSetTypeSlice[Node1](b, 32) +} + +type Node8 struct { + Value [8]uintptr + Left, Right *byte +} + +func BenchmarkSetTypeNode8(b *testing.B) { + benchSetType[Node8](b) +} + +func BenchmarkSetTypeNode8Slice(b *testing.B) { + benchSetTypeSlice[Node8](b, 32) +} + +type Node64 struct { + Value [64]uintptr + Left, Right *byte +} + +func BenchmarkSetTypeNode64(b *testing.B) { + benchSetType[Node64](b) +} + +func BenchmarkSetTypeNode64Slice(b *testing.B) { + benchSetTypeSlice[Node64](b, 32) +} + +type Node64Dead struct { + Left, Right *byte + Value [64]uintptr +} + +func BenchmarkSetTypeNode64Dead(b *testing.B) { + benchSetType[Node64Dead](b) +} + +func BenchmarkSetTypeNode64DeadSlice(b *testing.B) { + benchSetTypeSlice[Node64Dead](b, 32) +} + +type Node124 struct { + Value [124]uintptr + Left, Right *byte +} + +func BenchmarkSetTypeNode124(b *testing.B) { + benchSetType[Node124](b) +} + +func BenchmarkSetTypeNode124Slice(b *testing.B) { + benchSetTypeSlice[Node124](b, 32) +} + +type Node126 struct { + Value [126]uintptr + Left, Right *byte +} + +func BenchmarkSetTypeNode126(b *testing.B) { + benchSetType[Node126](b) +} + +func BenchmarkSetTypeNode126Slice(b *testing.B) { + benchSetTypeSlice[Node126](b, 32) +} + +type Node128 struct { + Value [128]uintptr + Left, Right *byte +} + +func BenchmarkSetTypeNode128(b *testing.B) { + benchSetType[Node128](b) +} + +func BenchmarkSetTypeNode128Slice(b *testing.B) { + benchSetTypeSlice[Node128](b, 32) +} + +type Node130 struct { + Value [130]uintptr + Left, Right *byte +} + +func BenchmarkSetTypeNode130(b *testing.B) { + benchSetType[Node130](b) +} + +func BenchmarkSetTypeNode130Slice(b *testing.B) { + benchSetTypeSlice[Node130](b, 32) +} + +type Node1024 struct { + Value [1024]uintptr + Left, Right *byte +} + +func BenchmarkSetTypeNode1024(b *testing.B) { + benchSetType[Node1024](b) +} + +func BenchmarkSetTypeNode1024Slice(b *testing.B) { + benchSetTypeSlice[Node1024](b, 32) +} + +func benchSetType[T any](b *testing.B) { + if goexperiment.AllocHeaders { + b.Skip("not supported with allocation headers experiment") + } + b.SetBytes(int64(unsafe.Sizeof(*new(T)))) + runtime.BenchSetType[T](b.N, b.ResetTimer) +} + +func benchSetTypeSlice[T any](b *testing.B, len int) { + if goexperiment.AllocHeaders { + b.Skip("not supported with allocation headers experiment") + } + b.SetBytes(int64(unsafe.Sizeof(*new(T)) * 
uintptr(len))) + runtime.BenchSetTypeSlice[T](b.N, b.ResetTimer, len) +} + +func BenchmarkAllocation(b *testing.B) { + type T struct { + x, y *byte + } + ngo := runtime.GOMAXPROCS(0) + work := make(chan bool, b.N+ngo) + result := make(chan *T) + for i := 0; i < b.N; i++ { + work <- true + } + for i := 0; i < ngo; i++ { + work <- false + } + for i := 0; i < ngo; i++ { + go func() { + var x *T + for <-work { + for i := 0; i < 1000; i++ { + x = &T{} + } + } + result <- x + }() + } + for i := 0; i < ngo; i++ { + <-result + } +} + +func TestPrintGC(t *testing.T) { + if testing.Short() { + t.Skip("Skipping in short mode") + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) + done := make(chan bool) + go func() { + for { + select { + case <-done: + return + default: + runtime.GC() + } + } + }() + for i := 0; i < 1e4; i++ { + func() { + defer print("") + }() + } + close(done) +} + +func testTypeSwitch(x any) error { + switch y := x.(type) { + case nil: + // ok + case error: + return y + } + return nil +} + +func testAssert(x any) error { + if y, ok := x.(error); ok { + return y + } + return nil +} + +func testAssertVar(x any) error { + var y, ok = x.(error) + if ok { + return y + } + return nil +} + +var a bool + +//go:noinline +func testIfaceEqual(x any) { + if x == "abc" { + a = true + } +} + +func TestPageAccounting(t *testing.T) { + // Grow the heap in small increments. This used to drop the + // pages-in-use count below zero because of a rounding + // mismatch (golang.org/issue/15022). + const blockSize = 64 << 10 + blocks := make([]*[blockSize]byte, (64<<20)/blockSize) + for i := range blocks { + blocks[i] = new([blockSize]byte) + } + + // Check that the running page count matches reality. + pagesInUse, counted := runtime.CountPagesInUse() + if pagesInUse != counted { + t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted) + } +} + +func init() { + // Enable ReadMemStats' double-check mode. + *runtime.DoubleCheckReadMemStats = true +} + +func TestReadMemStats(t *testing.T) { + base, slow := runtime.ReadMemStatsSlow() + if base != slow { + logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow)) + t.Fatal("memstats mismatch") + } +} + +func logDiff(t *testing.T, prefix string, got, want reflect.Value) { + typ := got.Type() + switch typ.Kind() { + case reflect.Array, reflect.Slice: + if got.Len() != want.Len() { + t.Logf("len(%s): got %v, want %v", prefix, got, want) + return + } + for i := 0; i < got.Len(); i++ { + logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i)) + } + case reflect.Struct: + for i := 0; i < typ.NumField(); i++ { + gf, wf := got.Field(i), want.Field(i) + logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf) + } + case reflect.Map: + t.Fatal("not implemented: logDiff for map") + default: + if got.Interface() != want.Interface() { + t.Logf("%s: got %v, want %v", prefix, got, want) + } + } +} + +func BenchmarkReadMemStats(b *testing.B) { + var ms runtime.MemStats + const heapSize = 100 << 20 + x := make([]*[1024]byte, heapSize/1024) + for i := range x { + x[i] = new([1024]byte) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + runtime.ReadMemStats(&ms) + } + + runtime.KeepAlive(x) +} + +func applyGCLoad(b *testing.B) func() { + // We’ll apply load to the runtime with maxProcs-1 goroutines + // and use one more to actually benchmark. It doesn't make sense + // to try to run this test with only 1 P (that's what + // BenchmarkReadMemStats is for). 
+ maxProcs := runtime.GOMAXPROCS(-1) + if maxProcs == 1 { + b.Skip("This benchmark can only be run with GOMAXPROCS > 1") + } + + // Code to build a big tree with lots of pointers. + type node struct { + children [16]*node + } + var buildTree func(depth int) *node + buildTree = func(depth int) *node { + tree := new(node) + if depth != 0 { + for i := range tree.children { + tree.children[i] = buildTree(depth - 1) + } + } + return tree + } + + // Keep the GC busy by continuously generating large trees. + done := make(chan struct{}) + var wg sync.WaitGroup + for i := 0; i < maxProcs-1; i++ { + wg.Add(1) + go func() { + defer wg.Done() + var hold *node + loop: + for { + hold = buildTree(5) + select { + case <-done: + break loop + default: + } + } + runtime.KeepAlive(hold) + }() + } + return func() { + close(done) + wg.Wait() + } +} + +func BenchmarkReadMemStatsLatency(b *testing.B) { + stop := applyGCLoad(b) + + // Accumulate the measured latencies here. + latencies := make([]time.Duration, 0, 1024) + + // Hit ReadMemStats continuously for b.N iterations + // and measure the latency of each call. + b.ResetTimer() + var ms runtime.MemStats + for i := 0; i < b.N; i++ { + // Sleep for a bit, otherwise we're just going to keep + // stopping the world and no one will get to do anything. + time.Sleep(100 * time.Millisecond) + start := time.Now() + runtime.ReadMemStats(&ms) + latencies = append(latencies, time.Since(start)) + } + // Make sure to stop the timer before we wait! The load created above + // is very heavy-weight and not easy to stop, so we could end up + // confusing the benchmarking framework for small b.N. + b.StopTimer() + stop() + + // Disable the default */op metrics. + // ns/op doesn't mean anything because it's an average, but we + // have a sleep in our b.N loop above which skews this significantly. + b.ReportMetric(0, "ns/op") + b.ReportMetric(0, "B/op") + b.ReportMetric(0, "allocs/op") + + // Sort latencies then report percentiles. + sort.Slice(latencies, func(i, j int) bool { + return latencies[i] < latencies[j] + }) + b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns") + b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns") + b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns") +} + +func TestUserForcedGC(t *testing.T) { + // Test that runtime.GC() triggers a GC even if GOGC=off. + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + + var ms1, ms2 runtime.MemStats + runtime.ReadMemStats(&ms1) + runtime.GC() + runtime.ReadMemStats(&ms2) + if ms1.NumGC == ms2.NumGC { + t.Fatalf("runtime.GC() did not trigger GC") + } + if ms1.NumForcedGC == ms2.NumForcedGC { + t.Fatalf("runtime.GC() was not accounted in NumForcedGC") + } +} + +func writeBarrierBenchmark(b *testing.B, f func()) { + runtime.GC() + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + //b.Logf("heap size: %d MB", ms.HeapAlloc>>20) + + // Keep GC running continuously during the benchmark, which in + // turn keeps the write barrier on continuously. + var stop uint32 + done := make(chan bool) + go func() { + for atomic.LoadUint32(&stop) == 0 { + runtime.GC() + } + close(done) + }() + defer func() { + atomic.StoreUint32(&stop, 1) + <-done + }() + + b.ResetTimer() + f() + b.StopTimer() +} + +func BenchmarkWriteBarrier(b *testing.B) { + if runtime.GOMAXPROCS(-1) < 2 { + // We don't want GC to take our time. + b.Skip("need GOMAXPROCS >= 2") + } + + // Construct a large tree both so the GC runs for a while and + // so we have a data structure to manipulate the pointers of.
+ type node struct { + l, r *node + } + var wbRoots []*node + var mkTree func(level int) *node + mkTree = func(level int) *node { + if level == 0 { + return nil + } + n := &node{mkTree(level - 1), mkTree(level - 1)} + if level == 10 { + // Seed GC with enough early pointers so it + // doesn't start termination barriers when it + // only has the top of the tree. + wbRoots = append(wbRoots, n) + } + return n + } + const depth = 22 // 64 MB + root := mkTree(depth) + + writeBarrierBenchmark(b, func() { + var stack [depth]*node + tos := -1 + + // There are two write barriers per iteration, so i+=2. + for i := 0; i < b.N; i += 2 { + if tos == -1 { + stack[0] = root + tos = 0 + } + + // Perform one step of reversing the tree. + n := stack[tos] + if n.l == nil { + tos-- + } else { + n.l, n.r = n.r, n.l + stack[tos] = n.l + stack[tos+1] = n.r + tos++ + } + + if i%(1<<12) == 0 { + // Avoid non-preemptible loops (see issue #10958). + runtime.Gosched() + } + } + }) + + runtime.KeepAlive(wbRoots) +} + +func BenchmarkBulkWriteBarrier(b *testing.B) { + if runtime.GOMAXPROCS(-1) < 2 { + // We don't want GC to take our time. + b.Skip("need GOMAXPROCS >= 2") + } + + // Construct a large set of objects we can copy around. + const heapSize = 64 << 20 + type obj [16]*byte + ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{})) + for i := range ptrs { + ptrs[i] = new(obj) + } + + writeBarrierBenchmark(b, func() { + const blockSize = 1024 + var pos int + for i := 0; i < b.N; i += blockSize { + // Rotate block. + block := ptrs[pos : pos+blockSize] + first := block[0] + copy(block, block[1:]) + block[blockSize-1] = first + + pos += blockSize + if pos+blockSize > len(ptrs) { + pos = 0 + } + + runtime.Gosched() + } + }) + + runtime.KeepAlive(ptrs) +} + +func BenchmarkScanStackNoLocals(b *testing.B) { + var ready sync.WaitGroup + teardown := make(chan bool) + for j := 0; j < 10; j++ { + ready.Add(1) + go func() { + x := 100000 + countpwg(&x, &ready, teardown) + }() + } + ready.Wait() + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StartTimer() + runtime.GC() + runtime.GC() + b.StopTimer() + } + close(teardown) +} + +func BenchmarkMSpanCountAlloc(b *testing.B) { + // Allocate one dummy mspan for the whole benchmark. + s := runtime.AllocMSpan() + defer runtime.FreeMSpan(s) + + // n is the number of bytes to benchmark against. + // n must always be a multiple of 8, since gcBits is + // always rounded up to 8 bytes. + for _, n := range []int{8, 16, 32, 64, 128} { + b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) { + // Initialize a new byte slice with pseudo-random data.
+ bits := make([]byte, n) + rand.Read(bits) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + runtime.MSpanCountAlloc(s, bits) + } + }) + } +} + +func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) { + if *n == 0 { + ready.Done() + <-teardown + return + } + *n-- + countpwg(n, ready, teardown) +} + +func TestMemoryLimit(t *testing.T) { + if testing.Short() { + t.Skip("stress test that takes time to run") + } + if runtime.NumCPU() < 4 { + t.Skip("want at least 4 CPUs for this test") + } + got := runTestProg(t, "testprog", "GCMemoryLimit") + want := "OK\n" + if got != want { + t.Fatalf("expected %q, but got %q", want, got) + } +} + +func TestMemoryLimitNoGCPercent(t *testing.T) { + if testing.Short() { + t.Skip("stress test that takes time to run") + } + if runtime.NumCPU() < 4 { + t.Skip("want at least 4 CPUs for this test") + } + got := runTestProg(t, "testprog", "GCMemoryLimitNoGCPercent") + want := "OK\n" + if got != want { + t.Fatalf("expected %q, but got %q", want, got) + } +} + +func TestMyGenericFunc(t *testing.T) { + runtime.MyGenericFunc[int]() +} diff --git a/platform/dbops/binaries/go/go/src/runtime/gcinfo_test.go b/platform/dbops/binaries/go/go/src/runtime/gcinfo_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5f72caf0ecebfbed9c98ca02e4241ca2ac119262 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/gcinfo_test.go @@ -0,0 +1,214 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "bytes" + "runtime" + "testing" +) + +const ( + typeScalar = 0 + typePointer = 1 +) + +// TestGCInfo tests that various objects in heap, data and bss receive correct GC pointer type info. 
+func TestGCInfo(t *testing.T) { + verifyGCInfo(t, "bss Ptr", &bssPtr, infoPtr) + verifyGCInfo(t, "bss ScalarPtr", &bssScalarPtr, infoScalarPtr) + verifyGCInfo(t, "bss PtrScalar", &bssPtrScalar, infoPtrScalar) + verifyGCInfo(t, "bss BigStruct", &bssBigStruct, infoBigStruct()) + verifyGCInfo(t, "bss string", &bssString, infoString) + verifyGCInfo(t, "bss slice", &bssSlice, infoSlice) + verifyGCInfo(t, "bss eface", &bssEface, infoEface) + verifyGCInfo(t, "bss iface", &bssIface, infoIface) + + verifyGCInfo(t, "data Ptr", &dataPtr, infoPtr) + verifyGCInfo(t, "data ScalarPtr", &dataScalarPtr, infoScalarPtr) + verifyGCInfo(t, "data PtrScalar", &dataPtrScalar, infoPtrScalar) + verifyGCInfo(t, "data BigStruct", &dataBigStruct, infoBigStruct()) + verifyGCInfo(t, "data string", &dataString, infoString) + verifyGCInfo(t, "data slice", &dataSlice, infoSlice) + verifyGCInfo(t, "data eface", &dataEface, infoEface) + verifyGCInfo(t, "data iface", &dataIface, infoIface) + + { + var x Ptr + verifyGCInfo(t, "stack Ptr", &x, infoPtr) + runtime.KeepAlive(x) + } + { + var x ScalarPtr + verifyGCInfo(t, "stack ScalarPtr", &x, infoScalarPtr) + runtime.KeepAlive(x) + } + { + var x PtrScalar + verifyGCInfo(t, "stack PtrScalar", &x, infoPtrScalar) + runtime.KeepAlive(x) + } + { + var x BigStruct + verifyGCInfo(t, "stack BigStruct", &x, infoBigStruct()) + runtime.KeepAlive(x) + } + { + var x string + verifyGCInfo(t, "stack string", &x, infoString) + runtime.KeepAlive(x) + } + { + var x []string + verifyGCInfo(t, "stack slice", &x, infoSlice) + runtime.KeepAlive(x) + } + { + var x any + verifyGCInfo(t, "stack eface", &x, infoEface) + runtime.KeepAlive(x) + } + { + var x Iface + verifyGCInfo(t, "stack iface", &x, infoIface) + runtime.KeepAlive(x) + } + + for i := 0; i < 10; i++ { + verifyGCInfo(t, "heap Ptr", runtime.Escape(new(Ptr)), trimDead(infoPtr)) + verifyGCInfo(t, "heap PtrSlice", runtime.Escape(&make([]*byte, 10)[0]), trimDead(infoPtr10)) + verifyGCInfo(t, "heap ScalarPtr", runtime.Escape(new(ScalarPtr)), trimDead(infoScalarPtr)) + verifyGCInfo(t, "heap ScalarPtrSlice", runtime.Escape(&make([]ScalarPtr, 4)[0]), trimDead(infoScalarPtr4)) + verifyGCInfo(t, "heap PtrScalar", runtime.Escape(new(PtrScalar)), trimDead(infoPtrScalar)) + verifyGCInfo(t, "heap BigStruct", runtime.Escape(new(BigStruct)), trimDead(infoBigStruct())) + verifyGCInfo(t, "heap string", runtime.Escape(new(string)), trimDead(infoString)) + verifyGCInfo(t, "heap eface", runtime.Escape(new(any)), trimDead(infoEface)) + verifyGCInfo(t, "heap iface", runtime.Escape(new(Iface)), trimDead(infoIface)) + } +} + +func verifyGCInfo(t *testing.T, name string, p any, mask0 []byte) { + mask := runtime.GCMask(p) + if bytes.HasPrefix(mask, mask0) { + // Just the prefix matching is OK. + // + // The Go runtime's pointer/scalar iterator generates pointers beyond + // the size of the type, up to the size of the size class. This space + // is safe for the GC to scan since it's zero, and GCBits checks to + // make sure that's true. But we need to handle the fact that the bitmap + // may be larger than we expect. 
+ return + } + t.Errorf("bad GC program for %v:\nwant %+v\ngot %+v", name, mask0, mask) +} + +func trimDead(mask []byte) []byte { + for len(mask) > 0 && mask[len(mask)-1] == typeScalar { + mask = mask[:len(mask)-1] + } + return mask +} + +var infoPtr = []byte{typePointer} + +type Ptr struct { + *byte +} + +var infoPtr10 = []byte{typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer} + +type ScalarPtr struct { + q int + w *int + e int + r *int + t int + y *int +} + +var infoScalarPtr = []byte{typeScalar, typePointer, typeScalar, typePointer, typeScalar, typePointer} + +var infoScalarPtr4 = append(append(append(append([]byte(nil), infoScalarPtr...), infoScalarPtr...), infoScalarPtr...), infoScalarPtr...) + +type PtrScalar struct { + q *int + w int + e *int + r int + t *int + y int +} + +var infoPtrScalar = []byte{typePointer, typeScalar, typePointer, typeScalar, typePointer, typeScalar} + +type BigStruct struct { + q *int + w byte + e [17]byte + r []byte + t int + y uint16 + u uint64 + i string +} + +func infoBigStruct() []byte { + switch runtime.GOARCH { + case "386", "arm", "mips", "mipsle": + return []byte{ + typePointer, // q *int + typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // w byte; e [17]byte + typePointer, typeScalar, typeScalar, // r []byte + typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64 + typePointer, typeScalar, // i string + } + case "arm64", "amd64", "loong64", "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "wasm": + return []byte{ + typePointer, // q *int + typeScalar, typeScalar, typeScalar, // w byte; e [17]byte + typePointer, typeScalar, typeScalar, // r []byte + typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64 + typePointer, typeScalar, // i string + } + default: + panic("unknown arch") + } +} + +type Iface interface { + f() +} + +type IfaceImpl int + +func (IfaceImpl) f() { +} + +var ( + // BSS + bssPtr Ptr + bssScalarPtr ScalarPtr + bssPtrScalar PtrScalar + bssBigStruct BigStruct + bssString string + bssSlice []string + bssEface any + bssIface Iface + + // DATA + dataPtr = Ptr{new(byte)} + dataScalarPtr = ScalarPtr{q: 1} + dataPtrScalar = PtrScalar{w: 1} + dataBigStruct = BigStruct{w: 1} + dataString = "foo" + dataSlice = []string{"foo"} + dataEface any = 42 + dataIface Iface = IfaceImpl(42) + + infoString = []byte{typePointer, typeScalar} + infoSlice = []byte{typePointer, typeScalar, typeScalar} + infoEface = []byte{typeScalar, typePointer} + infoIface = []byte{typeScalar, typePointer} +) diff --git a/platform/dbops/binaries/go/go/src/runtime/go_tls.h b/platform/dbops/binaries/go/go/src/runtime/go_tls.h new file mode 100644 index 0000000000000000000000000000000000000000..a47e798d9d5492d5d6c7aab198734315479b6169 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/go_tls.h @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
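As a rough illustration of what masks like infoScalarPtr above encode, the sketch below re-derives a word-level pointer bitmap with reflect. It is a deliberately simplified, hypothetical helper: it only flags single-word pointer kinds and ignores strings, slices, interfaces, and nested structs, all of which the real runtime bitmaps cover.

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

const (
	typeScalar  = 0 // the zero value of each mask slot
	typePointer = 1
)

// ptrMask marks which pointer-sized words of a flat struct hold pointers.
func ptrMask(t reflect.Type) []byte {
	word := unsafe.Sizeof(uintptr(0))
	mask := make([]byte, t.Size()/word) // starts out all typeScalar
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		switch f.Type.Kind() {
		case reflect.Pointer, reflect.UnsafePointer,
			reflect.Map, reflect.Chan, reflect.Func:
			mask[f.Offset/word] = typePointer
		}
	}
	return mask
}

type ScalarPtr struct {
	q int
	w *int
	e int
	r *int
	t int
	y *int
}

func main() {
	// On 64-bit targets this prints [0 1 0 1 0 1], matching infoScalarPtr.
	fmt.Println(ptrMask(reflect.TypeOf(ScalarPtr{})))
}
```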
+ +#ifdef GOARCH_arm +#define LR R14 +#endif + +#ifdef GOARCH_amd64 +#define get_tls(r) MOVQ TLS, r +#define g(r) 0(r)(TLS*1) +#endif + +#ifdef GOARCH_386 +#define get_tls(r) MOVL TLS, r +#define g(r) 0(r)(TLS*1) +#endif diff --git a/platform/dbops/binaries/go/go/src/runtime/hash32.go b/platform/dbops/binaries/go/go/src/runtime/hash32.go new file mode 100644 index 0000000000000000000000000000000000000000..0616c7dd050751eaab48674c8f49a667988b4a34 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/hash32.go @@ -0,0 +1,62 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Hashing algorithm inspired by +// wyhash: https://github.com/wangyi-fudan/wyhash/blob/ceb019b530e2c1c14d70b79bfa2bc49de7d95bc1/Modern%20Non-Cryptographic%20Hash%20Function%20and%20Pseudorandom%20Number%20Generator.pdf + +//go:build 386 || arm || mips || mipsle + +package runtime + +import "unsafe" + +func memhash32Fallback(p unsafe.Pointer, seed uintptr) uintptr { + a, b := mix32(uint32(seed), uint32(4^hashkey[0])) + t := readUnaligned32(p) + a ^= t + b ^= t + a, b = mix32(a, b) + a, b = mix32(a, b) + return uintptr(a ^ b) +} + +func memhash64Fallback(p unsafe.Pointer, seed uintptr) uintptr { + a, b := mix32(uint32(seed), uint32(8^hashkey[0])) + a ^= readUnaligned32(p) + b ^= readUnaligned32(add(p, 4)) + a, b = mix32(a, b) + a, b = mix32(a, b) + return uintptr(a ^ b) +} + +func memhashFallback(p unsafe.Pointer, seed, s uintptr) uintptr { + + a, b := mix32(uint32(seed), uint32(s^hashkey[0])) + if s == 0 { + return uintptr(a ^ b) + } + for ; s > 8; s -= 8 { + a ^= readUnaligned32(p) + b ^= readUnaligned32(add(p, 4)) + a, b = mix32(a, b) + p = add(p, 8) + } + if s >= 4 { + a ^= readUnaligned32(p) + b ^= readUnaligned32(add(p, s-4)) + } else { + t := uint32(*(*byte)(p)) + t |= uint32(*(*byte)(add(p, s>>1))) << 8 + t |= uint32(*(*byte)(add(p, s-1))) << 16 + b ^= t + } + a, b = mix32(a, b) + a, b = mix32(a, b) + return uintptr(a ^ b) +} + +func mix32(a, b uint32) (uint32, uint32) { + c := uint64(a^uint32(hashkey[1])) * uint64(b^uint32(hashkey[2])) + return uint32(c), uint32(c >> 32) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/hash64.go b/platform/dbops/binaries/go/go/src/runtime/hash64.go new file mode 100644 index 0000000000000000000000000000000000000000..2864a4b963ddde59cbd43f7328c2d26974e8b5b3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/hash64.go @@ -0,0 +1,92 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
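+
+// 64-bit fallback hashes, used when the AES-based hardware hash is
+// not available; hash32.go is the 32-bit counterpart.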
+ +// Hashing algorithm inspired by +// wyhash: https://github.com/wangyi-fudan/wyhash + +//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm + +package runtime + +import ( + "runtime/internal/math" + "unsafe" +) + +const ( + m1 = 0xa0761d6478bd642f + m2 = 0xe7037ed1a0b428db + m3 = 0x8ebc6af09c88c6e3 + m4 = 0x589965cc75374cc3 + m5 = 0x1d8e4e27c47d124f +) + +func memhashFallback(p unsafe.Pointer, seed, s uintptr) uintptr { + var a, b uintptr + seed ^= hashkey[0] ^ m1 + switch { + case s == 0: + return seed + case s < 4: + a = uintptr(*(*byte)(p)) + a |= uintptr(*(*byte)(add(p, s>>1))) << 8 + a |= uintptr(*(*byte)(add(p, s-1))) << 16 + case s == 4: + a = r4(p) + b = a + case s < 8: + a = r4(p) + b = r4(add(p, s-4)) + case s == 8: + a = r8(p) + b = a + case s <= 16: + a = r8(p) + b = r8(add(p, s-8)) + default: + l := s + if l > 48 { + seed1 := seed + seed2 := seed + for ; l > 48; l -= 48 { + seed = mix(r8(p)^m2, r8(add(p, 8))^seed) + seed1 = mix(r8(add(p, 16))^m3, r8(add(p, 24))^seed1) + seed2 = mix(r8(add(p, 32))^m4, r8(add(p, 40))^seed2) + p = add(p, 48) + } + seed ^= seed1 ^ seed2 + } + for ; l > 16; l -= 16 { + seed = mix(r8(p)^m2, r8(add(p, 8))^seed) + p = add(p, 16) + } + a = r8(add(p, l-16)) + b = r8(add(p, l-8)) + } + + return mix(m5^s, mix(a^m2, b^seed)) +} + +func memhash32Fallback(p unsafe.Pointer, seed uintptr) uintptr { + a := r4(p) + return mix(m5^4, mix(a^m2, a^seed^hashkey[0]^m1)) +} + +func memhash64Fallback(p unsafe.Pointer, seed uintptr) uintptr { + a := r8(p) + return mix(m5^8, mix(a^m2, a^seed^hashkey[0]^m1)) +} + +func mix(a, b uintptr) uintptr { + hi, lo := math.Mul64(uint64(a), uint64(b)) + return uintptr(hi ^ lo) +} + +func r4(p unsafe.Pointer) uintptr { + return uintptr(readUnaligned32(p)) +} + +func r8(p unsafe.Pointer) uintptr { + return uintptr(readUnaligned64(p)) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/hash_test.go b/platform/dbops/binaries/go/go/src/runtime/hash_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c1d4bfa0806f434a38a28f1a021e940c04714b60 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/hash_test.go @@ -0,0 +1,786 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "fmt" + "internal/race" + "math" + "math/rand" + . "runtime" + "strings" + "testing" + "unsafe" +) + +func TestMemHash32Equality(t *testing.T) { + if *UseAeshash { + t.Skip("skipping since AES hash implementation is used") + } + var b [4]byte + r := rand.New(rand.NewSource(1234)) + seed := uintptr(r.Uint64()) + for i := 0; i < 100; i++ { + randBytes(r, b[:]) + got := MemHash32(unsafe.Pointer(&b), seed) + want := MemHash(unsafe.Pointer(&b), seed, 4) + if got != want { + t.Errorf("MemHash32(%x, %v) = %v; want %v", b, seed, got, want) + } + } +} + +func TestMemHash64Equality(t *testing.T) { + if *UseAeshash { + t.Skip("skipping since AES hash implementation is used") + } + var b [8]byte + r := rand.New(rand.NewSource(1234)) + seed := uintptr(r.Uint64()) + for i := 0; i < 100; i++ { + randBytes(r, b[:]) + got := MemHash64(unsafe.Pointer(&b), seed) + want := MemHash(unsafe.Pointer(&b), seed, 8) + if got != want { + t.Errorf("MemHash64(%x, %v) = %v; want %v", b, seed, got, want) + } + } +} + +// Smhasher is a torture test for hash functions. 
+// https://code.google.com/p/smhasher/ +// This code is a port of some of the Smhasher tests to Go. +// +// The current AES hash function passes Smhasher. Our fallback +// hash functions don't, so we only enable the difficult tests when +// we know the AES implementation is available. + +// Sanity checks. +// hash should not depend on values outside key. +// hash should not depend on alignment. +func TestSmhasherSanity(t *testing.T) { + r := rand.New(rand.NewSource(1234)) + const REP = 10 + const KEYMAX = 128 + const PAD = 16 + const OFFMAX = 16 + for k := 0; k < REP; k++ { + for n := 0; n < KEYMAX; n++ { + for i := 0; i < OFFMAX; i++ { + var b [KEYMAX + OFFMAX + 2*PAD]byte + var c [KEYMAX + OFFMAX + 2*PAD]byte + randBytes(r, b[:]) + randBytes(r, c[:]) + copy(c[PAD+i:PAD+i+n], b[PAD:PAD+n]) + if BytesHash(b[PAD:PAD+n], 0) != BytesHash(c[PAD+i:PAD+i+n], 0) { + t.Errorf("hash depends on bytes outside key") + } + } + } + } +} + +type HashSet struct { + m map[uintptr]struct{} // set of hashes added + n int // number of hashes added +} + +func newHashSet() *HashSet { + return &HashSet{make(map[uintptr]struct{}), 0} +} +func (s *HashSet) add(h uintptr) { + s.m[h] = struct{}{} + s.n++ +} +func (s *HashSet) addS(x string) { + s.add(StringHash(x, 0)) +} +func (s *HashSet) addB(x []byte) { + s.add(BytesHash(x, 0)) +} +func (s *HashSet) addS_seed(x string, seed uintptr) { + s.add(StringHash(x, seed)) +} +func (s *HashSet) check(t *testing.T) { + const SLOP = 50.0 + collisions := s.n - len(s.m) + pairs := int64(s.n) * int64(s.n-1) / 2 + expected := float64(pairs) / math.Pow(2.0, float64(hashSize)) + stddev := math.Sqrt(expected) + if float64(collisions) > expected+SLOP*(3*stddev+1) { + t.Errorf("unexpected number of collisions: got=%d mean=%f stddev=%f threshold=%f", collisions, expected, stddev, expected+SLOP*(3*stddev+1)) + } +} + +// a string plus adding zeros must make distinct hashes +func TestSmhasherAppendedZeros(t *testing.T) { + s := "hello" + strings.Repeat("\x00", 256) + h := newHashSet() + for i := 0; i <= len(s); i++ { + h.addS(s[:i]) + } + h.check(t) +} + +// All 0-3 byte strings have distinct hashes. +func TestSmhasherSmallKeys(t *testing.T) { + if race.Enabled { + t.Skip("Too long for race mode") + } + h := newHashSet() + var b [3]byte + for i := 0; i < 256; i++ { + b[0] = byte(i) + h.addB(b[:1]) + for j := 0; j < 256; j++ { + b[1] = byte(j) + h.addB(b[:2]) + if !testing.Short() { + for k := 0; k < 256; k++ { + b[2] = byte(k) + h.addB(b[:3]) + } + } + } + } + h.check(t) +} + +// Different length strings of all zeros have distinct hashes. +func TestSmhasherZeros(t *testing.T) { + N := 256 * 1024 + if testing.Short() { + N = 1024 + } + h := newHashSet() + b := make([]byte, N) + for i := 0; i <= N; i++ { + h.addB(b[:i]) + } + h.check(t) +} + +// Strings with up to two nonzero bytes all have distinct hashes. 
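+// For each length n that's 1 + 255*n + 255*255*n*(n-1)/2 distinct keys.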
+func TestSmhasherTwoNonzero(t *testing.T) { + if GOARCH == "wasm" { + t.Skip("Too slow on wasm") + } + if testing.Short() { + t.Skip("Skipping in short mode") + } + if race.Enabled { + t.Skip("Too long for race mode") + } + h := newHashSet() + for n := 2; n <= 16; n++ { + twoNonZero(h, n) + } + h.check(t) +} +func twoNonZero(h *HashSet, n int) { + b := make([]byte, n) + + // all zero + h.addB(b) + + // one non-zero byte + for i := 0; i < n; i++ { + for x := 1; x < 256; x++ { + b[i] = byte(x) + h.addB(b) + b[i] = 0 + } + } + + // two non-zero bytes + for i := 0; i < n; i++ { + for x := 1; x < 256; x++ { + b[i] = byte(x) + for j := i + 1; j < n; j++ { + for y := 1; y < 256; y++ { + b[j] = byte(y) + h.addB(b) + b[j] = 0 + } + } + b[i] = 0 + } + } +} + +// Test strings with repeats, like "abcdabcdabcdabcd..." +func TestSmhasherCyclic(t *testing.T) { + if testing.Short() { + t.Skip("Skipping in short mode") + } + if race.Enabled { + t.Skip("Too long for race mode") + } + r := rand.New(rand.NewSource(1234)) + const REPEAT = 8 + const N = 1000000 + for n := 4; n <= 12; n++ { + h := newHashSet() + b := make([]byte, REPEAT*n) + for i := 0; i < N; i++ { + b[0] = byte(i * 79 % 97) + b[1] = byte(i * 43 % 137) + b[2] = byte(i * 151 % 197) + b[3] = byte(i * 199 % 251) + randBytes(r, b[4:n]) + for j := n; j < n*REPEAT; j++ { + b[j] = b[j-n] + } + h.addB(b) + } + h.check(t) + } +} + +// Test strings with only a few bits set +func TestSmhasherSparse(t *testing.T) { + if GOARCH == "wasm" { + t.Skip("Too slow on wasm") + } + if testing.Short() { + t.Skip("Skipping in short mode") + } + sparse(t, 32, 6) + sparse(t, 40, 6) + sparse(t, 48, 5) + sparse(t, 56, 5) + sparse(t, 64, 5) + sparse(t, 96, 4) + sparse(t, 256, 3) + sparse(t, 2048, 2) +} +func sparse(t *testing.T, n int, k int) { + b := make([]byte, n/8) + h := newHashSet() + setbits(h, b, 0, k) + h.check(t) +} + +// set up to k bits at index i and greater +func setbits(h *HashSet, b []byte, i int, k int) { + h.addB(b) + if k == 0 { + return + } + for j := i; j < len(b)*8; j++ { + b[j/8] |= byte(1 << uint(j&7)) + setbits(h, b, j+1, k-1) + b[j/8] &= byte(^(1 << uint(j&7))) + } +} + +// Test all possible combinations of n blocks from the set s. +// "permutation" is a bad name here, but it is what Smhasher uses. 
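+// With a block set of size s and n positions this adds on the order of
+// s^n keys (e.g. 8^8, about 16.8 million, for the first case below).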
+func TestSmhasherPermutation(t *testing.T) {
+ if GOARCH == "wasm" {
+ t.Skip("Too slow on wasm")
+ }
+ if testing.Short() {
+ t.Skip("Skipping in short mode")
+ }
+ if race.Enabled {
+ t.Skip("Too long for race mode")
+ }
+ permutation(t, []uint32{0, 1, 2, 3, 4, 5, 6, 7}, 8)
+ permutation(t, []uint32{0, 1 << 29, 2 << 29, 3 << 29, 4 << 29, 5 << 29, 6 << 29, 7 << 29}, 8)
+ permutation(t, []uint32{0, 1}, 20)
+ permutation(t, []uint32{0, 1 << 31}, 20)
+ permutation(t, []uint32{0, 1, 2, 3, 4, 5, 6, 7, 1 << 29, 2 << 29, 3 << 29, 4 << 29, 5 << 29, 6 << 29, 7 << 29}, 6)
+}
+func permutation(t *testing.T, s []uint32, n int) {
+ b := make([]byte, n*4)
+ h := newHashSet()
+ genPerm(h, b, s, 0)
+ h.check(t)
+}
+func genPerm(h *HashSet, b []byte, s []uint32, n int) {
+ h.addB(b[:n])
+ if n == len(b) {
+ return
+ }
+ for _, v := range s {
+ b[n] = byte(v)
+ b[n+1] = byte(v >> 8)
+ b[n+2] = byte(v >> 16)
+ b[n+3] = byte(v >> 24)
+ genPerm(h, b, s, n+4)
+ }
+}
+
+type Key interface {
+ clear() // set bits all to 0
+ random(r *rand.Rand) // set key to something random
+ bits() int // how many bits key has
+ flipBit(i int) // flip bit i of the key
+ hash() uintptr // hash the key
+ name() string // for error reporting
+}
+
+type BytesKey struct {
+ b []byte
+}
+
+func (k *BytesKey) clear() {
+ for i := range k.b {
+ k.b[i] = 0
+ }
+}
+func (k *BytesKey) random(r *rand.Rand) {
+ randBytes(r, k.b)
+}
+func (k *BytesKey) bits() int {
+ return len(k.b) * 8
+}
+func (k *BytesKey) flipBit(i int) {
+ k.b[i>>3] ^= byte(1 << uint(i&7))
+}
+func (k *BytesKey) hash() uintptr {
+ return BytesHash(k.b, 0)
+}
+func (k *BytesKey) name() string {
+ return fmt.Sprintf("bytes%d", len(k.b))
+}
+
+type Int32Key struct {
+ i uint32
+}
+
+func (k *Int32Key) clear() {
+ k.i = 0
+}
+func (k *Int32Key) random(r *rand.Rand) {
+ k.i = r.Uint32()
+}
+func (k *Int32Key) bits() int {
+ return 32
+}
+func (k *Int32Key) flipBit(i int) {
+ k.i ^= 1 << uint(i)
+}
+func (k *Int32Key) hash() uintptr {
+ return Int32Hash(k.i, 0)
+}
+func (k *Int32Key) name() string {
+ return "int32"
+}
+
+type Int64Key struct {
+ i uint64
+}
+
+func (k *Int64Key) clear() {
+ k.i = 0
+}
+func (k *Int64Key) random(r *rand.Rand) {
+ k.i = uint64(r.Uint32()) + uint64(r.Uint32())<<32
+}
+func (k *Int64Key) bits() int {
+ return 64
+}
+func (k *Int64Key) flipBit(i int) {
+ k.i ^= 1 << uint(i)
+}
+func (k *Int64Key) hash() uintptr {
+ return Int64Hash(k.i, 0)
+}
+func (k *Int64Key) name() string {
+ return "int64"
+}
+
+type EfaceKey struct {
+ i any
+}
+
+func (k *EfaceKey) clear() {
+ k.i = nil
+}
+func (k *EfaceKey) random(r *rand.Rand) {
+ k.i = uint64(r.Int63())
+}
+func (k *EfaceKey) bits() int {
+ // use 64 bits. This tests inlined interfaces
+ // on 64-bit targets and indirect interfaces on
+ // 32-bit targets.
+ return 64
+}
+func (k *EfaceKey) flipBit(i int) {
+ k.i = k.i.(uint64) ^ uint64(1)<<uint(i)
+}
+func (k *EfaceKey) hash() uintptr {
+ return EfaceHash(k.i, 0)
+}
+func (k *EfaceKey) name() string {
+ return "Eface"
+}
+
+type IfaceKey struct {
+ i interface {
+ F()
+ }
+}
+type fInter uint64
+
+func (x fInter) F() {
+}
+
+func (k *IfaceKey) clear() {
+ k.i = nil
+}
+func (k *IfaceKey) random(r *rand.Rand) {
+ k.i = fInter(r.Int63())
+}
+func (k *IfaceKey) bits() int {
+ // use 64 bits. This tests inlined interfaces
+ // on 64-bit targets and indirect interfaces on
+ // 32-bit targets.
+ return 64
+}
+func (k *IfaceKey) flipBit(i int) {
+ k.i = k.i.(fInter) ^ fInter(1)<<uint(i)
+}
+func (k *IfaceKey) hash() uintptr {
+ return IfaceHash(k.i, 0)
+}
+func (k *IfaceKey) name() string {
+ return "Iface"
+}
+
+// Flipping a single bit of a key should flip each output bit with 50% probability.
+func TestSmhasherAvalanche(t *testing.T) {
+ if GOARCH == "wasm" {
+ t.Skip("Too slow on wasm")
+ }
+ if testing.Short() {
+ t.Skip("Skipping in short mode")
+ }
+ if race.Enabled {
+ t.Skip("Too long for race mode")
+ }
+ avalancheTest1(t, &BytesKey{make([]byte, 2)})
+ avalancheTest1(t, &BytesKey{make([]byte, 4)})
+ avalancheTest1(t, &BytesKey{make([]byte, 8)})
+ avalancheTest1(t, &BytesKey{make([]byte, 16)})
+ avalancheTest1(t, &BytesKey{make([]byte, 32)})
+ avalancheTest1(t, &BytesKey{make([]byte, 200)})
+ avalancheTest1(t, &Int32Key{})
+ avalancheTest1(t, &Int64Key{})
+ avalancheTest1(t, &EfaceKey{})
+ avalancheTest1(t, &IfaceKey{})
+}
+func avalancheTest1(t *testing.T, k Key) {
+ const REP = 100000
+ r := rand.New(rand.NewSource(1234))
+ n := k.bits()
+
+ // grid[i][j] is a count of whether flipping
+ // input bit i affects output bit j.
+ grid := make([][hashSize]int, n)
+
+ for z := 0; z < REP; z++ {
+ // pick a random key, hash it
+ k.random(r)
+ h := k.hash()
+
+ // flip each bit, hash & compare the results
+ for i := 0; i < n; i++ {
+ k.flipBit(i)
+ d := h ^ k.hash()
+ k.flipBit(i)
+
+ // record the effects of that bit flip
+ g := &grid[i]
+ for j := 0; j < hashSize; j++ {
+ g[j] += int(d & 1)
+ d >>= 1
+ }
+ }
+ }
+
+ // Each entry in the grid should be about REP/2.
+ // More precisely, we did N = k.bits() * hashSize experiments where
+ // each is the sum of REP coin flips. We want to find bounds on the
+ // sum of coin flips such that a truly random experiment would have
+ // all sums inside those bounds with 99% probability.
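+ // Each sum is Binomial(REP, 1/2): mean REP/2, stddev sqrt(REP)/2.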
+ N := n * hashSize
+ var c float64
+ // find c such that Prob(mean-c*stddev < x < mean+c*stddev)^N > .9999
+ for c = 0.0; math.Pow(math.Erf(c/math.Sqrt(2)), float64(N)) < .9999; c += .1 {
+ }
+ c *= 11.0 // allowed slack: 40% to 60% - we don't need to be perfectly random
+ mean := .5 * REP
+ stddev := .5 * math.Sqrt(REP)
+ low := int(mean - c*stddev)
+ high := int(mean + c*stddev)
+ for i := 0; i < n; i++ {
+ for j := 0; j < hashSize; j++ {
+ x := grid[i][j]
+ if x < low || x > high {
+ t.Errorf("bad bias for %s bit %d -> bit %d: %d/%d\n", k.name(), i, j, x, REP)
+ }
+ }
+ }
+}
+
+// All bit rotations of a set of distinct keys
+func TestSmhasherWindowed(t *testing.T) {
+ if race.Enabled {
+ t.Skip("Too long for race mode")
+ }
+ t.Logf("32 bit keys")
+ windowed(t, &Int32Key{})
+ t.Logf("64 bit keys")
+ windowed(t, &Int64Key{})
+ t.Logf("string keys")
+ windowed(t, &BytesKey{make([]byte, 128)})
+}
+func windowed(t *testing.T, k Key) {
+ if GOARCH == "wasm" {
+ t.Skip("Too slow on wasm")
+ }
+ if PtrSize == 4 {
+ // This test tends to be flaky on 32-bit systems.
+ // There's not enough bits in the hash output, so we
+ // expect a nontrivial number of collisions, and it is
+ // often quite a bit higher than expected. See issue 43130.
+ t.Skip("Flaky on 32-bit systems")
+ }
+ if testing.Short() {
+ t.Skip("Skipping in short mode")
+ }
+ const BITS = 16
+
+ for r := 0; r < k.bits(); r++ {
+ h := newHashSet()
+ for i := 0; i < 1<<BITS; i++ {
+ k.clear()
+ for j := 0; j < BITS; j++ {
+ if i>>uint(j)&1 != 0 {
+ k.flipBit((j + r) % k.bits())
+ }
+ }
+ h.add(k.hash())
+ }
+ h.check(t)
+ }
+}
+
+// All keys of the form prefix + [A-Za-z0-9]*N + suffix.
+func TestSmhasherText(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping in short mode")
+ }
+ text(t, "Foo", "Bar")
+ text(t, "FooBar", "")
+ text(t, "", "FooBar")
+}
+func text(t *testing.T, prefix, suffix string) {
+ const N = 4
+ const S = "ABCDEFGHIJKLMNOPQRSTabcdefghijklmnopqrst0123456789"
+ const L = len(S)
+ b := make([]byte, len(prefix)+N+len(suffix))
+ copy(b, prefix)
+ copy(b[len(prefix)+N:], suffix)
+ h := newHashSet()
+ c := b[len(prefix):]
+ for i := 0; i < L; i++ {
+ c[0] = S[i]
+ for j := 0; j < L; j++ {
+ c[1] = S[j]
+ for k := 0; k < L; k++ {
+ c[2] = S[k]
+ for x := 0; x < L; x++ {
+ c[3] = S[x]
+ h.addB(b)
+ }
+ }
+ }
+ }
+ h.check(t)
+}
+
+// Make sure different seed values generate different hashes.
+func TestSmhasherSeed(t *testing.T) {
+ h := newHashSet()
+ const N = 100000
+ s := "hello"
+ for i := 0; i < N; i++ {
+ h.addS_seed(s, uintptr(i))
+ }
+ h.check(t)
+}
+
+// size of the hash output (32 or 64 bits)
+const hashSize = 32 + int(^uintptr(0)>>63<<5)
+
+func randBytes(r *rand.Rand, b []byte) {
+ for i := range b {
+ b[i] = byte(r.Uint32())
+ }
+}
+
+func benchmarkHash(b *testing.B, n int) {
+ s := strings.Repeat("A", n)
+
+ for i := 0; i < b.N; i++ {
+ StringHash(s, 0)
+ }
+ b.SetBytes(int64(n))
+}
+
+func BenchmarkHash5(b *testing.B) { benchmarkHash(b, 5) }
+func BenchmarkHash16(b *testing.B) { benchmarkHash(b, 16) }
+func BenchmarkHash64(b *testing.B) { benchmarkHash(b, 64) }
+func BenchmarkHash1024(b *testing.B) { benchmarkHash(b, 1024) }
+func BenchmarkHash65536(b *testing.B) { benchmarkHash(b, 65536) }
+
+func TestArrayHash(t *testing.T) {
+ // Make sure that "" in arrays hash correctly. The hash
+ // should at least scramble the input seed so that, e.g.,
+ // {"","foo"} and {"foo",""} have different hashes.
+
+ // If the hash is bad, then all (8 choose 4) = 70 keys
+ // have the same hash. If so, we allocate 70/8 = 8
+ // overflow buckets.
If the hash is good we don't + // normally allocate any overflow buckets, and the + // probability of even one or two overflows goes down rapidly. + // (There is always 1 allocation of the bucket array. The map + // header is allocated on the stack.) + f := func() { + // Make the key type at most 128 bytes. Otherwise, + // we get an allocation per key. + type key [8]string + m := make(map[key]bool, 70) + + // fill m with keys that have 4 "foo"s and 4 ""s. + for i := 0; i < 256; i++ { + var k key + cnt := 0 + for j := uint(0); j < 8; j++ { + if i>>j&1 != 0 { + k[j] = "foo" + cnt++ + } + } + if cnt == 4 { + m[k] = true + } + } + if len(m) != 70 { + t.Errorf("bad test: (8 choose 4) should be 70, not %d", len(m)) + } + } + if n := testing.AllocsPerRun(10, f); n > 6 { + t.Errorf("too many allocs %f - hash not balanced", n) + } +} +func TestStructHash(t *testing.T) { + // See the comment in TestArrayHash. + f := func() { + type key struct { + a, b, c, d, e, f, g, h string + } + m := make(map[key]bool, 70) + + // fill m with keys that have 4 "foo"s and 4 ""s. + for i := 0; i < 256; i++ { + var k key + cnt := 0 + if i&1 != 0 { + k.a = "foo" + cnt++ + } + if i&2 != 0 { + k.b = "foo" + cnt++ + } + if i&4 != 0 { + k.c = "foo" + cnt++ + } + if i&8 != 0 { + k.d = "foo" + cnt++ + } + if i&16 != 0 { + k.e = "foo" + cnt++ + } + if i&32 != 0 { + k.f = "foo" + cnt++ + } + if i&64 != 0 { + k.g = "foo" + cnt++ + } + if i&128 != 0 { + k.h = "foo" + cnt++ + } + if cnt == 4 { + m[k] = true + } + } + if len(m) != 70 { + t.Errorf("bad test: (8 choose 4) should be 70, not %d", len(m)) + } + } + if n := testing.AllocsPerRun(10, f); n > 6 { + t.Errorf("too many allocs %f - hash not balanced", n) + } +} + +var sink uint64 + +func BenchmarkAlignedLoad(b *testing.B) { + var buf [16]byte + p := unsafe.Pointer(&buf[0]) + var s uint64 + for i := 0; i < b.N; i++ { + s += ReadUnaligned64(p) + } + sink = s +} + +func BenchmarkUnalignedLoad(b *testing.B) { + var buf [16]byte + p := unsafe.Pointer(&buf[1]) + var s uint64 + for i := 0; i < b.N; i++ { + s += ReadUnaligned64(p) + } + sink = s +} + +func TestCollisions(t *testing.T) { + if testing.Short() { + t.Skip("Skipping in short mode") + } + for i := 0; i < 16; i++ { + for j := 0; j < 16; j++ { + if j == i { + continue + } + var a [16]byte + m := make(map[uint16]struct{}, 1<<16) + for n := 0; n < 1<<16; n++ { + a[i] = byte(n) + a[j] = byte(n >> 8) + m[uint16(BytesHash(a[:], 0))] = struct{}{} + } + // N balls in N bins, for N=65536 + avg := 41427 + stdDev := 123 + if len(m) < avg-40*stdDev || len(m) > avg+40*stdDev { + t.Errorf("bad number of collisions i=%d j=%d outputs=%d out of 65536\n", i, j, len(m)) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/heap_test.go b/platform/dbops/binaries/go/go/src/runtime/heap_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4b73ab54fc2471c951a1900d68f767dafe42078e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/heap_test.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "testing" + _ "unsafe" +) + +//go:linkname heapObjectsCanMove runtime.heapObjectsCanMove +func heapObjectsCanMove() bool + +func TestHeapObjectsCanMove(t *testing.T) { + if heapObjectsCanMove() { + // If this happens (or this test stops building), + // it will break go4.org/unsafe/assume-no-moving-gc. 
+ t.Fatalf("heap objects can move!") + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/heapdump.go b/platform/dbops/binaries/go/go/src/runtime/heapdump.go new file mode 100644 index 0000000000000000000000000000000000000000..276c5bfaf615532ab16070f33099d634e5b36ed1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/heapdump.go @@ -0,0 +1,765 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Implementation of runtime/debug.WriteHeapDump. Writes all +// objects in the heap plus additional info (roots, threads, +// finalizers, etc.) to a file. + +// The format of the dumped file is described at +// https://golang.org/s/go15heapdump. + +package runtime + +import ( + "internal/abi" + "internal/goarch" + "internal/goexperiment" + "unsafe" +) + +//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump +func runtime_debug_WriteHeapDump(fd uintptr) { + stw := stopTheWorld(stwWriteHeapDump) + + // Keep m on this G's stack instead of the system stack. + // Both readmemstats_m and writeheapdump_m have pretty large + // peak stack depths and we risk blowing the system stack. + // This is safe because the world is stopped, so we don't + // need to worry about anyone shrinking and therefore moving + // our stack. + var m MemStats + systemstack(func() { + // Call readmemstats_m here instead of deeper in + // writeheapdump_m because we might blow the system stack + // otherwise. + readmemstats_m(&m) + writeheapdump_m(fd, &m) + }) + + startTheWorld(stw) +} + +const ( + fieldKindEol = 0 + fieldKindPtr = 1 + fieldKindIface = 2 + fieldKindEface = 3 + tagEOF = 0 + tagObject = 1 + tagOtherRoot = 2 + tagType = 3 + tagGoroutine = 4 + tagStackFrame = 5 + tagParams = 6 + tagFinalizer = 7 + tagItab = 8 + tagOSThread = 9 + tagMemStats = 10 + tagQueuedFinalizer = 11 + tagData = 12 + tagBSS = 13 + tagDefer = 14 + tagPanic = 15 + tagMemProf = 16 + tagAllocSample = 17 +) + +var dumpfd uintptr // fd to write the dump to. +var tmpbuf []byte + +// buffer of pending write data +const ( + bufSize = 4096 +) + +var buf [bufSize]byte +var nbuf uintptr + +func dwrite(data unsafe.Pointer, len uintptr) { + if len == 0 { + return + } + if nbuf+len <= bufSize { + copy(buf[nbuf:], (*[bufSize]byte)(data)[:len]) + nbuf += len + return + } + + write(dumpfd, unsafe.Pointer(&buf), int32(nbuf)) + if len >= bufSize { + write(dumpfd, data, int32(len)) + nbuf = 0 + } else { + copy(buf[:], (*[bufSize]byte)(data)[:len]) + nbuf = len + } +} + +func dwritebyte(b byte) { + dwrite(unsafe.Pointer(&b), 1) +} + +func flush() { + write(dumpfd, unsafe.Pointer(&buf), int32(nbuf)) + nbuf = 0 +} + +// Cache of types that have been serialized already. +// We use a type's hash field to pick a bucket. +// Inside a bucket, we keep a list of types that +// have been serialized so far, most recently used first. +// Note: when a bucket overflows we may end up +// serializing a type more than once. That's ok. +const ( + typeCacheBuckets = 256 + typeCacheAssoc = 4 +) + +type typeCacheBucket struct { + t [typeCacheAssoc]*_type +} + +var typecache [typeCacheBuckets]typeCacheBucket + +// dump a uint64 in a varint format parseable by encoding/binary. 
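+// Each output byte carries 7 bits of the value, least significant
+// group first, with the high bit set on all but the final byte; a
+// uint64 thus needs at most 10 bytes (e.g. 300 encodes as 0xAC 0x02).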
+func dumpint(v uint64) { + var buf [10]byte + var n int + for v >= 0x80 { + buf[n] = byte(v | 0x80) + n++ + v >>= 7 + } + buf[n] = byte(v) + n++ + dwrite(unsafe.Pointer(&buf), uintptr(n)) +} + +func dumpbool(b bool) { + if b { + dumpint(1) + } else { + dumpint(0) + } +} + +// dump varint uint64 length followed by memory contents. +func dumpmemrange(data unsafe.Pointer, len uintptr) { + dumpint(uint64(len)) + dwrite(data, len) +} + +func dumpslice(b []byte) { + dumpint(uint64(len(b))) + if len(b) > 0 { + dwrite(unsafe.Pointer(&b[0]), uintptr(len(b))) + } +} + +func dumpstr(s string) { + dumpmemrange(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s))) +} + +// dump information for a type. +func dumptype(t *_type) { + if t == nil { + return + } + + // If we've definitely serialized the type before, + // no need to do it again. + b := &typecache[t.Hash&(typeCacheBuckets-1)] + if t == b.t[0] { + return + } + for i := 1; i < typeCacheAssoc; i++ { + if t == b.t[i] { + // Move-to-front + for j := i; j > 0; j-- { + b.t[j] = b.t[j-1] + } + b.t[0] = t + return + } + } + + // Might not have been dumped yet. Dump it and + // remember we did so. + for j := typeCacheAssoc - 1; j > 0; j-- { + b.t[j] = b.t[j-1] + } + b.t[0] = t + + // dump the type + dumpint(tagType) + dumpint(uint64(uintptr(unsafe.Pointer(t)))) + dumpint(uint64(t.Size_)) + rt := toRType(t) + if x := t.Uncommon(); x == nil || rt.nameOff(x.PkgPath).Name() == "" { + dumpstr(rt.string()) + } else { + pkgpath := rt.nameOff(x.PkgPath).Name() + name := rt.name() + dumpint(uint64(uintptr(len(pkgpath)) + 1 + uintptr(len(name)))) + dwrite(unsafe.Pointer(unsafe.StringData(pkgpath)), uintptr(len(pkgpath))) + dwritebyte('.') + dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name))) + } + dumpbool(t.Kind_&kindDirectIface == 0 || t.PtrBytes != 0) +} + +// dump an object. +func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) { + dumpint(tagObject) + dumpint(uint64(uintptr(obj))) + dumpmemrange(obj, size) + dumpfields(bv) +} + +func dumpotherroot(description string, to unsafe.Pointer) { + dumpint(tagOtherRoot) + dumpstr(description) + dumpint(uint64(uintptr(to))) +} + +func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) { + dumpint(tagFinalizer) + dumpint(uint64(uintptr(obj))) + dumpint(uint64(uintptr(unsafe.Pointer(fn)))) + dumpint(uint64(uintptr(unsafe.Pointer(fn.fn)))) + dumpint(uint64(uintptr(unsafe.Pointer(fint)))) + dumpint(uint64(uintptr(unsafe.Pointer(ot)))) +} + +type childInfo struct { + // Information passed up from the callee frame about + // the layout of the outargs region. + argoff uintptr // where the arguments start in the frame + arglen uintptr // size of args region + args bitvector // if args.n >= 0, pointer map of args region + sp *uint8 // callee sp + depth uintptr // depth in call stack (0 == most recent) +} + +// dump kinds & offsets of interesting fields in bv. +func dumpbv(cbv *bitvector, offset uintptr) { + for i := uintptr(0); i < uintptr(cbv.n); i++ { + if cbv.ptrbit(i) == 1 { + dumpint(fieldKindPtr) + dumpint(uint64(offset + i*goarch.PtrSize)) + } + } +} + +func dumpframe(s *stkframe, child *childInfo) { + f := s.fn + + // Figure out what we can about our stack map + pc := s.pc + pcdata := int32(-1) // Use the entry map at function entry + if pc != f.entry() { + pc-- + pcdata = pcdatavalue(f, abi.PCDATA_StackMapIndex, pc) + } + if pcdata == -1 { + // We do not have a valid pcdata value but there might be a + // stackmap for this function. 
It is likely that we are looking + // at the function prologue, assume so and hope for the best. + pcdata = 0 + } + stkmap := (*stackmap)(funcdata(f, abi.FUNCDATA_LocalsPointerMaps)) + + var bv bitvector + if stkmap != nil && stkmap.n > 0 { + bv = stackmapdata(stkmap, pcdata) + } else { + bv.n = -1 + } + + // Dump main body of stack frame. + dumpint(tagStackFrame) + dumpint(uint64(s.sp)) // lowest address in frame + dumpint(uint64(child.depth)) // # of frames deep on the stack + dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack + dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp) // frame contents + dumpint(uint64(f.entry())) + dumpint(uint64(s.pc)) + dumpint(uint64(s.continpc)) + name := funcname(f) + if name == "" { + name = "unknown function" + } + dumpstr(name) + + // Dump fields in the outargs section + if child.args.n >= 0 { + dumpbv(&child.args, child.argoff) + } else { + // conservative - everything might be a pointer + for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize { + dumpint(fieldKindPtr) + dumpint(uint64(off)) + } + } + + // Dump fields in the local vars section + if stkmap == nil { + // No locals information, dump everything. + for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize { + dumpint(fieldKindPtr) + dumpint(uint64(off)) + } + } else if stkmap.n < 0 { + // Locals size information, dump just the locals. + size := uintptr(-stkmap.n) + for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize { + dumpint(fieldKindPtr) + dumpint(uint64(off)) + } + } else if stkmap.n > 0 { + // Locals bitmap information, scan just the pointers in + // locals. + dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp) + } + dumpint(fieldKindEol) + + // Record arg info for parent. 
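+ // The caller's frame is unwound next; these fields tell it which
+ // words of its outargs area (this frame's arguments) hold pointers.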
+ child.argoff = s.argp - s.fp + child.arglen = s.argBytes() + child.sp = (*uint8)(unsafe.Pointer(s.sp)) + child.depth++ + stkmap = (*stackmap)(funcdata(f, abi.FUNCDATA_ArgsPointerMaps)) + if stkmap != nil { + child.args = stackmapdata(stkmap, pcdata) + } else { + child.args.n = -1 + } + return +} + +func dumpgoroutine(gp *g) { + var sp, pc, lr uintptr + if gp.syscallsp != 0 { + sp = gp.syscallsp + pc = gp.syscallpc + lr = 0 + } else { + sp = gp.sched.sp + pc = gp.sched.pc + lr = gp.sched.lr + } + + dumpint(tagGoroutine) + dumpint(uint64(uintptr(unsafe.Pointer(gp)))) + dumpint(uint64(sp)) + dumpint(gp.goid) + dumpint(uint64(gp.gopc)) + dumpint(uint64(readgstatus(gp))) + dumpbool(isSystemGoroutine(gp, false)) + dumpbool(false) // isbackground + dumpint(uint64(gp.waitsince)) + dumpstr(gp.waitreason.String()) + dumpint(uint64(uintptr(gp.sched.ctxt))) + dumpint(uint64(uintptr(unsafe.Pointer(gp.m)))) + dumpint(uint64(uintptr(unsafe.Pointer(gp._defer)))) + dumpint(uint64(uintptr(unsafe.Pointer(gp._panic)))) + + // dump stack + var child childInfo + child.args.n = -1 + child.arglen = 0 + child.sp = nil + child.depth = 0 + var u unwinder + for u.initAt(pc, sp, lr, gp, 0); u.valid(); u.next() { + dumpframe(&u.frame, &child) + } + + // dump defer & panic records + for d := gp._defer; d != nil; d = d.link { + dumpint(tagDefer) + dumpint(uint64(uintptr(unsafe.Pointer(d)))) + dumpint(uint64(uintptr(unsafe.Pointer(gp)))) + dumpint(uint64(d.sp)) + dumpint(uint64(d.pc)) + fn := *(**funcval)(unsafe.Pointer(&d.fn)) + dumpint(uint64(uintptr(unsafe.Pointer(fn)))) + if d.fn == nil { + // d.fn can be nil for open-coded defers + dumpint(uint64(0)) + } else { + dumpint(uint64(uintptr(unsafe.Pointer(fn.fn)))) + } + dumpint(uint64(uintptr(unsafe.Pointer(d.link)))) + } + for p := gp._panic; p != nil; p = p.link { + dumpint(tagPanic) + dumpint(uint64(uintptr(unsafe.Pointer(p)))) + dumpint(uint64(uintptr(unsafe.Pointer(gp)))) + eface := efaceOf(&p.arg) + dumpint(uint64(uintptr(unsafe.Pointer(eface._type)))) + dumpint(uint64(uintptr(eface.data))) + dumpint(0) // was p->defer, no longer recorded + dumpint(uint64(uintptr(unsafe.Pointer(p.link)))) + } +} + +func dumpgs() { + assertWorldStopped() + + // goroutines & stacks + forEachG(func(gp *g) { + status := readgstatus(gp) // The world is stopped so gp will not be in a scan state. + switch status { + default: + print("runtime: unexpected G.status ", hex(status), "\n") + throw("dumpgs in STW - bad status") + case _Gdead: + // ok + case _Grunnable, + _Gsyscall, + _Gwaiting: + dumpgoroutine(gp) + } + }) +} + +func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) { + dumpint(tagQueuedFinalizer) + dumpint(uint64(uintptr(obj))) + dumpint(uint64(uintptr(unsafe.Pointer(fn)))) + dumpint(uint64(uintptr(unsafe.Pointer(fn.fn)))) + dumpint(uint64(uintptr(unsafe.Pointer(fint)))) + dumpint(uint64(uintptr(unsafe.Pointer(ot)))) +} + +func dumproots() { + // To protect mheap_.allspans. 
+ assertWorldStopped()
+
+ // TODO(mwhudson): dump datamask etc from all objects
+ // data segment
+ dumpint(tagData)
+ dumpint(uint64(firstmoduledata.data))
+ dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
+ dumpfields(firstmoduledata.gcdatamask)
+
+ // bss segment
+ dumpint(tagBSS)
+ dumpint(uint64(firstmoduledata.bss))
+ dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
+ dumpfields(firstmoduledata.gcbssmask)
+
+ // mspan.types
+ for _, s := range mheap_.allspans {
+ if s.state.get() == mSpanInUse {
+ // Finalizers
+ for sp := s.specials; sp != nil; sp = sp.next {
+ if sp.kind != _KindSpecialFinalizer {
+ continue
+ }
+ spf := (*specialfinalizer)(unsafe.Pointer(sp))
+ p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
+ dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
+ }
+ }
+ }
+
+ // Finalizer queue
+ iterate_finq(finq_callback)
+}
+
+// Bit vector of free marks.
+// Needs to be as big as the largest number of objects per span.
+var freemark [_PageSize / 8]bool
+
+func dumpobjs() {
+ // To protect mheap_.allspans.
+ assertWorldStopped()
+
+ for _, s := range mheap_.allspans {
+ if s.state.get() != mSpanInUse {
+ continue
+ }
+ p := s.base()
+ size := s.elemsize
+ n := (s.npages << _PageShift) / size
+ if n > uintptr(len(freemark)) {
+ throw("freemark array doesn't have enough entries")
+ }
+
+ for freeIndex := uint16(0); freeIndex < s.nelems; freeIndex++ {
+ if s.isFree(uintptr(freeIndex)) {
+ freemark[freeIndex] = true
+ }
+ }
+
+ for j := uintptr(0); j < n; j, p = j+1, p+size {
+ if freemark[j] {
+ freemark[j] = false
+ continue
+ }
+ dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
+ }
+ }
+}
+
+func dumpparams() {
+ dumpint(tagParams)
+ x := uintptr(1)
+ if *(*byte)(unsafe.Pointer(&x)) == 1 {
+ dumpbool(false) // little-endian ptrs
+ } else {
+ dumpbool(true) // big-endian ptrs
+ }
+ dumpint(goarch.PtrSize)
+ var arenaStart, arenaEnd uintptr
+ for i1 := range mheap_.arenas {
+ if mheap_.arenas[i1] == nil {
+ continue
+ }
+ for i, ha := range mheap_.arenas[i1] {
+ if ha == nil {
+ continue
+ }
+ base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
+ if arenaStart == 0 || base < arenaStart {
+ arenaStart = base
+ }
+ if base+heapArenaBytes > arenaEnd {
+ arenaEnd = base + heapArenaBytes
+ }
+ }
+ }
+ dumpint(uint64(arenaStart))
+ dumpint(uint64(arenaEnd))
+ dumpstr(goarch.GOARCH)
+ dumpstr(buildVersion)
+ dumpint(uint64(ncpu))
+}
+
+func itab_callback(tab *itab) {
+ t := tab._type
+ dumptype(t)
+ dumpint(tagItab)
+ dumpint(uint64(uintptr(unsafe.Pointer(tab))))
+ dumpint(uint64(uintptr(unsafe.Pointer(t))))
+}
+
+func dumpitabs() {
+ iterate_itabs(itab_callback)
+}
+
+func dumpms() {
+ for mp := allm; mp != nil; mp = mp.alllink {
+ dumpint(tagOSThread)
+ dumpint(uint64(uintptr(unsafe.Pointer(mp))))
+ dumpint(uint64(mp.id))
+ dumpint(mp.procid)
+ }
+}
+
+//go:systemstack
+func dumpmemstats(m *MemStats) {
+ assertWorldStopped()
+
+ // These ints should be identical to the exported
+ // MemStats structure and should be ordered the same
+ // way too.
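+ // That's 24 scalar fields, then the 256-entry PauseNs array, then NumGC.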
+ dumpint(tagMemStats) + dumpint(m.Alloc) + dumpint(m.TotalAlloc) + dumpint(m.Sys) + dumpint(m.Lookups) + dumpint(m.Mallocs) + dumpint(m.Frees) + dumpint(m.HeapAlloc) + dumpint(m.HeapSys) + dumpint(m.HeapIdle) + dumpint(m.HeapInuse) + dumpint(m.HeapReleased) + dumpint(m.HeapObjects) + dumpint(m.StackInuse) + dumpint(m.StackSys) + dumpint(m.MSpanInuse) + dumpint(m.MSpanSys) + dumpint(m.MCacheInuse) + dumpint(m.MCacheSys) + dumpint(m.BuckHashSys) + dumpint(m.GCSys) + dumpint(m.OtherSys) + dumpint(m.NextGC) + dumpint(m.LastGC) + dumpint(m.PauseTotalNs) + for i := 0; i < 256; i++ { + dumpint(m.PauseNs[i]) + } + dumpint(uint64(m.NumGC)) +} + +func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) { + stk := (*[100000]uintptr)(unsafe.Pointer(pstk)) + dumpint(tagMemProf) + dumpint(uint64(uintptr(unsafe.Pointer(b)))) + dumpint(uint64(size)) + dumpint(uint64(nstk)) + for i := uintptr(0); i < nstk; i++ { + pc := stk[i] + f := findfunc(pc) + if !f.valid() { + var buf [64]byte + n := len(buf) + n-- + buf[n] = ')' + if pc == 0 { + n-- + buf[n] = '0' + } else { + for pc > 0 { + n-- + buf[n] = "0123456789abcdef"[pc&15] + pc >>= 4 + } + } + n-- + buf[n] = 'x' + n-- + buf[n] = '0' + n-- + buf[n] = '(' + dumpslice(buf[n:]) + dumpstr("?") + dumpint(0) + } else { + dumpstr(funcname(f)) + if i > 0 && pc > f.entry() { + pc-- + } + file, line := funcline(f, pc) + dumpstr(file) + dumpint(uint64(line)) + } + } + dumpint(uint64(allocs)) + dumpint(uint64(frees)) +} + +func dumpmemprof() { + // To protect mheap_.allspans. + assertWorldStopped() + + iterate_memprof(dumpmemprof_callback) + for _, s := range mheap_.allspans { + if s.state.get() != mSpanInUse { + continue + } + for sp := s.specials; sp != nil; sp = sp.next { + if sp.kind != _KindSpecialProfile { + continue + } + spp := (*specialprofile)(unsafe.Pointer(sp)) + p := s.base() + uintptr(spp.special.offset) + dumpint(tagAllocSample) + dumpint(uint64(p)) + dumpint(uint64(uintptr(unsafe.Pointer(spp.b)))) + } + } +} + +var dumphdr = []byte("go1.7 heap dump\n") + +func mdump(m *MemStats) { + assertWorldStopped() + + // make sure we're done sweeping + for _, s := range mheap_.allspans { + if s.state.get() == mSpanInUse { + s.ensureSwept() + } + } + memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache)) + dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr))) + dumpparams() + dumpitabs() + dumpobjs() + dumpgs() + dumpms() + dumproots() + dumpmemstats(m) + dumpmemprof() + dumpint(tagEOF) + flush() +} + +func writeheapdump_m(fd uintptr, m *MemStats) { + assertWorldStopped() + + gp := getg() + casGToWaiting(gp.m.curg, _Grunning, waitReasonDumpingHeap) + + // Set dump file. + dumpfd = fd + + // Call dump routine. + mdump(m) + + // Reset dump file. + dumpfd = 0 + if tmpbuf != nil { + sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys) + tmpbuf = nil + } + + casgstatus(gp.m.curg, _Gwaiting, _Grunning) +} + +// dumpint() the kind & offset of each field in an object. +func dumpfields(bv bitvector) { + dumpbv(&bv, 0) + dumpint(fieldKindEol) +} + +func makeheapobjbv(p uintptr, size uintptr) bitvector { + // Extend the temp buffer if necessary. 
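+ // The bitmap needs one bit per pointer-sized word of the object,
+ // hence the nptr/8+1 bytes computed below.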
+ nptr := size / goarch.PtrSize + if uintptr(len(tmpbuf)) < nptr/8+1 { + if tmpbuf != nil { + sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys) + } + n := nptr/8 + 1 + p := sysAlloc(n, &memstats.other_sys) + if p == nil { + throw("heapdump: out of memory") + } + tmpbuf = (*[1 << 30]byte)(p)[:n] + } + // Convert heap bitmap to pointer bitmap. + for i := uintptr(0); i < nptr/8+1; i++ { + tmpbuf[i] = 0 + } + if goexperiment.AllocHeaders { + s := spanOf(p) + tp := s.typePointersOf(p, size) + for { + var addr uintptr + if tp, addr = tp.next(p + size); addr == 0 { + break + } + i := (addr - p) / goarch.PtrSize + tmpbuf[i/8] |= 1 << (i % 8) + } + } else { + hbits := heapBitsForAddr(p, size) + for { + var addr uintptr + hbits, addr = hbits.next() + if addr == 0 { + break + } + i := (addr - p) / goarch.PtrSize + tmpbuf[i/8] |= 1 << (i % 8) + } + } + return bitvector{int32(nptr), &tmpbuf[0]} +} diff --git a/platform/dbops/binaries/go/go/src/runtime/histogram.go b/platform/dbops/binaries/go/go/src/runtime/histogram.go new file mode 100644 index 0000000000000000000000000000000000000000..f243667b5542a3c16bf2cbd819fa64dd66574829 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/histogram.go @@ -0,0 +1,203 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "runtime/internal/atomic" + "runtime/internal/sys" + "unsafe" +) + +const ( + // For the time histogram type, we use an HDR histogram. + // Values are placed in buckets based solely on the most + // significant set bit. Thus, buckets are power-of-2 sized. + // Values are then placed into sub-buckets based on the value of + // the next timeHistSubBucketBits most significant bits. Thus, + // sub-buckets are linear within a bucket. + // + // Therefore, the number of sub-buckets (timeHistNumSubBuckets) + // defines the error. This error may be computed as + // 1/timeHistNumSubBuckets*100%. For example, for 16 sub-buckets + // per bucket the error is approximately 6%. + // + // The number of buckets (timeHistNumBuckets), on the + // other hand, defines the range. To avoid producing a large number + // of buckets that are close together, especially for small numbers + // (e.g. 1, 2, 3, 4, 5 ns) that aren't very useful, timeHistNumBuckets + // is defined in terms of the least significant bit (timeHistMinBucketBits) + // that needs to be set before we start bucketing and the most + // significant bit (timeHistMaxBucketBits) that we bucket before we just + // dump it into a catch-all bucket. + // + // As an example, consider the configuration: + // + // timeHistMinBucketBits = 9 + // timeHistMaxBucketBits = 48 + // timeHistSubBucketBits = 2 + // + // Then: + // + // 011000001 + // ^-- + // │ ^ + // │ └---- Next 2 bits -> sub-bucket 3 + // └------- Bit 9 unset -> bucket 0 + // + // 110000001 + // ^-- + // │ ^ + // │ └---- Next 2 bits -> sub-bucket 2 + // └------- Bit 9 set -> bucket 1 + // + // 1000000010 + // ^-- ^ + // │ ^ └-- Lower bits ignored + // │ └---- Next 2 bits -> sub-bucket 0 + // └------- Bit 10 set -> bucket 2 + // + // Following this pattern, bucket 38 will have the bit 46 set. We don't + // have any buckets for higher values, so we spill the rest into an overflow + // bucket containing values of 2^47-1 nanoseconds or approx. 1 day or more. + // This range is more than enough to handle durations produced by the runtime. 
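+ //
+ // For example, with the values below a 1000ns sample (10 significant
+ // bits) lands in bucket 10-9+1 = 2 and sub-bucket (1000>>7)%4 = 3.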
+ timeHistMinBucketBits = 9 + timeHistMaxBucketBits = 48 // Note that this is exclusive; 1 higher than the actual range. + timeHistSubBucketBits = 2 + timeHistNumSubBuckets = 1 << timeHistSubBucketBits + timeHistNumBuckets = timeHistMaxBucketBits - timeHistMinBucketBits + 1 + // Two extra buckets, one for underflow, one for overflow. + timeHistTotalBuckets = timeHistNumBuckets*timeHistNumSubBuckets + 2 +) + +// timeHistogram represents a distribution of durations in +// nanoseconds. +// +// The accuracy and range of the histogram is defined by the +// timeHistSubBucketBits and timeHistNumBuckets constants. +// +// It is an HDR histogram with exponentially-distributed +// buckets and linearly distributed sub-buckets. +// +// The histogram is safe for concurrent reads and writes. +type timeHistogram struct { + counts [timeHistNumBuckets * timeHistNumSubBuckets]atomic.Uint64 + + // underflow counts all the times we got a negative duration + // sample. Because of how time works on some platforms, it's + // possible to measure negative durations. We could ignore them, + // but we record them anyway because it's better to have some + // signal that it's happening than just missing samples. + underflow atomic.Uint64 + + // overflow counts all the times we got a duration that exceeded + // the range counts represents. + overflow atomic.Uint64 +} + +// record adds the given duration to the distribution. +// +// Disallow preemptions and stack growths because this function +// may run in sensitive locations. +// +//go:nosplit +func (h *timeHistogram) record(duration int64) { + // If the duration is negative, capture that in underflow. + if duration < 0 { + h.underflow.Add(1) + return + } + // bucketBit is the target bit for the bucket which is usually the + // highest 1 bit, but if we're less than the minimum, is the highest + // 1 bit of the minimum (which will be zero in the duration). + // + // bucket is the bucket index, which is the bucketBit minus the + // highest bit of the minimum, plus one to leave room for the catch-all + // bucket for samples lower than the minimum. + var bucketBit, bucket uint + if l := sys.Len64(uint64(duration)); l < timeHistMinBucketBits { + bucketBit = timeHistMinBucketBits + bucket = 0 // bucketBit - timeHistMinBucketBits + } else { + bucketBit = uint(l) + bucket = bucketBit - timeHistMinBucketBits + 1 + } + // If the bucket we computed is greater than the number of buckets, + // count that in overflow. + if bucket >= timeHistNumBuckets { + h.overflow.Add(1) + return + } + // The sub-bucket index is just next timeHistSubBucketBits after the bucketBit. + subBucket := uint(duration>>(bucketBit-1-timeHistSubBucketBits)) % timeHistNumSubBuckets + h.counts[bucket*timeHistNumSubBuckets+subBucket].Add(1) +} + +// write dumps the histogram to the passed metricValue as a float64 histogram. +func (h *timeHistogram) write(out *metricValue) { + hist := out.float64HistOrInit(timeHistBuckets) + // The bottom-most bucket, containing negative values, is tracked + // separately as underflow, so fill that in manually and then iterate + // over the rest. 
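+ // Layout: counts[0] is the underflow bucket, the middle entries are
+ // the regular buckets, and the final entry is the overflow bucket.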
+ hist.counts[0] = h.underflow.Load() + for i := range h.counts { + hist.counts[i+1] = h.counts[i].Load() + } + hist.counts[len(hist.counts)-1] = h.overflow.Load() +} + +const ( + fInf = 0x7FF0000000000000 + fNegInf = 0xFFF0000000000000 +) + +func float64Inf() float64 { + inf := uint64(fInf) + return *(*float64)(unsafe.Pointer(&inf)) +} + +func float64NegInf() float64 { + inf := uint64(fNegInf) + return *(*float64)(unsafe.Pointer(&inf)) +} + +// timeHistogramMetricsBuckets generates a slice of boundaries for +// the timeHistogram. These boundaries are represented in seconds, +// not nanoseconds like the timeHistogram represents durations. +func timeHistogramMetricsBuckets() []float64 { + b := make([]float64, timeHistTotalBuckets+1) + // Underflow bucket. + b[0] = float64NegInf() + + for j := 0; j < timeHistNumSubBuckets; j++ { + // No bucket bit for the first few buckets. Just sub-bucket bits after the + // min bucket bit. + bucketNanos := uint64(j) << (timeHistMinBucketBits - 1 - timeHistSubBucketBits) + // Convert nanoseconds to seconds via a division. + // These values will all be exactly representable by a float64. + b[j+1] = float64(bucketNanos) / 1e9 + } + // Generate the rest of the buckets. It's easier to reason + // about if we cut out the 0'th bucket. + for i := timeHistMinBucketBits; i < timeHistMaxBucketBits; i++ { + for j := 0; j < timeHistNumSubBuckets; j++ { + // Set the bucket bit. + bucketNanos := uint64(1) << (i - 1) + // Set the sub-bucket bits. + bucketNanos |= uint64(j) << (i - 1 - timeHistSubBucketBits) + // The index for this bucket is going to be the (i+1)'th bucket + // (note that we're starting from zero, but handled the first bucket + // earlier, so we need to compensate), and the j'th sub bucket. + // Add 1 because we left space for -Inf. + bucketIndex := (i-timeHistMinBucketBits+1)*timeHistNumSubBuckets + j + 1 + // Convert nanoseconds to seconds via a division. + // These values will all be exactly representable by a float64. + b[bucketIndex] = float64(bucketNanos) / 1e9 + } + } + // Overflow bucket. + b[len(b)-2] = float64(uint64(1)<<(timeHistMaxBucketBits-1)) / 1e9 + b[len(b)-1] = float64Inf() + return b +} diff --git a/platform/dbops/binaries/go/go/src/runtime/histogram_test.go b/platform/dbops/binaries/go/go/src/runtime/histogram_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5246e868104a727266cc4e0936b96d7d9621fe0e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/histogram_test.go @@ -0,0 +1,112 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "math" + . "runtime" + "testing" +) + +var dummyTimeHistogram TimeHistogram + +func TestTimeHistogram(t *testing.T) { + // We need to use a global dummy because this + // could get stack-allocated with a non-8-byte alignment. + // The result of this bad alignment is a segfault on + // 32-bit platforms when calling Record. + h := &dummyTimeHistogram + + // Record exactly one sample in each bucket. + for j := 0; j < TimeHistNumSubBuckets; j++ { + v := int64(j) << (TimeHistMinBucketBits - 1 - TimeHistSubBucketBits) + for k := 0; k < j; k++ { + // Record a number of times equal to the bucket index. 
+ h.Record(v) + } + } + for i := TimeHistMinBucketBits; i < TimeHistMaxBucketBits; i++ { + base := int64(1) << (i - 1) + for j := 0; j < TimeHistNumSubBuckets; j++ { + v := int64(j) << (i - 1 - TimeHistSubBucketBits) + for k := 0; k < (i+1-TimeHistMinBucketBits)*TimeHistNumSubBuckets+j; k++ { + // Record a number of times equal to the bucket index. + h.Record(base + v) + } + } + } + // Hit the underflow and overflow buckets. + h.Record(int64(-1)) + h.Record(math.MaxInt64) + h.Record(math.MaxInt64) + + // Check to make sure there's exactly one count in each + // bucket. + for i := 0; i < TimeHistNumBuckets; i++ { + for j := 0; j < TimeHistNumSubBuckets; j++ { + c, ok := h.Count(i, j) + if !ok { + t.Errorf("unexpected invalid bucket: (%d, %d)", i, j) + } else if idx := uint64(i*TimeHistNumSubBuckets + j); c != idx { + t.Errorf("bucket (%d, %d) has count that is not %d: %d", i, j, idx, c) + } + } + } + c, ok := h.Count(-1, 0) + if ok { + t.Errorf("expected to hit underflow bucket: (%d, %d)", -1, 0) + } + if c != 1 { + t.Errorf("overflow bucket has count that is not 1: %d", c) + } + + c, ok = h.Count(TimeHistNumBuckets+1, 0) + if ok { + t.Errorf("expected to hit overflow bucket: (%d, %d)", TimeHistNumBuckets+1, 0) + } + if c != 2 { + t.Errorf("overflow bucket has count that is not 2: %d", c) + } + + dummyTimeHistogram = TimeHistogram{} +} + +func TestTimeHistogramMetricsBuckets(t *testing.T) { + buckets := TimeHistogramMetricsBuckets() + + nonInfBucketsLen := TimeHistNumSubBuckets * TimeHistNumBuckets + expBucketsLen := nonInfBucketsLen + 3 // Count -Inf, the edge for the overflow bucket, and +Inf. + if len(buckets) != expBucketsLen { + t.Fatalf("unexpected length of buckets: got %d, want %d", len(buckets), expBucketsLen) + } + // Check some values. + idxToBucket := map[int]float64{ + 0: math.Inf(-1), + 1: 0.0, + 2: float64(0x040) / 1e9, + 3: float64(0x080) / 1e9, + 4: float64(0x0c0) / 1e9, + 5: float64(0x100) / 1e9, + 6: float64(0x140) / 1e9, + 7: float64(0x180) / 1e9, + 8: float64(0x1c0) / 1e9, + 9: float64(0x200) / 1e9, + 10: float64(0x280) / 1e9, + 11: float64(0x300) / 1e9, + 12: float64(0x380) / 1e9, + 13: float64(0x400) / 1e9, + 15: float64(0x600) / 1e9, + 81: float64(0x8000000) / 1e9, + 82: float64(0xa000000) / 1e9, + 108: float64(0x380000000) / 1e9, + expBucketsLen - 2: float64(0x1<<47) / 1e9, + expBucketsLen - 1: math.Inf(1), + } + for idx, bucket := range idxToBucket { + if got, want := buckets[idx], bucket; got != want { + t.Errorf("expected bucket %d to have value %e, got %e", idx, want, got) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/iface.go b/platform/dbops/binaries/go/go/src/runtime/iface.go new file mode 100644 index 0000000000000000000000000000000000000000..bad49a346e86d3af8327847a1460300fd40f23d0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/iface.go @@ -0,0 +1,686 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/abi" + "internal/goarch" + "runtime/internal/atomic" + "runtime/internal/sys" + "unsafe" +) + +const itabInitSize = 512 + +var ( + itabLock mutex // lock for accessing itab table + itabTable = &itabTableInit // pointer to current table + itabTableInit = itabTableType{size: itabInitSize} // starter table +) + +// Note: change the formula in the mallocgc call in itabAdd if you change these fields. +type itabTableType struct { + size uintptr // length of entries array. 
Always a power of 2. + count uintptr // current number of filled entries. + entries [itabInitSize]*itab // really [size] large +} + +func itabHashFunc(inter *interfacetype, typ *_type) uintptr { + // compiler has provided some good hash codes for us. + return uintptr(inter.Type.Hash ^ typ.Hash) +} + +func getitab(inter *interfacetype, typ *_type, canfail bool) *itab { + if len(inter.Methods) == 0 { + throw("internal error - misuse of itab") + } + + // easy case + if typ.TFlag&abi.TFlagUncommon == 0 { + if canfail { + return nil + } + name := toRType(&inter.Type).nameOff(inter.Methods[0].Name) + panic(&TypeAssertionError{nil, typ, &inter.Type, name.Name()}) + } + + var m *itab + + // First, look in the existing table to see if we can find the itab we need. + // This is by far the most common case, so do it without locks. + // Use atomic to ensure we see any previous writes done by the thread + // that updates the itabTable field (with atomic.Storep in itabAdd). + t := (*itabTableType)(atomic.Loadp(unsafe.Pointer(&itabTable))) + if m = t.find(inter, typ); m != nil { + goto finish + } + + // Not found. Grab the lock and try again. + lock(&itabLock) + if m = itabTable.find(inter, typ); m != nil { + unlock(&itabLock) + goto finish + } + + // Entry doesn't exist yet. Make a new entry & add it. + m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys)) + m.inter = inter + m._type = typ + // The hash is used in type switches. However, compiler statically generates itab's + // for all interface/type pairs used in switches (which are added to itabTable + // in itabsinit). The dynamically-generated itab's never participate in type switches, + // and thus the hash is irrelevant. + // Note: m.hash is _not_ the hash used for the runtime itabTable hash table. + m.hash = 0 + m.init() + itabAdd(m) + unlock(&itabLock) +finish: + if m.fun[0] != 0 { + return m + } + if canfail { + return nil + } + // this can only happen if the conversion + // was already done once using the , ok form + // and we have a cached negative result. + // The cached result doesn't record which + // interface function was missing, so initialize + // the itab again to get the missing function name. + panic(&TypeAssertionError{concrete: typ, asserted: &inter.Type, missingMethod: m.init()}) +} + +// find finds the given interface/type pair in t. +// Returns nil if the given interface/type pair isn't present. +func (t *itabTableType) find(inter *interfacetype, typ *_type) *itab { + // Implemented using quadratic probing. + // Probe sequence is h(i) = h0 + i*(i+1)/2 mod 2^k. + // We're guaranteed to hit all table entries using this probe sequence. + mask := t.size - 1 + h := itabHashFunc(inter, typ) & mask + for i := uintptr(1); ; i++ { + p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize)) + // Use atomic read here so if we see m != nil, we also see + // the initializations of the fields of m. + // m := *p + m := (*itab)(atomic.Loadp(unsafe.Pointer(p))) + if m == nil { + return nil + } + if m.inter == inter && m._type == typ { + return m + } + h += i + h &= mask + } +} + +// itabAdd adds the given itab to the itab hash table. +// itabLock must be held. +func itabAdd(m *itab) { + // Bugs can lead to calling this while mallocing is set, + // typically because this is called while panicking. + // Crash reliably, rather than only when we need to grow + // the hash table. 
+ if getg().m.mallocing != 0 { + throw("malloc deadlock") + } + + t := itabTable + if t.count >= 3*(t.size/4) { // 75% load factor + // Grow hash table. + // t2 = new(itabTableType) + some additional entries + // We lie and tell malloc we want pointer-free memory because + // all the pointed-to values are not in the heap. + t2 := (*itabTableType)(mallocgc((2+2*t.size)*goarch.PtrSize, nil, true)) + t2.size = t.size * 2 + + // Copy over entries. + // Note: while copying, other threads may look for an itab and + // fail to find it. That's ok, they will then try to get the itab lock + // and as a consequence wait until this copying is complete. + iterate_itabs(t2.add) + if t2.count != t.count { + throw("mismatched count during itab table copy") + } + // Publish new hash table. Use an atomic write: see comment in getitab. + atomicstorep(unsafe.Pointer(&itabTable), unsafe.Pointer(t2)) + // Adopt the new table as our own. + t = itabTable + // Note: the old table can be GC'ed here. + } + t.add(m) +} + +// add adds the given itab to itab table t. +// itabLock must be held. +func (t *itabTableType) add(m *itab) { + // See comment in find about the probe sequence. + // Insert new itab in the first empty spot in the probe sequence. + mask := t.size - 1 + h := itabHashFunc(m.inter, m._type) & mask + for i := uintptr(1); ; i++ { + p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize)) + m2 := *p + if m2 == m { + // A given itab may be used in more than one module + // and thanks to the way global symbol resolution works, the + // pointed-to itab may already have been inserted into the + // global 'hash'. + return + } + if m2 == nil { + // Use atomic write here so if a reader sees m, it also + // sees the correctly initialized fields of m. + // NoWB is ok because m is not in heap memory. + // *p = m + atomic.StorepNoWB(unsafe.Pointer(p), unsafe.Pointer(m)) + t.count++ + return + } + h += i + h &= mask + } +} + +// init fills in the m.fun array with all the code pointers for +// the m.inter/m._type pair. If the type does not implement the interface, +// it sets m.fun[0] to 0 and returns the name of an interface function that is missing. +// It is ok to call this multiple times on the same m, even concurrently. +func (m *itab) init() string { + inter := m.inter + typ := m._type + x := typ.Uncommon() + + // both inter and typ have method sorted by name, + // and interface names are unique, + // so can iterate over both in lock step; + // the loop is O(ni+nt) not O(ni*nt). 
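+ // (j below only ever advances, so each concrete method is visited once.)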
+ ni := len(inter.Methods) + nt := int(x.Mcount) + xmhdr := (*[1 << 16]abi.Method)(add(unsafe.Pointer(x), uintptr(x.Moff)))[:nt:nt] + j := 0 + methods := (*[1 << 16]unsafe.Pointer)(unsafe.Pointer(&m.fun[0]))[:ni:ni] + var fun0 unsafe.Pointer +imethods: + for k := 0; k < ni; k++ { + i := &inter.Methods[k] + itype := toRType(&inter.Type).typeOff(i.Typ) + name := toRType(&inter.Type).nameOff(i.Name) + iname := name.Name() + ipkg := pkgPath(name) + if ipkg == "" { + ipkg = inter.PkgPath.Name() + } + for ; j < nt; j++ { + t := &xmhdr[j] + rtyp := toRType(typ) + tname := rtyp.nameOff(t.Name) + if rtyp.typeOff(t.Mtyp) == itype && tname.Name() == iname { + pkgPath := pkgPath(tname) + if pkgPath == "" { + pkgPath = rtyp.nameOff(x.PkgPath).Name() + } + if tname.IsExported() || pkgPath == ipkg { + ifn := rtyp.textOff(t.Ifn) + if k == 0 { + fun0 = ifn // we'll set m.fun[0] at the end + } else { + methods[k] = ifn + } + continue imethods + } + } + } + // didn't find method + m.fun[0] = 0 + return iname + } + m.fun[0] = uintptr(fun0) + return "" +} + +func itabsinit() { + lockInit(&itabLock, lockRankItab) + lock(&itabLock) + for _, md := range activeModules() { + for _, i := range md.itablinks { + itabAdd(i) + } + } + unlock(&itabLock) +} + +// panicdottypeE is called when doing an e.(T) conversion and the conversion fails. +// have = the dynamic type we have. +// want = the static type we're trying to convert to. +// iface = the static type we're converting from. +func panicdottypeE(have, want, iface *_type) { + panic(&TypeAssertionError{iface, have, want, ""}) +} + +// panicdottypeI is called when doing an i.(T) conversion and the conversion fails. +// Same args as panicdottypeE, but "have" is the dynamic itab we have. +func panicdottypeI(have *itab, want, iface *_type) { + var t *_type + if have != nil { + t = have._type + } + panicdottypeE(t, want, iface) +} + +// panicnildottype is called when doing an i.(T) conversion and the interface i is nil. +// want = the static type we're trying to convert to. +func panicnildottype(want *_type) { + panic(&TypeAssertionError{nil, nil, want, ""}) + // TODO: Add the static type we're converting from as well. + // It might generate a better error message. + // Just to match other nil conversion errors, we don't for now. +} + +// The specialized convTx routines need a type descriptor to use when calling mallocgc. +// We don't need the type to be exact, just to have the correct size, alignment, and pointer-ness. +// However, when debugging, it'd be nice to have some indication in mallocgc where the types came from, +// so we use named types here. +// We then construct interface values of these types, +// and then extract the type word to use as needed. +type ( + uint16InterfacePtr uint16 + uint32InterfacePtr uint32 + uint64InterfacePtr uint64 + stringInterfacePtr string + sliceInterfacePtr []byte +) + +var ( + uint16Eface any = uint16InterfacePtr(0) + uint32Eface any = uint32InterfacePtr(0) + uint64Eface any = uint64InterfacePtr(0) + stringEface any = stringInterfacePtr("") + sliceEface any = sliceInterfacePtr(nil) + + uint16Type *_type = efaceOf(&uint16Eface)._type + uint32Type *_type = efaceOf(&uint32Eface)._type + uint64Type *_type = efaceOf(&uint64Eface)._type + stringType *_type = efaceOf(&stringEface)._type + sliceType *_type = efaceOf(&sliceEface)._type +) + +// The conv and assert functions below do very similar things. +// The convXXX functions are guaranteed by the compiler to succeed. 
+// The assertXXX functions may fail (either panicking or returning false, +// depending on whether they are 1-result or 2-result). +// The convXXX functions succeed on a nil input, whereas the assertXXX +// functions fail on a nil input. + +// convT converts a value of type t, which is pointed to by v, to a pointer that can +// be used as the second word of an interface value. +func convT(t *_type, v unsafe.Pointer) unsafe.Pointer { + if raceenabled { + raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convT)) + } + if msanenabled { + msanread(v, t.Size_) + } + if asanenabled { + asanread(v, t.Size_) + } + x := mallocgc(t.Size_, t, true) + typedmemmove(t, x, v) + return x +} +func convTnoptr(t *_type, v unsafe.Pointer) unsafe.Pointer { + // TODO: maybe take size instead of type? + if raceenabled { + raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convTnoptr)) + } + if msanenabled { + msanread(v, t.Size_) + } + if asanenabled { + asanread(v, t.Size_) + } + + x := mallocgc(t.Size_, t, false) + memmove(x, v, t.Size_) + return x +} + +func convT16(val uint16) (x unsafe.Pointer) { + if val < uint16(len(staticuint64s)) { + x = unsafe.Pointer(&staticuint64s[val]) + if goarch.BigEndian { + x = add(x, 6) + } + } else { + x = mallocgc(2, uint16Type, false) + *(*uint16)(x) = val + } + return +} + +func convT32(val uint32) (x unsafe.Pointer) { + if val < uint32(len(staticuint64s)) { + x = unsafe.Pointer(&staticuint64s[val]) + if goarch.BigEndian { + x = add(x, 4) + } + } else { + x = mallocgc(4, uint32Type, false) + *(*uint32)(x) = val + } + return +} + +func convT64(val uint64) (x unsafe.Pointer) { + if val < uint64(len(staticuint64s)) { + x = unsafe.Pointer(&staticuint64s[val]) + } else { + x = mallocgc(8, uint64Type, false) + *(*uint64)(x) = val + } + return +} + +func convTstring(val string) (x unsafe.Pointer) { + if val == "" { + x = unsafe.Pointer(&zeroVal[0]) + } else { + x = mallocgc(unsafe.Sizeof(val), stringType, true) + *(*string)(x) = val + } + return +} + +func convTslice(val []byte) (x unsafe.Pointer) { + // Note: this must work for any element type, not just byte. + if (*slice)(unsafe.Pointer(&val)).array == nil { + x = unsafe.Pointer(&zeroVal[0]) + } else { + x = mallocgc(unsafe.Sizeof(val), sliceType, true) + *(*[]byte)(x) = val + } + return +} + +func assertE2I(inter *interfacetype, t *_type) *itab { + if t == nil { + // explicit conversions require non-nil interface value. + panic(&TypeAssertionError{nil, nil, &inter.Type, ""}) + } + return getitab(inter, t, false) +} + +func assertE2I2(inter *interfacetype, t *_type) *itab { + if t == nil { + return nil + } + return getitab(inter, t, true) +} + +// typeAssert builds an itab for the concrete type t and the +// interface type s.Inter. If the conversion is not possible it +// panics if s.CanFail is false and returns nil if s.CanFail is true. +func typeAssert(s *abi.TypeAssert, t *_type) *itab { + var tab *itab + if t == nil { + if !s.CanFail { + panic(&TypeAssertionError{nil, nil, &s.Inter.Type, ""}) + } + } else { + tab = getitab(s.Inter, t, s.CanFail) + } + + if !abi.UseInterfaceSwitchCache(GOARCH) { + return tab + } + + // Maybe update the cache, so the next time the generated code + // doesn't need to call into the runtime. + if cheaprand()&1023 != 0 { + // Only bother updating the cache ~1 in 1000 times. + return tab + } + // Load the current cache. 
oldC := (*abi.TypeAssertCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))
+
+	if cheaprand()&uint32(oldC.Mask) != 0 {
+		// As cache gets larger, choose to update it less often
+		// so we can amortize the cost of building a new cache.
+		return tab
+	}
+
+	// Make a new cache.
+	newC := buildTypeAssertCache(oldC, t, tab)
+
+	// Update cache. Use compare-and-swap so if multiple threads
+	// are fighting to update the cache, at least one of their
+	// updates will stick.
+	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))
+
+	return tab
+}
+
+func buildTypeAssertCache(oldC *abi.TypeAssertCache, typ *_type, tab *itab) *abi.TypeAssertCache {
+	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)
+
+	// Count the number of entries we need.
+	n := 1
+	for _, e := range oldEntries {
+		if e.Typ != 0 {
+			n++
+		}
+	}
+
+	// Figure out how big a table we need.
+	// We need at least one more slot than the number of entries
+	// so that we are guaranteed an empty slot (for termination).
+	newN := n * 2                         // make it at most 50% full
+	newN = 1 << sys.Len64(uint64(newN-1)) // round up to a power of 2
+
+	// Allocate the new table.
+	newSize := unsafe.Sizeof(abi.TypeAssertCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.TypeAssertCacheEntry{})
+	newC := (*abi.TypeAssertCache)(mallocgc(newSize, nil, true))
+	newC.Mask = uintptr(newN - 1)
+	newEntries := unsafe.Slice(&newC.Entries[0], newN)
+
+	// Fill the new table.
+	addEntry := func(typ *_type, tab *itab) {
+		h := int(typ.Hash) & (newN - 1)
+		for {
+			if newEntries[h].Typ == 0 {
+				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
+				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
+				return
+			}
+			h = (h + 1) & (newN - 1)
+		}
+	}
+	for _, e := range oldEntries {
+		if e.Typ != 0 {
+			addEntry((*_type)(unsafe.Pointer(e.Typ)), (*itab)(unsafe.Pointer(e.Itab)))
+		}
+	}
+	addEntry(typ, tab)
+
+	return newC
+}
+
+// Empty type assert cache. Contains one entry with a nil Typ (which
+// causes a cache lookup to fail immediately.)
+var emptyTypeAssertCache = abi.TypeAssertCache{Mask: 0}
+
+// interfaceSwitch compares t against the list of cases in s.
+// If t matches case i, interfaceSwitch returns the case index i and
+// an itab for the pair <t, s.Cases[i]>.
+// If there is no match, it returns N, nil, where N is the number
+// of cases.
+func interfaceSwitch(s *abi.InterfaceSwitch, t *_type) (int, *itab) {
+	cases := unsafe.Slice(&s.Cases[0], s.NCases)
+
+	// Results if we don't find a match.
+	case_ := len(cases)
+	var tab *itab
+
+	// Look through each case in order.
+	for i, c := range cases {
+		tab = getitab(c, t, true)
+		if tab != nil {
+			case_ = i
+			break
+		}
+	}
+
+	if !abi.UseInterfaceSwitchCache(GOARCH) {
+		return case_, tab
+	}
+
+	// Maybe update the cache, so the next time the generated code
+	// doesn't need to call into the runtime.
+	if cheaprand()&1023 != 0 {
+		// Only bother updating the cache ~1 in 1000 times.
+		// This ensures we don't waste memory on switches, or
+		// switch arguments, that only happen a few times.
+		return case_, tab
+	}
+	// Load the current cache.
+	oldC := (*abi.InterfaceSwitchCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))
+
+	if cheaprand()&uint32(oldC.Mask) != 0 {
+		// As cache gets larger, choose to update it less often
+		// so we can amortize the cost of building a new cache
+		// (that cost is linear in oldC.Mask).
+		return case_, tab
+	}
+
+	// Make a new cache.
+	newC := buildInterfaceSwitchCache(oldC, t, case_, tab)
+
+	// Update cache. 
Use compare-and-swap so if multiple threads + // are fighting to update the cache, at least one of their + // updates will stick. + atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC)) + + return case_, tab +} + +// buildInterfaceSwitchCache constructs an interface switch cache +// containing all the entries from oldC plus the new entry +// (typ,case_,tab). +func buildInterfaceSwitchCache(oldC *abi.InterfaceSwitchCache, typ *_type, case_ int, tab *itab) *abi.InterfaceSwitchCache { + oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1) + + // Count the number of entries we need. + n := 1 + for _, e := range oldEntries { + if e.Typ != 0 { + n++ + } + } + + // Figure out how big a table we need. + // We need at least one more slot than the number of entries + // so that we are guaranteed an empty slot (for termination). + newN := n * 2 // make it at most 50% full + newN = 1 << sys.Len64(uint64(newN-1)) // round up to a power of 2 + + // Allocate the new table. + newSize := unsafe.Sizeof(abi.InterfaceSwitchCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.InterfaceSwitchCacheEntry{}) + newC := (*abi.InterfaceSwitchCache)(mallocgc(newSize, nil, true)) + newC.Mask = uintptr(newN - 1) + newEntries := unsafe.Slice(&newC.Entries[0], newN) + + // Fill the new table. + addEntry := func(typ *_type, case_ int, tab *itab) { + h := int(typ.Hash) & (newN - 1) + for { + if newEntries[h].Typ == 0 { + newEntries[h].Typ = uintptr(unsafe.Pointer(typ)) + newEntries[h].Case = case_ + newEntries[h].Itab = uintptr(unsafe.Pointer(tab)) + return + } + h = (h + 1) & (newN - 1) + } + } + for _, e := range oldEntries { + if e.Typ != 0 { + addEntry((*_type)(unsafe.Pointer(e.Typ)), e.Case, (*itab)(unsafe.Pointer(e.Itab))) + } + } + addEntry(typ, case_, tab) + + return newC +} + +// Empty interface switch cache. Contains one entry with a nil Typ (which +// causes a cache lookup to fail immediately.) +var emptyInterfaceSwitchCache = abi.InterfaceSwitchCache{Mask: 0} + +//go:linkname reflect_ifaceE2I reflect.ifaceE2I +func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface) { + *dst = iface{assertE2I(inter, e._type), e.data} +} + +//go:linkname reflectlite_ifaceE2I internal/reflectlite.ifaceE2I +func reflectlite_ifaceE2I(inter *interfacetype, e eface, dst *iface) { + *dst = iface{assertE2I(inter, e._type), e.data} +} + +func iterate_itabs(fn func(*itab)) { + // Note: only runs during stop the world or with itabLock held, + // so no other locks/atomics needed. + t := itabTable + for i := uintptr(0); i < t.size; i++ { + m := *(**itab)(add(unsafe.Pointer(&t.entries), i*goarch.PtrSize)) + if m != nil { + fn(m) + } + } +} + +// staticuint64s is used to avoid allocating in convTx for small integer values. 
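+// On big-endian machines the low-order bytes of each uint64 live at the
+// high addresses, which is why convT16 and convT32 above offset the
+// pointer by 6 and 4 bytes respectively (editor's illustration, not
+// original commentary):
+//
+//	staticuint64s[0x12] on a big-endian machine:
+//
+//	byte offset:  0  1  2  3  4  5  6  7
+//	value:       00 00 00 00 00 00 00 12
+//
+//	add(x, 6) points at offset 6, the two bytes holding the uint16 0x12.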
+var staticuint64s = [...]uint64{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, + 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, +} + +// The linker redirects a reference of a method that it determined +// unreachable to a reference to this function, so it will throw if +// ever called. +func unreachableMethod() { + throw("unreachable method called. linker bug?") +} diff --git a/platform/dbops/binaries/go/go/src/runtime/iface_test.go b/platform/dbops/binaries/go/go/src/runtime/iface_test.go new file mode 100644 index 0000000000000000000000000000000000000000..06f6eeb95246379a336e39dd2e16856dfff16cf2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/iface_test.go @@ -0,0 +1,439 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
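+
+// Editor's note (illustrative, not part of the original test file): the
+// smallint/largeint benchmark pairs below straddle the staticuint64s
+// optimization above: converting one16 (1 < 256) takes a pointer into the
+// static table and does not allocate, while thousand16 (1000 >= 256) must
+// mallocgc a fresh 2-byte object on every conversion.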
+ +package runtime_test + +import ( + "runtime" + "testing" +) + +type I1 interface { + Method1() +} + +type I2 interface { + Method1() + Method2() +} + +type TS uint16 +type TM uintptr +type TL [2]uintptr + +func (TS) Method1() {} +func (TS) Method2() {} +func (TM) Method1() {} +func (TM) Method2() {} +func (TL) Method1() {} +func (TL) Method2() {} + +type T8 uint8 +type T16 uint16 +type T32 uint32 +type T64 uint64 +type Tstr string +type Tslice []byte + +func (T8) Method1() {} +func (T16) Method1() {} +func (T32) Method1() {} +func (T64) Method1() {} +func (Tstr) Method1() {} +func (Tslice) Method1() {} + +var ( + e any + e_ any + i1 I1 + i2 I2 + ts TS + tm TM + tl TL + ok bool +) + +// Issue 9370 +func TestCmpIfaceConcreteAlloc(t *testing.T) { + if runtime.Compiler != "gc" { + t.Skip("skipping on non-gc compiler") + } + + n := testing.AllocsPerRun(1, func() { + _ = e == ts + _ = i1 == ts + _ = e == 1 + }) + + if n > 0 { + t.Fatalf("iface cmp allocs=%v; want 0", n) + } +} + +func BenchmarkEqEfaceConcrete(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = e == ts + } +} + +func BenchmarkEqIfaceConcrete(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = i1 == ts + } +} + +func BenchmarkNeEfaceConcrete(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = e != ts + } +} + +func BenchmarkNeIfaceConcrete(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = i1 != ts + } +} + +func BenchmarkConvT2EByteSized(b *testing.B) { + b.Run("bool", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = yes + } + }) + b.Run("uint8", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = eight8 + } + }) +} + +func BenchmarkConvT2ESmall(b *testing.B) { + for i := 0; i < b.N; i++ { + e = ts + } +} + +func BenchmarkConvT2EUintptr(b *testing.B) { + for i := 0; i < b.N; i++ { + e = tm + } +} + +func BenchmarkConvT2ELarge(b *testing.B) { + for i := 0; i < b.N; i++ { + e = tl + } +} + +func BenchmarkConvT2ISmall(b *testing.B) { + for i := 0; i < b.N; i++ { + i1 = ts + } +} + +func BenchmarkConvT2IUintptr(b *testing.B) { + for i := 0; i < b.N; i++ { + i1 = tm + } +} + +func BenchmarkConvT2ILarge(b *testing.B) { + for i := 0; i < b.N; i++ { + i1 = tl + } +} + +func BenchmarkConvI2E(b *testing.B) { + i2 = tm + for i := 0; i < b.N; i++ { + e = i2 + } +} + +func BenchmarkConvI2I(b *testing.B) { + i2 = tm + for i := 0; i < b.N; i++ { + i1 = i2 + } +} + +func BenchmarkAssertE2T(b *testing.B) { + e = tm + for i := 0; i < b.N; i++ { + tm = e.(TM) + } +} + +func BenchmarkAssertE2TLarge(b *testing.B) { + e = tl + for i := 0; i < b.N; i++ { + tl = e.(TL) + } +} + +func BenchmarkAssertE2I(b *testing.B) { + e = tm + for i := 0; i < b.N; i++ { + i1 = e.(I1) + } +} + +func BenchmarkAssertI2T(b *testing.B) { + i1 = tm + for i := 0; i < b.N; i++ { + tm = i1.(TM) + } +} + +func BenchmarkAssertI2I(b *testing.B) { + i1 = tm + for i := 0; i < b.N; i++ { + i2 = i1.(I2) + } +} + +func BenchmarkAssertI2E(b *testing.B) { + i1 = tm + for i := 0; i < b.N; i++ { + e = i1.(any) + } +} + +func BenchmarkAssertE2E(b *testing.B) { + e = tm + for i := 0; i < b.N; i++ { + e_ = e + } +} + +func BenchmarkAssertE2T2(b *testing.B) { + e = tm + for i := 0; i < b.N; i++ { + tm, ok = e.(TM) + } +} + +func BenchmarkAssertE2T2Blank(b *testing.B) { + e = tm + for i := 0; i < b.N; i++ { + _, ok = e.(TM) + } +} + +func BenchmarkAssertI2E2(b *testing.B) { + i1 = tm + for i := 0; i < b.N; i++ { + e, ok = i1.(any) + } +} + +func BenchmarkAssertI2E2Blank(b *testing.B) { + i1 = tm + for i := 0; i < b.N; i++ { + _, ok = i1.(any) + } +} + +func BenchmarkAssertE2E2(b 
*testing.B) { + e = tm + for i := 0; i < b.N; i++ { + e_, ok = e.(any) + } +} + +func BenchmarkAssertE2E2Blank(b *testing.B) { + e = tm + for i := 0; i < b.N; i++ { + _, ok = e.(any) + } +} + +func TestNonEscapingConvT2E(t *testing.T) { + m := make(map[any]bool) + m[42] = true + if !m[42] { + t.Fatalf("42 is not present in the map") + } + if m[0] { + t.Fatalf("0 is present in the map") + } + + n := testing.AllocsPerRun(1000, func() { + if m[0] { + t.Fatalf("0 is present in the map") + } + }) + if n != 0 { + t.Fatalf("want 0 allocs, got %v", n) + } +} + +func TestNonEscapingConvT2I(t *testing.T) { + m := make(map[I1]bool) + m[TM(42)] = true + if !m[TM(42)] { + t.Fatalf("42 is not present in the map") + } + if m[TM(0)] { + t.Fatalf("0 is present in the map") + } + + n := testing.AllocsPerRun(1000, func() { + if m[TM(0)] { + t.Fatalf("0 is present in the map") + } + }) + if n != 0 { + t.Fatalf("want 0 allocs, got %v", n) + } +} + +func TestZeroConvT2x(t *testing.T) { + tests := []struct { + name string + fn func() + }{ + {name: "E8", fn: func() { e = eight8 }}, // any byte-sized value does not allocate + {name: "E16", fn: func() { e = zero16 }}, // zero values do not allocate + {name: "E32", fn: func() { e = zero32 }}, + {name: "E64", fn: func() { e = zero64 }}, + {name: "Estr", fn: func() { e = zerostr }}, + {name: "Eslice", fn: func() { e = zeroslice }}, + {name: "Econstflt", fn: func() { e = 99.0 }}, // constants do not allocate + {name: "Econststr", fn: func() { e = "change" }}, + {name: "I8", fn: func() { i1 = eight8I }}, + {name: "I16", fn: func() { i1 = zero16I }}, + {name: "I32", fn: func() { i1 = zero32I }}, + {name: "I64", fn: func() { i1 = zero64I }}, + {name: "Istr", fn: func() { i1 = zerostrI }}, + {name: "Islice", fn: func() { i1 = zerosliceI }}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + n := testing.AllocsPerRun(1000, test.fn) + if n != 0 { + t.Errorf("want zero allocs, got %v", n) + } + }) + } +} + +var ( + eight8 uint8 = 8 + eight8I T8 = 8 + yes bool = true + + zero16 uint16 = 0 + zero16I T16 = 0 + one16 uint16 = 1 + thousand16 uint16 = 1000 + + zero32 uint32 = 0 + zero32I T32 = 0 + one32 uint32 = 1 + thousand32 uint32 = 1000 + + zero64 uint64 = 0 + zero64I T64 = 0 + one64 uint64 = 1 + thousand64 uint64 = 1000 + + zerostr string = "" + zerostrI Tstr = "" + nzstr string = "abc" + + zeroslice []byte = nil + zerosliceI Tslice = nil + nzslice []byte = []byte("abc") + + zerobig [512]byte + nzbig [512]byte = [512]byte{511: 1} +) + +func BenchmarkConvT2Ezero(b *testing.B) { + b.Run("zero", func(b *testing.B) { + b.Run("16", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = zero16 + } + }) + b.Run("32", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = zero32 + } + }) + b.Run("64", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = zero64 + } + }) + b.Run("str", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = zerostr + } + }) + b.Run("slice", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = zeroslice + } + }) + b.Run("big", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = zerobig + } + }) + }) + b.Run("nonzero", func(b *testing.B) { + b.Run("str", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = nzstr + } + }) + b.Run("slice", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = nzslice + } + }) + b.Run("big", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = nzbig + } + }) + }) + b.Run("smallint", func(b *testing.B) { + b.Run("16", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = one16 
+ } + }) + b.Run("32", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = one32 + } + }) + b.Run("64", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = one64 + } + }) + }) + b.Run("largeint", func(b *testing.B) { + b.Run("16", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = thousand16 + } + }) + b.Run("32", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = thousand32 + } + }) + b.Run("64", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = thousand64 + } + }) + }) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/import_test.go b/platform/dbops/binaries/go/go/src/runtime/import_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2bf80aaf4937046972eb71de5abc8909d965b85f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/import_test.go @@ -0,0 +1,45 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file and importx_test.go make it possible to write tests in the runtime +// package, which is generally more convenient for testing runtime internals. +// For tests that mostly touch public APIs, it's generally easier to write them +// in the runtime_test package and export any runtime internals via +// export_test.go. +// +// There are a few limitations on runtime package tests that this bridges: +// +// 1. Tests use the signature "XTest(t TestingT)". Since runtime can't import +// testing, test functions can't use testing.T, so instead we have the T +// interface, which *testing.T satisfies. And we start names with "XTest" +// because otherwise go test will complain about Test functions with the wrong +// signature. To actually expose these as test functions, this file contains +// trivial wrappers. +// +// 2. Runtime package tests can't directly import other std packages, so we +// inject any necessary functions from std. + +// TODO: Generate this + +package runtime_test + +import ( + "fmt" + "internal/testenv" + "runtime" + "testing" +) + +func init() { + runtime.FmtSprintf = fmt.Sprintf + runtime.TestenvOptimizationOff = testenv.OptimizationOff +} + +func TestInlineUnwinder(t *testing.T) { + runtime.XTestInlineUnwinder(t) +} + +func TestSPWrite(t *testing.T) { + runtime.XTestSPWrite(t) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/importx_test.go b/platform/dbops/binaries/go/go/src/runtime/importx_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4574af73e46ba29fc27b6d69bdc796b71a79abf9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/importx_test.go @@ -0,0 +1,33 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// See import_test.go. This is the half that lives in the runtime package. 
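+//
+// The pattern, sketched (editor's addition; Foo is a placeholder name):
+// a test of runtime internals is written in this package as
+//
+//	func XTestFoo(t TestingT) { ... }
+//
+// and import_test.go exposes it to go test with a trivial wrapper:
+//
+//	func TestFoo(t *testing.T) { runtime.XTestFoo(t) }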
+ +// TODO: Generate this + +package runtime + +type TestingT interface { + Cleanup(func()) + Error(args ...any) + Errorf(format string, args ...any) + Fail() + FailNow() + Failed() bool + Fatal(args ...any) + Fatalf(format string, args ...any) + Helper() + Log(args ...any) + Logf(format string, args ...any) + Name() string + Setenv(key, value string) + Skip(args ...any) + SkipNow() + Skipf(format string, args ...any) + Skipped() bool + TempDir() string +} + +var FmtSprintf func(format string, a ...any) string +var TestenvOptimizationOff func() bool diff --git a/platform/dbops/binaries/go/go/src/runtime/lfstack.go b/platform/dbops/binaries/go/go/src/runtime/lfstack.go new file mode 100644 index 0000000000000000000000000000000000000000..a91ae64e5383ad8bfdd5c0c32099d8c11e4c591a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/lfstack.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Lock-free stack. + +package runtime + +import ( + "runtime/internal/atomic" + "unsafe" +) + +// lfstack is the head of a lock-free stack. +// +// The zero value of lfstack is an empty list. +// +// This stack is intrusive. Nodes must embed lfnode as the first field. +// +// The stack does not keep GC-visible pointers to nodes, so the caller +// must ensure the nodes are allocated outside the Go heap. +type lfstack uint64 + +func (head *lfstack) push(node *lfnode) { + node.pushcnt++ + new := lfstackPack(node, node.pushcnt) + if node1 := lfstackUnpack(new); node1 != node { + print("runtime: lfstack.push invalid packing: node=", node, " cnt=", hex(node.pushcnt), " packed=", hex(new), " -> node=", node1, "\n") + throw("lfstack.push") + } + for { + old := atomic.Load64((*uint64)(head)) + node.next = old + if atomic.Cas64((*uint64)(head), old, new) { + break + } + } +} + +func (head *lfstack) pop() unsafe.Pointer { + for { + old := atomic.Load64((*uint64)(head)) + if old == 0 { + return nil + } + node := lfstackUnpack(old) + next := atomic.Load64(&node.next) + if atomic.Cas64((*uint64)(head), old, next) { + return unsafe.Pointer(node) + } + } +} + +func (head *lfstack) empty() bool { + return atomic.Load64((*uint64)(head)) == 0 +} + +// lfnodeValidate panics if node is not a valid address for use with +// lfstack.push. This only needs to be called when node is allocated. +func lfnodeValidate(node *lfnode) { + if base, _, _ := findObject(uintptr(unsafe.Pointer(node)), 0, 0); base != 0 { + throw("lfstack node allocated from the heap") + } + if lfstackUnpack(lfstackPack(node, ^uintptr(0))) != node { + printlock() + println("runtime: bad lfnode address", hex(uintptr(unsafe.Pointer(node)))) + throw("bad lfnode address") + } +} + +func lfstackPack(node *lfnode, cnt uintptr) uint64 { + return uint64(taggedPointerPack(unsafe.Pointer(node), cnt)) +} + +func lfstackUnpack(val uint64) *lfnode { + return (*lfnode)(taggedPointer(val).pointer()) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/lfstack_test.go b/platform/dbops/binaries/go/go/src/runtime/lfstack_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e36297e541cc0776e3471ab722f860cb387f41b0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/lfstack_test.go @@ -0,0 +1,137 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
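+
+// Editor's note (illustrative, not part of the original test): lfstack
+// packs (node pointer, node.pushcnt) into the head word, so a pop's CAS
+// fails if the same node was popped and re-pushed concurrently -- the
+// classic ABA hazard of lock-free stacks. Roughly:
+//
+//	old := load(head)      // packs (node A, cnt 1)
+//	                       // meanwhile: pop A, pop B, push A again
+//	cas(head, old, A.next) // fails: head now holds (node A, cnt 2)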
+
+package runtime_test
+
+import (
+	"math/rand"
+	. "runtime"
+	"testing"
+	"unsafe"
+)
+
+type MyNode struct {
+	LFNode
+	data int
+}
+
+// allocMyNode allocates nodes that are stored in an lfstack
+// outside the Go heap.
+// We require lfstack objects to live outside the heap so that
+// checkptr passes on the unsafe shenanigans used.
+func allocMyNode(data int) *MyNode {
+	n := (*MyNode)(PersistentAlloc(unsafe.Sizeof(MyNode{})))
+	LFNodeValidate(&n.LFNode)
+	n.data = data
+	return n
+}
+
+func fromMyNode(node *MyNode) *LFNode {
+	return (*LFNode)(unsafe.Pointer(node))
+}
+
+func toMyNode(node *LFNode) *MyNode {
+	return (*MyNode)(unsafe.Pointer(node))
+}
+
+var global any
+
+func TestLFStack(t *testing.T) {
+	stack := new(uint64)
+	global = stack // force heap allocation
+
+	// Check the stack is initially empty.
+	if LFStackPop(stack) != nil {
+		t.Fatalf("stack is not empty")
+	}
+
+	// Push one element.
+	node := allocMyNode(42)
+	LFStackPush(stack, fromMyNode(node))
+
+	// Push another.
+	node = allocMyNode(43)
+	LFStackPush(stack, fromMyNode(node))
+
+	// Pop one element.
+	node = toMyNode(LFStackPop(stack))
+	if node == nil {
+		t.Fatalf("stack is empty")
+	}
+	if node.data != 43 {
+		t.Fatalf("no lifo")
+	}
+
+	// Pop another.
+	node = toMyNode(LFStackPop(stack))
+	if node == nil {
+		t.Fatalf("stack is empty")
+	}
+	if node.data != 42 {
+		t.Fatalf("no lifo")
+	}
+
+	// Check the stack is empty again.
+	if LFStackPop(stack) != nil {
+		t.Fatalf("stack is not empty")
+	}
+	if *stack != 0 {
+		t.Fatalf("stack is not empty")
+	}
+}
+
+func TestLFStackStress(t *testing.T) {
+	const K = 100
+	P := 4 * GOMAXPROCS(-1)
+	N := 100000
+	if testing.Short() {
+		N /= 10
+	}
+	// Create 2 stacks.
+	stacks := [2]*uint64{new(uint64), new(uint64)}
+	// Push K elements randomly onto the stacks.
+	sum := 0
+	for i := 0; i < K; i++ {
+		sum += i
+		node := allocMyNode(i)
+		LFStackPush(stacks[i%2], fromMyNode(node))
+	}
+	c := make(chan bool, P)
+	for p := 0; p < P; p++ {
+		go func() {
+			r := rand.New(rand.NewSource(rand.Int63()))
+			// Pop a node from a random stack, then push it onto a random stack.
+			for i := 0; i < N; i++ {
+				node := toMyNode(LFStackPop(stacks[r.Intn(2)]))
+				if node != nil {
+					LFStackPush(stacks[r.Intn(2)], fromMyNode(node))
+				}
+			}
+			c <- true
+		}()
+	}
+	for i := 0; i < P; i++ {
+		<-c
+	}
+	// Pop all elements from both stacks, and verify that nothing is lost.
+	sum2 := 0
+	cnt := 0
+	for i := 0; i < 2; i++ {
+		for {
+			node := toMyNode(LFStackPop(stacks[i]))
+			if node == nil {
+				break
+			}
+			cnt++
+			sum2 += node.data
+			node.Next = 0
+		}
+	}
+	if cnt != K {
+		t.Fatalf("Wrong number of nodes %d/%d", cnt, K)
+	}
+	if sum2 != sum {
+		t.Fatalf("Wrong sum %d/%d", sum2, sum)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/libfuzzer.go b/platform/dbops/binaries/go/go/src/runtime/libfuzzer.go
new file mode 100644
index 0000000000000000000000000000000000000000..0ece035405f7aa7e6de881c6262e6f7013d60d83
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/libfuzzer.go
@@ -0,0 +1,160 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file. 
+ +//go:build libfuzzer + +package runtime + +import "unsafe" + +func libfuzzerCallWithTwoByteBuffers(fn, start, end *byte) +func libfuzzerCallTraceIntCmp(fn *byte, arg0, arg1, fakePC uintptr) +func libfuzzerCall4(fn *byte, fakePC uintptr, s1, s2 unsafe.Pointer, result uintptr) + +// Keep in sync with the definition of ret_sled in src/runtime/libfuzzer_amd64.s +const retSledSize = 512 + +// In libFuzzer mode, the compiler inserts calls to libfuzzerTraceCmpN and libfuzzerTraceConstCmpN +// (where N can be 1, 2, 4, or 8) for encountered integer comparisons in the code to be instrumented. +// This may result in these functions having callers that are nosplit. That is why they must be nosplit. +// +//go:nosplit +func libfuzzerTraceCmp1(arg0, arg1 uint8, fakePC uint) { + fakePC = fakePC % retSledSize + libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_cmp1, uintptr(arg0), uintptr(arg1), uintptr(fakePC)) +} + +//go:nosplit +func libfuzzerTraceCmp2(arg0, arg1 uint16, fakePC uint) { + fakePC = fakePC % retSledSize + libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_cmp2, uintptr(arg0), uintptr(arg1), uintptr(fakePC)) +} + +//go:nosplit +func libfuzzerTraceCmp4(arg0, arg1 uint32, fakePC uint) { + fakePC = fakePC % retSledSize + libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_cmp4, uintptr(arg0), uintptr(arg1), uintptr(fakePC)) +} + +//go:nosplit +func libfuzzerTraceCmp8(arg0, arg1 uint64, fakePC uint) { + fakePC = fakePC % retSledSize + libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_cmp8, uintptr(arg0), uintptr(arg1), uintptr(fakePC)) +} + +//go:nosplit +func libfuzzerTraceConstCmp1(arg0, arg1 uint8, fakePC uint) { + fakePC = fakePC % retSledSize + libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_const_cmp1, uintptr(arg0), uintptr(arg1), uintptr(fakePC)) +} + +//go:nosplit +func libfuzzerTraceConstCmp2(arg0, arg1 uint16, fakePC uint) { + fakePC = fakePC % retSledSize + libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_const_cmp2, uintptr(arg0), uintptr(arg1), uintptr(fakePC)) +} + +//go:nosplit +func libfuzzerTraceConstCmp4(arg0, arg1 uint32, fakePC uint) { + fakePC = fakePC % retSledSize + libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_const_cmp4, uintptr(arg0), uintptr(arg1), uintptr(fakePC)) +} + +//go:nosplit +func libfuzzerTraceConstCmp8(arg0, arg1 uint64, fakePC uint) { + fakePC = fakePC % retSledSize + libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_const_cmp8, uintptr(arg0), uintptr(arg1), uintptr(fakePC)) +} + +var pcTables []byte + +func init() { + libfuzzerCallWithTwoByteBuffers(&__sanitizer_cov_8bit_counters_init, &__start___sancov_cntrs, &__stop___sancov_cntrs) + start := unsafe.Pointer(&__start___sancov_cntrs) + end := unsafe.Pointer(&__stop___sancov_cntrs) + + // PC tables are arrays of ptr-sized integers representing pairs [PC,PCFlags] for every instrumented block. + // The number of PCs and PCFlags is the same as the number of 8-bit counters. Each PC table entry has + // the size of two ptr-sized integers. We allocate one more byte than what we actually need so that we can + // get a pointer representing the end of the PC table array. + size := (uintptr(end)-uintptr(start))*unsafe.Sizeof(uintptr(0))*2 + 1 + pcTables = make([]byte, size) + libfuzzerCallWithTwoByteBuffers(&__sanitizer_cov_pcs_init, &pcTables[0], &pcTables[size-1]) +} + +// We call libFuzzer's __sanitizer_weak_hook_strcmp function which takes the +// following four arguments: +// +// 1. caller_pc: location of string comparison call site +// 2. s1: first string used in the comparison +// 3. 
s2: second string used in the comparison
+//  4. result: an integer representing the comparison result. 0 indicates
+//     equality (comparison will be ignored by libfuzzer), non-zero indicates a
+//     difference (comparison will be taken into consideration).
+//
+//go:nosplit
+func libfuzzerHookStrCmp(s1, s2 string, fakePC int) {
+	if s1 != s2 {
+		libfuzzerCall4(&__sanitizer_weak_hook_strcmp, uintptr(fakePC), cstring(s1), cstring(s2), uintptr(1))
+	}
+	// if s1 == s2 we could call the hook with a last argument of 0 but this is unnecessary since this case will then
+	// be ignored by libfuzzer
+}
+
+// This function now has the same implementation as libfuzzerHookStrCmp because we lack better checks
+// for case-insensitive string equality in the runtime package.
+//
+//go:nosplit
+func libfuzzerHookEqualFold(s1, s2 string, fakePC int) {
+	if s1 != s2 {
+		libfuzzerCall4(&__sanitizer_weak_hook_strcmp, uintptr(fakePC), cstring(s1), cstring(s2), uintptr(1))
+	}
+}
+
+//go:linkname __sanitizer_cov_trace_cmp1 __sanitizer_cov_trace_cmp1
+//go:cgo_import_static __sanitizer_cov_trace_cmp1
+var __sanitizer_cov_trace_cmp1 byte
+
+//go:linkname __sanitizer_cov_trace_cmp2 __sanitizer_cov_trace_cmp2
+//go:cgo_import_static __sanitizer_cov_trace_cmp2
+var __sanitizer_cov_trace_cmp2 byte
+
+//go:linkname __sanitizer_cov_trace_cmp4 __sanitizer_cov_trace_cmp4
+//go:cgo_import_static __sanitizer_cov_trace_cmp4
+var __sanitizer_cov_trace_cmp4 byte
+
+//go:linkname __sanitizer_cov_trace_cmp8 __sanitizer_cov_trace_cmp8
+//go:cgo_import_static __sanitizer_cov_trace_cmp8
+var __sanitizer_cov_trace_cmp8 byte
+
+//go:linkname __sanitizer_cov_trace_const_cmp1 __sanitizer_cov_trace_const_cmp1
+//go:cgo_import_static __sanitizer_cov_trace_const_cmp1
+var __sanitizer_cov_trace_const_cmp1 byte
+
+//go:linkname __sanitizer_cov_trace_const_cmp2 __sanitizer_cov_trace_const_cmp2
+//go:cgo_import_static __sanitizer_cov_trace_const_cmp2
+var __sanitizer_cov_trace_const_cmp2 byte
+
+//go:linkname __sanitizer_cov_trace_const_cmp4 __sanitizer_cov_trace_const_cmp4
+//go:cgo_import_static __sanitizer_cov_trace_const_cmp4
+var __sanitizer_cov_trace_const_cmp4 byte
+
+//go:linkname __sanitizer_cov_trace_const_cmp8 __sanitizer_cov_trace_const_cmp8
+//go:cgo_import_static __sanitizer_cov_trace_const_cmp8
+var __sanitizer_cov_trace_const_cmp8 byte
+
+//go:linkname __sanitizer_cov_8bit_counters_init __sanitizer_cov_8bit_counters_init
+//go:cgo_import_static __sanitizer_cov_8bit_counters_init
+var __sanitizer_cov_8bit_counters_init byte
+
+// start, stop markers of counters, set by the linker
+var __start___sancov_cntrs, __stop___sancov_cntrs byte
+
+//go:linkname __sanitizer_cov_pcs_init __sanitizer_cov_pcs_init
+//go:cgo_import_static __sanitizer_cov_pcs_init
+var __sanitizer_cov_pcs_init byte
+
+//go:linkname __sanitizer_weak_hook_strcmp __sanitizer_weak_hook_strcmp
+//go:cgo_import_static __sanitizer_weak_hook_strcmp
+var __sanitizer_weak_hook_strcmp byte
diff --git a/platform/dbops/binaries/go/go/src/runtime/libfuzzer_amd64.s b/platform/dbops/binaries/go/go/src/runtime/libfuzzer_amd64.s
new file mode 100644
index 0000000000000000000000000000000000000000..e30b768a05a49a9908d335c944ccc9db7865d510
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/libfuzzer_amd64.s
@@ -0,0 +1,158 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file. 
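+
+// Editor's summary of the scheme (not original commentary): the Go wrappers
+// in libfuzzer.go reduce fakePC modulo retSledSize (512), and the assembly
+// below converts that index into a distinct return address by pushing the
+// address of the fakePC-th RET in a 512-byte sled:
+//
+//	fakePC = fakePC % retSledSize  // Go side, libfuzzerTraceCmp*
+//	retAddr = ret_sled + fakePC    // assembly side, one 1-byte RET each
+//
+// so libFuzzer's __builtin_return_address observes up to 512 distinct
+// call sites instead of one.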
+ +//go:build libfuzzer + +#include "go_asm.h" +#include "go_tls.h" +#include "textflag.h" + +// Based on race_amd64.s; see commentary there. + +#ifdef GOOS_windows +#define RARG0 CX +#define RARG1 DX +#define RARG2 R8 +#define RARG3 R9 +#else +#define RARG0 DI +#define RARG1 SI +#define RARG2 DX +#define RARG3 CX +#endif + +// void runtime·libfuzzerCall4(fn, hookId int, s1, s2 unsafe.Pointer, result uintptr) +// Calls C function fn from libFuzzer and passes 4 arguments to it. +TEXT runtime·libfuzzerCall4(SB), NOSPLIT, $0-40 + MOVQ fn+0(FP), AX + MOVQ hookId+8(FP), RARG0 + MOVQ s1+16(FP), RARG1 + MOVQ s2+24(FP), RARG2 + MOVQ result+32(FP), RARG3 + + get_tls(R12) + MOVQ g(R12), R14 + MOVQ g_m(R14), R13 + + // Switch to g0 stack. + MOVQ SP, R12 // callee-saved, preserved across the CALL + MOVQ m_g0(R13), R10 + CMPQ R10, R14 + JE call // already on g0 + MOVQ (g_sched+gobuf_sp)(R10), SP +call: + ANDQ $~15, SP // alignment for gcc ABI + CALL AX + MOVQ R12, SP + RET + +// void runtime·libfuzzerCallTraceIntCmp(fn, arg0, arg1, fakePC uintptr) +// Calls C function fn from libFuzzer and passes 2 arguments to it after +// manipulating the return address so that libfuzzer's integer compare hooks +// work +// libFuzzer's compare hooks obtain the caller's address from the compiler +// builtin __builtin_return_address. Since we invoke the hooks always +// from the same native function, this builtin would always return the same +// value. Internally, the libFuzzer hooks call through to the always inlined +// HandleCmp and thus can't be mimicked without patching libFuzzer. +// +// We solve this problem via an inline assembly trampoline construction that +// translates a runtime argument `fake_pc` in the range [0, 512) into a call to +// a hook with a fake return address whose lower 9 bits are `fake_pc` up to a +// constant shift. This is achieved by pushing a return address pointing into +// 512 ret instructions at offset `fake_pc` onto the stack and then jumping +// directly to the address of the hook. +// +// Note: We only set the lowest 9 bits of the return address since only these +// bits are used by the libFuzzer value profiling mode for integer compares, see +// https://github.com/llvm/llvm-project/blob/704d92607d26e696daba596b72cb70effe79a872/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp#L390 +// as well as +// https://github.com/llvm/llvm-project/blob/704d92607d26e696daba596b72cb70effe79a872/compiler-rt/lib/fuzzer/FuzzerValueBitMap.h#L34 +// ValueProfileMap.AddValue() truncates its argument to 16 bits and shifts the +// PC to the left by log_2(128)=7, which means that only the lowest 16 - 7 bits +// of the return address matter. String compare hooks use the lowest 12 bits, +// but take the return address as an argument and thus don't require the +// indirection through a trampoline. +// TODO: Remove the inline assembly trampoline once a PC argument has been added to libfuzzer's int compare hooks. +TEXT runtime·libfuzzerCallTraceIntCmp(SB), NOSPLIT, $0-32 + MOVQ fn+0(FP), AX + MOVQ arg0+8(FP), RARG0 + MOVQ arg1+16(FP), RARG1 + MOVQ fakePC+24(FP), R8 + + get_tls(R12) + MOVQ g(R12), R14 + MOVQ g_m(R14), R13 + + // Switch to g0 stack. + MOVQ SP, R12 // callee-saved, preserved across the CALL + MOVQ m_g0(R13), R10 + CMPQ R10, R14 + JE call // already on g0 + MOVQ (g_sched+gobuf_sp)(R10), SP +call: + ANDQ $~15, SP // alignment for gcc ABI + SUBQ $8, SP + // Load the address of the end of the function and push it into the stack. 
+ // This address will be jumped to after executing the return instruction + // from the return sled. There we reset the stack pointer and return. + MOVQ $end_of_function<>(SB), BX + PUSHQ BX + // Load the starting address of the return sled into BX. + MOVQ $ret_sled<>(SB), BX + // Load the address of the i'th return instruction from the return sled. + // The index is given in the fakePC argument. + ADDQ R8, BX + PUSHQ BX + // Call the original function with the fakePC return address on the stack. + // Function arguments arg0 and arg1 are passed in the registers specified + // by the x64 calling convention. + JMP AX +// This code will not be executed and is only there to satisfy assembler +// check of a balanced stack. +not_reachable: + POPQ BX + POPQ BX + RET + +TEXT end_of_function<>(SB), NOSPLIT, $0-0 + MOVQ R12, SP + RET + +#define REPEAT_8(a) a \ + a \ + a \ + a \ + a \ + a \ + a \ + a + +#define REPEAT_512(a) REPEAT_8(REPEAT_8(REPEAT_8(a))) + +TEXT ret_sled<>(SB), NOSPLIT, $0-0 + REPEAT_512(RET) + +// void runtime·libfuzzerCallWithTwoByteBuffers(fn, start, end *byte) +// Calls C function fn from libFuzzer and passes 2 arguments of type *byte to it. +TEXT runtime·libfuzzerCallWithTwoByteBuffers(SB), NOSPLIT, $0-24 + MOVQ fn+0(FP), AX + MOVQ start+8(FP), RARG0 + MOVQ end+16(FP), RARG1 + + get_tls(R12) + MOVQ g(R12), R14 + MOVQ g_m(R14), R13 + + // Switch to g0 stack. + MOVQ SP, R12 // callee-saved, preserved across the CALL + MOVQ m_g0(R13), R10 + CMPQ R10, R14 + JE call // already on g0 + MOVQ (g_sched+gobuf_sp)(R10), SP +call: + ANDQ $~15, SP // alignment for gcc ABI + CALL AX + MOVQ R12, SP + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/libfuzzer_arm64.s b/platform/dbops/binaries/go/go/src/runtime/libfuzzer_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..37b35173c3b52f7d5dd7fabe11bb2469c7cf50f3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/libfuzzer_arm64.s @@ -0,0 +1,115 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build libfuzzer + +#include "go_asm.h" +#include "textflag.h" + +// Based on race_arm64.s; see commentary there. + +#define RARG0 R0 +#define RARG1 R1 +#define RARG2 R2 +#define RARG3 R3 + +#define REPEAT_2(a) a a +#define REPEAT_8(a) REPEAT_2(REPEAT_2(REPEAT_2(a))) +#define REPEAT_128(a) REPEAT_2(REPEAT_8(REPEAT_8(a))) + +// void runtime·libfuzzerCallTraceIntCmp(fn, arg0, arg1, fakePC uintptr) +// Calls C function fn from libFuzzer and passes 2 arguments to it after +// manipulating the return address so that libfuzzer's integer compare hooks +// work. +// The problem statement and solution are documented in detail in libfuzzer_amd64.s. +// See commentary there. +TEXT runtime·libfuzzerCallTraceIntCmp(SB), NOSPLIT, $8-32 + MOVD fn+0(FP), R9 + MOVD arg0+8(FP), RARG0 + MOVD arg1+16(FP), RARG1 + MOVD fakePC+24(FP), R8 + // Save the original return address in a local variable + MOVD R30, savedRetAddr-8(SP) + + MOVD g_m(g), R10 + + // Switch to g0 stack. + MOVD RSP, R19 // callee-saved, preserved across the CALL + MOVD m_g0(R10), R11 + CMP R11, g + BEQ call // already on g0 + MOVD (g_sched+gobuf_sp)(R11), R12 + MOVD R12, RSP +call: + // Load address of the ret sled into the default register for the return + // address. + ADR ret_sled, R30 + // Clear the lowest 2 bits of fakePC. 
All ARM64 instructions are four + // bytes long, so we cannot get better return address granularity than + // multiples of 4. + AND $-4, R8, R8 + // Add the offset of the fake_pc-th ret. + ADD R8, R30, R30 + // Call the function by jumping to it and reusing all registers except + // for the modified return address register R30. + JMP (R9) + +// The ret sled for ARM64 consists of 128 br instructions jumping to the +// end of the function. Each instruction is 4 bytes long. The sled thus +// has the same byte length of 4 * 128 = 512 as the x86_64 sled, but +// coarser granularity. +#define RET_SLED \ + JMP end_of_function; + +ret_sled: + REPEAT_128(RET_SLED); + +end_of_function: + MOVD R19, RSP + MOVD savedRetAddr-8(SP), R30 + RET + +// void runtime·libfuzzerCall4(fn, hookId int, s1, s2 unsafe.Pointer, result uintptr) +// Calls C function fn from libFuzzer and passes 4 arguments to it. +TEXT runtime·libfuzzerCall4(SB), NOSPLIT, $0-40 + MOVD fn+0(FP), R9 + MOVD hookId+8(FP), RARG0 + MOVD s1+16(FP), RARG1 + MOVD s2+24(FP), RARG2 + MOVD result+32(FP), RARG3 + + MOVD g_m(g), R10 + + // Switch to g0 stack. + MOVD RSP, R19 // callee-saved, preserved across the CALL + MOVD m_g0(R10), R11 + CMP R11, g + BEQ call // already on g0 + MOVD (g_sched+gobuf_sp)(R11), R12 + MOVD R12, RSP +call: + BL R9 + MOVD R19, RSP + RET + +// void runtime·libfuzzerCallWithTwoByteBuffers(fn, start, end *byte) +// Calls C function fn from libFuzzer and passes 2 arguments of type *byte to it. +TEXT runtime·libfuzzerCallWithTwoByteBuffers(SB), NOSPLIT, $0-24 + MOVD fn+0(FP), R9 + MOVD start+8(FP), R0 + MOVD end+16(FP), R1 + + MOVD g_m(g), R10 + + // Switch to g0 stack. + MOVD RSP, R19 // callee-saved, preserved across the CALL + MOVD m_g0(R10), R11 + CMP R11, g + BEQ call // already on g0 + MOVD (g_sched+gobuf_sp)(R11), R12 + MOVD R12, RSP +call: + BL R9 + MOVD R19, RSP + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/lock_futex.go b/platform/dbops/binaries/go/go/src/runtime/lock_futex.go new file mode 100644 index 0000000000000000000000000000000000000000..867e2b34d0662279311ac1407f27bfda3b6a7b37 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/lock_futex.go @@ -0,0 +1,256 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build dragonfly || freebsd || linux + +package runtime + +import ( + "runtime/internal/atomic" + "unsafe" +) + +// This implementation depends on OS-specific implementations of +// +// futexsleep(addr *uint32, val uint32, ns int64) +// Atomically, +// if *addr == val { sleep } +// Might be woken up spuriously; that's allowed. +// Don't sleep longer than ns; ns < 0 means forever. +// +// futexwakeup(addr *uint32, cnt uint32) +// If any procs are sleeping on addr, wake up at most cnt. + +const ( + mutex_unlocked = 0 + mutex_locked = 1 + mutex_sleeping = 2 + + active_spin = 4 + active_spin_cnt = 30 + passive_spin = 1 +) + +// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping. +// mutex_sleeping means that there is presumably at least one sleeping thread. +// Note that there can be spinning threads during all states - they do not +// affect mutex's state. + +// We use the uintptr mutex.key and note.key as a uint32. 
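+//
+// Editor's sketch of the transitions used by lock2/unlock2 below (not
+// original commentary):
+//
+//	lock:   xchg(key, locked)   == unlocked -> acquired (fast path)
+//	        cas(key, unlocked, wait)        -> acquired while spinning
+//	        xchg(key, sleeping) == unlocked -> acquired, else futexsleep
+//	unlock: xchg(key, unlocked) == sleeping -> futexwakeup(key, 1)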
+// +//go:nosplit +func key32(p *uintptr) *uint32 { + return (*uint32)(unsafe.Pointer(p)) +} + +func mutexContended(l *mutex) bool { + return atomic.Load(key32(&l.key)) > mutex_locked +} + +func lock(l *mutex) { + lockWithRank(l, getLockRank(l)) +} + +func lock2(l *mutex) { + gp := getg() + + if gp.m.locks < 0 { + throw("runtime·lock: lock count") + } + gp.m.locks++ + + // Speculative grab for lock. + v := atomic.Xchg(key32(&l.key), mutex_locked) + if v == mutex_unlocked { + return + } + + // wait is either MUTEX_LOCKED or MUTEX_SLEEPING + // depending on whether there is a thread sleeping + // on this mutex. If we ever change l->key from + // MUTEX_SLEEPING to some other value, we must be + // careful to change it back to MUTEX_SLEEPING before + // returning, to ensure that the sleeping thread gets + // its wakeup call. + wait := v + + timer := &lockTimer{lock: l} + timer.begin() + // On uniprocessors, no point spinning. + // On multiprocessors, spin for ACTIVE_SPIN attempts. + spin := 0 + if ncpu > 1 { + spin = active_spin + } + for { + // Try for lock, spinning. + for i := 0; i < spin; i++ { + for l.key == mutex_unlocked { + if atomic.Cas(key32(&l.key), mutex_unlocked, wait) { + timer.end() + return + } + } + procyield(active_spin_cnt) + } + + // Try for lock, rescheduling. + for i := 0; i < passive_spin; i++ { + for l.key == mutex_unlocked { + if atomic.Cas(key32(&l.key), mutex_unlocked, wait) { + timer.end() + return + } + } + osyield() + } + + // Sleep. + v = atomic.Xchg(key32(&l.key), mutex_sleeping) + if v == mutex_unlocked { + timer.end() + return + } + wait = mutex_sleeping + futexsleep(key32(&l.key), mutex_sleeping, -1) + } +} + +func unlock(l *mutex) { + unlockWithRank(l) +} + +func unlock2(l *mutex) { + v := atomic.Xchg(key32(&l.key), mutex_unlocked) + if v == mutex_unlocked { + throw("unlock of unlocked lock") + } + if v == mutex_sleeping { + futexwakeup(key32(&l.key), 1) + } + + gp := getg() + gp.m.mLockProfile.recordUnlock(l) + gp.m.locks-- + if gp.m.locks < 0 { + throw("runtime·unlock: lock count") + } + if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack + gp.stackguard0 = stackPreempt + } +} + +// One-time notifications. +func noteclear(n *note) { + n.key = 0 +} + +func notewakeup(n *note) { + old := atomic.Xchg(key32(&n.key), 1) + if old != 0 { + print("notewakeup - double wakeup (", old, ")\n") + throw("notewakeup - double wakeup") + } + futexwakeup(key32(&n.key), 1) +} + +func notesleep(n *note) { + gp := getg() + if gp != gp.m.g0 { + throw("notesleep not on g0") + } + ns := int64(-1) + if *cgo_yield != nil { + // Sleep for an arbitrary-but-moderate interval to poll libc interceptors. + ns = 10e6 + } + for atomic.Load(key32(&n.key)) == 0 { + gp.m.blocked = true + futexsleep(key32(&n.key), 0, ns) + if *cgo_yield != nil { + asmcgocall(*cgo_yield, nil) + } + gp.m.blocked = false + } +} + +// May run with m.p==nil if called from notetsleep, so write barriers +// are not allowed. +// +//go:nosplit +//go:nowritebarrier +func notetsleep_internal(n *note, ns int64) bool { + gp := getg() + + if ns < 0 { + if *cgo_yield != nil { + // Sleep for an arbitrary-but-moderate interval to poll libc interceptors. 
+ ns = 10e6 + } + for atomic.Load(key32(&n.key)) == 0 { + gp.m.blocked = true + futexsleep(key32(&n.key), 0, ns) + if *cgo_yield != nil { + asmcgocall(*cgo_yield, nil) + } + gp.m.blocked = false + } + return true + } + + if atomic.Load(key32(&n.key)) != 0 { + return true + } + + deadline := nanotime() + ns + for { + if *cgo_yield != nil && ns > 10e6 { + ns = 10e6 + } + gp.m.blocked = true + futexsleep(key32(&n.key), 0, ns) + if *cgo_yield != nil { + asmcgocall(*cgo_yield, nil) + } + gp.m.blocked = false + if atomic.Load(key32(&n.key)) != 0 { + break + } + now := nanotime() + if now >= deadline { + break + } + ns = deadline - now + } + return atomic.Load(key32(&n.key)) != 0 +} + +func notetsleep(n *note, ns int64) bool { + gp := getg() + if gp != gp.m.g0 && gp.m.preemptoff != "" { + throw("notetsleep not on g0") + } + + return notetsleep_internal(n, ns) +} + +// same as runtime·notetsleep, but called on user g (not g0) +// calls only nosplit functions between entersyscallblock/exitsyscall. +func notetsleepg(n *note, ns int64) bool { + gp := getg() + if gp == gp.m.g0 { + throw("notetsleepg on g0") + } + + entersyscallblock() + ok := notetsleep_internal(n, ns) + exitsyscall() + return ok +} + +func beforeIdle(int64, int64) (*g, bool) { + return nil, false +} + +func checkTimeouts() {} diff --git a/platform/dbops/binaries/go/go/src/runtime/lock_js.go b/platform/dbops/binaries/go/go/src/runtime/lock_js.go new file mode 100644 index 0000000000000000000000000000000000000000..b6ee5ec7afe2697b75c8dae63b3035bb4b4c867a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/lock_js.go @@ -0,0 +1,313 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build js && wasm + +package runtime + +import _ "unsafe" // for go:linkname + +// js/wasm has no support for threads yet. There is no preemption. + +const ( + mutex_unlocked = 0 + mutex_locked = 1 + + note_cleared = 0 + note_woken = 1 + note_timeout = 2 + + active_spin = 4 + active_spin_cnt = 30 + passive_spin = 1 +) + +func mutexContended(l *mutex) bool { + return false +} + +func lock(l *mutex) { + lockWithRank(l, getLockRank(l)) +} + +func lock2(l *mutex) { + if l.key == mutex_locked { + // js/wasm is single-threaded so we should never + // observe this. + throw("self deadlock") + } + gp := getg() + if gp.m.locks < 0 { + throw("lock count") + } + gp.m.locks++ + l.key = mutex_locked +} + +func unlock(l *mutex) { + unlockWithRank(l) +} + +func unlock2(l *mutex) { + if l.key == mutex_unlocked { + throw("unlock of unlocked lock") + } + gp := getg() + gp.m.locks-- + if gp.m.locks < 0 { + throw("lock count") + } + l.key = mutex_unlocked +} + +// One-time notifications. 
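+//
+// Editor's note (illustrative): with no threads there is nothing to spin on
+// or futex-sleep; a waiting goroutine is parked and remembered in the maps
+// below, and notewakeup/checkTimeouts goready it:
+//
+//	notes[n] = gp        // notetsleepg records the waiter
+//	gopark(...)          // park until...
+//	goready(notes[n], 1) // ...notewakeup resumes it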
+ +type noteWithTimeout struct { + gp *g + deadline int64 +} + +var ( + notes = make(map[*note]*g) + notesWithTimeout = make(map[*note]noteWithTimeout) +) + +func noteclear(n *note) { + n.key = note_cleared +} + +func notewakeup(n *note) { + // gp := getg() + if n.key == note_woken { + throw("notewakeup - double wakeup") + } + cleared := n.key == note_cleared + n.key = note_woken + if cleared { + goready(notes[n], 1) + } +} + +func notesleep(n *note) { + throw("notesleep not supported by js") +} + +func notetsleep(n *note, ns int64) bool { + throw("notetsleep not supported by js") + return false +} + +// same as runtime·notetsleep, but called on user g (not g0) +func notetsleepg(n *note, ns int64) bool { + gp := getg() + if gp == gp.m.g0 { + throw("notetsleepg on g0") + } + + if ns >= 0 { + deadline := nanotime() + ns + delay := ns/1000000 + 1 // round up + if delay > 1<<31-1 { + delay = 1<<31 - 1 // cap to max int32 + } + + id := scheduleTimeoutEvent(delay) + mp := acquirem() + notes[n] = gp + notesWithTimeout[n] = noteWithTimeout{gp: gp, deadline: deadline} + releasem(mp) + + gopark(nil, nil, waitReasonSleep, traceBlockSleep, 1) + + clearTimeoutEvent(id) // note might have woken early, clear timeout + + mp = acquirem() + delete(notes, n) + delete(notesWithTimeout, n) + releasem(mp) + + return n.key == note_woken + } + + for n.key != note_woken { + mp := acquirem() + notes[n] = gp + releasem(mp) + + gopark(nil, nil, waitReasonZero, traceBlockGeneric, 1) + + mp = acquirem() + delete(notes, n) + releasem(mp) + } + return true +} + +// checkTimeouts resumes goroutines that are waiting on a note which has reached its deadline. +// TODO(drchase): need to understand if write barriers are really okay in this context. +// +//go:yeswritebarrierrec +func checkTimeouts() { + now := nanotime() + // TODO: map iteration has the write barriers in it; is that okay? + for n, nt := range notesWithTimeout { + if n.key == note_cleared && now >= nt.deadline { + n.key = note_timeout + goready(nt.gp, 1) + } + } +} + +// events is a stack of calls from JavaScript into Go. +var events []*event + +type event struct { + // g was the active goroutine when the call from JavaScript occurred. + // It needs to be active when returning to JavaScript. + gp *g + // returned reports whether the event handler has returned. + // When all goroutines are idle and the event handler has returned, + // then g gets resumed and returns the execution to JavaScript. + returned bool +} + +type timeoutEvent struct { + id int32 + // The time when this timeout will be triggered. + time int64 +} + +// diff calculates the difference of the event's trigger time and x. +func (e *timeoutEvent) diff(x int64) int64 { + if e == nil { + return 0 + } + + diff := x - idleTimeout.time + if diff < 0 { + diff = -diff + } + return diff +} + +// clear cancels this timeout event. +func (e *timeoutEvent) clear() { + if e == nil { + return + } + + clearTimeoutEvent(e.id) +} + +// The timeout event started by beforeIdle. +var idleTimeout *timeoutEvent + +// beforeIdle gets called by the scheduler if no goroutine is awake. +// If we are not already handling an event, then we pause for an async event. +// If an event handler returned, we resume it and it will pause the execution. +// beforeIdle either returns the specific goroutine to schedule next or +// indicates with otherReady that some goroutine became ready. +// TODO(drchase): need to understand if write barriers are really okay in this context. 
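+// Editor's sketch of the event round trip (not original commentary):
+//
+//	JavaScript calls in -> handleEvent: push event, run handler, gopark
+//	scheduler goes idle -> beforeIdle: return the parked handler goroutine
+//	handler resumes     -> pop event, pause() back to JavaScript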
+// +//go:yeswritebarrierrec +func beforeIdle(now, pollUntil int64) (gp *g, otherReady bool) { + delay := int64(-1) + if pollUntil != 0 { + // round up to prevent setTimeout being called early + delay = (pollUntil-now-1)/1e6 + 1 + if delay > 1e9 { + // An arbitrary cap on how long to wait for a timer. + // 1e9 ms == ~11.5 days. + delay = 1e9 + } + } + + if delay > 0 && (idleTimeout == nil || idleTimeout.diff(pollUntil) > 1e6) { + // If the difference is larger than 1 ms, we should reschedule the timeout. + idleTimeout.clear() + + idleTimeout = &timeoutEvent{ + id: scheduleTimeoutEvent(delay), + time: pollUntil, + } + } + + if len(events) == 0 { + // TODO: this is the line that requires the yeswritebarrierrec + go handleAsyncEvent() + return nil, true + } + + e := events[len(events)-1] + if e.returned { + return e.gp, false + } + return nil, false +} + +var idleStart int64 + +func handleAsyncEvent() { + idleStart = nanotime() + pause(getcallersp() - 16) +} + +// clearIdleTimeout clears our record of the timeout started by beforeIdle. +func clearIdleTimeout() { + idleTimeout.clear() + idleTimeout = nil +} + +// pause sets SP to newsp and pauses the execution of Go's WebAssembly code until an event is triggered. +func pause(newsp uintptr) + +// scheduleTimeoutEvent tells the WebAssembly environment to trigger an event after ms milliseconds. +// It returns a timer id that can be used with clearTimeoutEvent. +// +//go:wasmimport gojs runtime.scheduleTimeoutEvent +func scheduleTimeoutEvent(ms int64) int32 + +// clearTimeoutEvent clears a timeout event scheduled by scheduleTimeoutEvent. +// +//go:wasmimport gojs runtime.clearTimeoutEvent +func clearTimeoutEvent(id int32) + +// handleEvent gets invoked on a call from JavaScript into Go. It calls the event handler of the syscall/js package +// and then parks the handler goroutine to allow other goroutines to run before giving execution back to JavaScript. +// When no other goroutine is awake any more, beforeIdle resumes the handler goroutine. Now that the same goroutine +// is running as was running when the call came in from JavaScript, execution can be safely passed back to JavaScript. +func handleEvent() { + sched.idleTime.Add(nanotime() - idleStart) + + e := &event{ + gp: getg(), + returned: false, + } + events = append(events, e) + + if !eventHandler() { + // If we did not handle a window event, the idle timeout was triggered, so we can clear it. + clearIdleTimeout() + } + + // wait until all goroutines are idle + e.returned = true + gopark(nil, nil, waitReasonZero, traceBlockGeneric, 1) + + events[len(events)-1] = nil + events = events[:len(events)-1] + + // return execution to JavaScript + idleStart = nanotime() + pause(getcallersp() - 16) +} + +// eventHandler retrieves and executes handlers for pending JavaScript events. +// It returns true if an event was handled. +var eventHandler func() bool + +//go:linkname setEventHandler syscall/js.setEventHandler +func setEventHandler(fn func() bool) { + eventHandler = fn +} diff --git a/platform/dbops/binaries/go/go/src/runtime/lock_sema.go b/platform/dbops/binaries/go/go/src/runtime/lock_sema.go new file mode 100644 index 0000000000000000000000000000000000000000..073e7d410e4c74bceb6a0d1ca1e4cc4078df127e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/lock_sema.go @@ -0,0 +1,306 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+//go:build aix || darwin || netbsd || openbsd || plan9 || solaris || windows
+
+package runtime
+
+import (
+	"runtime/internal/atomic"
+	"unsafe"
+)
+
+// This implementation depends on OS-specific implementations of
+//
+//	func semacreate(mp *m)
+//		Create a semaphore for mp, if it does not already have one.
+//
+//	func semasleep(ns int64) int32
+//		If ns < 0, acquire m's semaphore and return 0.
+//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
+//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
+//
+//	func semawakeup(mp *m)
+//		Wake up mp, which is or will soon be sleeping on its semaphore.
+const (
+	locked uintptr = 1
+
+	active_spin     = 4
+	active_spin_cnt = 30
+	passive_spin    = 1
+)
+
+func mutexContended(l *mutex) bool {
+	return atomic.Loaduintptr(&l.key) > locked
+}
+
+func lock(l *mutex) {
+	lockWithRank(l, getLockRank(l))
+}
+
+func lock2(l *mutex) {
+	gp := getg()
+	if gp.m.locks < 0 {
+		throw("runtime·lock: lock count")
+	}
+	gp.m.locks++
+
+	// Speculative grab for lock.
+	if atomic.Casuintptr(&l.key, 0, locked) {
+		return
+	}
+	semacreate(gp.m)
+
+	timer := &lockTimer{lock: l}
+	timer.begin()
+	// On uniprocessors, no point spinning.
+	// On multiprocessors, spin for ACTIVE_SPIN attempts.
+	spin := 0
+	if ncpu > 1 {
+		spin = active_spin
+	}
+Loop:
+	for i := 0; ; i++ {
+		v := atomic.Loaduintptr(&l.key)
+		if v&locked == 0 {
+			// Unlocked. Try to lock.
+			if atomic.Casuintptr(&l.key, v, v|locked) {
+				timer.end()
+				return
+			}
+			i = 0
+		}
+		if i < spin {
+			procyield(active_spin_cnt)
+		} else if i < spin+passive_spin {
+			osyield()
+		} else {
+			// Someone else has it.
+			// l->waitm points to a linked list of M's waiting
+			// for this lock, chained through m->nextwaitm.
+			// Queue this M.
+			for {
+				gp.m.nextwaitm = muintptr(v &^ locked)
+				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
+					break
+				}
+				v = atomic.Loaduintptr(&l.key)
+				if v&locked == 0 {
+					continue Loop
+				}
+			}
+			if v&locked != 0 {
+				// Queued. Wait.
+				semasleep(-1)
+				i = 0
+			}
+		}
+	}
+}
+
+func unlock(l *mutex) {
+	unlockWithRank(l)
+}
+
+// We might not be holding a p in this code.
+//
+//go:nowritebarrier
+func unlock2(l *mutex) {
+	gp := getg()
+	var mp *m
+	for {
+		v := atomic.Loaduintptr(&l.key)
+		if v == locked {
+			if atomic.Casuintptr(&l.key, locked, 0) {
+				break
+			}
+		} else {
+			// Other M's are waiting for the lock.
+			// Dequeue an M.
+			mp = muintptr(v &^ locked).ptr()
+			if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
+				// Dequeued an M. Wake it.
+				semawakeup(mp)
+				break
+			}
+		}
+	}
+	gp.m.mLockProfile.recordUnlock(l)
+	gp.m.locks--
+	if gp.m.locks < 0 {
+		throw("runtime·unlock: lock count")
+	}
+	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
+		gp.stackguard0 = stackPreempt
+	}
+}
+
+// One-time notifications.
+func noteclear(n *note) {
+	n.key = 0
+}
+
+func notewakeup(n *note) {
+	var v uintptr
+	for {
+		v = atomic.Loaduintptr(&n.key)
+		if atomic.Casuintptr(&n.key, v, locked) {
+			break
+		}
+	}
+
+	// Successfully set waitm to locked.
+	// What was it before?
+	switch {
+	case v == 0:
+		// Nothing was waiting. Done.
+	case v == locked:
+		// Two notewakeups! Not allowed.
+		throw("notewakeup - double wakeup")
+	default:
+		// Must be the waiting m. Wake it up.
+ semawakeup((*m)(unsafe.Pointer(v))) + } +} + +func notesleep(n *note) { + gp := getg() + if gp != gp.m.g0 { + throw("notesleep not on g0") + } + semacreate(gp.m) + if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) { + // Must be locked (got wakeup). + if n.key != locked { + throw("notesleep - waitm out of sync") + } + return + } + // Queued. Sleep. + gp.m.blocked = true + if *cgo_yield == nil { + semasleep(-1) + } else { + // Sleep for an arbitrary-but-moderate interval to poll libc interceptors. + const ns = 10e6 + for atomic.Loaduintptr(&n.key) == 0 { + semasleep(ns) + asmcgocall(*cgo_yield, nil) + } + } + gp.m.blocked = false +} + +//go:nosplit +func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool { + // gp and deadline are logically local variables, but they are written + // as parameters so that the stack space they require is charged + // to the caller. + // This reduces the nosplit footprint of notetsleep_internal. + gp = getg() + + // Register for wakeup on n->waitm. + if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) { + // Must be locked (got wakeup). + if n.key != locked { + throw("notetsleep - waitm out of sync") + } + return true + } + if ns < 0 { + // Queued. Sleep. + gp.m.blocked = true + if *cgo_yield == nil { + semasleep(-1) + } else { + // Sleep in arbitrary-but-moderate intervals to poll libc interceptors. + const ns = 10e6 + for semasleep(ns) < 0 { + asmcgocall(*cgo_yield, nil) + } + } + gp.m.blocked = false + return true + } + + deadline = nanotime() + ns + for { + // Registered. Sleep. + gp.m.blocked = true + if *cgo_yield != nil && ns > 10e6 { + ns = 10e6 + } + if semasleep(ns) >= 0 { + gp.m.blocked = false + // Acquired semaphore, semawakeup unregistered us. + // Done. + return true + } + if *cgo_yield != nil { + asmcgocall(*cgo_yield, nil) + } + gp.m.blocked = false + // Interrupted or timed out. Still registered. Semaphore not acquired. + ns = deadline - nanotime() + if ns <= 0 { + break + } + // Deadline hasn't arrived. Keep sleeping. + } + + // Deadline arrived. Still registered. Semaphore not acquired. + // Want to give up and return, but have to unregister first, + // so that any notewakeup racing with the return does not + // try to grant us the semaphore when we don't expect it. + for { + v := atomic.Loaduintptr(&n.key) + switch v { + case uintptr(unsafe.Pointer(gp.m)): + // No wakeup yet; unregister if possible. + if atomic.Casuintptr(&n.key, v, 0) { + return false + } + case locked: + // Wakeup happened so semaphore is available. + // Grab it to avoid getting out of sync. + gp.m.blocked = true + if semasleep(-1) < 0 { + throw("runtime: unable to acquire - semaphore out of sync") + } + gp.m.blocked = false + return true + default: + throw("runtime: unexpected waitm - semaphore out of sync") + } + } +} + +func notetsleep(n *note, ns int64) bool { + gp := getg() + if gp != gp.m.g0 { + throw("notetsleep not on g0") + } + semacreate(gp.m) + return notetsleep_internal(n, ns, nil, 0) +} + +// same as runtime·notetsleep, but called on user g (not g0) +// calls only nosplit functions between entersyscallblock/exitsyscall. 
+func notetsleepg(n *note, ns int64) bool { + gp := getg() + if gp == gp.m.g0 { + throw("notetsleepg on g0") + } + semacreate(gp.m) + entersyscallblock() + ok := notetsleep_internal(n, ns, nil, 0) + exitsyscall() + return ok +} + +func beforeIdle(int64, int64) (*g, bool) { + return nil, false +} + +func checkTimeouts() {} diff --git a/platform/dbops/binaries/go/go/src/runtime/lock_wasip1.go b/platform/dbops/binaries/go/go/src/runtime/lock_wasip1.go new file mode 100644 index 0000000000000000000000000000000000000000..acfc62acb48e90d58ec7accbeea0cd41b184bfa1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/lock_wasip1.go @@ -0,0 +1,111 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasip1 + +package runtime + +// wasm has no support for threads yet. There is no preemption. +// See proposal: https://github.com/WebAssembly/threads +// Waiting for a mutex or timeout is implemented as a busy loop +// while allowing other goroutines to run. + +const ( + mutex_unlocked = 0 + mutex_locked = 1 + + active_spin = 4 + active_spin_cnt = 30 +) + +func mutexContended(l *mutex) bool { + return false +} + +func lock(l *mutex) { + lockWithRank(l, getLockRank(l)) +} + +func lock2(l *mutex) { + if l.key == mutex_locked { + // wasm is single-threaded so we should never + // observe this. + throw("self deadlock") + } + gp := getg() + if gp.m.locks < 0 { + throw("lock count") + } + gp.m.locks++ + l.key = mutex_locked +} + +func unlock(l *mutex) { + unlockWithRank(l) +} + +func unlock2(l *mutex) { + if l.key == mutex_unlocked { + throw("unlock of unlocked lock") + } + gp := getg() + gp.m.locks-- + if gp.m.locks < 0 { + throw("lock count") + } + l.key = mutex_unlocked +} + +// One-time notifications. +func noteclear(n *note) { + n.key = 0 +} + +func notewakeup(n *note) { + if n.key != 0 { + print("notewakeup - double wakeup (", n.key, ")\n") + throw("notewakeup - double wakeup") + } + n.key = 1 +} + +func notesleep(n *note) { + throw("notesleep not supported by wasi") +} + +func notetsleep(n *note, ns int64) bool { + throw("notetsleep not supported by wasi") + return false +} + +// same as runtime·notetsleep, but called on user g (not g0) +func notetsleepg(n *note, ns int64) bool { + gp := getg() + if gp == gp.m.g0 { + throw("notetsleepg on g0") + } + + deadline := nanotime() + ns + for { + if n.key != 0 { + return true + } + if sched_yield() != 0 { + throw("sched_yield failed") + } + Gosched() + if ns >= 0 && nanotime() >= deadline { + return false + } + } +} + +func beforeIdle(int64, int64) (*g, bool) { + return nil, false +} + +func checkTimeouts() {} + +//go:wasmimport wasi_snapshot_preview1 sched_yield +func sched_yield() errno diff --git a/platform/dbops/binaries/go/go/src/runtime/lockrank.go b/platform/dbops/binaries/go/go/src/runtime/lockrank.go new file mode 100644 index 0000000000000000000000000000000000000000..b27e6c560615aa1fd197b05a0d1a9bf56d175d70 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/lockrank.go @@ -0,0 +1,212 @@ +// Code generated by mklockrank.go; DO NOT EDIT. + +package runtime + +type lockRank int + +// Constants representing the ranks of all non-leaf runtime locks, in rank order. +// Locks with lower rank must be taken before locks with higher rank, +// in addition to satisfying the partial order in lockPartialOrder. +// A few ranks allow self-cycles, which are specified in lockPartialOrder. 
+const ( + lockRankUnknown lockRank = iota + + lockRankSysmon + lockRankScavenge + lockRankForcegc + lockRankDefer + lockRankSweepWaiters + lockRankAssistQueue + lockRankSweep + lockRankTestR + lockRankTestW + lockRankAllocmW + lockRankExecW + lockRankCpuprof + lockRankPollDesc + lockRankWakeableSleep + // SCHED + lockRankAllocmR + lockRankExecR + lockRankSched + lockRankAllg + lockRankAllp + lockRankTimers + lockRankNetpollInit + lockRankHchan + lockRankNotifyList + lockRankSudog + lockRankRoot + lockRankItab + lockRankReflectOffs + lockRankUserArenaState + // TRACEGLOBAL + lockRankTraceBuf + lockRankTraceStrings + // MALLOC + lockRankFin + lockRankSpanSetSpine + lockRankMspanSpecial + // MPROF + lockRankGcBitsArenas + lockRankProfInsert + lockRankProfBlock + lockRankProfMemActive + lockRankProfMemFuture + // STACKGROW + lockRankGscan + lockRankStackpool + lockRankStackLarge + lockRankHchanLeaf + // WB + lockRankWbufSpans + lockRankMheap + lockRankMheapSpecial + lockRankGlobalAlloc + // TRACE + lockRankTrace + lockRankTraceStackTab + lockRankPanic + lockRankDeadlock + lockRankRaceFini + lockRankAllocmRInternal + lockRankExecRInternal + lockRankTestRInternal +) + +// lockRankLeafRank is the rank of lock that does not have a declared rank, +// and hence is a leaf lock. +const lockRankLeafRank lockRank = 1000 + +// lockNames gives the names associated with each of the above ranks. +var lockNames = []string{ + lockRankSysmon: "sysmon", + lockRankScavenge: "scavenge", + lockRankForcegc: "forcegc", + lockRankDefer: "defer", + lockRankSweepWaiters: "sweepWaiters", + lockRankAssistQueue: "assistQueue", + lockRankSweep: "sweep", + lockRankTestR: "testR", + lockRankTestW: "testW", + lockRankAllocmW: "allocmW", + lockRankExecW: "execW", + lockRankCpuprof: "cpuprof", + lockRankPollDesc: "pollDesc", + lockRankWakeableSleep: "wakeableSleep", + lockRankAllocmR: "allocmR", + lockRankExecR: "execR", + lockRankSched: "sched", + lockRankAllg: "allg", + lockRankAllp: "allp", + lockRankTimers: "timers", + lockRankNetpollInit: "netpollInit", + lockRankHchan: "hchan", + lockRankNotifyList: "notifyList", + lockRankSudog: "sudog", + lockRankRoot: "root", + lockRankItab: "itab", + lockRankReflectOffs: "reflectOffs", + lockRankUserArenaState: "userArenaState", + lockRankTraceBuf: "traceBuf", + lockRankTraceStrings: "traceStrings", + lockRankFin: "fin", + lockRankSpanSetSpine: "spanSetSpine", + lockRankMspanSpecial: "mspanSpecial", + lockRankGcBitsArenas: "gcBitsArenas", + lockRankProfInsert: "profInsert", + lockRankProfBlock: "profBlock", + lockRankProfMemActive: "profMemActive", + lockRankProfMemFuture: "profMemFuture", + lockRankGscan: "gscan", + lockRankStackpool: "stackpool", + lockRankStackLarge: "stackLarge", + lockRankHchanLeaf: "hchanLeaf", + lockRankWbufSpans: "wbufSpans", + lockRankMheap: "mheap", + lockRankMheapSpecial: "mheapSpecial", + lockRankGlobalAlloc: "globalAlloc", + lockRankTrace: "trace", + lockRankTraceStackTab: "traceStackTab", + lockRankPanic: "panic", + lockRankDeadlock: "deadlock", + lockRankRaceFini: "raceFini", + lockRankAllocmRInternal: "allocmRInternal", + lockRankExecRInternal: "execRInternal", + lockRankTestRInternal: "testRInternal", +} + +func (rank lockRank) String() string { + if rank == 0 { + return "UNKNOWN" + } + if rank == lockRankLeafRank { + return "LEAF" + } + if rank < 0 || int(rank) >= len(lockNames) { + return "BAD RANK" + } + return lockNames[rank] +} + +// lockPartialOrder is the transitive closure of the lock rank graph. 
+// An entry for rank X lists all of the ranks that can already be held +// when rank X is acquired. +// +// Lock ranks that allow self-cycles list themselves. +var lockPartialOrder [][]lockRank = [][]lockRank{ + lockRankSysmon: {}, + lockRankScavenge: {lockRankSysmon}, + lockRankForcegc: {lockRankSysmon}, + lockRankDefer: {}, + lockRankSweepWaiters: {}, + lockRankAssistQueue: {}, + lockRankSweep: {}, + lockRankTestR: {}, + lockRankTestW: {}, + lockRankAllocmW: {}, + lockRankExecW: {}, + lockRankCpuprof: {}, + lockRankPollDesc: {}, + lockRankWakeableSleep: {}, + lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep}, + lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep}, + lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR}, + lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllp, lockRankTimers}, + lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllp, lockRankTimers}, + lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankWakeableSleep, lockRankHchan}, + lockRankNotifyList: {}, + lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, + lockRankRoot: {}, + lockRankItab: {}, + lockRankReflectOffs: {lockRankItab}, + lockRankUserArenaState: {}, + lockRankTraceBuf: {lockRankSysmon, lockRankScavenge}, + lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf}, + lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, 
lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, + lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, + lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, + lockRankStackpool: {lockRankSysmon, 
lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, + lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, + lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, 
lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, + lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, + lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, + lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, + lockRankPanic: {}, + lockRankDeadlock: {lockRankPanic, lockRankDeadlock}, + lockRankRaceFini: {lockRankPanic}, + lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR}, + lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankExecR}, + lockRankTestRInternal: 
{lockRankTestR, lockRankTestW},
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/lockrank_off.go b/platform/dbops/binaries/go/go/src/runtime/lockrank_off.go
new file mode 100644
index 0000000000000000000000000000000000000000..c86726f3dd7d9fe47ad294078e4dc7676d3c9c7f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/lockrank_off.go
@@ -0,0 +1,68 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !goexperiment.staticlockranking
+
+package runtime
+
+const staticLockRanking = false
+
+// lockRankStruct is embedded in mutex, but is empty when static lock
+// ranking is disabled (the default).
+type lockRankStruct struct {
+}
+
+func lockInit(l *mutex, rank lockRank) {
+}
+
+func getLockRank(l *mutex) lockRank {
+	return 0
+}
+
+func lockWithRank(l *mutex, rank lockRank) {
+	lock2(l)
+}
+
+// This function may be called in nosplit context and thus must be nosplit.
+//
+//go:nosplit
+func acquireLockRank(rank lockRank) {
+}
+
+func unlockWithRank(l *mutex) {
+	unlock2(l)
+}
+
+// This function may be called in nosplit context and thus must be nosplit.
+//
+//go:nosplit
+func releaseLockRank(rank lockRank) {
+}
+
+func lockWithRankMayAcquire(l *mutex, rank lockRank) {
+}
+
+//go:nosplit
+func assertLockHeld(l *mutex) {
+}
+
+//go:nosplit
+func assertRankHeld(r lockRank) {
+}
+
+//go:nosplit
+func worldStopped() {
+}
+
+//go:nosplit
+func worldStarted() {
+}
+
+//go:nosplit
+func assertWorldStopped() {
+}
+
+//go:nosplit
+func assertWorldStoppedOrLockHeld(l *mutex) {
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/lockrank_on.go b/platform/dbops/binaries/go/go/src/runtime/lockrank_on.go
new file mode 100644
index 0000000000000000000000000000000000000000..b1d99997947ae4443e5542dab471f5514a3ff2a3
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/lockrank_on.go
@@ -0,0 +1,391 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.staticlockranking
+
+package runtime
+
+import (
+	"runtime/internal/atomic"
+	"unsafe"
+)
+
+const staticLockRanking = true
+
+// worldIsStopped is accessed atomically to track world-stops. 1 == world
+// stopped.
+var worldIsStopped atomic.Uint32
+
+// lockRankStruct is embedded in mutex.
+type lockRankStruct struct {
+	// static lock ranking of the lock
+	rank lockRank
+	// pad field to make sure lockRankStruct is a multiple of 8 bytes, even on
+	// 32-bit systems.
+	pad int
+}
+
+// lockInit(l *mutex, rank int) sets the rank of lock before it is used.
+// If there is no clear place to initialize a lock, then the rank of a lock can be
+// specified during the lock call itself via lockWithRank(l *mutex, rank int).
+func lockInit(l *mutex, rank lockRank) {
+	l.rank = rank
+}
+
+func getLockRank(l *mutex) lockRank {
+	return l.rank
+}
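Before reading lockWithRank below, it may help to see the shape of the check it delegates to checkRanks: a lock may be acquired only if its rank is allowed to follow the most recently acquired rank under a partial-order table. The standalone sketch below mirrors that logic with invented ranks and an invented table; the runtime's real tables live in lockrank.go and are generated by mklockrank.go.

package main

import "fmt"

// rank and partialOrder are fabricated for illustration only.
type rank int

const (
	rankSched rank = iota
	rankAllg
	rankHchan
)

// partialOrder[r] lists the ranks that may already be held when r is acquired.
var partialOrder = map[rank][]rank{
	rankSched: {},
	rankAllg:  {rankSched},
	rankHchan: {rankSched, rankAllg},
}

// mayAcquire checks only the most recently acquired rank, just as the
// runtime's checkRanks compares prevRank against the new rank.
func mayAcquire(held []rank, r rank) bool {
	if len(held) == 0 {
		return true
	}
	prev := held[len(held)-1]
	for _, allowed := range partialOrder[r] {
		if allowed == prev {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(mayAcquire([]rank{rankSched}, rankAllg))  // true: sched may precede allg
	fmt.Println(mayAcquire([]rank{rankHchan}, rankSched)) // false: ordering violation
}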
+// lockWithRank is like lock(l), but allows the caller to specify a lock rank
+// when acquiring a non-static lock.
+//
+// Note that we need to be careful about stack splits:
+//
+// This function is not nosplit, thus it may split at function entry. This may
+// introduce a new edge in the lock order, but it is no different from any
+// other (nosplit) call before this call (including the call to lock() itself).
+//
+// However, we switch to the systemstack to record the lock held to ensure that
+// we record an accurate lock ordering. e.g., without systemstack, a stack
+// split on entry to lock2() would record stack split locks as taken after l,
+// even though l is not actually locked yet.
+func lockWithRank(l *mutex, rank lockRank) {
+	if l == &debuglock || l == &paniclk || l == &raceFiniLock {
+		// debuglock is only used for println/printlock(). Don't do lock
+		// rank recording for it, since print/println are used when
+		// printing out a lock ordering problem below.
+		//
+		// paniclk is only used for fatal throw/panic. Don't do lock
+		// rank recording for it, since we throw after reporting a
+		// lock ordering problem. Additionally, paniclk may be taken
+		// after effectively any lock (anywhere we might panic), which
+		// the partial order doesn't cover.
+		//
+		// raceFiniLock is held while exiting when running
+		// the race detector. Don't do lock rank recording for it,
+		// since we are exiting.
+		lock2(l)
+		return
+	}
+	if rank == 0 {
+		rank = lockRankLeafRank
+	}
+	gp := getg()
+	// Log the new class.
+	systemstack(func() {
+		i := gp.m.locksHeldLen
+		if i >= len(gp.m.locksHeld) {
+			throw("too many locks held concurrently for rank checking")
+		}
+		gp.m.locksHeld[i].rank = rank
+		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
+		gp.m.locksHeldLen++
+
+		// i is the index of the lock being acquired
+		if i > 0 {
+			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
+		}
+		lock2(l)
+	})
+}
+
+// nosplit to ensure it can be called in as many contexts as possible.
+//
+//go:nosplit
+func printHeldLocks(gp *g) {
+	if gp.m.locksHeldLen == 0 {
+		println("<none>")
+		return
+	}
+
+	for j, held := range gp.m.locksHeld[:gp.m.locksHeldLen] {
+		println(j, ":", held.rank.String(), held.rank, unsafe.Pointer(gp.m.locksHeld[j].lockAddr))
+	}
+}
+
+// acquireLockRank acquires a rank which is not associated with a mutex lock.
+//
+// This function may be called in nosplit context and thus must be nosplit.
+//
+//go:nosplit
+func acquireLockRank(rank lockRank) {
+	gp := getg()
+	// Log the new class. See comment on lockWithRank.
+	systemstack(func() {
+		i := gp.m.locksHeldLen
+		if i >= len(gp.m.locksHeld) {
+			throw("too many locks held concurrently for rank checking")
+		}
+		gp.m.locksHeld[i].rank = rank
+		gp.m.locksHeld[i].lockAddr = 0
+		gp.m.locksHeldLen++
+
+		// i is the index of the lock being acquired
+		if i > 0 {
+			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
+		}
+	})
+}
+
+// checkRanks checks if goroutine g, which has most recently acquired a lock
+// with rank 'prevRank', can now acquire a lock with rank 'rank'.
+//
+//go:systemstack
+func checkRanks(gp *g, prevRank, rank lockRank) {
+	rankOK := false
+	if rank < prevRank {
+		// If rank < prevRank, then we definitely have a rank error
+		rankOK = false
+	} else if rank == lockRankLeafRank {
+		// If new lock is a leaf lock, then the preceding lock can
+		// be anything except another leaf lock.
+		rankOK = prevRank < lockRankLeafRank
+	} else {
+		// We've now verified the total lock ranking, but we
+		// also enforce the partial ordering specified by
+		// lockPartialOrder. Two locks with the same rank
+		// can only be acquired at the same time if explicitly
+		// listed in the lockPartialOrder table.
+		list := lockPartialOrder[rank]
+		for _, entry := range list {
+			if entry == prevRank {
+				rankOK = true
+				break
+			}
+		}
+	}
+	if !rankOK {
+		printlock()
+		println(gp.m.procid, " ======")
+		printHeldLocks(gp)
+		throw("lock ordering problem")
+	}
+}
+
+// See comment on lockWithRank regarding stack splitting.
+func unlockWithRank(l *mutex) { + if l == &debuglock || l == &paniclk || l == &raceFiniLock { + // See comment at beginning of lockWithRank. + unlock2(l) + return + } + gp := getg() + systemstack(func() { + found := false + for i := gp.m.locksHeldLen - 1; i >= 0; i-- { + if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) { + found = true + copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen]) + gp.m.locksHeldLen-- + break + } + } + if !found { + println(gp.m.procid, ":", l.rank.String(), l.rank, l) + throw("unlock without matching lock acquire") + } + unlock2(l) + }) +} + +// releaseLockRank releases a rank which is not associated with a mutex lock +// +// This function may be called in nosplit context and thus must be nosplit. +// +//go:nosplit +func releaseLockRank(rank lockRank) { + gp := getg() + systemstack(func() { + found := false + for i := gp.m.locksHeldLen - 1; i >= 0; i-- { + if gp.m.locksHeld[i].rank == rank && gp.m.locksHeld[i].lockAddr == 0 { + found = true + copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen]) + gp.m.locksHeldLen-- + break + } + } + if !found { + println(gp.m.procid, ":", rank.String(), rank) + throw("lockRank release without matching lockRank acquire") + } + }) +} + +// nosplit because it may be called from nosplit contexts. +// +//go:nosplit +func lockWithRankMayAcquire(l *mutex, rank lockRank) { + gp := getg() + if gp.m.locksHeldLen == 0 { + // No possibility of lock ordering problem if no other locks held + return + } + + systemstack(func() { + i := gp.m.locksHeldLen + if i >= len(gp.m.locksHeld) { + throw("too many locks held concurrently for rank checking") + } + // Temporarily add this lock to the locksHeld list, so + // checkRanks() will print out list, including this lock, if there + // is a lock ordering problem. + gp.m.locksHeld[i].rank = rank + gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l)) + gp.m.locksHeldLen++ + checkRanks(gp, gp.m.locksHeld[i-1].rank, rank) + gp.m.locksHeldLen-- + }) +} + +// nosplit to ensure it can be called in as many contexts as possible. +// +//go:nosplit +func checkLockHeld(gp *g, l *mutex) bool { + for i := gp.m.locksHeldLen - 1; i >= 0; i-- { + if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) { + return true + } + } + return false +} + +// assertLockHeld throws if l is not held by the caller. +// +// nosplit to ensure it can be called in as many contexts as possible. +// +//go:nosplit +func assertLockHeld(l *mutex) { + gp := getg() + + held := checkLockHeld(gp, l) + if held { + return + } + + // Crash from system stack to avoid splits that may cause + // additional issues. + systemstack(func() { + printlock() + print("caller requires lock ", l, " (rank ", l.rank.String(), "), holding:\n") + printHeldLocks(gp) + throw("not holding required lock!") + }) +} + +// assertRankHeld throws if a mutex with rank r is not held by the caller. +// +// This is less precise than assertLockHeld, but can be used in places where a +// pointer to the exact mutex is not available. +// +// nosplit to ensure it can be called in as many contexts as possible. +// +//go:nosplit +func assertRankHeld(r lockRank) { + gp := getg() + + for i := gp.m.locksHeldLen - 1; i >= 0; i-- { + if gp.m.locksHeld[i].rank == r { + return + } + } + + // Crash from system stack to avoid splits that may cause + // additional issues. 
+	systemstack(func() {
+		printlock()
+		print("caller requires lock with rank ", r.String(), ", holding:\n")
+		printHeldLocks(gp)
+		throw("not holding required lock!")
+	})
+}
+
+// worldStopped notes that the world is stopped.
+//
+// Caller must hold worldsema.
+//
+// nosplit to ensure it can be called in as many contexts as possible.
+//
+//go:nosplit
+func worldStopped() {
+	if stopped := worldIsStopped.Add(1); stopped != 1 {
+		systemstack(func() {
+			print("world stop count=", stopped, "\n")
+			throw("recursive world stop")
+		})
+	}
+}
+
+// worldStarted notes that the world is starting.
+//
+// Caller must hold worldsema.
+//
+// nosplit to ensure it can be called in as many contexts as possible.
+//
+//go:nosplit
+func worldStarted() {
+	if stopped := worldIsStopped.Add(-1); stopped != 0 {
+		systemstack(func() {
+			print("world stop count=", stopped, "\n")
+			throw("released non-stopped world stop")
+		})
+	}
+}
+
+// nosplit to ensure it can be called in as many contexts as possible.
+//
+//go:nosplit
+func checkWorldStopped() bool {
+	stopped := worldIsStopped.Load()
+	if stopped > 1 {
+		systemstack(func() {
+			print("inconsistent world stop count=", stopped, "\n")
+			throw("inconsistent world stop count")
+		})
+	}
+
+	return stopped == 1
+}
+
+// assertWorldStopped throws if the world is not stopped. It does not check
+// which M stopped the world.
+//
+// nosplit to ensure it can be called in as many contexts as possible.
+//
+//go:nosplit
+func assertWorldStopped() {
+	if checkWorldStopped() {
+		return
+	}
+
+	throw("world not stopped")
+}
+
+// assertWorldStoppedOrLockHeld throws if the world is not stopped and the
+// passed lock is not held.
+//
+// nosplit to ensure it can be called in as many contexts as possible.
+//
+//go:nosplit
+func assertWorldStoppedOrLockHeld(l *mutex) {
+	if checkWorldStopped() {
+		return
+	}
+
+	gp := getg()
+	held := checkLockHeld(gp, l)
+	if held {
+		return
+	}
+
+	// Crash from system stack to avoid splits that may cause
+	// additional issues.
+	systemstack(func() {
+		printlock()
+		print("caller requires world stop or lock ", l, " (rank ", l.rank.String(), "), holding:\n")
+		println("<no world stop>")
+		printHeldLocks(gp)
+		throw("no world stop or required lock!")
+	})
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/lockrank_test.go b/platform/dbops/binaries/go/go/src/runtime/lockrank_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd99eb4565a2165da2cb96743e585a58339d909e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/lockrank_test.go
@@ -0,0 +1,33 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+	"bytes"
+	"internal/testenv"
+	"os"
+	"os/exec"
+	"testing"
+)
+
+// Test that the generated code for the lock rank graph is up-to-date.
+func TestLockRankGenerated(t *testing.T) {
+	testenv.MustHaveGoRun(t)
+	cmd := testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "run", "mklockrank.go"))
+	want, err := cmd.Output()
+	if err != nil {
+		if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+			t.Fatalf("%v: %v\n%s", cmd, err, ee.Stderr)
+		}
+		t.Fatalf("%v: %v", cmd, err)
+	}
+	got, err := os.ReadFile("lockrank.go")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(want, got) {
+		t.Fatalf("lockrank.go is out of date. 
Please run go generate.") + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/malloc.go b/platform/dbops/binaries/go/go/src/runtime/malloc.go new file mode 100644 index 0000000000000000000000000000000000000000..e2cb2e456e86107d1ef5a7e9bbfc1886806714c9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/malloc.go @@ -0,0 +1,1698 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Memory allocator. +// +// This was originally based on tcmalloc, but has diverged quite a bit. +// http://goog-perftools.sourceforge.net/doc/tcmalloc.html + +// The main allocator works in runs of pages. +// Small allocation sizes (up to and including 32 kB) are +// rounded to one of about 70 size classes, each of which +// has its own free set of objects of exactly that size. +// Any free page of memory can be split into a set of objects +// of one size class, which are then managed using a free bitmap. +// +// The allocator's data structures are: +// +// fixalloc: a free-list allocator for fixed-size off-heap objects, +// used to manage storage used by the allocator. +// mheap: the malloc heap, managed at page (8192-byte) granularity. +// mspan: a run of in-use pages managed by the mheap. +// mcentral: collects all spans of a given size class. +// mcache: a per-P cache of mspans with free space. +// mstats: allocation statistics. +// +// Allocating a small object proceeds up a hierarchy of caches: +// +// 1. Round the size up to one of the small size classes +// and look in the corresponding mspan in this P's mcache. +// Scan the mspan's free bitmap to find a free slot. +// If there is a free slot, allocate it. +// This can all be done without acquiring a lock. +// +// 2. If the mspan has no free slots, obtain a new mspan +// from the mcentral's list of mspans of the required size +// class that have free space. +// Obtaining a whole span amortizes the cost of locking +// the mcentral. +// +// 3. If the mcentral's mspan list is empty, obtain a run +// of pages from the mheap to use for the mspan. +// +// 4. If the mheap is empty or has no page runs large enough, +// allocate a new group of pages (at least 1MB) from the +// operating system. Allocating a large run of pages +// amortizes the cost of talking to the operating system. +// +// Sweeping an mspan and freeing objects on it proceeds up a similar +// hierarchy: +// +// 1. If the mspan is being swept in response to allocation, it +// is returned to the mcache to satisfy the allocation. +// +// 2. Otherwise, if the mspan still has allocated objects in it, +// it is placed on the mcentral free list for the mspan's size +// class. +// +// 3. Otherwise, if all objects in the mspan are free, the mspan's +// pages are returned to the mheap and the mspan is now dead. +// +// Allocating and freeing a large object uses the mheap +// directly, bypassing the mcache and mcentral. +// +// If mspan.needzero is false, then free object slots in the mspan are +// already zeroed. Otherwise if needzero is true, objects are zeroed as +// they are allocated. There are various benefits to delaying zeroing +// this way: +// +// 1. Stack frame allocation can avoid zeroing altogether. +// +// 2. It exhibits better temporal locality, since the program is +// probably about to write to the memory. +// +// 3. We don't zero pages that never get reused. 
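As a rough illustration of step 1 of the small-object path described above (round the request up to a size class, then allocate from a per-P cache), here is a toy model. The class sizes, types, and refill behavior are invented for illustration; the real runtime uses roughly 70 generated size classes and a per-span free bitmap rather than a free list.

package main

import "fmt"

// classes is a fabricated stand-in for the generated size-class table.
var classes = []int{8, 16, 32, 48, 64, 80, 96, 128}

// sizeToClass rounds a request up to the smallest class that fits.
func sizeToClass(n int) int {
	for i, c := range classes {
		if n <= c {
			return i
		}
	}
	return -1 // large object: would bypass the caches and go to the heap
}

type span struct {
	elem int
	free []uintptr // free slots, in lieu of the runtime's allocCache bitmap
}

// cache holds one span per size class, like an mcache.
type cache struct{ spans map[int]*span }

func (c *cache) alloc(n int) (uintptr, bool) {
	i := sizeToClass(n)
	if i < 0 {
		return 0, false
	}
	s := c.spans[i]
	if s == nil || len(s.free) == 0 {
		return 0, false // the runtime would refill from mcentral here
	}
	p := s.free[len(s.free)-1]
	s.free = s.free[:len(s.free)-1]
	return p, true
}

func main() {
	c := &cache{spans: map[int]*span{2: {elem: 32, free: []uintptr{0x1000, 0x1020}}}}
	p, ok := c.alloc(20) // 20 bytes rounds up to the 32-byte class
	fmt.Printf("%#x %v\n", p, ok)
}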
+ +// Virtual memory layout +// +// The heap consists of a set of arenas, which are 64MB on 64-bit and +// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also +// aligned to the arena size. +// +// Each arena has an associated heapArena object that stores the +// metadata for that arena: the heap bitmap for all words in the arena +// and the span map for all pages in the arena. heapArena objects are +// themselves allocated off-heap. +// +// Since arenas are aligned, the address space can be viewed as a +// series of arena frames. The arena map (mheap_.arenas) maps from +// arena frame number to *heapArena, or nil for parts of the address +// space not backed by the Go heap. The arena map is structured as a +// two-level array consisting of a "L1" arena map and many "L2" arena +// maps; however, since arenas are large, on many architectures, the +// arena map consists of a single, large L2 map. +// +// The arena map covers the entire possible address space, allowing +// the Go heap to use any part of the address space. The allocator +// attempts to keep arenas contiguous so that large spans (and hence +// large objects) can cross arenas. + +package runtime + +import ( + "internal/goarch" + "internal/goexperiment" + "internal/goos" + "runtime/internal/atomic" + "runtime/internal/math" + "runtime/internal/sys" + "unsafe" +) + +const ( + maxTinySize = _TinySize + tinySizeClass = _TinySizeClass + maxSmallSize = _MaxSmallSize + + pageShift = _PageShift + pageSize = _PageSize + + _PageSize = 1 << _PageShift + _PageMask = _PageSize - 1 + + // _64bit = 1 on 64-bit systems, 0 on 32-bit systems + _64bit = 1 << (^uintptr(0) >> 63) / 2 + + // Tiny allocator parameters, see "Tiny allocator" comment in malloc.go. + _TinySize = 16 + _TinySizeClass = int8(2) + + _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc + + // Per-P, per order stack segment cache size. + _StackCacheSize = 32 * 1024 + + // Number of orders that get caching. Order 0 is FixedStack + // and each successive order is twice as large. + // We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks + // will be allocated directly. + // Since FixedStack is different on different systems, we + // must vary NumStackOrders to keep the same maximum cached size. + // OS | FixedStack | NumStackOrders + // -----------------+------------+--------------- + // linux/darwin/bsd | 2KB | 4 + // windows/32 | 4KB | 3 + // windows/64 | 8KB | 2 + // plan9 | 4KB | 3 + _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9 + + // heapAddrBits is the number of bits in a heap address. On + // amd64, addresses are sign-extended beyond heapAddrBits. On + // other arches, they are zero-extended. + // + // On most 64-bit platforms, we limit this to 48 bits based on a + // combination of hardware and OS limitations. + // + // amd64 hardware limits addresses to 48 bits, sign-extended + // to 64 bits. Addresses where the top 16 bits are not either + // all 0 or all 1 are "non-canonical" and invalid. Because of + // these "negative" addresses, we offset addresses by 1<<47 + // (arenaBaseOffset) on amd64 before computing indexes into + // the heap arenas index. In 2017, amd64 hardware added + // support for 57 bit addresses; however, currently only Linux + // supports this extension and the kernel will never choose an + // address above 1<<47 unless mmap is called with a hint + // address above 1<<47 (which we never do). + // + // arm64 hardware (as of ARMv8) limits user addresses to 48 + // bits, in the range [0, 1<<48). 
+	//
+	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
+	// in hardware. On Linux, Go leans on stricter OS limits. Based
+	// on Linux's processor.h, the user address space is limited as
+	// follows on 64-bit architectures:
+	//
+	// Architecture  Name              Maximum Value (exclusive)
+	// ---------------------------------------------------------------------
+	// amd64         TASK_SIZE_MAX     0x007ffffffff000 (47 bit addresses)
+	// arm64         TASK_SIZE_64      0x01000000000000 (48 bit addresses)
+	// ppc64{,le}    TASK_SIZE_USER64  0x00400000000000 (46 bit addresses)
+	// mips64{,le}   TASK_SIZE64       0x00010000000000 (40 bit addresses)
+	// s390x         TASK_SIZE         1<<64 (64 bit addresses)
+	//
+	// These limits may increase over time, but are currently at
+	// most 48 bits except on s390x. On all architectures, Linux
+	// starts placing mmap'd regions at addresses that are
+	// significantly below 48 bits, so even if it's possible to
+	// exceed Go's 48 bit limit, it's extremely unlikely in
+	// practice.
+	//
+	// On 32-bit platforms, we accept the full 32-bit address
+	// space because doing so is cheap.
+	// mips32 only has access to the low 2GB of virtual memory, so
+	// we further limit it to 31 bits.
+	//
+	// On ios/arm64, although 64-bit pointers are presumably
+	// available, pointers are truncated to 33 bits in iOS <14.
+	// Furthermore, only the top 4 GiB of the address space are
+	// actually available to the application. In iOS >=14, more
+	// of the address space is available, and the OS can now
+	// provide addresses outside of those 33 bits. Pick 40 bits
+	// as a reasonable balance between address space usage by the
+	// page allocator, and flexibility for what mmap'd regions
+	// we'll accept for the heap. We can't just move to the full
+	// 48 bits because this uses too much address space for older
+	// iOS versions.
+	// TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
+	// to a 48-bit address space like every other arm64 platform.
+	//
+	// WebAssembly currently has a limit of 4GB linear memory.
+	heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64
+
+	// maxAlloc is the maximum size of an allocation. On 64-bit,
+	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
+	// 32-bit, however, this is one less than 1<<32 because the
+	// number of bytes in the address space doesn't actually fit
+	// in a uintptr.
+	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
+
+	// heapArenaBytes is the size of a heap arena. The heap
+	// consists of mappings of size heapArenaBytes, aligned to
+	// heapArenaBytes. The initial heap mapping is one arena.
+	//
+	// This is currently 64MB on 64-bit non-windows and 4MB on
+	// 32-bit and on windows. We use smaller arenas on windows
+	// because all committed memory is charged to the process,
+	// even if it's not touched. Hence, for processes with small
+	// heaps, the mapped arena space needs to be commensurate.
+	// This is particularly important with the race detector,
+	// because it significantly amplifies the cost of committed
+	// memory.
+	heapArenaBytes = 1 << logHeapArenaBytes
+
+	heapArenaWords = heapArenaBytes / goarch.PtrSize
+
+	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
+	// prefer using heapArenaBytes where possible (we need the
+	// constant to compute some other constants).
+	logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
+
+	// heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs.
+	heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)
+
+	pagesPerArena = heapArenaBytes / pageSize
+
+	// arenaL1Bits is the number of bits of the arena number
+	// covered by the first level arena map.
+	//
+	// This number should be small, since the first level arena
+	// map requires PtrSize*(1<<arenaL1Bits) of space in the
+	// binary's BSS. It can be zero, in which case the first level
+	// index is effectively unused. There is a performance benefit
+	// to this, since the generated code can be more efficient,
+	// but comes at the cost of having a large L2 mapping.
+	//
+	// We use the L1 map on 64-bit Windows because the arena size
+	// is small, but the address space is still 48 bits, and
+	// there's a high cost to having a large L2.
+	arenaL1Bits = 6 * (_64bit * goos.IsWindows)
+
+	// arenaL2Bits is the number of bits of the arena number
+	// covered by the second level arena index.
+	//
+	// The size of each arena map allocation is proportional to
+	// 1<<arenaL2Bits, so it's important that this not be too
+	// large. 48 bits leads to 32MB arena index allocations, which
+	// is about the practical threshold.
+	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
+
+	// arenaL1Shift is the number of bits to shift an arena frame
+	// number by to compute an index into the first level arena map.
+	arenaL1Shift = arenaL2Bits
+
+	// arenaBits is the total bits in a combined arena map index.
+	// This is split between the index into the L1 arena map and
+	// the L2 arena map.
+	arenaBits = arenaL1Bits + arenaL2Bits
+
+	// arenaBaseOffset is the pointer value that corresponds to
+	// index 0 in the heap arena map.
+	//
+	// On amd64, the address space is 48 bits, sign extended to 64
+	// bits. This offset lets us handle "negative" addresses (or
+	// high addresses if viewed as unsigned).
+	//
+	// On aix/ppc64, this offset allows us to keep heapAddrBits at
+	// 48. Otherwise, it would be 60 in order to handle mmap addresses
+	// (in range 0x0a00000000000000 - 0x0afffffffffffff). But in this
+	// case, the memory reserved in (s *pageAlloc).init for chunks
+	// is causing important slowdowns.
+	//
+	// On other platforms, the user address space is contiguous
+	// and starts at 0, so no offset is necessary.
+	arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
+	// A typed version of this constant that will make it into DWARF (for viewcore).
+	arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
+
+	// minLegalPointer is the smallest possible legal pointer.
+	// This is the smallest possible architectural page size,
+	// since we assume that the first page is never mapped.
+	//
+	// This should agree with minZeroPage in the compiler.
+	minLegalPointer uintptr = 4096
+
+	// minHeapForMetadataHugePages sets a threshold on when certain kinds of
+	// heap metadata, currently the arenas map L2 entries and page alloc bitmap
+	// mappings, are allowed to be backed by huge pages. If the heap goal ever
+	// exceeds this threshold, then huge pages are enabled.
+	minHeapForMetadataHugePages = 1 << 30
+)
+
+// physPageSize is the size in bytes of the OS's physical pages.
+// Mapping and unmapping operations must be done at multiples of
+// physPageSize.
+//
+// This must be set by the OS init code (typically in osinit) before
+// mallocinit.
+var physPageSize uintptr
+
+// physHugePageSize is the size in bytes of the OS's default physical huge
+// page size whose allocation is opaque to the application. It is assumed
+// and verified to be a power of two.
+//
+// If set, this must be set by the OS init code (typically in osinit) before
+// mallocinit. However, setting it at all is optional, and leaving the default
+// value is always safe (though potentially less efficient).
+//
+// Since physHugePageSize is always assumed to be a power of two,
+// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
+var (
+	physHugePageSize  uintptr
+	physHugePageShift uint
+)
+
+func mallocinit() {
+	if class_to_size[_TinySizeClass] != _TinySize {
+		throw("bad TinySizeClass")
+	}
+
+	if heapArenaBitmapWords&(heapArenaBitmapWords-1) != 0 {
+		// heapBits expects modular arithmetic on bitmap
+		// addresses to work.
+		throw("heapArenaBitmapWords not a power of 2")
+	}
+
+	// Check physPageSize.
+	if physPageSize == 0 {
+		// The OS init code failed to fetch the physical page size.
+		throw("failed to get system page size")
+	}
+	if physPageSize > maxPhysPageSize {
+		print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
+		throw("bad system page size")
+	}
+	if physPageSize < minPhysPageSize {
+		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
+		throw("bad system page size")
+	}
+	if physPageSize&(physPageSize-1) != 0 {
+		print("system page size (", physPageSize, ") must be a power of 2\n")
+		throw("bad system page size")
+	}
+	if physHugePageSize&(physHugePageSize-1) != 0 {
+		print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
+		throw("bad system huge page size")
+	}
+	if physHugePageSize > maxPhysHugePageSize {
+		// physHugePageSize is greater than the maximum supported huge page size.
+		// Don't throw here, like in the other cases, since a system configured
+		// in this way isn't wrong, we just don't have the code to support them.
+		// Instead, silently set the huge page size to zero.
+		physHugePageSize = 0
+	}
+	if physHugePageSize != 0 {
+		// Since physHugePageSize is a power of 2, it suffices to increase
+		// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
+		for 1<<physHugePageShift != physHugePageSize {
+			physHugePageShift++
+		}
+	}
+	if pagesPerArena%pagesPerSpanRoot != 0 {
+		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
+		throw("bad pagesPerSpanRoot")
+	}
+	if pagesPerArena%pagesPerReclaimerChunk != 0 {
+		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
+		throw("bad pagesPerReclaimerChunk")
+	}
+	if goexperiment.AllocHeaders {
+		// Check that the minimum size (exclusive) for a malloc header is also
+		// a size class boundary. This is important to making sure checks align
+		// across different parts of the runtime.
+		minSizeForMallocHeaderIsSizeClass := false
+		for i := 0; i < len(class_to_size); i++ {
+			if minSizeForMallocHeader == uintptr(class_to_size[i]) {
+				minSizeForMallocHeaderIsSizeClass = true
+				break
+			}
+		}
+		if !minSizeForMallocHeaderIsSizeClass {
+			throw("min size of malloc header is not a size class boundary")
+		}
+		// Check that the pointer bitmap for all small sizes without a malloc
+		// header fits in a word.
+		if minSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize {
+			throw("max pointer/scan bitmap size for headerless objects is too large")
+		}
+	}
+
+	if minTagBits > taggedPointerBits {
+		throw("taggedPointerBits too small")
+	}
+
+	// Initialize the heap.
+	mheap_.init()
+	mcache0 = allocmcache()
+	lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
+	lockInit(&profInsertLock, lockRankProfInsert)
+	lockInit(&profBlockLock, lockRankProfBlock)
+	lockInit(&profMemActiveLock, lockRankProfMemActive)
+	for i := range profMemFutureLock {
+		lockInit(&profMemFutureLock[i], lockRankProfMemFuture)
+	}
+	lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
+
+	// Create initial arena growth hints.
+	if goarch.PtrSize == 8 {
+		// On a 64-bit machine, we pick the following hints
+		// because:
+		//
+		// 1. Starting from the middle of the address space
+		// makes it easier to grow out a contiguous range
+		// without running into some other mapping.
+		//
+		// 2. This makes Go heap addresses more easily
+		// recognizable when debugging.
+		//
+		// 3. Stack scanning in gccgo is still conservative,
+		// so it's important that addresses be distinguishable
+		// from other data.
+		//
+		// Starting at 0x00c0 means that the valid memory addresses
+		// will begin 0x00c0, 0x00c1, ...
+		// In little-endian, that's c0 00, c1 00, ... None of those are valid
+		// UTF-8 sequences, and they are otherwise as far away from
+		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
+		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
+		// on OS X during thread allocations. 0x00c0 causes conflicts with
+		// AddressSanitizer which reserves all memory up to 0x0100.
+		// These choices reduce the odds of a conservative garbage collector
+		// not collecting memory because some non-pointer block of memory
+		// had a bit pattern that matched a memory address.
+		//
+		// However, on arm64, we ignore all this advice above and slam the
+		// allocation at 0x40 << 32 because when using 4k pages with 3-level
+		// translation buffers, the user address space is limited to 39 bits.
+		// On ios/arm64, the address space is even smaller.
+		//
+		// On AIX, mmap starts at 0x0A00000000000000 for 64-bit
+		// processes.
+		//
+		// Space mapped for user arenas comes immediately after the range
+		// originally reserved for the regular heap when race mode is not
+		// enabled because user arena chunks can never be used for regular heap
+		// allocations and we want to avoid fragmenting the address space.
+		//
+		// In race mode we have no choice but to just use the same hints because
+		// the race detector requires that the heap be mapped contiguously.
+		for i := 0x7f; i >= 0; i-- {
+			var p uintptr
+			switch {
+			case raceenabled:
+				// The TSAN runtime requires the heap
+				// to be in the range [0x00c000000000,
+				// 0x00e000000000).
+				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
+				if p >= uintptrMask&0x00e000000000 {
+					continue
+				}
+			case GOARCH == "arm64" && GOOS == "ios":
+				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
+			case GOARCH == "arm64":
+				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
+			case GOOS == "aix":
+				if i == 0 {
+					// We don't use addresses directly after 0x0A00000000000000
+					// to avoid collisions with other mmaps done by non-Go programs.
+ continue + } + p = uintptr(i)<<40 | uintptrMask&(0xa0<<52) + default: + p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32) + } + // Switch to generating hints for user arenas if we've gone + // through about half the hints. In race mode, take only about + // a quarter; we don't have very much space to work with. + hintList := &mheap_.arenaHints + if (!raceenabled && i > 0x3f) || (raceenabled && i > 0x5f) { + hintList = &mheap_.userArena.arenaHints + } + hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc()) + hint.addr = p + hint.next, *hintList = *hintList, hint + } + } else { + // On a 32-bit machine, we're much more concerned + // about keeping the usable heap contiguous. + // Hence: + // + // 1. We reserve space for all heapArenas up front so + // they don't get interleaved with the heap. They're + // ~258MB, so this isn't too bad. (We could reserve a + // smaller amount of space up front if this is a + // problem.) + // + // 2. We hint the heap to start right above the end of + // the binary so we have the best chance of keeping it + // contiguous. + // + // 3. We try to stake out a reasonably large initial + // heap reservation. + + const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{}) + meta := uintptr(sysReserve(nil, arenaMetaSize)) + if meta != 0 { + mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true) + } + + // We want to start the arena low, but if we're linked + // against C code, it's possible global constructors + // have called malloc and adjusted the process' brk. + // Query the brk so we can avoid trying to map the + // region over it (which will cause the kernel to put + // the region somewhere else, likely at a high + // address). + procBrk := sbrk0() + + // If we ask for the end of the data segment but the + // operating system requires a little more space + // before we can start allocating, it will give out a + // slightly higher pointer. Except QEMU, which is + // buggy, as usual: it won't adjust the pointer + // upward. So adjust it upward a little bit ourselves: + // 1/4 MB to get away from the running binary image. + p := firstmoduledata.end + if p < procBrk { + p = procBrk + } + if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end { + p = mheap_.heapArenaAlloc.end + } + p = alignUp(p+(256<<10), heapArenaBytes) + // Because we're worried about fragmentation on + // 32-bit, we try to make a large initial reservation. + arenaSizes := []uintptr{ + 512 << 20, + 256 << 20, + 128 << 20, + } + for _, arenaSize := range arenaSizes { + a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes) + if a != nil { + mheap_.arena.init(uintptr(a), size, false) + p = mheap_.arena.end // For hint below + break + } + } + hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc()) + hint.addr = p + hint.next, mheap_.arenaHints = mheap_.arenaHints, hint + + // Place the hint for user arenas just after the large reservation. + // + // While this potentially competes with the hint above, in practice we probably + // aren't going to be getting this far anyway on 32-bit platforms. + userArenaHint := (*arenaHint)(mheap_.arenaHintAlloc.alloc()) + userArenaHint.addr = p + userArenaHint.next, mheap_.userArena.arenaHints = mheap_.userArena.arenaHints, userArenaHint + } + // Initialize the memory limit here because the allocator is going to look at it + // but we haven't called gcinit yet and we're definitely going to allocate memory before then. + gcController.memoryLimit.Store(maxInt64) +} + +// sysAlloc allocates heap arena space for at least n bytes. 
The +// returned pointer is always heapArenaBytes-aligned and backed by +// h.arenas metadata. The returned size is always a multiple of +// heapArenaBytes. sysAlloc returns nil on failure. +// There is no corresponding free function. +// +// hintList is a list of hint addresses for where to allocate new +// heap arenas. It must be non-nil. +// +// register indicates whether the heap arena should be registered +// in allArenas. +// +// sysAlloc returns a memory region in the Reserved state. This region must +// be transitioned to Prepared and then Ready before use. +// +// h must be locked. +func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, register bool) (v unsafe.Pointer, size uintptr) { + assertLockHeld(&h.lock) + + n = alignUp(n, heapArenaBytes) + + if hintList == &h.arenaHints { + // First, try the arena pre-reservation. + // Newly-used mappings are considered released. + // + // Only do this if we're using the regular heap arena hints. + // This behavior is only for the heap. + v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased) + if v != nil { + size = n + goto mapped + } + } + + // Try to grow the heap at a hint address. + for *hintList != nil { + hint := *hintList + p := hint.addr + if hint.down { + p -= n + } + if p+n < p { + // We can't use this, so don't ask. + v = nil + } else if arenaIndex(p+n-1) >= 1<= 1<= 1< 0 { + sysFreeOS(unsafe.Pointer(end), endLen) + } + return unsafe.Pointer(pAligned), size + } +} + +// enableMetadataHugePages enables huge pages for various sources of heap metadata. +// +// A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant +// time, but may take time proportional to the size of the mapped heap beyond that. +// +// This function is idempotent. +// +// The heap lock must not be held over this operation, since it will briefly acquire +// the heap lock. +// +// Must be called on the system stack because it acquires the heap lock. +// +//go:systemstack +func (h *mheap) enableMetadataHugePages() { + // Enable huge pages for page structure. + h.pages.enableChunkHugePages() + + // Grab the lock and set arenasHugePages if it's not. + // + // Once arenasHugePages is set, all new L2 entries will be eligible for + // huge pages. We'll set all the old entries after we release the lock. + lock(&h.lock) + if h.arenasHugePages { + unlock(&h.lock) + return + } + h.arenasHugePages = true + unlock(&h.lock) + + // N.B. The arenas L1 map is quite small on all platforms, so it's fine to + // just iterate over the whole thing. + for i := range h.arenas { + l2 := (*[1 << arenaL2Bits]*heapArena)(atomic.Loadp(unsafe.Pointer(&h.arenas[i]))) + if l2 == nil { + continue + } + sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2)) + } +} + +// base address for all 0-byte allocations +var zerobase uintptr + +// nextFreeFast returns the next free object if one is quickly available. +// Otherwise it returns 0. +func nextFreeFast(s *mspan) gclinkptr { + theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache? + if theBit < 64 { + result := s.freeindex + uint16(theBit) + if result < s.nelems { + freeidx := result + 1 + if freeidx%64 == 0 && freeidx != s.nelems { + return 0 + } + s.allocCache >>= uint(theBit + 1) + s.freeindex = freeidx + s.allocCount++ + return gclinkptr(uintptr(result)*s.elemsize + s.base()) + } + } + return 0 +} + +// nextFree returns the next free object from the cached span if one is available. 
+// Otherwise it refills the cache with a span with an available object and +// returns that object along with a flag indicating that this was a heavy +// weight allocation. If it is a heavy weight allocation the caller must +// determine whether a new GC cycle needs to be started or if the GC is active +// whether this goroutine needs to assist the GC. +// +// Must run in a non-preemptible context since otherwise the owner of +// c could change. +func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) { + s = c.alloc[spc] + shouldhelpgc = false + freeIndex := s.nextFreeIndex() + if freeIndex == s.nelems { + // The span is full. + if s.allocCount != s.nelems { + println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems) + throw("s.allocCount != s.nelems && freeIndex == s.nelems") + } + c.refill(spc) + shouldhelpgc = true + s = c.alloc[spc] + + freeIndex = s.nextFreeIndex() + } + + if freeIndex >= s.nelems { + throw("freeIndex is not valid") + } + + v = gclinkptr(uintptr(freeIndex)*s.elemsize + s.base()) + s.allocCount++ + if s.allocCount > s.nelems { + println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems) + throw("s.allocCount > s.nelems") + } + return +} + +// Allocate an object of size bytes. +// Small objects are allocated from the per-P cache's free lists. +// Large objects (> 32 kB) are allocated straight from the heap. +func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + + if size == 0 { + return unsafe.Pointer(&zerobase) + } + + // It's possible for any malloc to trigger sweeping, which may in + // turn queue finalizers. Record this dynamic lock edge. + lockRankMayQueueFinalizer() + + userSize := size + if asanenabled { + // Refer to ASAN runtime library, the malloc() function allocates extra memory, + // the redzone, around the user requested memory region. And the redzones are marked + // as unaddressable. We perform the same operations in Go to detect the overflows or + // underflows. + size += computeRZlog(size) + } + + if debug.malloc { + if debug.sbrk != 0 { + align := uintptr(16) + if typ != nil { + // TODO(austin): This should be just + // align = uintptr(typ.align) + // but that's only 4 on 32-bit platforms, + // even if there's a uint64 field in typ (see #599). + // This causes 64-bit atomic accesses to panic. + // Hence, we use stricter alignment that matches + // the normal allocator better. + if size&7 == 0 { + align = 8 + } else if size&3 == 0 { + align = 4 + } else if size&1 == 0 { + align = 2 + } else { + align = 1 + } + } + return persistentalloc(size, align, &memstats.other_sys) + } + + if inittrace.active && inittrace.id == getg().goid { + // Init functions are executed sequentially in a single goroutine. + inittrace.allocs += 1 + } + } + + // assistG is the G to charge for this allocation, or nil if + // GC is not currently active. + assistG := deductAssistCredit(size) + + // Set mp.mallocing to keep from being preempted by GC. 
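+ // (A nonzero mp.mallocing also makes reentry detectable: the checks
+ // just below turn a recursive malloc, or a malloc from the signal
+ // stack, into an immediate throw rather than a silent deadlock or
+ // heap corruption.)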
+ mp := acquirem()
+ if mp.mallocing != 0 {
+ throw("malloc deadlock")
+ }
+ if mp.gsignal == getg() {
+ throw("malloc during signal")
+ }
+ mp.mallocing = 1
+
+ shouldhelpgc := false
+ dataSize := userSize
+ c := getMCache(mp)
+ if c == nil {
+ throw("mallocgc called without a P or outside bootstrapping")
+ }
+ var span *mspan
+ var header **_type
+ var x unsafe.Pointer
+ noscan := typ == nil || typ.PtrBytes == 0
+ // In some cases block zeroing can profitably (for latency reduction purposes)
+ // be delayed until preemption is possible; delayedZeroing tracks that state.
+ delayedZeroing := false
+ // Determine if it's a 'small' object that goes into a size-classed span.
+ //
+ // Note: This comparison looks a little strange, but it exists to smooth out
+ // the crossover between the largest size class and large objects that have
+ // their own spans. The small window of object sizes between maxSmallSize-mallocHeaderSize
+ // and maxSmallSize will be considered large, even though they might fit in
+ // a size class. In practice this is completely fine, since the largest small
+ // size class has a single object in it already, precisely to make the transition
+ // to large objects smooth.
+ if size <= maxSmallSize-mallocHeaderSize {
+ if noscan && size < maxTinySize {
+ // Tiny allocator.
+ //
+ // The tiny allocator combines several tiny allocation requests
+ // into a single memory block. The resulting memory block
+ // is freed when all subobjects are unreachable. The subobjects
+ // must be noscan (contain no pointers); this ensures that
+ // the amount of potentially wasted memory is bounded.
+ //
+ // The size of the memory block used for combining (maxTinySize) is tunable.
+ // The current setting is 16 bytes, which gives at most 2x worst-case memory
+ // wastage (when all but one subobject are unreachable).
+ // 8 bytes would result in no wastage at all, but provides fewer
+ // opportunities for combining.
+ // 32 bytes provides more opportunities for combining,
+ // but can lead to 4x worst-case wastage.
+ // The best-case saving is 8x regardless of block size.
+ //
+ // Objects obtained from the tiny allocator must not be freed explicitly,
+ // so when an object will be freed explicitly, we ensure that
+ // its size >= maxTinySize.
+ //
+ // SetFinalizer has a special case for objects potentially coming
+ // from the tiny allocator; in that case it allows setting finalizers
+ // for an inner byte of a memory block.
+ //
+ // The main targets of the tiny allocator are small strings and
+ // standalone escaping variables. On a JSON benchmark
+ // the allocator reduces the number of allocations by ~12% and
+ // the heap size by ~20%.
+ off := c.tinyoffset
+ // Align tiny pointer for required (conservative) alignment.
+ if size&7 == 0 {
+ off = alignUp(off, 8)
+ } else if goarch.PtrSize == 4 && size == 12 {
+ // Conservatively align 12-byte objects to 8 bytes on 32-bit
+ // systems so that an object whose first field is a 64-bit
+ // value is aligned to 8 bytes and does not cause a fault on
+ // atomic access. See issue 37262.
+ // TODO(mknyszek): Remove this workaround if/when issue 36606
+ // is resolved.
+ off = alignUp(off, 8)
+ } else if size&3 == 0 {
+ off = alignUp(off, 4)
+ } else if size&1 == 0 {
+ off = alignUp(off, 2)
+ }
+ if off+size <= maxTinySize && c.tiny != 0 {
+ // The object fits into the existing tiny block.
+ x = unsafe.Pointer(c.tiny + off)
+ c.tinyoffset = off + size
+ c.tinyAllocs++
+ mp.mallocing = 0
+ releasem(mp)
+ return x
+ }
+ // Allocate a new maxTinySize block.
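+ // (Worked example of the packing above, assuming an existing block:
+ // a 1-byte request takes [0,1) and sets tinyoffset=1; a following
+ // 8-byte request aligns up to off=8 and takes [8,16), filling the
+ // block; a further 4-byte request no longer fits and falls through
+ // to the refill below.)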
+ span = c.alloc[tinySpanClass] + v := nextFreeFast(span) + if v == 0 { + v, span, shouldhelpgc = c.nextFree(tinySpanClass) + } + x = unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + // See if we need to replace the existing tiny block with the new one + // based on amount of remaining free space. + if !raceenabled && (size < c.tinyoffset || c.tiny == 0) { + // Note: disabled when race detector is on, see comment near end of this function. + c.tiny = uintptr(x) + c.tinyoffset = size + } + size = maxTinySize + } else { + hasHeader := !noscan && !heapBitsInSpan(size) + if goexperiment.AllocHeaders && hasHeader { + size += mallocHeaderSize + } + var sizeclass uint8 + if size <= smallSizeMax-8 { + sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)] + } else { + sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)] + } + size = uintptr(class_to_size[sizeclass]) + spc := makeSpanClass(sizeclass, noscan) + span = c.alloc[spc] + v := nextFreeFast(span) + if v == 0 { + v, span, shouldhelpgc = c.nextFree(spc) + } + x = unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, size) + } + if goexperiment.AllocHeaders && hasHeader { + header = (**_type)(x) + x = add(x, mallocHeaderSize) + size -= mallocHeaderSize + } + } + } else { + shouldhelpgc = true + // For large allocations, keep track of zeroed state so that + // bulk zeroing can be happen later in a preemptible context. + span = c.allocLarge(size, noscan) + span.freeindex = 1 + span.allocCount = 1 + size = span.elemsize + x = unsafe.Pointer(span.base()) + if needzero && span.needzero != 0 { + if noscan { + delayedZeroing = true + } else { + memclrNoHeapPointers(x, size) + } + } + if goexperiment.AllocHeaders && !noscan { + header = &span.largeType + } + } + if !noscan { + if goexperiment.AllocHeaders { + c.scanAlloc += heapSetType(uintptr(x), dataSize, typ, header, span) + } else { + var scanSize uintptr + heapBitsSetType(uintptr(x), size, dataSize, typ) + if dataSize > typ.Size_ { + // Array allocation. If there are any + // pointers, GC has to scan to the last + // element. + if typ.PtrBytes != 0 { + scanSize = dataSize - typ.Size_ + typ.PtrBytes + } + } else { + scanSize = typ.PtrBytes + } + c.scanAlloc += scanSize + } + } + + // Ensure that the stores above that initialize x to + // type-safe memory and set the heap bits occur before + // the caller can make x observable to the garbage + // collector. Otherwise, on weakly ordered machines, + // the garbage collector could follow a pointer to x, + // but see uninitialized memory or stale heap bits. + publicationBarrier() + // As x and the heap bits are initialized, update + // freeIndexForScan now so x is seen by the GC + // (including conservative scan) as an allocated object. + // While this pointer can't escape into user code as a + // _live_ pointer until we return, conservative scanning + // may find a dead pointer that happens to point into this + // object. Delaying this update until now ensures that + // conservative scanning considers this pointer dead until + // this point. + span.freeIndexForScan = span.freeindex + + // Allocate black during GC. + // All slots hold nil so no scanning is needed. + // This may be racing with GC so do it atomically if there can be + // a race marking the bit. 
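+ // (Concretely: a GC cycle in progress must treat the new object as
+ // reachable even though nothing has scanned it yet, so it is marked
+ // black immediately; gcmarknewobject below does that marking.)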
+ if gcphase != _GCoff { + gcmarknewobject(span, uintptr(x)) + } + + if raceenabled { + racemalloc(x, size) + } + + if msanenabled { + msanmalloc(x, size) + } + + if asanenabled { + // We should only read/write the memory with the size asked by the user. + // The rest of the allocated memory should be poisoned, so that we can report + // errors when accessing poisoned memory. + // The allocated memory is larger than required userSize, it will also include + // redzone and some other padding bytes. + rzBeg := unsafe.Add(x, userSize) + asanpoison(rzBeg, size-userSize) + asanunpoison(x, userSize) + } + + // If !goexperiment.AllocHeaders, "size" doesn't include the + // allocation header, so use span.elemsize as the "full" size + // for various computations below. + // + // TODO(mknyszek): We should really count the header as part + // of gc_sys or something, but it's risky to change the + // accounting so much right now. Just pretend its internal + // fragmentation and match the GC's accounting by using the + // whole allocation slot. + fullSize := size + if goexperiment.AllocHeaders { + fullSize = span.elemsize + } + if rate := MemProfileRate; rate > 0 { + // Note cache c only valid while m acquired; see #47302 + // + // N.B. Use the full size because that matches how the GC + // will update the mem profile on the "free" side. + if rate != 1 && fullSize < c.nextSample { + c.nextSample -= fullSize + } else { + profilealloc(mp, x, fullSize) + } + } + mp.mallocing = 0 + releasem(mp) + + // Pointerfree data can be zeroed late in a context where preemption can occur. + // x will keep the memory alive. + if delayedZeroing { + if !noscan { + throw("delayed zeroing on data that may contain pointers") + } + if goexperiment.AllocHeaders && header != nil { + throw("unexpected malloc header in delayed zeroing of large object") + } + // N.B. size == fullSize always in this case. + memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302 + } + + if debug.malloc { + if debug.allocfreetrace != 0 { + tracealloc(x, size, typ) + } + + if inittrace.active && inittrace.id == getg().goid { + // Init functions are executed sequentially in a single goroutine. + inittrace.bytes += uint64(fullSize) + } + } + + if assistG != nil { + // Account for internal fragmentation in the assist + // debt now that we know it. + // + // N.B. Use the full size because that's how the rest + // of the GC accounts for bytes marked. + assistG.gcAssistBytes -= int64(fullSize - dataSize) + } + + if shouldhelpgc { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled && noscan && dataSize < maxTinySize { + // Pad tinysize allocations so they are aligned with the end + // of the tinyalloc region. This ensures that any arithmetic + // that goes off the top end of the object will be detectable + // by checkptr (issue 38872). + // Note that we disable tinyalloc when raceenabled for this to work. + // TODO: This padding is only performed when the race detector + // is enabled. It would be nice to enable it if any package + // was compiled with checkptr, but there's no easy way to + // detect that (especially at compile time). + // TODO: enable this padding for all allocations, not just + // tinyalloc ones. It's tricky because of pointer maps. + // Maybe just all noscan objects? + x = add(x, size-dataSize) + } + + return x +} + +// deductAssistCredit reduces the current G's assist credit +// by size bytes, and assists the GC if necessary. +// +// Caller must be preemptible. 
+// +// Returns the G for which the assist credit was accounted. +func deductAssistCredit(size uintptr) *g { + var assistG *g + if gcBlackenEnabled != 0 { + // Charge the current user G for this allocation. + assistG = getg() + if assistG.m.curg != nil { + assistG = assistG.m.curg + } + // Charge the allocation against the G. We'll account + // for internal fragmentation at the end of mallocgc. + assistG.gcAssistBytes -= int64(size) + + if assistG.gcAssistBytes < 0 { + // This G is in debt. Assist the GC to correct + // this before allocating. This must happen + // before disabling preemption. + gcAssistAlloc(assistG) + } + } + return assistG +} + +// memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers +// on chunks of the buffer to be zeroed, with opportunities for preemption +// along the way. memclrNoHeapPointers contains no safepoints and also +// cannot be preemptively scheduled, so this provides a still-efficient +// block copy that can also be preempted on a reasonable granularity. +// +// Use this with care; if the data being cleared is tagged to contain +// pointers, this allows the GC to run before it is all cleared. +func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) { + v := uintptr(x) + // got this from benchmarking. 128k is too small, 512k is too large. + const chunkBytes = 256 * 1024 + vsize := v + size + for voff := v; voff < vsize; voff = voff + chunkBytes { + if getg().preempt { + // may hold locks, e.g., profiling + goschedguarded() + } + // clear min(avail, lump) bytes + n := vsize - voff + if n > chunkBytes { + n = chunkBytes + } + memclrNoHeapPointers(unsafe.Pointer(voff), n) + } +} + +// implementation of new builtin +// compiler (both frontend and SSA backend) knows the signature +// of this function. +func newobject(typ *_type) unsafe.Pointer { + return mallocgc(typ.Size_, typ, true) +} + +//go:linkname reflect_unsafe_New reflect.unsafe_New +func reflect_unsafe_New(typ *_type) unsafe.Pointer { + return mallocgc(typ.Size_, typ, true) +} + +//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New +func reflectlite_unsafe_New(typ *_type) unsafe.Pointer { + return mallocgc(typ.Size_, typ, true) +} + +// newarray allocates an array of n elements of type typ. +func newarray(typ *_type, n int) unsafe.Pointer { + if n == 1 { + return mallocgc(typ.Size_, typ, true) + } + mem, overflow := math.MulUintptr(typ.Size_, uintptr(n)) + if overflow || mem > maxAlloc || n < 0 { + panic(plainError("runtime: allocation size out of range")) + } + return mallocgc(mem, typ, true) +} + +//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray +func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer { + return newarray(typ, n) +} + +func profilealloc(mp *m, x unsafe.Pointer, size uintptr) { + c := getMCache(mp) + if c == nil { + throw("profilealloc called without a P or outside bootstrapping") + } + c.nextSample = nextSample() + mProf_Malloc(x, size) +} + +// nextSample returns the next sampling point for heap profiling. The goal is +// to sample allocations on average every MemProfileRate bytes, but with a +// completely random distribution over the allocation timeline; this +// corresponds to a Poisson process with parameter MemProfileRate. In Poisson +// processes, the distance between two samples follows the exponential +// distribution (exp(MemProfileRate)), so the best return value is a random +// number taken from an exponential distribution whose mean is MemProfileRate. 
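+// For intuition: at the default runtime.MemProfileRate of 512 KiB,
+// gaps between samples average 512 KiB but vary widely; for an
+// exponential distribution about 63% of gaps are shorter than the
+// mean and a few are several times longer. (The 512 KiB figure is
+// the documented default, cited here only as an illustration.)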
+func nextSample() uintptr {
+ if MemProfileRate == 1 {
+ // Callers assign our return value to
+ // mcache.next_sample, but next_sample is not used
+ // when the rate is 1. So avoid the math below and
+ // just return something.
+ return 0
+ }
+ if GOOS == "plan9" {
+ // Plan 9 doesn't support floating point in the note handler.
+ if gp := getg(); gp == gp.m.gsignal {
+ return nextSampleNoFP()
+ }
+ }
+
+ return uintptr(fastexprand(MemProfileRate))
+}
+
+// fastexprand returns a random number from an exponential distribution with
+// the specified mean.
+func fastexprand(mean int) int32 {
+ // Avoid overflow. The maximum possible step is
+ // -ln(1/(1<<randomBitCount)) * mean, approximately 18 * mean (26 * ln 2).
+ switch {
+ case mean > 0x7000000:
+ mean = 0x7000000
+ case mean == 0:
+ return 0
+ }
+
+ // Take a random sample of the exponential distribution exp(-x/mean).
+ // The probability density function is (1/mean)*exp(-x/mean), so the CDF is
+ // p = 1 - exp(-x/mean), so
+ // q = 1 - p == exp(-x/mean)
+ // log_e(q) = -x/mean
+ // x = -log_e(q) * mean
+ // x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency
+ const randomBitCount = 26
+ q := cheaprandn(1<<randomBitCount) + 1
+ qlog := fastlog2(float64(q)) - randomBitCount
+ if qlog > 0 {
+ qlog = 0
+ }
+ const minusLog2 = -0.6931471805599453 // -ln(2)
+ return int32(qlog*(minusLog2*float64(mean))) + 1
+}
+
+// nextSampleNoFP is similar to nextSample, but uses older,
+// simpler code to avoid floating point.
+func nextSampleNoFP() uintptr {
+ // Set the first allocation sample size.
+ rate := MemProfileRate
+ if rate > 0x3fffffff { // make 2*rate not overflow
+ rate = 0x3fffffff
+ }
+ if rate != 0 {
+ return uintptr(cheaprandn(uint32(2 * rate)))
+ }
+ return 0
+}
+
+type persistentAlloc struct {
+ base *notInHeap
+ off uintptr
+}
+
+var globalAlloc struct {
+ mutex
+ persistentAlloc
+}
+
+// persistentChunkSize is the number of bytes we allocate when we grow
+// a persistentAlloc.
+const persistentChunkSize = 256 << 10
+
+// persistentChunks is a list of all the persistent chunks we have
+// allocated. The list is maintained through the first word in the
+// persistent chunk. This is updated atomically.
+var persistentChunks *notInHeap
+
+// Wrapper around sysAlloc that can allocate small chunks.
+// There is no associated free operation.
+// Intended for things like function/type/debug-related persistent data.
+// If align is 0, uses default align (currently 8).
+// The returned memory will be zeroed.
+// sysStat must be non-nil.
+//
+// Consider marking persistentalloc'd types not in heap by embedding
+// runtime/internal/sys.NotInHeap.
+func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
+ var p *notInHeap
+ systemstack(func() {
+ p = persistentalloc1(size, align, sysStat)
+ })
+ return unsafe.Pointer(p)
+}
+
+// Must run on system stack because stack growth can (re)invoke it.
+// See issue 9174.
+// +//go:systemstack +func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap { + const ( + maxBlock = 64 << 10 // VM reservation granularity is 64K on windows + ) + + if size == 0 { + throw("persistentalloc: size == 0") + } + if align != 0 { + if align&(align-1) != 0 { + throw("persistentalloc: align is not a power of 2") + } + if align > _PageSize { + throw("persistentalloc: align is too large") + } + } else { + align = 8 + } + + if size >= maxBlock { + return (*notInHeap)(sysAlloc(size, sysStat)) + } + + mp := acquirem() + var persistent *persistentAlloc + if mp != nil && mp.p != 0 { + persistent = &mp.p.ptr().palloc + } else { + lock(&globalAlloc.mutex) + persistent = &globalAlloc.persistentAlloc + } + persistent.off = alignUp(persistent.off, align) + if persistent.off+size > persistentChunkSize || persistent.base == nil { + persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys)) + if persistent.base == nil { + if persistent == &globalAlloc.persistentAlloc { + unlock(&globalAlloc.mutex) + } + throw("runtime: cannot allocate memory") + } + + // Add the new chunk to the persistentChunks list. + for { + chunks := uintptr(unsafe.Pointer(persistentChunks)) + *(*uintptr)(unsafe.Pointer(persistent.base)) = chunks + if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) { + break + } + } + persistent.off = alignUp(goarch.PtrSize, align) + } + p := persistent.base.add(persistent.off) + persistent.off += size + releasem(mp) + if persistent == &globalAlloc.persistentAlloc { + unlock(&globalAlloc.mutex) + } + + if sysStat != &memstats.other_sys { + sysStat.add(int64(size)) + memstats.other_sys.add(-int64(size)) + } + return p +} + +// inPersistentAlloc reports whether p points to memory allocated by +// persistentalloc. This must be nosplit because it is called by the +// cgo checker code, which is called by the write barrier code. +// +//go:nosplit +func inPersistentAlloc(p uintptr) bool { + chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks))) + for chunk != 0 { + if p >= chunk && p < chunk+persistentChunkSize { + return true + } + chunk = *(*uintptr)(unsafe.Pointer(chunk)) + } + return false +} + +// linearAlloc is a simple linear allocator that pre-reserves a region +// of memory and then optionally maps that region into the Ready state +// as needed. +// +// The caller is responsible for locking. +type linearAlloc struct { + next uintptr // next free byte + mapped uintptr // one byte past end of mapped space + end uintptr // end of reserved space + + mapMemory bool // transition memory from Reserved to Ready if true +} + +func (l *linearAlloc) init(base, size uintptr, mapMemory bool) { + if base+size < base { + // Chop off the last byte. The runtime isn't prepared + // to deal with situations where the bounds could overflow. + // Leave that memory reserved, though, so we don't map it + // later. + size -= 1 + } + l.next, l.mapped = base, base + l.end = base + size + l.mapMemory = mapMemory +} + +func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer { + p := alignUp(l.next, align) + if p+size > l.end { + return nil + } + l.next = p + size + if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped { + if l.mapMemory { + // Transition from Reserved to Prepared to Ready. 
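+ // (sysMap performs the Reserved->Prepared step and sysUsed the
+ // Prepared->Ready step; only the newly crossed page-aligned span
+ // [l.mapped, pEnd) is touched, so the bump allocator maps memory
+ // at page granularity as it advances.)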
+ n := pEnd - l.mapped + sysMap(unsafe.Pointer(l.mapped), n, sysStat) + sysUsed(unsafe.Pointer(l.mapped), n, n) + } + l.mapped = pEnd + } + return unsafe.Pointer(p) +} + +// notInHeap is off-heap memory allocated by a lower-level allocator +// like sysAlloc or persistentAlloc. +// +// In general, it's better to use real types which embed +// runtime/internal/sys.NotInHeap, but this serves as a generic type +// for situations where that isn't possible (like in the allocators). +// +// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc? +type notInHeap struct{ _ sys.NotInHeap } + +func (p *notInHeap) add(bytes uintptr) *notInHeap { + return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes)) +} + +// computeRZlog computes the size of the redzone. +// Refer to the implementation of the compiler-rt. +func computeRZlog(userSize uintptr) uintptr { + switch { + case userSize <= (64 - 16): + return 16 << 0 + case userSize <= (128 - 32): + return 16 << 1 + case userSize <= (512 - 64): + return 16 << 2 + case userSize <= (4096 - 128): + return 16 << 3 + case userSize <= (1<<14)-256: + return 16 << 4 + case userSize <= (1<<15)-512: + return 16 << 5 + case userSize <= (1<<16)-1024: + return 16 << 6 + default: + return 16 << 7 + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/malloc_test.go b/platform/dbops/binaries/go/go/src/runtime/malloc_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8c162fbea4b158a2f3f69cc6b59b74f0a5d2a1b3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/malloc_test.go @@ -0,0 +1,449 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "flag" + "fmt" + "internal/race" + "internal/testenv" + "os" + "os/exec" + "reflect" + "runtime" + . "runtime" + "strings" + "sync/atomic" + "testing" + "time" + "unsafe" +) + +var testMemStatsCount int + +func TestMemStats(t *testing.T) { + testMemStatsCount++ + + // Make sure there's at least one forced GC. + GC() + + // Test that MemStats has sane values. + st := new(MemStats) + ReadMemStats(st) + + nz := func(x any) error { + if x != reflect.Zero(reflect.TypeOf(x)).Interface() { + return nil + } + return fmt.Errorf("zero value") + } + le := func(thresh float64) func(any) error { + return func(x any) error { + // These sanity tests aren't necessarily valid + // with high -test.count values, so only run + // them once. + if testMemStatsCount > 1 { + return nil + } + + if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh { + return nil + } + return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh) + } + } + eq := func(x any) func(any) error { + return func(y any) error { + if x == y { + return nil + } + return fmt.Errorf("want %v", x) + } + } + // Of the uint fields, HeapReleased, HeapIdle can be 0. + // PauseTotalNs can be 0 if timer resolution is poor. 
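+ // A stand-alone sketch of this kind of sanity check, for readers
+ // unfamiliar with ReadMemStats (illustrative only, not part of the
+ // test logic below):
+ //
+ //	var ms runtime.MemStats
+ //	runtime.ReadMemStats(&ms)
+ //	if ms.HeapAlloc == 0 || ms.HeapAlloc > 1e10 {
+ //		// zero or implausibly large heap: something is wrong
+ //	}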
+ fields := map[string][]func(any) error{ + "Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)}, + "Lookups": {eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)}, + "HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)}, + "HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)}, + "StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)}, + "MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)}, + "MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)}, + "BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)}, + "NextGC": {nz, le(1e10)}, "LastGC": {nz}, + "PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil, + "NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)}, + "GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)}, + "BySize": nil, + } + + rst := reflect.ValueOf(st).Elem() + for i := 0; i < rst.Type().NumField(); i++ { + name, val := rst.Type().Field(i).Name, rst.Field(i).Interface() + checks, ok := fields[name] + if !ok { + t.Errorf("unknown MemStats field %s", name) + continue + } + for _, check := range checks { + if err := check(val); err != nil { + t.Errorf("%s = %v: %s", name, val, err) + } + } + } + + if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+ + st.BuckHashSys+st.GCSys+st.OtherSys { + t.Fatalf("Bad sys value: %+v", *st) + } + + if st.HeapIdle+st.HeapInuse != st.HeapSys { + t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys) + } + + if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe { + t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe) + } + + var pauseTotal uint64 + for _, pause := range st.PauseNs { + pauseTotal += pause + } + if int(st.NumGC) < len(st.PauseNs) { + // We have all pauses, so this should be exact. + if st.PauseTotalNs != pauseTotal { + t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal) + } + for i := int(st.NumGC); i < len(st.PauseNs); i++ { + if st.PauseNs[i] != 0 { + t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st) + } + if st.PauseEnd[i] != 0 { + t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st) + } + } + } else { + if st.PauseTotalNs < pauseTotal { + t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal) + } + } + + if st.NumForcedGC > st.NumGC { + t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC) + } +} + +func TestStringConcatenationAllocs(t *testing.T) { + n := testing.AllocsPerRun(1e3, func() { + b := make([]byte, 10) + for i := 0; i < 10; i++ { + b[i] = byte(i) + '0' + } + s := "foo" + string(b) + if want := "foo0123456789"; s != want { + t.Fatalf("want %v, got %v", want, s) + } + }) + // Only string concatenation allocates. 
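+ // (The byte slice does not escape and stays on the stack, so the
+ // single expected allocation is the concatenated string itself.)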
+ if n != 1 { + t.Fatalf("want 1 allocation, got %v", n) + } +} + +func TestTinyAlloc(t *testing.T) { + if runtime.Raceenabled { + t.Skip("tinyalloc suppressed when running in race mode") + } + const N = 16 + var v [N]unsafe.Pointer + for i := range v { + v[i] = unsafe.Pointer(new(byte)) + } + + chunks := make(map[uintptr]bool, N) + for _, p := range v { + chunks[uintptr(p)&^7] = true + } + + if len(chunks) == N { + t.Fatal("no bytes allocated within the same 8-byte chunk") + } +} + +type obj12 struct { + a uint64 + b uint32 +} + +func TestTinyAllocIssue37262(t *testing.T) { + if runtime.Raceenabled { + t.Skip("tinyalloc suppressed when running in race mode") + } + // Try to cause an alignment access fault + // by atomically accessing the first 64-bit + // value of a tiny-allocated object. + // See issue 37262 for details. + + // GC twice, once to reach a stable heap state + // and again to make sure we finish the sweep phase. + runtime.GC() + runtime.GC() + + // Disable preemption so we stay on one P's tiny allocator and + // nothing else allocates from it. + runtime.Acquirem() + + // Make 1-byte allocations until we get a fresh tiny slot. + aligned := false + for i := 0; i < 16; i++ { + x := runtime.Escape(new(byte)) + if uintptr(unsafe.Pointer(x))&0xf == 0xf { + aligned = true + break + } + } + if !aligned { + runtime.Releasem() + t.Fatal("unable to get a fresh tiny slot") + } + + // Create a 4-byte object so that the current + // tiny slot is partially filled. + runtime.Escape(new(uint32)) + + // Create a 12-byte object, which fits into the + // tiny slot. If it actually gets place there, + // then the field "a" will be improperly aligned + // for atomic access on 32-bit architectures. + // This won't be true if issue 36606 gets resolved. + tinyObj12 := runtime.Escape(new(obj12)) + + // Try to atomically access "x.a". + atomic.StoreUint64(&tinyObj12.a, 10) + + runtime.Releasem() +} + +func TestPageCacheLeak(t *testing.T) { + defer GOMAXPROCS(GOMAXPROCS(1)) + leaked := PageCachePagesLeaked() + if leaked != 0 { + t.Fatalf("found %d leaked pages in page caches", leaked) + } +} + +func TestPhysicalMemoryUtilization(t *testing.T) { + got := runTestProg(t, "testprog", "GCPhys") + want := "OK\n" + if got != want { + t.Fatalf("expected %q, but got %q", want, got) + } +} + +func TestScavengedBitsCleared(t *testing.T) { + var mismatches [128]BitsMismatch + if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok { + t.Errorf("uncleared scavenged bits") + for _, m := range mismatches[:n] { + t.Logf("\t@ address 0x%x", m.Base) + t.Logf("\t| got: %064b", m.Got) + t.Logf("\t| want: %064b", m.Want) + } + t.FailNow() + } +} + +type acLink struct { + x [1 << 20]byte +} + +var arenaCollisionSink []*acLink + +func TestArenaCollision(t *testing.T) { + testenv.MustHaveExec(t) + + // Test that mheap.sysAlloc handles collisions with other + // memory mappings. + if os.Getenv("TEST_ARENA_COLLISION") != "1" { + cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=^TestArenaCollision$", "-test.v")) + cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1") + out, err := cmd.CombinedOutput() + if race.Enabled { + // This test runs the runtime out of hint + // addresses, so it will start mapping the + // heap wherever it can. The race detector + // doesn't support this, so look for the + // expected failure. 
+ if want := "too many address space collisions"; !strings.Contains(string(out), want) { + t.Fatalf("want %q, got:\n%s", want, string(out)) + } + } else if !strings.Contains(string(out), "PASS\n") || err != nil { + t.Fatalf("%s\n(exit status %v)", string(out), err) + } + return + } + disallowed := [][2]uintptr{} + // Drop all but the next 3 hints. 64-bit has a lot of hints, + // so it would take a lot of memory to go through all of them. + KeepNArenaHints(3) + // Consume these 3 hints and force the runtime to find some + // fallback hints. + for i := 0; i < 5; i++ { + // Reserve memory at the next hint so it can't be used + // for the heap. + start, end, ok := MapNextArenaHint() + if !ok { + t.Skipf("failed to reserve memory at next arena hint [%#x, %#x)", start, end) + } + t.Logf("reserved [%#x, %#x)", start, end) + disallowed = append(disallowed, [2]uintptr{start, end}) + // Allocate until the runtime tries to use the hint we + // just mapped over. + hint := GetNextArenaHint() + for GetNextArenaHint() == hint { + ac := new(acLink) + arenaCollisionSink = append(arenaCollisionSink, ac) + // The allocation must not have fallen into + // one of the reserved regions. + p := uintptr(unsafe.Pointer(ac)) + for _, d := range disallowed { + if d[0] <= p && p < d[1] { + t.Fatalf("allocation %#x in reserved region [%#x, %#x)", p, d[0], d[1]) + } + } + } + } +} + +func BenchmarkMalloc8(b *testing.B) { + for i := 0; i < b.N; i++ { + p := new(int64) + Escape(p) + } +} + +func BenchmarkMalloc16(b *testing.B) { + for i := 0; i < b.N; i++ { + p := new([2]int64) + Escape(p) + } +} + +func BenchmarkMallocTypeInfo8(b *testing.B) { + for i := 0; i < b.N; i++ { + p := new(struct { + p [8 / unsafe.Sizeof(uintptr(0))]*int + }) + Escape(p) + } +} + +func BenchmarkMallocTypeInfo16(b *testing.B) { + for i := 0; i < b.N; i++ { + p := new(struct { + p [16 / unsafe.Sizeof(uintptr(0))]*int + }) + Escape(p) + } +} + +type LargeStruct struct { + x [16][]byte +} + +func BenchmarkMallocLargeStruct(b *testing.B) { + for i := 0; i < b.N; i++ { + p := make([]LargeStruct, 2) + Escape(p) + } +} + +var n = flag.Int("n", 1000, "number of goroutines") + +func BenchmarkGoroutineSelect(b *testing.B) { + quit := make(chan struct{}) + read := func(ch chan struct{}) { + for { + select { + case _, ok := <-ch: + if !ok { + return + } + case <-quit: + return + } + } + } + benchHelper(b, *n, read) +} + +func BenchmarkGoroutineBlocking(b *testing.B) { + read := func(ch chan struct{}) { + for { + if _, ok := <-ch; !ok { + return + } + } + } + benchHelper(b, *n, read) +} + +func BenchmarkGoroutineForRange(b *testing.B) { + read := func(ch chan struct{}) { + for range ch { + } + } + benchHelper(b, *n, read) +} + +func benchHelper(b *testing.B, n int, read func(chan struct{})) { + m := make([]chan struct{}, n) + for i := range m { + m[i] = make(chan struct{}, 1) + go read(m[i]) + } + b.StopTimer() + b.ResetTimer() + GC() + + for i := 0; i < b.N; i++ { + for _, ch := range m { + if ch != nil { + ch <- struct{}{} + } + } + time.Sleep(10 * time.Millisecond) + b.StartTimer() + GC() + b.StopTimer() + } + + for _, ch := range m { + close(ch) + } + time.Sleep(10 * time.Millisecond) +} + +func BenchmarkGoroutineIdle(b *testing.B) { + quit := make(chan struct{}) + fn := func() { + <-quit + } + for i := 0; i < *n; i++ { + go fn() + } + + GC() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + GC() + } + + b.StopTimer() + close(quit) + time.Sleep(10 * time.Millisecond) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/map.go 
b/platform/dbops/binaries/go/go/src/runtime/map.go new file mode 100644 index 0000000000000000000000000000000000000000..cd3f838fa19c6b0f61b7c1a3926e2716649ac19e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/map.go @@ -0,0 +1,1732 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// This file contains the implementation of Go's map type. +// +// A map is just a hash table. The data is arranged +// into an array of buckets. Each bucket contains up to +// 8 key/elem pairs. The low-order bits of the hash are +// used to select a bucket. Each bucket contains a few +// high-order bits of each hash to distinguish the entries +// within a single bucket. +// +// If more than 8 keys hash to a bucket, we chain on +// extra buckets. +// +// When the hashtable grows, we allocate a new array +// of buckets twice as big. Buckets are incrementally +// copied from the old bucket array to the new bucket array. +// +// Map iterators walk through the array of buckets and +// return the keys in walk order (bucket #, then overflow +// chain order, then bucket index). To maintain iteration +// semantics, we never move keys within their bucket (if +// we did, keys might be returned 0 or 2 times). When +// growing the table, iterators remain iterating through the +// old table and must check the new table if the bucket +// they are iterating through has been moved ("evacuated") +// to the new table. + +// Picking loadFactor: too large and we have lots of overflow +// buckets, too small and we waste a lot of space. I wrote +// a simple program to check some stats for different loads: +// (64-bit, 8 byte keys and elems) +// loadFactor %overflow bytes/entry hitprobe missprobe +// 4.00 2.13 20.77 3.00 4.00 +// 4.50 4.05 17.30 3.25 4.50 +// 5.00 6.85 14.77 3.50 5.00 +// 5.50 10.55 12.94 3.75 5.50 +// 6.00 15.27 11.67 4.00 6.00 +// 6.50 20.90 10.79 4.25 6.50 +// 7.00 27.14 10.15 4.50 7.00 +// 7.50 34.03 9.73 4.75 7.50 +// 8.00 41.10 9.40 5.00 8.00 +// +// %overflow = percentage of buckets which have an overflow bucket +// bytes/entry = overhead bytes used per key/elem pair +// hitprobe = # of entries to check when looking up a present key +// missprobe = # of entries to check when looking up an absent key +// +// Keep in mind this data is for maximally loaded tables, i.e. just +// before the table grows. Typical tables will be somewhat less loaded. + +import ( + "internal/abi" + "internal/goarch" + "runtime/internal/atomic" + "runtime/internal/math" + "unsafe" +) + +const ( + // Maximum number of key/elem pairs a bucket can hold. + bucketCntBits = abi.MapBucketCountBits + bucketCnt = abi.MapBucketCount + + // Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full) + // Because of minimum alignment rules, bucketCnt is known to be at least 8. + // Represent as loadFactorNum/loadFactorDen, to allow integer math. + loadFactorDen = 2 + loadFactorNum = loadFactorDen * bucketCnt * 13 / 16 + + // Maximum key or elem size to keep inline (instead of mallocing per element). + // Must fit in a uint8. + // Fast versions cannot handle big elems - the cutoff size for + // fast versions in cmd/compile/internal/gc/walk.go must be at most this elem. + maxKeySize = abi.MapMaxKeyBytes + maxElemSize = abi.MapMaxElemBytes + + // data offset should be the size of the bmap struct, but needs to be + // aligned correctly. 
For amd64p32 this means 64-bit alignment + // even though pointers are 32 bit. + dataOffset = unsafe.Offsetof(struct { + b bmap + v int64 + }{}.v) + + // Possible tophash values. We reserve a few possibilities for special marks. + // Each bucket (including its overflow buckets, if any) will have either all or none of its + // entries in the evacuated* states (except during the evacuate() method, which only happens + // during map writes and thus no one else can observe the map during that time). + emptyRest = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows. + emptyOne = 1 // this cell is empty + evacuatedX = 2 // key/elem is valid. Entry has been evacuated to first half of larger table. + evacuatedY = 3 // same as above, but evacuated to second half of larger table. + evacuatedEmpty = 4 // cell is empty, bucket is evacuated. + minTopHash = 5 // minimum tophash for a normal filled cell. + + // flags + iterator = 1 // there may be an iterator using buckets + oldIterator = 2 // there may be an iterator using oldbuckets + hashWriting = 4 // a goroutine is writing to the map + sameSizeGrow = 8 // the current map growth is to a new map of the same size + + // sentinel bucket ID for iterator checks + noCheck = 1<<(8*goarch.PtrSize) - 1 +) + +// isEmpty reports whether the given tophash array entry represents an empty bucket entry. +func isEmpty(x uint8) bool { + return x <= emptyOne +} + +// A header for a Go map. +type hmap struct { + // Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go. + // Make sure this stays in sync with the compiler's definition. + count int // # live cells == size of map. Must be first (used by len() builtin) + flags uint8 + B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items) + noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details + hash0 uint32 // hash seed + + buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0. + oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing + nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated) + + extra *mapextra // optional fields +} + +// mapextra holds fields that are not present on all maps. +type mapextra struct { + // If both key and elem do not contain pointers and are inline, then we mark bucket + // type as containing no pointers. This avoids scanning such maps. + // However, bmap.overflow is a pointer. In order to keep overflow buckets + // alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow. + // overflow and oldoverflow are only used if key and elem do not contain pointers. + // overflow contains overflow buckets for hmap.buckets. + // oldoverflow contains overflow buckets for hmap.oldbuckets. + // The indirection allows to store a pointer to the slice in hiter. + overflow *[]*bmap + oldoverflow *[]*bmap + + // nextOverflow holds a pointer to a free overflow bucket. + nextOverflow *bmap +} + +// A bucket for a Go map. +type bmap struct { + // tophash generally contains the top byte of the hash value + // for each key in this bucket. If tophash[0] < minTopHash, + // tophash[0] is a bucket evacuation state instead. + tophash [bucketCnt]uint8 + // Followed by bucketCnt keys and then bucketCnt elems. 
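+ // For a map[int64]int8 bucket that means, in order (a sketch, with
+ // sizes for 64-bit platforms):
+ //
+ //	tophash  [8]uint8  // 8 bytes
+ //	keys     [8]int64  // 64 bytes
+ //	elems    [8]int8   // 8 bytes
+ //	overflow *bmap     // trailing pointer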
+ // NOTE: packing all the keys together and then all the elems together makes the + // code a bit more complicated than alternating key/elem/key/elem/... but it allows + // us to eliminate padding which would be needed for, e.g., map[int64]int8. + // Followed by an overflow pointer. +} + +// A hash iteration structure. +// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go +// and reflect/value.go to match the layout of this structure. +type hiter struct { + key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go). + elem unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go). + t *maptype + h *hmap + buckets unsafe.Pointer // bucket ptr at hash_iter initialization time + bptr *bmap // current bucket + overflow *[]*bmap // keeps overflow buckets of hmap.buckets alive + oldoverflow *[]*bmap // keeps overflow buckets of hmap.oldbuckets alive + startBucket uintptr // bucket iteration started at + offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1) + wrapped bool // already wrapped around from end of bucket array to beginning + B uint8 + i uint8 + bucket uintptr + checkBucket uintptr +} + +// bucketShift returns 1<> (goarch.PtrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + return top +} + +func evacuated(b *bmap) bool { + h := b.tophash[0] + return h > emptyOne && h < minTopHash +} + +func (b *bmap) overflow(t *maptype) *bmap { + return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) +} + +func (b *bmap) setoverflow(t *maptype, ovf *bmap) { + *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf +} + +func (b *bmap) keys() unsafe.Pointer { + return add(unsafe.Pointer(b), dataOffset) +} + +// incrnoverflow increments h.noverflow. +// noverflow counts the number of overflow buckets. +// This is used to trigger same-size map growth. +// See also tooManyOverflowBuckets. +// To keep hmap small, noverflow is a uint16. +// When there are few buckets, noverflow is an exact count. +// When there are many buckets, noverflow is an approximate count. +func (h *hmap) incrnoverflow() { + // We trigger same-size map growth if there are + // as many overflow buckets as buckets. + // We need to be able to count to 1< maxAlloc { + hint = 0 + } + + // initialize Hmap + if h == nil { + h = new(hmap) + } + h.hash0 = uint32(rand()) + + // Find the size parameter B which will hold the requested # of elements. + // For hint < 0 overLoadFactor returns false since hint < bucketCnt. + B := uint8(0) + for overLoadFactor(hint, B) { + B++ + } + h.B = B + + // allocate initial hash table + // if B == 0, the buckets field is allocated lazily later (in mapassign) + // If hint is large zeroing this memory could take a while. + if h.B != 0 { + var nextOverflow *bmap + h.buckets, nextOverflow = makeBucketArray(t, h.B, nil) + if nextOverflow != nil { + h.extra = new(mapextra) + h.extra.nextOverflow = nextOverflow + } + } + + return h +} + +// makeBucketArray initializes a backing array for map buckets. +// 1<= 4 { + // Add on the estimated number of overflow buckets + // required to insert the median number of elements + // used with this value of b. 
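+ // (For example, b == 5 gives base == bucketShift(5) == 32 buckets
+ // and adds an estimated bucketShift(5-4) == 2 overflow buckets,
+ // before the roundupsize adjustment below.)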
+ nbuckets += bucketShift(b - 4) + sz := t.Bucket.Size_ * nbuckets + up := roundupsize(sz, t.Bucket.PtrBytes == 0) + if up != sz { + nbuckets = up / t.Bucket.Size_ + } + } + + if dirtyalloc == nil { + buckets = newarray(t.Bucket, int(nbuckets)) + } else { + // dirtyalloc was previously generated by + // the above newarray(t.Bucket, int(nbuckets)) + // but may not be empty. + buckets = dirtyalloc + size := t.Bucket.Size_ * nbuckets + if t.Bucket.PtrBytes != 0 { + memclrHasPointers(buckets, size) + } else { + memclrNoHeapPointers(buckets, size) + } + } + + if base != nbuckets { + // We preallocated some overflow buckets. + // To keep the overhead of tracking these overflow buckets to a minimum, + // we use the convention that if a preallocated overflow bucket's overflow + // pointer is nil, then there are more available by bumping the pointer. + // We need a safe non-nil pointer for the last overflow bucket; just use buckets. + nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize))) + last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize))) + last.setoverflow(t, (*bmap)(buckets)) + } + return buckets, nextOverflow +} + +// mapaccess1 returns a pointer to h[key]. Never returns nil, instead +// it will return a reference to the zero object for the elem type if +// the key is not in the map. +// NOTE: The returned pointer may keep the whole map live, so don't +// hold onto it for very long. +func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { + if raceenabled && h != nil { + callerpc := getcallerpc() + pc := abi.FuncPCABIInternal(mapaccess1) + racereadpc(unsafe.Pointer(h), callerpc, pc) + raceReadObjectPC(t.Key, key, callerpc, pc) + } + if msanenabled && h != nil { + msanread(key, t.Key.Size_) + } + if asanenabled && h != nil { + asanread(key, t.Key.Size_) + } + if h == nil || h.count == 0 { + if err := mapKeyError(t, key); err != nil { + panic(err) // see issue 23734 + } + return unsafe.Pointer(&zeroVal[0]) + } + if h.flags&hashWriting != 0 { + fatal("concurrent map read and map write") + } + hash := t.Hasher(key, uintptr(h.hash0)) + m := bucketMask(h.B) + b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) + if c := h.oldbuckets; c != nil { + if !h.sameSizeGrow() { + // There used to be half as many buckets; mask down one more power of two. 
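+ // (Example: after growing from 4 to 8 buckets, the new mask m is
+ // 0b111; m >> 1 == 0b011 recovers the index the key had in the old,
+ // half-sized bucket array.)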
+ m >>= 1 + } + oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) + if !evacuated(oldb) { + b = oldb + } + } + top := tophash(hash) +bucketloop: + for ; b != nil; b = b.overflow(t) { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + if b.tophash[i] == emptyRest { + break bucketloop + } + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize)) + if t.IndirectKey() { + k = *((*unsafe.Pointer)(k)) + } + if t.Key.Equal(key, k) { + e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) + if t.IndirectElem() { + e = *((*unsafe.Pointer)(e)) + } + return e + } + } + } + return unsafe.Pointer(&zeroVal[0]) +} + +func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) { + if raceenabled && h != nil { + callerpc := getcallerpc() + pc := abi.FuncPCABIInternal(mapaccess2) + racereadpc(unsafe.Pointer(h), callerpc, pc) + raceReadObjectPC(t.Key, key, callerpc, pc) + } + if msanenabled && h != nil { + msanread(key, t.Key.Size_) + } + if asanenabled && h != nil { + asanread(key, t.Key.Size_) + } + if h == nil || h.count == 0 { + if err := mapKeyError(t, key); err != nil { + panic(err) // see issue 23734 + } + return unsafe.Pointer(&zeroVal[0]), false + } + if h.flags&hashWriting != 0 { + fatal("concurrent map read and map write") + } + hash := t.Hasher(key, uintptr(h.hash0)) + m := bucketMask(h.B) + b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) + if c := h.oldbuckets; c != nil { + if !h.sameSizeGrow() { + // There used to be half as many buckets; mask down one more power of two. + m >>= 1 + } + oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) + if !evacuated(oldb) { + b = oldb + } + } + top := tophash(hash) +bucketloop: + for ; b != nil; b = b.overflow(t) { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + if b.tophash[i] == emptyRest { + break bucketloop + } + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize)) + if t.IndirectKey() { + k = *((*unsafe.Pointer)(k)) + } + if t.Key.Equal(key, k) { + e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) + if t.IndirectElem() { + e = *((*unsafe.Pointer)(e)) + } + return e, true + } + } + } + return unsafe.Pointer(&zeroVal[0]), false +} + +// returns both key and elem. Used by map iterator. +func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) { + if h == nil || h.count == 0 { + return nil, nil + } + hash := t.Hasher(key, uintptr(h.hash0)) + m := bucketMask(h.B) + b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) + if c := h.oldbuckets; c != nil { + if !h.sameSizeGrow() { + // There used to be half as many buckets; mask down one more power of two. 
+ m >>= 1 + } + oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) + if !evacuated(oldb) { + b = oldb + } + } + top := tophash(hash) +bucketloop: + for ; b != nil; b = b.overflow(t) { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + if b.tophash[i] == emptyRest { + break bucketloop + } + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize)) + if t.IndirectKey() { + k = *((*unsafe.Pointer)(k)) + } + if t.Key.Equal(key, k) { + e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) + if t.IndirectElem() { + e = *((*unsafe.Pointer)(e)) + } + return k, e + } + } + } + return nil, nil +} + +func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer { + e := mapaccess1(t, h, key) + if e == unsafe.Pointer(&zeroVal[0]) { + return zero + } + return e +} + +func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) { + e := mapaccess1(t, h, key) + if e == unsafe.Pointer(&zeroVal[0]) { + return zero, false + } + return e, true +} + +// Like mapaccess, but allocates a slot for the key if it is not present in the map. +func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { + if h == nil { + panic(plainError("assignment to entry in nil map")) + } + if raceenabled { + callerpc := getcallerpc() + pc := abi.FuncPCABIInternal(mapassign) + racewritepc(unsafe.Pointer(h), callerpc, pc) + raceReadObjectPC(t.Key, key, callerpc, pc) + } + if msanenabled { + msanread(key, t.Key.Size_) + } + if asanenabled { + asanread(key, t.Key.Size_) + } + if h.flags&hashWriting != 0 { + fatal("concurrent map writes") + } + hash := t.Hasher(key, uintptr(h.hash0)) + + // Set hashWriting after calling t.hasher, since t.hasher may panic, + // in which case we have not actually done a write. + h.flags ^= hashWriting + + if h.buckets == nil { + h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1) + } + +again: + bucket := hash & bucketMask(h.B) + if h.growing() { + growWork(t, h, bucket) + } + b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) + top := tophash(hash) + + var inserti *uint8 + var insertk unsafe.Pointer + var elem unsafe.Pointer +bucketloop: + for { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + if isEmpty(b.tophash[i]) && inserti == nil { + inserti = &b.tophash[i] + insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize)) + elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) + } + if b.tophash[i] == emptyRest { + break bucketloop + } + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize)) + if t.IndirectKey() { + k = *((*unsafe.Pointer)(k)) + } + if !t.Key.Equal(key, k) { + continue + } + // already have a mapping for key. Update it. + if t.NeedKeyUpdate() { + typedmemmove(t.Key, k, key) + } + elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) + goto done + } + ovf := b.overflow(t) + if ovf == nil { + break + } + b = ovf + } + + // Did not find mapping for key. Allocate new cell & add entry. + + // If we hit the max load factor or we have too many overflow buckets, + // and we're not already in the middle of growing, start growing. 
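+ // (In this implementation the load-factor constants work out to an average
+ // of 6.5 entries per 8-slot bucket before a doubling is forced.)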
+ if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { + hashGrow(t, h) + goto again // Growing the table invalidates everything, so try again + } + + if inserti == nil { + // The current bucket and all the overflow buckets connected to it are full, allocate a new one. + newb := h.newoverflow(t, b) + inserti = &newb.tophash[0] + insertk = add(unsafe.Pointer(newb), dataOffset) + elem = add(insertk, bucketCnt*uintptr(t.KeySize)) + } + + // store new key/elem at insert position + if t.IndirectKey() { + kmem := newobject(t.Key) + *(*unsafe.Pointer)(insertk) = kmem + insertk = kmem + } + if t.IndirectElem() { + vmem := newobject(t.Elem) + *(*unsafe.Pointer)(elem) = vmem + } + typedmemmove(t.Key, insertk, key) + *inserti = top + h.count++ + +done: + if h.flags&hashWriting == 0 { + fatal("concurrent map writes") + } + h.flags &^= hashWriting + if t.IndirectElem() { + elem = *((*unsafe.Pointer)(elem)) + } + return elem +} + +func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { + if raceenabled && h != nil { + callerpc := getcallerpc() + pc := abi.FuncPCABIInternal(mapdelete) + racewritepc(unsafe.Pointer(h), callerpc, pc) + raceReadObjectPC(t.Key, key, callerpc, pc) + } + if msanenabled && h != nil { + msanread(key, t.Key.Size_) + } + if asanenabled && h != nil { + asanread(key, t.Key.Size_) + } + if h == nil || h.count == 0 { + if err := mapKeyError(t, key); err != nil { + panic(err) // see issue 23734 + } + return + } + if h.flags&hashWriting != 0 { + fatal("concurrent map writes") + } + + hash := t.Hasher(key, uintptr(h.hash0)) + + // Set hashWriting after calling t.hasher, since t.hasher may panic, + // in which case we have not actually done a write (delete). + h.flags ^= hashWriting + + bucket := hash & bucketMask(h.B) + if h.growing() { + growWork(t, h, bucket) + } + b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) + bOrig := b + top := tophash(hash) +search: + for ; b != nil; b = b.overflow(t) { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + if b.tophash[i] == emptyRest { + break search + } + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize)) + k2 := k + if t.IndirectKey() { + k2 = *((*unsafe.Pointer)(k2)) + } + if !t.Key.Equal(key, k2) { + continue + } + // Only clear key if there are pointers in it. + if t.IndirectKey() { + *(*unsafe.Pointer)(k) = nil + } else if t.Key.PtrBytes != 0 { + memclrHasPointers(k, t.Key.Size_) + } + e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) + if t.IndirectElem() { + *(*unsafe.Pointer)(e) = nil + } else if t.Elem.PtrBytes != 0 { + memclrHasPointers(e, t.Elem.Size_) + } else { + memclrNoHeapPointers(e, t.Elem.Size_) + } + b.tophash[i] = emptyOne + // If the bucket now ends in a bunch of emptyOne states, + // change those to emptyRest states. + // It would be nice to make this a separate function, but + // for loops are not currently inlineable. + if i == bucketCnt-1 { + if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { + goto notLast + } + } else { + if b.tophash[i+1] != emptyRest { + goto notLast + } + } + for { + b.tophash[i] = emptyRest + if i == 0 { + if b == bOrig { + break // beginning of initial bucket, we're done. + } + // Find previous bucket, continue at its last entry. 
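+ // Buckets form a singly linked overflow chain, so "previous" means
+ // walking forward from bOrig until we find the bucket whose overflow
+ // pointer is the one we just finished.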
+ c := b + for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { + } + i = bucketCnt - 1 + } else { + i-- + } + if b.tophash[i] != emptyOne { + break + } + } + notLast: + h.count-- + // Reset the hash seed to make it more difficult for attackers to + // repeatedly trigger hash collisions. See issue 25237. + if h.count == 0 { + h.hash0 = uint32(rand()) + } + break search + } + } + + if h.flags&hashWriting == 0 { + fatal("concurrent map writes") + } + h.flags &^= hashWriting +} + +// mapiterinit initializes the hiter struct used for ranging over maps. +// The hiter struct pointed to by 'it' is allocated on the stack +// by the compilers order pass or on the heap by reflect_mapiterinit. +// Both need to have zeroed hiter since the struct contains pointers. +func mapiterinit(t *maptype, h *hmap, it *hiter) { + if raceenabled && h != nil { + callerpc := getcallerpc() + racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit)) + } + + it.t = t + if h == nil || h.count == 0 { + return + } + + if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 { + throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go + } + it.h = h + + // grab snapshot of bucket state + it.B = h.B + it.buckets = h.buckets + if t.Bucket.PtrBytes == 0 { + // Allocate the current slice and remember pointers to both current and old. + // This preserves all relevant overflow buckets alive even if + // the table grows and/or overflow buckets are added to the table + // while we are iterating. + h.createOverflow() + it.overflow = h.extra.overflow + it.oldoverflow = h.extra.oldoverflow + } + + // decide where to start + r := uintptr(rand()) + it.startBucket = r & bucketMask(h.B) + it.offset = uint8(r >> h.B & (bucketCnt - 1)) + + // iterator state + it.bucket = it.startBucket + + // Remember we have an iterator. + // Can run concurrently with another mapiterinit(). + if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator { + atomic.Or8(&h.flags, iterator|oldIterator) + } + + mapiternext(it) +} + +func mapiternext(it *hiter) { + h := it.h + if raceenabled { + callerpc := getcallerpc() + racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext)) + } + if h.flags&hashWriting != 0 { + fatal("concurrent map iteration and map write") + } + t := it.t + bucket := it.bucket + b := it.bptr + i := it.i + checkBucket := it.checkBucket + +next: + if b == nil { + if bucket == it.startBucket && it.wrapped { + // end of iteration + it.key = nil + it.elem = nil + return + } + if h.growing() && it.B == h.B { + // Iterator was started in the middle of a grow, and the grow isn't done yet. + // If the bucket we're looking at hasn't been filled in yet (i.e. the old + // bucket hasn't been evacuated) then we need to iterate through the old + // bucket and only return the ones that will be migrated to this bucket. 
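+ // E.g. growing from 8 to 16 buckets, new buckets 3 and 11 both draw from
+ // old bucket 3; when visiting new bucket 11 we walk old bucket 3 and keep
+ // only the keys whose hash sends them to 11.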
+ oldbucket := bucket & it.h.oldbucketmask() + b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))) + if !evacuated(b) { + checkBucket = bucket + } else { + b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize))) + checkBucket = noCheck + } + } else { + b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize))) + checkBucket = noCheck + } + bucket++ + if bucket == bucketShift(it.B) { + bucket = 0 + it.wrapped = true + } + i = 0 + } + for ; i < bucketCnt; i++ { + offi := (i + it.offset) & (bucketCnt - 1) + if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty { + // TODO: emptyRest is hard to use here, as we start iterating + // in the middle of a bucket. It's feasible, just tricky. + continue + } + k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize)) + if t.IndirectKey() { + k = *((*unsafe.Pointer)(k)) + } + e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize)) + if checkBucket != noCheck && !h.sameSizeGrow() { + // Special case: iterator was started during a grow to a larger size + // and the grow is not done yet. We're working on a bucket whose + // oldbucket has not been evacuated yet. Or at least, it wasn't + // evacuated when we started the bucket. So we're iterating + // through the oldbucket, skipping any keys that will go + // to the other new bucket (each oldbucket expands to two + // buckets during a grow). + if t.ReflexiveKey() || t.Key.Equal(k, k) { + // If the item in the oldbucket is not destined for + // the current new bucket in the iteration, skip it. + hash := t.Hasher(k, uintptr(h.hash0)) + if hash&bucketMask(it.B) != checkBucket { + continue + } + } else { + // Hash isn't repeatable if k != k (NaNs). We need a + // repeatable and randomish choice of which direction + // to send NaNs during evacuation. We'll use the low + // bit of tophash to decide which way NaNs go. + // NOTE: this case is why we need two evacuate tophash + // values, evacuatedX and evacuatedY, that differ in + // their low bit. + if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) { + continue + } + } + } + if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) || + !(t.ReflexiveKey() || t.Key.Equal(k, k)) { + // This is the golden data, we can return it. + // OR + // key!=key, so the entry can't be deleted or updated, so we can just return it. + // That's lucky for us because when key!=key we can't look it up successfully. + it.key = k + if t.IndirectElem() { + e = *((*unsafe.Pointer)(e)) + } + it.elem = e + } else { + // The hash table has grown since the iterator was started. + // The golden data for this key is now somewhere else. + // Check the current hash table for the data. + // This code handles the case where the key + // has been deleted, updated, or deleted and reinserted. + // NOTE: we need to regrab the key as it has potentially been + // updated to an equal() but not identical key (e.g. +0.0 vs -0.0). + rk, re := mapaccessK(t, h, k) + if rk == nil { + continue // key has been deleted + } + it.key = rk + it.elem = re + } + it.bucket = bucket + if it.bptr != b { // avoid unnecessary write barrier; see issue 14921 + it.bptr = b + } + it.i = i + 1 + it.checkBucket = checkBucket + return + } + b = b.overflow(t) + i = 0 + goto next +} + +// mapclear deletes all keys from a map. 
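+// The bucket array is kept (at its current size) and reinitialized; only the
+// entries, the grow state, and the hash seed are reset.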
+func mapclear(t *maptype, h *hmap) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ pc := abi.FuncPCABIInternal(mapclear)
+ racewritepc(unsafe.Pointer(h), callerpc, pc)
+ }
+
+ if h == nil || h.count == 0 {
+ return
+ }
+
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map writes")
+ }
+
+ h.flags ^= hashWriting
+
+ // Mark buckets empty, so existing iterators can be terminated, see issue #59411.
+ markBucketsEmpty := func(bucket unsafe.Pointer, mask uintptr) {
+ for i := uintptr(0); i <= mask; i++ {
+ b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
+ for ; b != nil; b = b.overflow(t) {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ b.tophash[i] = emptyRest
+ }
+ }
+ }
+ }
+ markBucketsEmpty(h.buckets, bucketMask(h.B))
+ if oldBuckets := h.oldbuckets; oldBuckets != nil {
+ markBucketsEmpty(oldBuckets, h.oldbucketmask())
+ }
+
+ h.flags &^= sameSizeGrow
+ h.oldbuckets = nil
+ h.nevacuate = 0
+ h.noverflow = 0
+ h.count = 0
+
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ h.hash0 = uint32(rand())
+
+ // Keep the mapextra allocation but clear any extra information.
+ if h.extra != nil {
+ *h.extra = mapextra{}
+ }
+
+ // makeBucketArray clears the memory pointed to by h.buckets
+ // and recovers any overflow buckets by generating them
+ // as if h.buckets was newly alloced.
+ _, nextOverflow := makeBucketArray(t, h.B, h.buckets)
+ if nextOverflow != nil {
+ // If overflow buckets are created then h.extra
+ // will have been allocated during initial bucket creation.
+ h.extra.nextOverflow = nextOverflow
+ }
+
+ if h.flags&hashWriting == 0 {
+ fatal("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+}
+
+func hashGrow(t *maptype, h *hmap) {
+ // If we've hit the load factor, get bigger.
+ // Otherwise, there are too many overflow buckets,
+ // so keep the same number of buckets and "grow" laterally.
+ bigger := uint8(1)
+ if !overLoadFactor(h.count+1, h.B) {
+ bigger = 0
+ h.flags |= sameSizeGrow
+ }
+ oldbuckets := h.buckets
+ newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
+
+ flags := h.flags &^ (iterator | oldIterator)
+ if h.flags&iterator != 0 {
+ flags |= oldIterator
+ }
+ // commit the grow (atomic wrt gc)
+ h.B += bigger
+ h.flags = flags
+ h.oldbuckets = oldbuckets
+ h.buckets = newbuckets
+ h.nevacuate = 0
+ h.noverflow = 0
+
+ if h.extra != nil && h.extra.overflow != nil {
+ // Promote current overflow buckets to the old generation.
+ if h.extra.oldoverflow != nil {
+ throw("oldoverflow is not nil")
+ }
+ h.extra.oldoverflow = h.extra.overflow
+ h.extra.overflow = nil
+ }
+ if nextOverflow != nil {
+ if h.extra == nil {
+ h.extra = new(mapextra)
+ }
+ h.extra.nextOverflow = nextOverflow
+ }
+
+ // the actual copying of the hash table data is done incrementally
+ // by growWork() and evacuate().
+}
+
+// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
+func overLoadFactor(count int, B uint8) bool {
+ return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
+}
+
+// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
+// Note that most of these overflow buckets must be in sparse use;
+// if use was dense, then we'd have already triggered regular map growth.
+func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
+ // If the threshold is too low, we do extraneous work.
+ // If the threshold is too high, maps that grow and shrink can hold on to
+ // lots of unused memory. "too many" means (approximately) as many overflow
+ // buckets as regular buckets.
+ //
+ // See incrnoverflow for more details.
+ if B > 15 {
+ B = 15
+ }
+ // The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
+ return noverflow >= uint16(1)<<(B&15)
+}
+
+// growing reports whether h is growing. The growth may be to the same size or bigger.
+func (h *hmap) growing() bool {
+ return h.oldbuckets != nil
+}
+
+// sameSizeGrow reports whether the current growth is to a map of the same size.
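+// Same-size grows are triggered by tooManyOverflowBuckets above: entries are
+// re-packed into a fresh bucket array of equal length to shed overflow buckets.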
+func (h *hmap) sameSizeGrow() bool { + return h.flags&sameSizeGrow != 0 +} + +// noldbuckets calculates the number of buckets prior to the current map growth. +func (h *hmap) noldbuckets() uintptr { + oldB := h.B + if !h.sameSizeGrow() { + oldB-- + } + return bucketShift(oldB) +} + +// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets(). +func (h *hmap) oldbucketmask() uintptr { + return h.noldbuckets() - 1 +} + +func growWork(t *maptype, h *hmap, bucket uintptr) { + // make sure we evacuate the oldbucket corresponding + // to the bucket we're about to use + evacuate(t, h, bucket&h.oldbucketmask()) + + // evacuate one more oldbucket to make progress on growing + if h.growing() { + evacuate(t, h, h.nevacuate) + } +} + +func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool { + b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize))) + return evacuated(b) +} + +// evacDst is an evacuation destination. +type evacDst struct { + b *bmap // current destination bucket + i int // key/elem index into b + k unsafe.Pointer // pointer to current key storage + e unsafe.Pointer // pointer to current elem storage +} + +func evacuate(t *maptype, h *hmap, oldbucket uintptr) { + b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))) + newbit := h.noldbuckets() + if !evacuated(b) { + // TODO: reuse overflow buckets instead of using new ones, if there + // is no iterator using the old buckets. (If !oldIterator.) + + // xy contains the x and y (low and high) evacuation destinations. + var xy [2]evacDst + x := &xy[0] + x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize))) + x.k = add(unsafe.Pointer(x.b), dataOffset) + x.e = add(x.k, bucketCnt*uintptr(t.KeySize)) + + if !h.sameSizeGrow() { + // Only calculate y pointers if we're growing bigger. + // Otherwise GC can see bad pointers. + y := &xy[1] + y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize))) + y.k = add(unsafe.Pointer(y.b), dataOffset) + y.e = add(y.k, bucketCnt*uintptr(t.KeySize)) + } + + for ; b != nil; b = b.overflow(t) { + k := add(unsafe.Pointer(b), dataOffset) + e := add(k, bucketCnt*uintptr(t.KeySize)) + for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) { + top := b.tophash[i] + if isEmpty(top) { + b.tophash[i] = evacuatedEmpty + continue + } + if top < minTopHash { + throw("bad map state") + } + k2 := k + if t.IndirectKey() { + k2 = *((*unsafe.Pointer)(k2)) + } + var useY uint8 + if !h.sameSizeGrow() { + // Compute hash to make our evacuation decision (whether we need + // to send this key/elem to bucket x or bucket y). + hash := t.Hasher(k2, uintptr(h.hash0)) + if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) { + // If key != key (NaNs), then the hash could be (and probably + // will be) entirely different from the old hash. Moreover, + // it isn't reproducible. Reproducibility is required in the + // presence of iterators, as our evacuation decision must + // match whatever decision the iterator made. + // Fortunately, we have the freedom to send these keys either + // way. Also, tophash is meaningless for these kinds of keys. + // We let the low bit of tophash drive the evacuation decision. + // We recompute a new random tophash for the next level so + // these keys will get evenly distributed across all buckets + // after multiple grows. 
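+ // Concretely: the old low bit decides X vs Y here, and tophash(hash)
+ // below installs a fresh pseudo-random tophash for the next level.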
+ useY = top & 1
+ top = tophash(hash)
+ } else {
+ if hash&newbit != 0 {
+ useY = 1
+ }
+ }
+ }
+
+ if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
+ throw("bad evacuatedN")
+ }
+
+ b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
+ dst := &xy[useY] // evacuation destination
+
+ if dst.i == bucketCnt {
+ dst.b = h.newoverflow(t, dst.b)
+ dst.i = 0
+ dst.k = add(unsafe.Pointer(dst.b), dataOffset)
+ dst.e = add(dst.k, bucketCnt*uintptr(t.KeySize))
+ }
+ dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+ if t.IndirectKey() {
+ *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
+ } else {
+ typedmemmove(t.Key, dst.k, k) // copy key
+ }
+ if t.IndirectElem() {
+ *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
+ } else {
+ typedmemmove(t.Elem, dst.e, e)
+ }
+ dst.i++
+ // These updates might push these pointers past the end of the
+ // key or elem arrays. That's ok, as we have the overflow pointer
+ // at the end of the bucket to protect against pointing past the
+ // end of the bucket.
+ dst.k = add(dst.k, uintptr(t.KeySize))
+ dst.e = add(dst.e, uintptr(t.ValueSize))
+ }
+ }
+ // Unlink the overflow buckets & clear key/elem to help GC.
+ if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
+ b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
+ // Preserve b.tophash because the evacuation
+ // state is maintained there.
+ ptr := add(b, dataOffset)
+ n := uintptr(t.BucketSize) - dataOffset
+ memclrHasPointers(ptr, n)
+ }
+ }
+
+ if oldbucket == h.nevacuate {
+ advanceEvacuationMark(h, t, newbit)
+ }
+}
+
+func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
+ h.nevacuate++
+ // Experiments suggest that 1024 is overkill by at least an order of magnitude.
+ // Put it in there as a safeguard anyway, to ensure O(1) behavior.
+ stop := h.nevacuate + 1024
+ if stop > newbit {
+ stop = newbit
+ }
+ for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
+ h.nevacuate++
+ }
+ if h.nevacuate == newbit { // newbit == # of oldbuckets
+ // Growing is all done. Free old main bucket array.
+ h.oldbuckets = nil
+ // Can discard old overflow buckets as well.
+ // If they are still referenced by an iterator,
+ // then the iterator holds a pointer to the slice.
+ if h.extra != nil {
+ h.extra.oldoverflow = nil
+ }
+ h.flags &^= sameSizeGrow
+ }
+}
+
+// Reflect stubs. Called from ../reflect/asm_*.s
+
+//go:linkname reflect_makemap reflect.makemap
+func reflect_makemap(t *maptype, cap int) *hmap {
+ // Check invariants and reflect's math.
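+ // These checks mirror the bucket layout that the compiler and package
+ // reflect compute: key/elem indirectness and sizes, alignment, and the
+ // padding implied by dataOffset.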
+ if t.Key.Equal == nil { + throw("runtime.reflect_makemap: unsupported map key type") + } + if t.Key.Size_ > maxKeySize && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) || + t.Key.Size_ <= maxKeySize && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) { + throw("key size wrong") + } + if t.Elem.Size_ > maxElemSize && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) || + t.Elem.Size_ <= maxElemSize && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) { + throw("elem size wrong") + } + if t.Key.Align_ > bucketCnt { + throw("key align too big") + } + if t.Elem.Align_ > bucketCnt { + throw("elem align too big") + } + if t.Key.Size_%uintptr(t.Key.Align_) != 0 { + throw("key size not a multiple of key align") + } + if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 { + throw("elem size not a multiple of elem align") + } + if bucketCnt < 8 { + throw("bucketsize too small for proper alignment") + } + if dataOffset%uintptr(t.Key.Align_) != 0 { + throw("need padding in bucket (key)") + } + if dataOffset%uintptr(t.Elem.Align_) != 0 { + throw("need padding in bucket (elem)") + } + + return makemap(t, cap, nil) +} + +//go:linkname reflect_mapaccess reflect.mapaccess +func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { + elem, ok := mapaccess2(t, h, key) + if !ok { + // reflect wants nil for a missing element + elem = nil + } + return elem +} + +//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr +func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer { + elem, ok := mapaccess2_faststr(t, h, key) + if !ok { + // reflect wants nil for a missing element + elem = nil + } + return elem +} + +//go:linkname reflect_mapassign reflect.mapassign0 +func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) { + p := mapassign(t, h, key) + typedmemmove(t.Elem, p, elem) +} + +//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0 +func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) { + p := mapassign_faststr(t, h, key) + typedmemmove(t.Elem, p, elem) +} + +//go:linkname reflect_mapdelete reflect.mapdelete +func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { + mapdelete(t, h, key) +} + +//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr +func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) { + mapdelete_faststr(t, h, key) +} + +//go:linkname reflect_mapiterinit reflect.mapiterinit +func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) { + mapiterinit(t, h, it) +} + +//go:linkname reflect_mapiternext reflect.mapiternext +func reflect_mapiternext(it *hiter) { + mapiternext(it) +} + +//go:linkname reflect_mapiterkey reflect.mapiterkey +func reflect_mapiterkey(it *hiter) unsafe.Pointer { + return it.key +} + +//go:linkname reflect_mapiterelem reflect.mapiterelem +func reflect_mapiterelem(it *hiter) unsafe.Pointer { + return it.elem +} + +//go:linkname reflect_maplen reflect.maplen +func reflect_maplen(h *hmap) int { + if h == nil { + return 0 + } + if raceenabled { + callerpc := getcallerpc() + racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen)) + } + return h.count +} + +//go:linkname reflect_mapclear reflect.mapclear +func reflect_mapclear(t *maptype, h *hmap) { + mapclear(t, h) +} + +//go:linkname reflectlite_maplen internal/reflectlite.maplen +func reflectlite_maplen(h *hmap) int { + if h == nil { + return 0 + } + if raceenabled { + callerpc := getcallerpc() + 
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
+ }
+ return h.count
+}
+
+var zeroVal [abi.ZeroValSize]byte
+
+// mapinitnoop is a no-op function known to the Go linker; if a given global
+// map (of the right size) is determined to be dead, the linker will
+// rewrite the relocation (from the package init func) from the outlined
+// map init function to this symbol. Defined in assembly so as to avoid
+// complications with instrumentation (coverage, etc).
+func mapinitnoop()
+
+// mapclone for implementing maps.Clone
+//
+//go:linkname mapclone maps.clone
+func mapclone(m any) any {
+ e := efaceOf(&m)
+ e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
+ return m
+}
+
+// moveToBmap moves a bucket from src to dst. It returns the destination bucket
+// (or a new destination bucket if dst overflows) and the position at which the
+// next key/elem will be written; pos == bucketCnt means the next write must go
+// to an overflow bucket.
+func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
+ for i := 0; i < bucketCnt; i++ {
+ if isEmpty(src.tophash[i]) {
+ continue
+ }
+
+ for ; pos < bucketCnt; pos++ {
+ if isEmpty(dst.tophash[pos]) {
+ break
+ }
+ }
+
+ if pos == bucketCnt {
+ dst = h.newoverflow(t, dst)
+ pos = 0
+ }
+
+ srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
+ srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
+ dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
+ dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
+
+ dst.tophash[pos] = src.tophash[i]
+ if t.IndirectKey() {
+ srcK = *(*unsafe.Pointer)(srcK)
+ if t.NeedKeyUpdate() {
+ kStore := newobject(t.Key)
+ typedmemmove(t.Key, kStore, srcK)
+ srcK = kStore
+ }
+ // Note: if NeedKeyUpdate is false, then the memory
+ // used to store the key is immutable, so we can share
+ // it between the original map and its clone.
+ *(*unsafe.Pointer)(dstK) = srcK
+ } else {
+ typedmemmove(t.Key, dstK, srcK)
+ }
+ if t.IndirectElem() {
+ srcEle = *(*unsafe.Pointer)(srcEle)
+ eStore := newobject(t.Elem)
+ typedmemmove(t.Elem, eStore, srcEle)
+ *(*unsafe.Pointer)(dstEle) = eStore
+ } else {
+ typedmemmove(t.Elem, dstEle, srcEle)
+ }
+ pos++
+ h.count++
+ }
+ return dst, pos
+}
+
+func mapclone2(t *maptype, src *hmap) *hmap {
+ dst := makemap(t, src.count, nil)
+ dst.hash0 = src.hash0
+ dst.nevacuate = 0
+ // flags do not need to be copied here; a new map has no flags either.
+
+ if src.count == 0 {
+ return dst
+ }
+
+ if src.flags&hashWriting != 0 {
+ fatal("concurrent map clone and map write")
+ }
+
+ if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
+ // Quick copy for small maps.
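+ // B == 0 means at most bucketCnt entries in a single bucket with no
+ // overflow chain, so the entire bucket can be copied with one
+ // typedmemmove (indirect elems are excluded above, since their
+ // storage must not be shared between the two maps).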
+ dst.buckets = newobject(t.Bucket) + dst.count = src.count + typedmemmove(t.Bucket, dst.buckets, src.buckets) + return dst + } + + if dst.B == 0 { + dst.buckets = newobject(t.Bucket) + } + dstArraySize := int(bucketShift(dst.B)) + srcArraySize := int(bucketShift(src.B)) + for i := 0; i < dstArraySize; i++ { + dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize)))) + pos := 0 + for j := 0; j < srcArraySize; j += dstArraySize { + srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize)))) + for srcBmap != nil { + dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap) + srcBmap = srcBmap.overflow(t) + } + } + } + + if src.oldbuckets == nil { + return dst + } + + oldB := src.B + srcOldbuckets := src.oldbuckets + if !src.sameSizeGrow() { + oldB-- + } + oldSrcArraySize := int(bucketShift(oldB)) + + for i := 0; i < oldSrcArraySize; i++ { + srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize)))) + if evacuated(srcBmap) { + continue + } + + if oldB >= dst.B { // main bucket bits in dst is less than oldB bits in src + dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize))) + for dstBmap.overflow(t) != nil { + dstBmap = dstBmap.overflow(t) + } + pos := 0 + for srcBmap != nil { + dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap) + srcBmap = srcBmap.overflow(t) + } + continue + } + + // oldB < dst.B, so a single source bucket may go to multiple destination buckets. + // Process entries one at a time. + for srcBmap != nil { + // move from oldBlucket to new bucket + for i := uintptr(0); i < bucketCnt; i++ { + if isEmpty(srcBmap.tophash[i]) { + continue + } + + if src.flags&hashWriting != 0 { + fatal("concurrent map clone and map write") + } + + srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize)) + if t.IndirectKey() { + srcK = *((*unsafe.Pointer)(srcK)) + } + + srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) + if t.IndirectElem() { + srcEle = *((*unsafe.Pointer)(srcEle)) + } + dstEle := mapassign(t, dst, srcK) + typedmemmove(t.Elem, dstEle, srcEle) + } + srcBmap = srcBmap.overflow(t) + } + } + return dst +} + +// keys for implementing maps.keys +// +//go:linkname keys maps.keys +func keys(m any, p unsafe.Pointer) { + e := efaceOf(&m) + t := (*maptype)(unsafe.Pointer(e._type)) + h := (*hmap)(e.data) + + if h == nil || h.count == 0 { + return + } + s := (*slice)(p) + r := int(rand()) + offset := uint8(r >> h.B & (bucketCnt - 1)) + if h.B == 0 { + copyKeys(t, h, (*bmap)(h.buckets), s, offset) + return + } + arraySize := int(bucketShift(h.B)) + buckets := h.buckets + for i := 0; i < arraySize; i++ { + bucket := (i + r) & (arraySize - 1) + b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize))) + copyKeys(t, h, b, s, offset) + } + + if h.growing() { + oldArraySize := int(h.noldbuckets()) + for i := 0; i < oldArraySize; i++ { + bucket := (i + r) & (oldArraySize - 1) + b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize))) + if evacuated(b) { + continue + } + copyKeys(t, h, b, s, offset) + } + } + return +} + +func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) { + for b != nil { + for i := uintptr(0); i < bucketCnt; i++ { + offi := (i + uintptr(offset)) & (bucketCnt - 1) + if isEmpty(b.tophash[offi]) { + continue + } + if h.flags&hashWriting != 0 { + fatal("concurrent map read and map write") + } + k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize)) + if t.IndirectKey() { + k = *((*unsafe.Pointer)(k)) + } + 
if s.len >= s.cap { + fatal("concurrent map read and map write") + } + typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k) + s.len++ + } + b = b.overflow(t) + } +} + +// values for implementing maps.values +// +//go:linkname values maps.values +func values(m any, p unsafe.Pointer) { + e := efaceOf(&m) + t := (*maptype)(unsafe.Pointer(e._type)) + h := (*hmap)(e.data) + if h == nil || h.count == 0 { + return + } + s := (*slice)(p) + r := int(rand()) + offset := uint8(r >> h.B & (bucketCnt - 1)) + if h.B == 0 { + copyValues(t, h, (*bmap)(h.buckets), s, offset) + return + } + arraySize := int(bucketShift(h.B)) + buckets := h.buckets + for i := 0; i < arraySize; i++ { + bucket := (i + r) & (arraySize - 1) + b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize))) + copyValues(t, h, b, s, offset) + } + + if h.growing() { + oldArraySize := int(h.noldbuckets()) + for i := 0; i < oldArraySize; i++ { + bucket := (i + r) & (oldArraySize - 1) + b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize))) + if evacuated(b) { + continue + } + copyValues(t, h, b, s, offset) + } + } + return +} + +func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) { + for b != nil { + for i := uintptr(0); i < bucketCnt; i++ { + offi := (i + uintptr(offset)) & (bucketCnt - 1) + if isEmpty(b.tophash[offi]) { + continue + } + + if h.flags&hashWriting != 0 { + fatal("concurrent map read and map write") + } + + ele := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+offi*uintptr(t.ValueSize)) + if t.IndirectElem() { + ele = *((*unsafe.Pointer)(ele)) + } + if s.len >= s.cap { + fatal("concurrent map read and map write") + } + typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele) + s.len++ + } + b = b.overflow(t) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/map_benchmark_test.go b/platform/dbops/binaries/go/go/src/runtime/map_benchmark_test.go new file mode 100644 index 0000000000000000000000000000000000000000..43d1accbb97cd67575a01280515bfeadd2465d2f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/map_benchmark_test.go @@ -0,0 +1,540 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime_test + +import ( + "fmt" + "math/rand" + "strconv" + "strings" + "testing" +) + +const size = 10 + +func BenchmarkHashStringSpeed(b *testing.B) { + strings := make([]string, size) + for i := 0; i < size; i++ { + strings[i] = fmt.Sprintf("string#%d", i) + } + sum := 0 + m := make(map[string]int, size) + for i := 0; i < size; i++ { + m[strings[i]] = 0 + } + idx := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + sum += m[strings[idx]] + idx++ + if idx == size { + idx = 0 + } + } +} + +type chunk [17]byte + +func BenchmarkHashBytesSpeed(b *testing.B) { + // a bunch of chunks, each with a different alignment mod 16 + var chunks [size]chunk + // initialize each to a different value + for i := 0; i < size; i++ { + chunks[i][0] = byte(i) + } + // put into a map + m := make(map[chunk]int, size) + for i, c := range chunks { + m[c] = i + } + idx := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + if m[chunks[idx]] != idx { + b.Error("bad map entry for chunk") + } + idx++ + if idx == size { + idx = 0 + } + } +} + +func BenchmarkHashInt32Speed(b *testing.B) { + ints := make([]int32, size) + for i := 0; i < size; i++ { + ints[i] = int32(i) + } + sum := 0 + m := make(map[int32]int, size) + for i := 0; i < size; i++ { + m[ints[i]] = 0 + } + idx := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + sum += m[ints[idx]] + idx++ + if idx == size { + idx = 0 + } + } +} + +func BenchmarkHashInt64Speed(b *testing.B) { + ints := make([]int64, size) + for i := 0; i < size; i++ { + ints[i] = int64(i) + } + sum := 0 + m := make(map[int64]int, size) + for i := 0; i < size; i++ { + m[ints[i]] = 0 + } + idx := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + sum += m[ints[idx]] + idx++ + if idx == size { + idx = 0 + } + } +} +func BenchmarkHashStringArraySpeed(b *testing.B) { + stringpairs := make([][2]string, size) + for i := 0; i < size; i++ { + for j := 0; j < 2; j++ { + stringpairs[i][j] = fmt.Sprintf("string#%d/%d", i, j) + } + } + sum := 0 + m := make(map[[2]string]int, size) + for i := 0; i < size; i++ { + m[stringpairs[i]] = 0 + } + idx := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + sum += m[stringpairs[idx]] + idx++ + if idx == size { + idx = 0 + } + } +} + +func BenchmarkMegMap(b *testing.B) { + m := make(map[string]bool) + for suffix := 'A'; suffix <= 'G'; suffix++ { + m[strings.Repeat("X", 1<<20-1)+fmt.Sprint(suffix)] = true + } + key := strings.Repeat("X", 1<<20-1) + "k" + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = m[key] + } +} + +func BenchmarkMegOneMap(b *testing.B) { + m := make(map[string]bool) + m[strings.Repeat("X", 1<<20)] = true + key := strings.Repeat("Y", 1<<20) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = m[key] + } +} + +func BenchmarkMegEqMap(b *testing.B) { + m := make(map[string]bool) + key1 := strings.Repeat("X", 1<<20) + key2 := strings.Repeat("X", 1<<20) // equal but different instance + m[key1] = true + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = m[key2] + } +} + +func BenchmarkMegEmptyMap(b *testing.B) { + m := make(map[string]bool) + key := strings.Repeat("X", 1<<20) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = m[key] + } +} + +func BenchmarkMegEmptyMapWithInterfaceKey(b *testing.B) { + m := make(map[any]bool) + key := strings.Repeat("X", 1<<20) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = m[key] + } +} + +func BenchmarkSmallStrMap(b *testing.B) { + m := make(map[string]bool) + for suffix := 'A'; suffix <= 'G'; suffix++ { + m[fmt.Sprint(suffix)] = true + } + key := "k" + b.ResetTimer() + for i := 0; i < b.N; 
i++ { + _, _ = m[key] + } +} + +func BenchmarkMapStringKeysEight_16(b *testing.B) { benchmarkMapStringKeysEight(b, 16) } +func BenchmarkMapStringKeysEight_32(b *testing.B) { benchmarkMapStringKeysEight(b, 32) } +func BenchmarkMapStringKeysEight_64(b *testing.B) { benchmarkMapStringKeysEight(b, 64) } +func BenchmarkMapStringKeysEight_1M(b *testing.B) { benchmarkMapStringKeysEight(b, 1<<20) } + +func benchmarkMapStringKeysEight(b *testing.B, keySize int) { + m := make(map[string]bool) + for i := 0; i < 8; i++ { + m[strings.Repeat("K", i+1)] = true + } + key := strings.Repeat("K", keySize) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = m[key] + } +} + +func BenchmarkIntMap(b *testing.B) { + m := make(map[int]bool) + for i := 0; i < 8; i++ { + m[i] = true + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = m[7] + } +} + +func BenchmarkMapFirst(b *testing.B) { + for n := 1; n <= 16; n++ { + b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { + m := make(map[int]bool) + for i := 0; i < n; i++ { + m[i] = true + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = m[0] + } + }) + } +} +func BenchmarkMapMid(b *testing.B) { + for n := 1; n <= 16; n++ { + b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { + m := make(map[int]bool) + for i := 0; i < n; i++ { + m[i] = true + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = m[n>>1] + } + }) + } +} +func BenchmarkMapLast(b *testing.B) { + for n := 1; n <= 16; n++ { + b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { + m := make(map[int]bool) + for i := 0; i < n; i++ { + m[i] = true + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = m[n-1] + } + }) + } +} + +func BenchmarkMapCycle(b *testing.B) { + // Arrange map entries to be a permutation, so that + // we hit all entries, and one lookup is data dependent + // on the previous lookup. + const N = 3127 + p := rand.New(rand.NewSource(1)).Perm(N) + m := map[int]int{} + for i := 0; i < N; i++ { + m[i] = p[i] + } + b.ResetTimer() + j := 0 + for i := 0; i < b.N; i++ { + j = m[j] + } + sink = uint64(j) +} + +// Accessing the same keys in a row. 
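+// The loop body below performs four lookups, so it runs b.N/4 times to keep
+// the reported per-iteration cost comparable to the other lookup benchmarks.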
+func benchmarkRepeatedLookup(b *testing.B, lookupKeySize int) { + m := make(map[string]bool) + // At least bigger than a single bucket: + for i := 0; i < 64; i++ { + m[fmt.Sprintf("some key %d", i)] = true + } + base := strings.Repeat("x", lookupKeySize-1) + key1 := base + "1" + key2 := base + "2" + b.ResetTimer() + for i := 0; i < b.N/4; i++ { + _ = m[key1] + _ = m[key1] + _ = m[key2] + _ = m[key2] + } +} + +func BenchmarkRepeatedLookupStrMapKey32(b *testing.B) { benchmarkRepeatedLookup(b, 32) } +func BenchmarkRepeatedLookupStrMapKey1M(b *testing.B) { benchmarkRepeatedLookup(b, 1<<20) } + +func BenchmarkMakeMap(b *testing.B) { + b.Run("[Byte]Byte", func(b *testing.B) { + var m map[byte]byte + for i := 0; i < b.N; i++ { + m = make(map[byte]byte, 10) + } + hugeSink = m + }) + b.Run("[Int]Int", func(b *testing.B) { + var m map[int]int + for i := 0; i < b.N; i++ { + m = make(map[int]int, 10) + } + hugeSink = m + }) +} + +func BenchmarkNewEmptyMap(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _ = make(map[int]int) + } +} + +func BenchmarkNewSmallMap(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + m := make(map[int]int) + m[0] = 0 + m[1] = 1 + } +} + +func BenchmarkMapIter(b *testing.B) { + m := make(map[int]bool) + for i := 0; i < 8; i++ { + m[i] = true + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for range m { + } + } +} + +func BenchmarkMapIterEmpty(b *testing.B) { + m := make(map[int]bool) + b.ResetTimer() + for i := 0; i < b.N; i++ { + for range m { + } + } +} + +func BenchmarkSameLengthMap(b *testing.B) { + // long strings, same length, differ in first few + // and last few bytes. + m := make(map[string]bool) + s1 := "foo" + strings.Repeat("-", 100) + "bar" + s2 := "goo" + strings.Repeat("-", 100) + "ber" + m[s1] = true + m[s2] = true + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = m[s1] + } +} + +type BigKey [3]int64 + +func BenchmarkBigKeyMap(b *testing.B) { + m := make(map[BigKey]bool) + k := BigKey{3, 4, 5} + m[k] = true + for i := 0; i < b.N; i++ { + _ = m[k] + } +} + +type BigVal [3]int64 + +func BenchmarkBigValMap(b *testing.B) { + m := make(map[BigKey]BigVal) + k := BigKey{3, 4, 5} + m[k] = BigVal{6, 7, 8} + for i := 0; i < b.N; i++ { + _ = m[k] + } +} + +func BenchmarkSmallKeyMap(b *testing.B) { + m := make(map[int16]bool) + m[5] = true + for i := 0; i < b.N; i++ { + _ = m[5] + } +} + +func BenchmarkMapPopulate(b *testing.B) { + for size := 1; size < 1000000; size *= 10 { + b.Run(strconv.Itoa(size), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + m := make(map[int]bool) + for j := 0; j < size; j++ { + m[j] = true + } + } + }) + } +} + +type ComplexAlgKey struct { + a, b, c int64 + _ int + d int32 + _ int + e string + _ int + f, g, h int64 +} + +func BenchmarkComplexAlgMap(b *testing.B) { + m := make(map[ComplexAlgKey]bool) + var k ComplexAlgKey + m[k] = true + for i := 0; i < b.N; i++ { + _ = m[k] + } +} + +func BenchmarkGoMapClear(b *testing.B) { + b.Run("Reflexive", func(b *testing.B) { + for size := 1; size < 100000; size *= 10 { + b.Run(strconv.Itoa(size), func(b *testing.B) { + m := make(map[int]int, size) + for i := 0; i < b.N; i++ { + m[0] = size // Add one element so len(m) != 0 avoiding fast paths. + clear(m) + } + }) + } + }) + b.Run("NonReflexive", func(b *testing.B) { + for size := 1; size < 100000; size *= 10 { + b.Run(strconv.Itoa(size), func(b *testing.B) { + m := make(map[float64]int, size) + for i := 0; i < b.N; i++ { + m[1.0] = size // Add one element so len(m) != 0 avoiding fast paths. 
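+ // float64 keys are non-reflexive (NaN != NaN); this subbenchmark
+ // tracks clearing for key types where k == k cannot be assumed.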
+ clear(m) + } + }) + } + }) +} + +func BenchmarkMapStringConversion(b *testing.B) { + for _, length := range []int{32, 64} { + b.Run(strconv.Itoa(length), func(b *testing.B) { + bytes := make([]byte, length) + b.Run("simple", func(b *testing.B) { + b.ReportAllocs() + m := make(map[string]int) + m[string(bytes)] = 0 + for i := 0; i < b.N; i++ { + _ = m[string(bytes)] + } + }) + b.Run("struct", func(b *testing.B) { + b.ReportAllocs() + type stringstruct struct{ s string } + m := make(map[stringstruct]int) + m[stringstruct{string(bytes)}] = 0 + for i := 0; i < b.N; i++ { + _ = m[stringstruct{string(bytes)}] + } + }) + b.Run("array", func(b *testing.B) { + b.ReportAllocs() + type stringarray [1]string + m := make(map[stringarray]int) + m[stringarray{string(bytes)}] = 0 + for i := 0; i < b.N; i++ { + _ = m[stringarray{string(bytes)}] + } + }) + }) + } +} + +var BoolSink bool + +func BenchmarkMapInterfaceString(b *testing.B) { + m := map[any]bool{} + + for i := 0; i < 100; i++ { + m[fmt.Sprintf("%d", i)] = true + } + + key := (any)("A") + b.ResetTimer() + for i := 0; i < b.N; i++ { + BoolSink = m[key] + } +} +func BenchmarkMapInterfacePtr(b *testing.B) { + m := map[any]bool{} + + for i := 0; i < 100; i++ { + i := i + m[&i] = true + } + + key := new(int) + b.ResetTimer() + for i := 0; i < b.N; i++ { + BoolSink = m[key] + } +} + +var ( + hintLessThan8 = 7 + hintGreaterThan8 = 32 +) + +func BenchmarkNewEmptyMapHintLessThan8(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _ = make(map[int]int, hintLessThan8) + } +} + +func BenchmarkNewEmptyMapHintGreaterThan8(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _ = make(map[int]int, hintGreaterThan8) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/map_fast32.go b/platform/dbops/binaries/go/go/src/runtime/map_fast32.go new file mode 100644 index 0000000000000000000000000000000000000000..e1dd495365b39c5369268ae6d21993ee3e106465 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/map_fast32.go @@ -0,0 +1,462 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/abi" + "internal/goarch" + "unsafe" +) + +func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { + if raceenabled && h != nil { + callerpc := getcallerpc() + racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(&zeroVal[0]) + } + if h.flags&hashWriting != 0 { + fatal("concurrent map read and map write") + } + var b *bmap + if h.B == 0 { + // One-bucket table. No need to hash. + b = (*bmap)(h.buckets) + } else { + hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) + m := bucketMask(h.B) + b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) + if c := h.oldbuckets; c != nil { + if !h.sameSizeGrow() { + // There used to be half as many buckets; mask down one more power of two. 
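+ // (See mapaccess1 in map.go; the fast variants repeat the same
+ // old-bucket check inline rather than sharing a helper.)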
+ m >>= 1 + } + oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) + if !evacuated(oldb) { + b = oldb + } + } + } + for ; b != nil; b = b.overflow(t) { + for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) { + if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize)) + } + } + } + return unsafe.Pointer(&zeroVal[0]) +} + +func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { + if raceenabled && h != nil { + callerpc := getcallerpc() + racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(&zeroVal[0]), false + } + if h.flags&hashWriting != 0 { + fatal("concurrent map read and map write") + } + var b *bmap + if h.B == 0 { + // One-bucket table. No need to hash. + b = (*bmap)(h.buckets) + } else { + hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) + m := bucketMask(h.B) + b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) + if c := h.oldbuckets; c != nil { + if !h.sameSizeGrow() { + // There used to be half as many buckets; mask down one more power of two. + m >>= 1 + } + oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) + if !evacuated(oldb) { + b = oldb + } + } + } + for ; b != nil; b = b.overflow(t) { + for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) { + if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize)), true + } + } + } + return unsafe.Pointer(&zeroVal[0]), false +} + +func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { + if h == nil { + panic(plainError("assignment to entry in nil map")) + } + if raceenabled { + callerpc := getcallerpc() + racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32)) + } + if h.flags&hashWriting != 0 { + fatal("concurrent map writes") + } + hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) + + // Set hashWriting after calling t.hasher for consistency with mapassign. + h.flags ^= hashWriting + + if h.buckets == nil { + h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1) + } + +again: + bucket := hash & bucketMask(h.B) + if h.growing() { + growWork_fast32(t, h, bucket) + } + b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) + + var insertb *bmap + var inserti uintptr + var insertk unsafe.Pointer + +bucketloop: + for { + for i := uintptr(0); i < bucketCnt; i++ { + if isEmpty(b.tophash[i]) { + if insertb == nil { + inserti = i + insertb = b + } + if b.tophash[i] == emptyRest { + break bucketloop + } + continue + } + k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))) + if k != key { + continue + } + inserti = i + insertb = b + goto done + } + ovf := b.overflow(t) + if ovf == nil { + break + } + b = ovf + } + + // Did not find mapping for key. Allocate new cell & add entry. + + // If we hit the max load factor or we have too many overflow buckets, + // and we're not already in the middle of growing, start growing. + if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { + hashGrow(t, h) + goto again // Growing the table invalidates everything, so try again + } + + if insertb == nil { + // The current bucket and all the overflow buckets connected to it are full, allocate a new one. 
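+ // newoverflow links a fresh bucket onto b's overflow chain (reusing a
+ // preallocated one from makeBucketArray when available) and updates
+ // h.noverflow.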
+ insertb = h.newoverflow(t, b) + inserti = 0 // not necessary, but avoids needlessly spilling inserti + } + insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks + + insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4) + // store new key at insert position + *(*uint32)(insertk) = key + + h.count++ + +done: + elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize)) + if h.flags&hashWriting == 0 { + fatal("concurrent map writes") + } + h.flags &^= hashWriting + return elem +} + +func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { + if h == nil { + panic(plainError("assignment to entry in nil map")) + } + if raceenabled { + callerpc := getcallerpc() + racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32)) + } + if h.flags&hashWriting != 0 { + fatal("concurrent map writes") + } + hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) + + // Set hashWriting after calling t.hasher for consistency with mapassign. + h.flags ^= hashWriting + + if h.buckets == nil { + h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1) + } + +again: + bucket := hash & bucketMask(h.B) + if h.growing() { + growWork_fast32(t, h, bucket) + } + b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) + + var insertb *bmap + var inserti uintptr + var insertk unsafe.Pointer + +bucketloop: + for { + for i := uintptr(0); i < bucketCnt; i++ { + if isEmpty(b.tophash[i]) { + if insertb == nil { + inserti = i + insertb = b + } + if b.tophash[i] == emptyRest { + break bucketloop + } + continue + } + k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4))) + if k != key { + continue + } + inserti = i + insertb = b + goto done + } + ovf := b.overflow(t) + if ovf == nil { + break + } + b = ovf + } + + // Did not find mapping for key. Allocate new cell & add entry. + + // If we hit the max load factor or we have too many overflow buckets, + // and we're not already in the middle of growing, start growing. + if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { + hashGrow(t, h) + goto again // Growing the table invalidates everything, so try again + } + + if insertb == nil { + // The current bucket and all the overflow buckets connected to it are full, allocate a new one. 
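+ // Same insertion flow as mapassign_fast32 above; only the key store
+ // differs, writing the key as a pointer so the GC can see it.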
+ insertb = h.newoverflow(t, b) + inserti = 0 // not necessary, but avoids needlessly spilling inserti + } + insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks + + insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4) + // store new key at insert position + *(*unsafe.Pointer)(insertk) = key + + h.count++ + +done: + elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize)) + if h.flags&hashWriting == 0 { + fatal("concurrent map writes") + } + h.flags &^= hashWriting + return elem +} + +func mapdelete_fast32(t *maptype, h *hmap, key uint32) { + if raceenabled && h != nil { + callerpc := getcallerpc() + racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32)) + } + if h == nil || h.count == 0 { + return + } + if h.flags&hashWriting != 0 { + fatal("concurrent map writes") + } + + hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) + + // Set hashWriting after calling t.hasher for consistency with mapdelete + h.flags ^= hashWriting + + bucket := hash & bucketMask(h.B) + if h.growing() { + growWork_fast32(t, h, bucket) + } + b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) + bOrig := b +search: + for ; b != nil; b = b.overflow(t) { + for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) { + if key != *(*uint32)(k) || isEmpty(b.tophash[i]) { + continue + } + // Only clear key if there are pointers in it. + // This can only happen if pointers are 32 bit + // wide as 64 bit pointers do not fit into a 32 bit key. + if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 { + // The key must be a pointer as we checked pointers are + // 32 bits wide and the key is 32 bits wide also. + *(*unsafe.Pointer)(k) = nil + } + e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize)) + if t.Elem.PtrBytes != 0 { + memclrHasPointers(e, t.Elem.Size_) + } else { + memclrNoHeapPointers(e, t.Elem.Size_) + } + b.tophash[i] = emptyOne + // If the bucket now ends in a bunch of emptyOne states, + // change those to emptyRest states. + if i == bucketCnt-1 { + if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { + goto notLast + } + } else { + if b.tophash[i+1] != emptyRest { + goto notLast + } + } + for { + b.tophash[i] = emptyRest + if i == 0 { + if b == bOrig { + break // beginning of initial bucket, we're done. + } + // Find previous bucket, continue at its last entry. + c := b + for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { + } + i = bucketCnt - 1 + } else { + i-- + } + if b.tophash[i] != emptyOne { + break + } + } + notLast: + h.count-- + // Reset the hash seed to make it more difficult for attackers to + // repeatedly trigger hash collisions. See issue 25237. 
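+ // Reseeding is safe here because the map is now empty: no surviving
+ // entry depends on positions derived from the old seed.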
+ if h.count == 0 { + h.hash0 = uint32(rand()) + } + break search + } + } + + if h.flags&hashWriting == 0 { + fatal("concurrent map writes") + } + h.flags &^= hashWriting +} + +func growWork_fast32(t *maptype, h *hmap, bucket uintptr) { + // make sure we evacuate the oldbucket corresponding + // to the bucket we're about to use + evacuate_fast32(t, h, bucket&h.oldbucketmask()) + + // evacuate one more oldbucket to make progress on growing + if h.growing() { + evacuate_fast32(t, h, h.nevacuate) + } +} + +func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) { + b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))) + newbit := h.noldbuckets() + if !evacuated(b) { + // TODO: reuse overflow buckets instead of using new ones, if there + // is no iterator using the old buckets. (If !oldIterator.) + + // xy contains the x and y (low and high) evacuation destinations. + var xy [2]evacDst + x := &xy[0] + x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize))) + x.k = add(unsafe.Pointer(x.b), dataOffset) + x.e = add(x.k, bucketCnt*4) + + if !h.sameSizeGrow() { + // Only calculate y pointers if we're growing bigger. + // Otherwise GC can see bad pointers. + y := &xy[1] + y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize))) + y.k = add(unsafe.Pointer(y.b), dataOffset) + y.e = add(y.k, bucketCnt*4) + } + + for ; b != nil; b = b.overflow(t) { + k := add(unsafe.Pointer(b), dataOffset) + e := add(k, bucketCnt*4) + for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) { + top := b.tophash[i] + if isEmpty(top) { + b.tophash[i] = evacuatedEmpty + continue + } + if top < minTopHash { + throw("bad map state") + } + var useY uint8 + if !h.sameSizeGrow() { + // Compute hash to make our evacuation decision (whether we need + // to send this key/elem to bucket x or bucket y). + hash := t.Hasher(k, uintptr(h.hash0)) + if hash&newbit != 0 { + useY = 1 + } + } + + b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap + dst := &xy[useY] // evacuation destination + + if dst.i == bucketCnt { + dst.b = h.newoverflow(t, dst.b) + dst.i = 0 + dst.k = add(unsafe.Pointer(dst.b), dataOffset) + dst.e = add(dst.k, bucketCnt*4) + } + dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check + + // Copy key. + if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 && writeBarrier.enabled { + // Write with a write barrier. + *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k) + } else { + *(*uint32)(dst.k) = *(*uint32)(k) + } + + typedmemmove(t.Elem, dst.e, e) + dst.i++ + // These updates might push these pointers past the end of the + // key or elem arrays. That's ok, as we have the overflow pointer + // at the end of the bucket to protect against pointing past the + // end of the bucket. + dst.k = add(dst.k, 4) + dst.e = add(dst.e, uintptr(t.ValueSize)) + } + } + // Unlink the overflow buckets & clear key/elem to help GC. + if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 { + b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)) + // Preserve b.tophash because the evacuation + // state is maintained there. 
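+ // Clearing from dataOffset to the end of the bucket wipes the keys,
+ // elems, and the trailing overflow pointer in a single call.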
+ ptr := add(b, dataOffset) + n := uintptr(t.BucketSize) - dataOffset + memclrHasPointers(ptr, n) + } + } + + if oldbucket == h.nevacuate { + advanceEvacuationMark(h, t, newbit) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/map_fast64.go b/platform/dbops/binaries/go/go/src/runtime/map_fast64.go new file mode 100644 index 0000000000000000000000000000000000000000..7ca35ec6cb137da301a7347342baf17dca6c69f0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/map_fast64.go @@ -0,0 +1,470 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/abi" + "internal/goarch" + "unsafe" +) + +func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { + if raceenabled && h != nil { + callerpc := getcallerpc() + racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(&zeroVal[0]) + } + if h.flags&hashWriting != 0 { + fatal("concurrent map read and map write") + } + var b *bmap + if h.B == 0 { + // One-bucket table. No need to hash. + b = (*bmap)(h.buckets) + } else { + hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) + m := bucketMask(h.B) + b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) + if c := h.oldbuckets; c != nil { + if !h.sameSizeGrow() { + // There used to be half as many buckets; mask down one more power of two. + m >>= 1 + } + oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) + if !evacuated(oldb) { + b = oldb + } + } + } + for ; b != nil; b = b.overflow(t) { + for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) { + if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize)) + } + } + } + return unsafe.Pointer(&zeroVal[0]) +} + +func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) { + if raceenabled && h != nil { + callerpc := getcallerpc() + racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(&zeroVal[0]), false + } + if h.flags&hashWriting != 0 { + fatal("concurrent map read and map write") + } + var b *bmap + if h.B == 0 { + // One-bucket table. No need to hash. + b = (*bmap)(h.buckets) + } else { + hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) + m := bucketMask(h.B) + b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) + if c := h.oldbuckets; c != nil { + if !h.sameSizeGrow() { + // There used to be half as many buckets; mask down one more power of two. 
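+				// For example, with h.B == 5 the new table has 32 buckets
+				// and m == 31; the old table had 16, so this key's old
+				// bucket index is hash & (m >> 1) == hash & 15.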
+ m >>= 1 + } + oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) + if !evacuated(oldb) { + b = oldb + } + } + } + for ; b != nil; b = b.overflow(t) { + for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) { + if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize)), true + } + } + } + return unsafe.Pointer(&zeroVal[0]), false +} + +func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { + if h == nil { + panic(plainError("assignment to entry in nil map")) + } + if raceenabled { + callerpc := getcallerpc() + racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64)) + } + if h.flags&hashWriting != 0 { + fatal("concurrent map writes") + } + hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) + + // Set hashWriting after calling t.hasher for consistency with mapassign. + h.flags ^= hashWriting + + if h.buckets == nil { + h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1) + } + +again: + bucket := hash & bucketMask(h.B) + if h.growing() { + growWork_fast64(t, h, bucket) + } + b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) + + var insertb *bmap + var inserti uintptr + var insertk unsafe.Pointer + +bucketloop: + for { + for i := uintptr(0); i < bucketCnt; i++ { + if isEmpty(b.tophash[i]) { + if insertb == nil { + insertb = b + inserti = i + } + if b.tophash[i] == emptyRest { + break bucketloop + } + continue + } + k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))) + if k != key { + continue + } + insertb = b + inserti = i + goto done + } + ovf := b.overflow(t) + if ovf == nil { + break + } + b = ovf + } + + // Did not find mapping for key. Allocate new cell & add entry. + + // If we hit the max load factor or we have too many overflow buckets, + // and we're not already in the middle of growing, start growing. + if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { + hashGrow(t, h) + goto again // Growing the table invalidates everything, so try again + } + + if insertb == nil { + // The current bucket and all the overflow buckets connected to it are full, allocate a new one. + insertb = h.newoverflow(t, b) + inserti = 0 // not necessary, but avoids needlessly spilling inserti + } + insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks + + insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8) + // store new key at insert position + *(*uint64)(insertk) = key + + h.count++ + +done: + elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize)) + if h.flags&hashWriting == 0 { + fatal("concurrent map writes") + } + h.flags &^= hashWriting + return elem +} + +func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { + if h == nil { + panic(plainError("assignment to entry in nil map")) + } + if raceenabled { + callerpc := getcallerpc() + racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64)) + } + if h.flags&hashWriting != 0 { + fatal("concurrent map writes") + } + hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) + + // Set hashWriting after calling t.hasher for consistency with mapassign. 
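+	// If the flag were set first and the hasher panicked, the map would
+	// be left marked as mid-write even though no write happened.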
+ h.flags ^= hashWriting + + if h.buckets == nil { + h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1) + } + +again: + bucket := hash & bucketMask(h.B) + if h.growing() { + growWork_fast64(t, h, bucket) + } + b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) + + var insertb *bmap + var inserti uintptr + var insertk unsafe.Pointer + +bucketloop: + for { + for i := uintptr(0); i < bucketCnt; i++ { + if isEmpty(b.tophash[i]) { + if insertb == nil { + insertb = b + inserti = i + } + if b.tophash[i] == emptyRest { + break bucketloop + } + continue + } + k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8))) + if k != key { + continue + } + insertb = b + inserti = i + goto done + } + ovf := b.overflow(t) + if ovf == nil { + break + } + b = ovf + } + + // Did not find mapping for key. Allocate new cell & add entry. + + // If we hit the max load factor or we have too many overflow buckets, + // and we're not already in the middle of growing, start growing. + if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { + hashGrow(t, h) + goto again // Growing the table invalidates everything, so try again + } + + if insertb == nil { + // The current bucket and all the overflow buckets connected to it are full, allocate a new one. + insertb = h.newoverflow(t, b) + inserti = 0 // not necessary, but avoids needlessly spilling inserti + } + insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks + + insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8) + // store new key at insert position + *(*unsafe.Pointer)(insertk) = key + + h.count++ + +done: + elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize)) + if h.flags&hashWriting == 0 { + fatal("concurrent map writes") + } + h.flags &^= hashWriting + return elem +} + +func mapdelete_fast64(t *maptype, h *hmap, key uint64) { + if raceenabled && h != nil { + callerpc := getcallerpc() + racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64)) + } + if h == nil || h.count == 0 { + return + } + if h.flags&hashWriting != 0 { + fatal("concurrent map writes") + } + + hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) + + // Set hashWriting after calling t.hasher for consistency with mapdelete + h.flags ^= hashWriting + + bucket := hash & bucketMask(h.B) + if h.growing() { + growWork_fast64(t, h, bucket) + } + b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) + bOrig := b +search: + for ; b != nil; b = b.overflow(t) { + for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) { + if key != *(*uint64)(k) || isEmpty(b.tophash[i]) { + continue + } + // Only clear key if there are pointers in it. + if t.Key.PtrBytes != 0 { + if goarch.PtrSize == 8 { + *(*unsafe.Pointer)(k) = nil + } else { + // There are three ways to squeeze at one or more 32 bit pointers into 64 bits. + // Just call memclrHasPointers instead of trying to handle all cases here. + memclrHasPointers(k, 8) + } + } + e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize)) + if t.Elem.PtrBytes != 0 { + memclrHasPointers(e, t.Elem.Size_) + } else { + memclrNoHeapPointers(e, t.Elem.Size_) + } + b.tophash[i] = emptyOne + // If the bucket now ends in a bunch of emptyOne states, + // change those to emptyRest states. 
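+			// emptyOne marks a single reusable slot; emptyRest additionally
+			// promises no occupied slots at higher indexes or in later
+			// overflow buckets, letting lookups stop scanning early.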
+ if i == bucketCnt-1 { + if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { + goto notLast + } + } else { + if b.tophash[i+1] != emptyRest { + goto notLast + } + } + for { + b.tophash[i] = emptyRest + if i == 0 { + if b == bOrig { + break // beginning of initial bucket, we're done. + } + // Find previous bucket, continue at its last entry. + c := b + for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { + } + i = bucketCnt - 1 + } else { + i-- + } + if b.tophash[i] != emptyOne { + break + } + } + notLast: + h.count-- + // Reset the hash seed to make it more difficult for attackers to + // repeatedly trigger hash collisions. See issue 25237. + if h.count == 0 { + h.hash0 = uint32(rand()) + } + break search + } + } + + if h.flags&hashWriting == 0 { + fatal("concurrent map writes") + } + h.flags &^= hashWriting +} + +func growWork_fast64(t *maptype, h *hmap, bucket uintptr) { + // make sure we evacuate the oldbucket corresponding + // to the bucket we're about to use + evacuate_fast64(t, h, bucket&h.oldbucketmask()) + + // evacuate one more oldbucket to make progress on growing + if h.growing() { + evacuate_fast64(t, h, h.nevacuate) + } +} + +func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) { + b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))) + newbit := h.noldbuckets() + if !evacuated(b) { + // TODO: reuse overflow buckets instead of using new ones, if there + // is no iterator using the old buckets. (If !oldIterator.) + + // xy contains the x and y (low and high) evacuation destinations. + var xy [2]evacDst + x := &xy[0] + x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize))) + x.k = add(unsafe.Pointer(x.b), dataOffset) + x.e = add(x.k, bucketCnt*8) + + if !h.sameSizeGrow() { + // Only calculate y pointers if we're growing bigger. + // Otherwise GC can see bad pointers. + y := &xy[1] + y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize))) + y.k = add(unsafe.Pointer(y.b), dataOffset) + y.e = add(y.k, bucketCnt*8) + } + + for ; b != nil; b = b.overflow(t) { + k := add(unsafe.Pointer(b), dataOffset) + e := add(k, bucketCnt*8) + for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) { + top := b.tophash[i] + if isEmpty(top) { + b.tophash[i] = evacuatedEmpty + continue + } + if top < minTopHash { + throw("bad map state") + } + var useY uint8 + if !h.sameSizeGrow() { + // Compute hash to make our evacuation decision (whether we need + // to send this key/elem to bucket x or bucket y). + hash := t.Hasher(k, uintptr(h.hash0)) + if hash&newbit != 0 { + useY = 1 + } + } + + b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap + dst := &xy[useY] // evacuation destination + + if dst.i == bucketCnt { + dst.b = h.newoverflow(t, dst.b) + dst.i = 0 + dst.k = add(unsafe.Pointer(dst.b), dataOffset) + dst.e = add(dst.k, bucketCnt*8) + } + dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check + + // Copy key. + if t.Key.PtrBytes != 0 && writeBarrier.enabled { + if goarch.PtrSize == 8 { + // Write with a write barrier. + *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k) + } else { + // There are three ways to squeeze at least one 32 bit pointer into 64 bits. + // Give up and call typedmemmove. + typedmemmove(t.Key, dst.k, k) + } + } else { + *(*uint64)(dst.k) = *(*uint64)(k) + } + + typedmemmove(t.Elem, dst.e, e) + dst.i++ + // These updates might push these pointers past the end of the + // key or elem arrays. 
That's ok, as we have the overflow pointer + // at the end of the bucket to protect against pointing past the + // end of the bucket. + dst.k = add(dst.k, 8) + dst.e = add(dst.e, uintptr(t.ValueSize)) + } + } + // Unlink the overflow buckets & clear key/elem to help GC. + if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 { + b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)) + // Preserve b.tophash because the evacuation + // state is maintained there. + ptr := add(b, dataOffset) + n := uintptr(t.BucketSize) - dataOffset + memclrHasPointers(ptr, n) + } + } + + if oldbucket == h.nevacuate { + advanceEvacuationMark(h, t, newbit) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/map_faststr.go b/platform/dbops/binaries/go/go/src/runtime/map_faststr.go new file mode 100644 index 0000000000000000000000000000000000000000..22e1f61f066054b05dde3468d9d220f2cb75ff7f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/map_faststr.go @@ -0,0 +1,485 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/abi" + "internal/goarch" + "unsafe" +) + +func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { + if raceenabled && h != nil { + callerpc := getcallerpc() + racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(&zeroVal[0]) + } + if h.flags&hashWriting != 0 { + fatal("concurrent map read and map write") + } + key := stringStructOf(&ky) + if h.B == 0 { + // One-bucket table. + b := (*bmap)(h.buckets) + if key.len < 32 { + // short key, doing lots of comparisons is ok + for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + k := (*stringStruct)(kptr) + if k.len != key.len || isEmpty(b.tophash[i]) { + if b.tophash[i] == emptyRest { + break + } + continue + } + if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)) + } + } + return unsafe.Pointer(&zeroVal[0]) + } + // long key, try not to do more comparisons than necessary + keymaybe := uintptr(bucketCnt) + for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + k := (*stringStruct)(kptr) + if k.len != key.len || isEmpty(b.tophash[i]) { + if b.tophash[i] == emptyRest { + break + } + continue + } + if k.str == key.str { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)) + } + // check first 4 bytes + if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) { + continue + } + // check last 4 bytes + if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) { + continue + } + if keymaybe != bucketCnt { + // Two keys are potential matches. Use hash to distinguish them. 
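+					// Hashing the query once and rescanning by tophash is
+					// cheaper than running full memequal against every
+					// length-matched candidate.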
+ goto dohash + } + keymaybe = i + } + if keymaybe != bucketCnt { + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize)) + if memequal(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)) + } + } + return unsafe.Pointer(&zeroVal[0]) + } +dohash: + hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) + m := bucketMask(h.B) + b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) + if c := h.oldbuckets; c != nil { + if !h.sameSizeGrow() { + // There used to be half as many buckets; mask down one more power of two. + m >>= 1 + } + oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) + if !evacuated(oldb) { + b = oldb + } + } + top := tophash(hash) + for ; b != nil; b = b.overflow(t) { + for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + k := (*stringStruct)(kptr) + if k.len != key.len || b.tophash[i] != top { + continue + } + if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)) + } + } + } + return unsafe.Pointer(&zeroVal[0]) +} + +func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { + if raceenabled && h != nil { + callerpc := getcallerpc() + racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(&zeroVal[0]), false + } + if h.flags&hashWriting != 0 { + fatal("concurrent map read and map write") + } + key := stringStructOf(&ky) + if h.B == 0 { + // One-bucket table. + b := (*bmap)(h.buckets) + if key.len < 32 { + // short key, doing lots of comparisons is ok + for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + k := (*stringStruct)(kptr) + if k.len != key.len || isEmpty(b.tophash[i]) { + if b.tophash[i] == emptyRest { + break + } + continue + } + if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true + } + } + return unsafe.Pointer(&zeroVal[0]), false + } + // long key, try not to do more comparisons than necessary + keymaybe := uintptr(bucketCnt) + for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + k := (*stringStruct)(kptr) + if k.len != key.len || isEmpty(b.tophash[i]) { + if b.tophash[i] == emptyRest { + break + } + continue + } + if k.str == key.str { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true + } + // check first 4 bytes + if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) { + continue + } + // check last 4 bytes + if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) { + continue + } + if keymaybe != bucketCnt { + // Two keys are potential matches. Use hash to distinguish them. 
+ goto dohash + } + keymaybe = i + } + if keymaybe != bucketCnt { + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize)) + if memequal(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true + } + } + return unsafe.Pointer(&zeroVal[0]), false + } +dohash: + hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) + m := bucketMask(h.B) + b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) + if c := h.oldbuckets; c != nil { + if !h.sameSizeGrow() { + // There used to be half as many buckets; mask down one more power of two. + m >>= 1 + } + oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) + if !evacuated(oldb) { + b = oldb + } + } + top := tophash(hash) + for ; b != nil; b = b.overflow(t) { + for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + k := (*stringStruct)(kptr) + if k.len != key.len || b.tophash[i] != top { + continue + } + if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true + } + } + } + return unsafe.Pointer(&zeroVal[0]), false +} + +func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer { + if h == nil { + panic(plainError("assignment to entry in nil map")) + } + if raceenabled { + callerpc := getcallerpc() + racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr)) + } + if h.flags&hashWriting != 0 { + fatal("concurrent map writes") + } + key := stringStructOf(&s) + hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0)) + + // Set hashWriting after calling t.hasher for consistency with mapassign. + h.flags ^= hashWriting + + if h.buckets == nil { + h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1) + } + +again: + bucket := hash & bucketMask(h.B) + if h.growing() { + growWork_faststr(t, h, bucket) + } + b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) + top := tophash(hash) + + var insertb *bmap + var inserti uintptr + var insertk unsafe.Pointer + +bucketloop: + for { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + if isEmpty(b.tophash[i]) && insertb == nil { + insertb = b + inserti = i + } + if b.tophash[i] == emptyRest { + break bucketloop + } + continue + } + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize)) + if k.len != key.len { + continue + } + if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) { + continue + } + // already have a mapping for key. Update it. + inserti = i + insertb = b + // Overwrite existing key, so it can be garbage collected. + // The size is already guaranteed to be set correctly. + k.str = key.str + goto done + } + ovf := b.overflow(t) + if ovf == nil { + break + } + b = ovf + } + + // Did not find mapping for key. Allocate new cell & add entry. + + // If we hit the max load factor or we have too many overflow buckets, + // and we're not already in the middle of growing, start growing. + if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { + hashGrow(t, h) + goto again // Growing the table invalidates everything, so try again + } + + if insertb == nil { + // The current bucket and all the overflow buckets connected to it are full, allocate a new one. 
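+		// newoverflow chains a fresh bucket onto b and bumps h.noverflow
+		// (approximately, for large maps), which tooManyOverflowBuckets
+		// consults on later inserts.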
+ insertb = h.newoverflow(t, b) + inserti = 0 // not necessary, but avoids needlessly spilling inserti + } + insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks + + insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize) + // store new key at insert position + *((*stringStruct)(insertk)) = *key + h.count++ + +done: + elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.ValueSize)) + if h.flags&hashWriting == 0 { + fatal("concurrent map writes") + } + h.flags &^= hashWriting + return elem +} + +func mapdelete_faststr(t *maptype, h *hmap, ky string) { + if raceenabled && h != nil { + callerpc := getcallerpc() + racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr)) + } + if h == nil || h.count == 0 { + return + } + if h.flags&hashWriting != 0 { + fatal("concurrent map writes") + } + + key := stringStructOf(&ky) + hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) + + // Set hashWriting after calling t.hasher for consistency with mapdelete + h.flags ^= hashWriting + + bucket := hash & bucketMask(h.B) + if h.growing() { + growWork_faststr(t, h, bucket) + } + b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) + bOrig := b + top := tophash(hash) +search: + for ; b != nil; b = b.overflow(t) { + for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + k := (*stringStruct)(kptr) + if k.len != key.len || b.tophash[i] != top { + continue + } + if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) { + continue + } + // Clear key's pointer. + k.str = nil + e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)) + if t.Elem.PtrBytes != 0 { + memclrHasPointers(e, t.Elem.Size_) + } else { + memclrNoHeapPointers(e, t.Elem.Size_) + } + b.tophash[i] = emptyOne + // If the bucket now ends in a bunch of emptyOne states, + // change those to emptyRest states. + if i == bucketCnt-1 { + if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { + goto notLast + } + } else { + if b.tophash[i+1] != emptyRest { + goto notLast + } + } + for { + b.tophash[i] = emptyRest + if i == 0 { + if b == bOrig { + break // beginning of initial bucket, we're done. + } + // Find previous bucket, continue at its last entry. + c := b + for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { + } + i = bucketCnt - 1 + } else { + i-- + } + if b.tophash[i] != emptyOne { + break + } + } + notLast: + h.count-- + // Reset the hash seed to make it more difficult for attackers to + // repeatedly trigger hash collisions. See issue 25237. + if h.count == 0 { + h.hash0 = uint32(rand()) + } + break search + } + } + + if h.flags&hashWriting == 0 { + fatal("concurrent map writes") + } + h.flags &^= hashWriting +} + +func growWork_faststr(t *maptype, h *hmap, bucket uintptr) { + // make sure we evacuate the oldbucket corresponding + // to the bucket we're about to use + evacuate_faststr(t, h, bucket&h.oldbucketmask()) + + // evacuate one more oldbucket to make progress on growing + if h.growing() { + evacuate_faststr(t, h, h.nevacuate) + } +} + +func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) { + b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))) + newbit := h.noldbuckets() + if !evacuated(b) { + // TODO: reuse overflow buckets instead of using new ones, if there + // is no iterator using the old buckets. (If !oldIterator.) 
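+		// A grow doubles the bucket array: each old bucket i splits into
+		// new bucket i (the x destination) and i+newbit (the y destination).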
+ + // xy contains the x and y (low and high) evacuation destinations. + var xy [2]evacDst + x := &xy[0] + x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize))) + x.k = add(unsafe.Pointer(x.b), dataOffset) + x.e = add(x.k, bucketCnt*2*goarch.PtrSize) + + if !h.sameSizeGrow() { + // Only calculate y pointers if we're growing bigger. + // Otherwise GC can see bad pointers. + y := &xy[1] + y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize))) + y.k = add(unsafe.Pointer(y.b), dataOffset) + y.e = add(y.k, bucketCnt*2*goarch.PtrSize) + } + + for ; b != nil; b = b.overflow(t) { + k := add(unsafe.Pointer(b), dataOffset) + e := add(k, bucketCnt*2*goarch.PtrSize) + for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) { + top := b.tophash[i] + if isEmpty(top) { + b.tophash[i] = evacuatedEmpty + continue + } + if top < minTopHash { + throw("bad map state") + } + var useY uint8 + if !h.sameSizeGrow() { + // Compute hash to make our evacuation decision (whether we need + // to send this key/elem to bucket x or bucket y). + hash := t.Hasher(k, uintptr(h.hash0)) + if hash&newbit != 0 { + useY = 1 + } + } + + b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap + dst := &xy[useY] // evacuation destination + + if dst.i == bucketCnt { + dst.b = h.newoverflow(t, dst.b) + dst.i = 0 + dst.k = add(unsafe.Pointer(dst.b), dataOffset) + dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize) + } + dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check + + // Copy key. + *(*string)(dst.k) = *(*string)(k) + + typedmemmove(t.Elem, dst.e, e) + dst.i++ + // These updates might push these pointers past the end of the + // key or elem arrays. That's ok, as we have the overflow pointer + // at the end of the bucket to protect against pointing past the + // end of the bucket. + dst.k = add(dst.k, 2*goarch.PtrSize) + dst.e = add(dst.e, uintptr(t.ValueSize)) + } + } + // Unlink the overflow buckets & clear key/elem to help GC. + if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 { + b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)) + // Preserve b.tophash because the evacuation + // state is maintained there. + ptr := add(b, dataOffset) + n := uintptr(t.BucketSize) - dataOffset + memclrHasPointers(ptr, n) + } + } + + if oldbucket == h.nevacuate { + advanceEvacuationMark(h, t, newbit) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/map_test.go b/platform/dbops/binaries/go/go/src/runtime/map_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c29fb933ee49be2961bfea73a10758b7facce8bb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/map_test.go @@ -0,0 +1,1546 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "fmt" + "internal/abi" + "internal/goarch" + "internal/testenv" + "math" + "os" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "testing" + "unsafe" +) + +func TestHmapSize(t *testing.T) { + // The structure of hmap is defined in runtime/map.go + // and in cmd/compile/internal/gc/reflect.go and must be in sync. + // The size of hmap should be 48 bytes on 64 bit and 28 bytes on 32 bit platforms. 
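+	// That is 8 bytes of fixed-width fields (flags, B, noverflow, hash0)
+	// plus five pointer-sized fields (count, buckets, oldbuckets,
+	// nevacuate, extra), matching the expression below.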
+ var hmapSize = uintptr(8 + 5*goarch.PtrSize) + if runtime.RuntimeHmapSize != hmapSize { + t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize) + } + +} + +// negative zero is a good test because: +// 1. 0 and -0 are equal, yet have distinct representations. +// 2. 0 is represented as all zeros, -0 isn't. +// +// I'm not sure the language spec actually requires this behavior, +// but it's what the current map implementation does. +func TestNegativeZero(t *testing.T) { + m := make(map[float64]bool, 0) + + m[+0.0] = true + m[math.Copysign(0.0, -1.0)] = true // should overwrite +0 entry + + if len(m) != 1 { + t.Error("length wrong") + } + + for k := range m { + if math.Copysign(1.0, k) > 0 { + t.Error("wrong sign") + } + } + + m = make(map[float64]bool, 0) + m[math.Copysign(0.0, -1.0)] = true + m[+0.0] = true // should overwrite -0.0 entry + + if len(m) != 1 { + t.Error("length wrong") + } + + for k := range m { + if math.Copysign(1.0, k) < 0 { + t.Error("wrong sign") + } + } +} + +func testMapNan(t *testing.T, m map[float64]int) { + if len(m) != 3 { + t.Error("length wrong") + } + s := 0 + for k, v := range m { + if k == k { + t.Error("nan disappeared") + } + if (v & (v - 1)) != 0 { + t.Error("value wrong") + } + s |= v + } + if s != 7 { + t.Error("values wrong") + } +} + +// nan is a good test because nan != nan, and nan has +// a randomized hash value. +func TestMapAssignmentNan(t *testing.T) { + m := make(map[float64]int, 0) + nan := math.NaN() + + // Test assignment. + m[nan] = 1 + m[nan] = 2 + m[nan] = 4 + testMapNan(t, m) +} + +// nan is a good test because nan != nan, and nan has +// a randomized hash value. +func TestMapOperatorAssignmentNan(t *testing.T) { + m := make(map[float64]int, 0) + nan := math.NaN() + + // Test assignment operations. + m[nan] += 1 + m[nan] += 2 + m[nan] += 4 + testMapNan(t, m) +} + +func TestMapOperatorAssignment(t *testing.T) { + m := make(map[int]int, 0) + + // "m[k] op= x" is rewritten into "m[k] = m[k] op x" + // differently when op is / or % than when it isn't. + // Simple test to make sure they all work as expected. + m[0] = 12345 + m[0] += 67890 + m[0] /= 123 + m[0] %= 456 + + const want = (12345 + 67890) / 123 % 456 + if got := m[0]; got != want { + t.Errorf("got %d, want %d", got, want) + } +} + +var sinkAppend bool + +func TestMapAppendAssignment(t *testing.T) { + m := make(map[int][]int, 0) + + m[0] = nil + m[0] = append(m[0], 12345) + m[0] = append(m[0], 67890) + sinkAppend, m[0] = !sinkAppend, append(m[0], 123, 456) + a := []int{7, 8, 9, 0} + m[0] = append(m[0], a...) + + want := []int{12345, 67890, 123, 456, 7, 8, 9, 0} + if got := m[0]; !reflect.DeepEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } +} + +// Maps aren't actually copied on assignment. +func TestAlias(t *testing.T) { + m := make(map[int]int, 0) + m[0] = 5 + n := m + n[0] = 6 + if m[0] != 6 { + t.Error("alias didn't work") + } +} + +func TestGrowWithNaN(t *testing.T) { + m := make(map[float64]int, 4) + nan := math.NaN() + + // Use both assignment and assignment operations as they may + // behave differently. 
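+	// Since NaN != NaN, each of the three stores below creates a distinct
+	// entry instead of overwriting, so the map starts with three elements.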
+ m[nan] = 1 + m[nan] = 2 + m[nan] += 4 + + cnt := 0 + s := 0 + growflag := true + for k, v := range m { + if growflag { + // force a hashtable resize + for i := 0; i < 50; i++ { + m[float64(i)] = i + } + for i := 50; i < 100; i++ { + m[float64(i)] += i + } + growflag = false + } + if k != k { + cnt++ + s |= v + } + } + if cnt != 3 { + t.Error("NaN keys lost during grow") + } + if s != 7 { + t.Error("NaN values lost during grow") + } +} + +type FloatInt struct { + x float64 + y int +} + +func TestGrowWithNegativeZero(t *testing.T) { + negzero := math.Copysign(0.0, -1.0) + m := make(map[FloatInt]int, 4) + m[FloatInt{0.0, 0}] = 1 + m[FloatInt{0.0, 1}] += 2 + m[FloatInt{0.0, 2}] += 4 + m[FloatInt{0.0, 3}] = 8 + growflag := true + s := 0 + cnt := 0 + negcnt := 0 + // The first iteration should return the +0 key. + // The subsequent iterations should return the -0 key. + // I'm not really sure this is required by the spec, + // but it makes sense. + // TODO: are we allowed to get the first entry returned again??? + for k, v := range m { + if v == 0 { + continue + } // ignore entries added to grow table + cnt++ + if math.Copysign(1.0, k.x) < 0 { + if v&16 == 0 { + t.Error("key/value not updated together 1") + } + negcnt++ + s |= v & 15 + } else { + if v&16 == 16 { + t.Error("key/value not updated together 2", k, v) + } + s |= v + } + if growflag { + // force a hashtable resize + for i := 0; i < 100; i++ { + m[FloatInt{3.0, i}] = 0 + } + // then change all the entries + // to negative zero + m[FloatInt{negzero, 0}] = 1 | 16 + m[FloatInt{negzero, 1}] = 2 | 16 + m[FloatInt{negzero, 2}] = 4 | 16 + m[FloatInt{negzero, 3}] = 8 | 16 + growflag = false + } + } + if s != 15 { + t.Error("entry missing", s) + } + if cnt != 4 { + t.Error("wrong number of entries returned by iterator", cnt) + } + if negcnt != 3 { + t.Error("update to negzero missed by iteration", negcnt) + } +} + +func TestIterGrowAndDelete(t *testing.T) { + m := make(map[int]int, 4) + for i := 0; i < 100; i++ { + m[i] = i + } + growflag := true + for k := range m { + if growflag { + // grow the table + for i := 100; i < 1000; i++ { + m[i] = i + } + // delete all odd keys + for i := 1; i < 1000; i += 2 { + delete(m, i) + } + growflag = false + } else { + if k&1 == 1 { + t.Error("odd value returned") + } + } + } +} + +// make sure old bucket arrays don't get GCd while +// an iterator is still using them. 
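+// (Evacuation clears old-bucket keys and elems only when no iterator can
+// still be using them; see the h.flags&oldIterator checks in evacuate.)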
+func TestIterGrowWithGC(t *testing.T) { + m := make(map[int]int, 4) + for i := 0; i < 8; i++ { + m[i] = i + } + for i := 8; i < 16; i++ { + m[i] += i + } + growflag := true + bitmask := 0 + for k := range m { + if k < 16 { + bitmask |= 1 << uint(k) + } + if growflag { + // grow the table + for i := 100; i < 1000; i++ { + m[i] = i + } + // trigger a gc + runtime.GC() + growflag = false + } + } + if bitmask != 1<<16-1 { + t.Error("missing key", bitmask) + } +} + +func testConcurrentReadsAfterGrowth(t *testing.T, useReflect bool) { + t.Parallel() + if runtime.GOMAXPROCS(-1) == 1 { + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(16)) + } + numLoop := 10 + numGrowStep := 250 + numReader := 16 + if testing.Short() { + numLoop, numGrowStep = 2, 100 + } + for i := 0; i < numLoop; i++ { + m := make(map[int]int, 0) + for gs := 0; gs < numGrowStep; gs++ { + m[gs] = gs + var wg sync.WaitGroup + wg.Add(numReader * 2) + for nr := 0; nr < numReader; nr++ { + go func() { + defer wg.Done() + for range m { + } + }() + go func() { + defer wg.Done() + for key := 0; key < gs; key++ { + _ = m[key] + } + }() + if useReflect { + wg.Add(1) + go func() { + defer wg.Done() + mv := reflect.ValueOf(m) + keys := mv.MapKeys() + for _, k := range keys { + mv.MapIndex(k) + } + }() + } + } + wg.Wait() + } + } +} + +func TestConcurrentReadsAfterGrowth(t *testing.T) { + testConcurrentReadsAfterGrowth(t, false) +} + +func TestConcurrentReadsAfterGrowthReflect(t *testing.T) { + testConcurrentReadsAfterGrowth(t, true) +} + +func TestBigItems(t *testing.T) { + var key [256]string + for i := 0; i < 256; i++ { + key[i] = "foo" + } + m := make(map[[256]string][256]string, 4) + for i := 0; i < 100; i++ { + key[37] = fmt.Sprintf("string%02d", i) + m[key] = key + } + var keys [100]string + var values [100]string + i := 0 + for k, v := range m { + keys[i] = k[37] + values[i] = v[37] + i++ + } + sort.Strings(keys[:]) + sort.Strings(values[:]) + for i := 0; i < 100; i++ { + if keys[i] != fmt.Sprintf("string%02d", i) { + t.Errorf("#%d: missing key: %v", i, keys[i]) + } + if values[i] != fmt.Sprintf("string%02d", i) { + t.Errorf("#%d: missing value: %v", i, values[i]) + } + } +} + +func TestMapHugeZero(t *testing.T) { + type T [4000]byte + m := map[int]T{} + x := m[0] + if x != (T{}) { + t.Errorf("map value not zero") + } + y, ok := m[0] + if ok { + t.Errorf("map value should be missing") + } + if y != (T{}) { + t.Errorf("map value not zero") + } +} + +type empty struct { +} + +func TestEmptyKeyAndValue(t *testing.T) { + a := make(map[int]empty, 4) + b := make(map[empty]int, 4) + c := make(map[empty]empty, 4) + a[0] = empty{} + b[empty{}] = 0 + b[empty{}] = 1 + c[empty{}] = empty{} + + if len(a) != 1 { + t.Errorf("empty value insert problem") + } + if b[empty{}] != 1 { + t.Errorf("empty key returned wrong value") + } +} + +// Tests a map with a single bucket, with same-lengthed short keys +// ("quick keys") as well as long keys. +func TestSingleBucketMapStringKeys_DupLen(t *testing.T) { + testMapLookups(t, map[string]string{ + "x": "x1val", + "xx": "x2val", + "foo": "fooval", + "bar": "barval", // same key length as "foo" + "xxxx": "x4val", + strings.Repeat("x", 128): "longval1", + strings.Repeat("y", 128): "longval2", + }) +} + +// Tests a map with a single bucket, with all keys having different lengths. 
+func TestSingleBucketMapStringKeys_NoDupLen(t *testing.T) { + testMapLookups(t, map[string]string{ + "x": "x1val", + "xx": "x2val", + "foo": "fooval", + "xxxx": "x4val", + "xxxxx": "x5val", + "xxxxxx": "x6val", + strings.Repeat("x", 128): "longval", + }) +} + +func testMapLookups(t *testing.T, m map[string]string) { + for k, v := range m { + if m[k] != v { + t.Fatalf("m[%q] = %q; want %q", k, m[k], v) + } + } +} + +// Tests whether the iterator returns the right elements when +// started in the middle of a grow, when the keys are NaNs. +func TestMapNanGrowIterator(t *testing.T) { + m := make(map[float64]int) + nan := math.NaN() + const nBuckets = 16 + // To fill nBuckets buckets takes LOAD * nBuckets keys. + nKeys := int(nBuckets * runtime.HashLoad) + + // Get map to full point with nan keys. + for i := 0; i < nKeys; i++ { + m[nan] = i + } + // Trigger grow + m[1.0] = 1 + delete(m, 1.0) + + // Run iterator + found := make(map[int]struct{}) + for _, v := range m { + if v != -1 { + if _, repeat := found[v]; repeat { + t.Fatalf("repeat of value %d", v) + } + found[v] = struct{}{} + } + if len(found) == nKeys/2 { + // Halfway through iteration, finish grow. + for i := 0; i < nBuckets; i++ { + delete(m, 1.0) + } + } + } + if len(found) != nKeys { + t.Fatalf("missing value") + } +} + +func TestMapIterOrder(t *testing.T) { + sizes := []int{3, 7, 9, 15} + if abi.MapBucketCountBits >= 5 { + // it gets flaky (often only one iteration order) at size 3 when abi.MapBucketCountBits >=5. + t.Fatalf("This test becomes flaky if abi.MapBucketCountBits(=%d) is 5 or larger", abi.MapBucketCountBits) + } + for _, n := range sizes { + for i := 0; i < 1000; i++ { + // Make m be {0: true, 1: true, ..., n-1: true}. + m := make(map[int]bool) + for i := 0; i < n; i++ { + m[i] = true + } + // Check that iterating over the map produces at least two different orderings. + ord := func() []int { + var s []int + for key := range m { + s = append(s, key) + } + return s + } + first := ord() + ok := false + for try := 0; try < 100; try++ { + if !reflect.DeepEqual(first, ord()) { + ok = true + break + } + } + if !ok { + t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first) + break + } + } + } +} + +// Issue 8410 +func TestMapSparseIterOrder(t *testing.T) { + // Run several rounds to increase the probability + // of failure. One is not enough. +NextRound: + for round := 0; round < 10; round++ { + m := make(map[int]bool) + // Add 1000 items, remove 980. + for i := 0; i < 1000; i++ { + m[i] = true + } + for i := 20; i < 1000; i++ { + delete(m, i) + } + + var first []int + for i := range m { + first = append(first, i) + } + + // 800 chances to get a different iteration order. + // See bug 8736 for why we need so many tries. + for n := 0; n < 800; n++ { + idx := 0 + for i := range m { + if i != first[idx] { + // iteration order changed. + continue NextRound + } + idx++ + } + } + t.Fatalf("constant iteration order on round %d: %v", round, first) + } +} + +func TestMapStringBytesLookup(t *testing.T) { + // Use large string keys to avoid small-allocation coalescing, + // which can cause AllocsPerRun to report lower counts than it should. 
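+	// The compiler recognizes the m[string(buf)] pattern below and does
+	// the lookup without heap-allocating the converted string, which is
+	// what the AllocsPerRun checks depend on.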
+ m := map[string]int{ + "1000000000000000000000000000000000000000000000000": 1, + "2000000000000000000000000000000000000000000000000": 2, + } + buf := []byte("1000000000000000000000000000000000000000000000000") + if x := m[string(buf)]; x != 1 { + t.Errorf(`m[string([]byte("1"))] = %d, want 1`, x) + } + buf[0] = '2' + if x := m[string(buf)]; x != 2 { + t.Errorf(`m[string([]byte("2"))] = %d, want 2`, x) + } + + var x int + n := testing.AllocsPerRun(100, func() { + x += m[string(buf)] + }) + if n != 0 { + t.Errorf("AllocsPerRun for m[string(buf)] = %v, want 0", n) + } + + x = 0 + n = testing.AllocsPerRun(100, func() { + y, ok := m[string(buf)] + if !ok { + panic("!ok") + } + x += y + }) + if n != 0 { + t.Errorf("AllocsPerRun for x,ok = m[string(buf)] = %v, want 0", n) + } +} + +func TestMapLargeKeyNoPointer(t *testing.T) { + const ( + I = 1000 + N = 64 + ) + type T [N]int + m := make(map[T]int) + for i := 0; i < I; i++ { + var v T + for j := 0; j < N; j++ { + v[j] = i + j + } + m[v] = i + } + runtime.GC() + for i := 0; i < I; i++ { + var v T + for j := 0; j < N; j++ { + v[j] = i + j + } + if m[v] != i { + t.Fatalf("corrupted map: want %+v, got %+v", i, m[v]) + } + } +} + +func TestMapLargeValNoPointer(t *testing.T) { + const ( + I = 1000 + N = 64 + ) + type T [N]int + m := make(map[int]T) + for i := 0; i < I; i++ { + var v T + for j := 0; j < N; j++ { + v[j] = i + j + } + m[i] = v + } + runtime.GC() + for i := 0; i < I; i++ { + var v T + for j := 0; j < N; j++ { + v[j] = i + j + } + v1 := m[i] + for j := 0; j < N; j++ { + if v1[j] != v[j] { + t.Fatalf("corrupted map: want %+v, got %+v", v, v1) + } + } + } +} + +// Test that making a map with a large or invalid hint +// doesn't panic. (Issue 19926). +func TestIgnoreBogusMapHint(t *testing.T) { + for _, hint := range []int64{-1, 1 << 62} { + _ = make(map[int]int, hint) + } +} + +const bs = abi.MapBucketCount + +// belowOverflow should be a pretty-full pair of buckets; +// atOverflow is 1/8 bs larger = 13/8 buckets or two buckets +// that are 13/16 full each, which is the overflow boundary. +// Adding one to that should ensure overflow to the next higher size. +const ( + belowOverflow = bs * 3 / 2 // 1.5 bs = 2 buckets @ 75% + atOverflow = belowOverflow + bs/8 // 2 buckets at 13/16 fill. +) + +var mapBucketTests = [...]struct { + n int // n is the number of map elements + noescape int // number of expected buckets for non-escaping map + escape int // number of expected buckets for escaping map +}{ + {-(1 << 30), 1, 1}, + {-1, 1, 1}, + {0, 1, 1}, + {1, 1, 1}, + {bs, 1, 1}, + {bs + 1, 2, 2}, + {belowOverflow, 2, 2}, // 1.5 bs = 2 buckets @ 75% + {atOverflow + 1, 4, 4}, // 13/8 bs + 1 == overflow to 4 + + {2 * belowOverflow, 4, 4}, // 3 bs = 4 buckets @75% + {2*atOverflow + 1, 8, 8}, // 13/4 bs + 1 = overflow to 8 + + {4 * belowOverflow, 8, 8}, // 6 bs = 8 buckets @ 75% + {4*atOverflow + 1, 16, 16}, // 13/2 bs + 1 = overflow to 16 +} + +func TestMapBuckets(t *testing.T) { + // Test that maps of different sizes have the right number of buckets. + // Non-escaping maps with small buckets (like map[int]int) never + // have a nil bucket pointer due to starting with preallocated buckets + // on the stack. Escaping maps start with a non-nil bucket pointer if + // hint size is above bucketCnt and thereby have more than one bucket. + // These tests depend on bucketCnt and loadFactor* in map.go. 
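+	// Roughly: with bucketCnt == 8 and a 6.5/8 load factor, the bucket
+	// count doubles once the element count passes 13/16 of capacity,
+	// which is where the atOverflow constant above comes from.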
+ t.Run("mapliteral", func(t *testing.T) { + for _, tt := range mapBucketTests { + localMap := map[int]int{} + if runtime.MapBucketsPointerIsNil(localMap) { + t.Errorf("no escape: buckets pointer is nil for non-escaping map") + } + for i := 0; i < tt.n; i++ { + localMap[i] = i + } + if got := runtime.MapBucketsCount(localMap); got != tt.noescape { + t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got) + } + escapingMap := runtime.Escape(map[int]int{}) + if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) { + t.Errorf("escape: buckets pointer is nil for n=%d buckets", count) + } + for i := 0; i < tt.n; i++ { + escapingMap[i] = i + } + if got := runtime.MapBucketsCount(escapingMap); got != tt.escape { + t.Errorf("escape n=%d want %d buckets, got %d", tt.n, tt.escape, got) + } + } + }) + t.Run("nohint", func(t *testing.T) { + for _, tt := range mapBucketTests { + localMap := make(map[int]int) + if runtime.MapBucketsPointerIsNil(localMap) { + t.Errorf("no escape: buckets pointer is nil for non-escaping map") + } + for i := 0; i < tt.n; i++ { + localMap[i] = i + } + if got := runtime.MapBucketsCount(localMap); got != tt.noescape { + t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got) + } + escapingMap := runtime.Escape(make(map[int]int)) + if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) { + t.Errorf("escape: buckets pointer is nil for n=%d buckets", count) + } + for i := 0; i < tt.n; i++ { + escapingMap[i] = i + } + if got := runtime.MapBucketsCount(escapingMap); got != tt.escape { + t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got) + } + } + }) + t.Run("makemap", func(t *testing.T) { + for _, tt := range mapBucketTests { + localMap := make(map[int]int, tt.n) + if runtime.MapBucketsPointerIsNil(localMap) { + t.Errorf("no escape: buckets pointer is nil for non-escaping map") + } + for i := 0; i < tt.n; i++ { + localMap[i] = i + } + if got := runtime.MapBucketsCount(localMap); got != tt.noescape { + t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got) + } + escapingMap := runtime.Escape(make(map[int]int, tt.n)) + if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) { + t.Errorf("escape: buckets pointer is nil for n=%d buckets", count) + } + for i := 0; i < tt.n; i++ { + escapingMap[i] = i + } + if got := runtime.MapBucketsCount(escapingMap); got != tt.escape { + t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got) + } + } + }) + t.Run("makemap64", func(t *testing.T) { + for _, tt := range mapBucketTests { + localMap := make(map[int]int, int64(tt.n)) + if runtime.MapBucketsPointerIsNil(localMap) { + t.Errorf("no escape: buckets pointer is nil for non-escaping map") + } + for i := 0; i < tt.n; i++ { + localMap[i] = i + } + if got := runtime.MapBucketsCount(localMap); got != tt.noescape { + t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got) + } + escapingMap := runtime.Escape(make(map[int]int, tt.n)) + if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) { + t.Errorf("escape: buckets pointer is nil for n=%d buckets", count) + } + for i := 0; i < tt.n; i++ { + escapingMap[i] = i + } + if got := runtime.MapBucketsCount(escapingMap); got != tt.escape { + t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got) + } + } + }) + +} + +func 
benchmarkMapPop(b *testing.B, n int) { + m := map[int]int{} + for i := 0; i < b.N; i++ { + for j := 0; j < n; j++ { + m[j] = j + } + for j := 0; j < n; j++ { + // Use iterator to pop an element. + // We want this to be fast, see issue 8412. + for k := range m { + delete(m, k) + break + } + } + } +} + +func BenchmarkMapPop100(b *testing.B) { benchmarkMapPop(b, 100) } +func BenchmarkMapPop1000(b *testing.B) { benchmarkMapPop(b, 1000) } +func BenchmarkMapPop10000(b *testing.B) { benchmarkMapPop(b, 10000) } + +var testNonEscapingMapVariable int = 8 + +func TestNonEscapingMap(t *testing.T) { + n := testing.AllocsPerRun(1000, func() { + m := map[int]int{} + m[0] = 0 + }) + if n != 0 { + t.Fatalf("mapliteral: want 0 allocs, got %v", n) + } + n = testing.AllocsPerRun(1000, func() { + m := make(map[int]int) + m[0] = 0 + }) + if n != 0 { + t.Fatalf("no hint: want 0 allocs, got %v", n) + } + n = testing.AllocsPerRun(1000, func() { + m := make(map[int]int, 8) + m[0] = 0 + }) + if n != 0 { + t.Fatalf("with small hint: want 0 allocs, got %v", n) + } + n = testing.AllocsPerRun(1000, func() { + m := make(map[int]int, testNonEscapingMapVariable) + m[0] = 0 + }) + if n != 0 { + t.Fatalf("with variable hint: want 0 allocs, got %v", n) + } + +} + +func benchmarkMapAssignInt32(b *testing.B, n int) { + a := make(map[int32]int) + for i := 0; i < b.N; i++ { + a[int32(i&(n-1))] = i + } +} + +func benchmarkMapOperatorAssignInt32(b *testing.B, n int) { + a := make(map[int32]int) + for i := 0; i < b.N; i++ { + a[int32(i&(n-1))] += i + } +} + +func benchmarkMapAppendAssignInt32(b *testing.B, n int) { + a := make(map[int32][]int) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + key := int32(i & (n - 1)) + a[key] = append(a[key], i) + } +} + +func benchmarkMapDeleteInt32(b *testing.B, n int) { + a := make(map[int32]int, n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if len(a) == 0 { + b.StopTimer() + for j := i; j < i+n; j++ { + a[int32(j)] = j + } + b.StartTimer() + } + delete(a, int32(i)) + } +} + +func benchmarkMapAssignInt64(b *testing.B, n int) { + a := make(map[int64]int) + for i := 0; i < b.N; i++ { + a[int64(i&(n-1))] = i + } +} + +func benchmarkMapOperatorAssignInt64(b *testing.B, n int) { + a := make(map[int64]int) + for i := 0; i < b.N; i++ { + a[int64(i&(n-1))] += i + } +} + +func benchmarkMapAppendAssignInt64(b *testing.B, n int) { + a := make(map[int64][]int) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + key := int64(i & (n - 1)) + a[key] = append(a[key], i) + } +} + +func benchmarkMapDeleteInt64(b *testing.B, n int) { + a := make(map[int64]int, n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if len(a) == 0 { + b.StopTimer() + for j := i; j < i+n; j++ { + a[int64(j)] = j + } + b.StartTimer() + } + delete(a, int64(i)) + } +} + +func benchmarkMapAssignStr(b *testing.B, n int) { + k := make([]string, n) + for i := 0; i < len(k); i++ { + k[i] = strconv.Itoa(i) + } + b.ResetTimer() + a := make(map[string]int) + for i := 0; i < b.N; i++ { + a[k[i&(n-1)]] = i + } +} + +func benchmarkMapOperatorAssignStr(b *testing.B, n int) { + k := make([]string, n) + for i := 0; i < len(k); i++ { + k[i] = strconv.Itoa(i) + } + b.ResetTimer() + a := make(map[string]string) + for i := 0; i < b.N; i++ { + key := k[i&(n-1)] + a[key] += key + } +} + +func benchmarkMapAppendAssignStr(b *testing.B, n int) { + k := make([]string, n) + for i := 0; i < len(k); i++ { + k[i] = strconv.Itoa(i) + } + a := make(map[string][]string) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + 
key := k[i&(n-1)] + a[key] = append(a[key], key) + } +} + +func benchmarkMapDeleteStr(b *testing.B, n int) { + i2s := make([]string, n) + for i := 0; i < n; i++ { + i2s[i] = strconv.Itoa(i) + } + a := make(map[string]int, n) + b.ResetTimer() + k := 0 + for i := 0; i < b.N; i++ { + if len(a) == 0 { + b.StopTimer() + for j := 0; j < n; j++ { + a[i2s[j]] = j + } + k = i + b.StartTimer() + } + delete(a, i2s[i-k]) + } +} + +func benchmarkMapDeletePointer(b *testing.B, n int) { + i2p := make([]*int, n) + for i := 0; i < n; i++ { + i2p[i] = new(int) + } + a := make(map[*int]int, n) + b.ResetTimer() + k := 0 + for i := 0; i < b.N; i++ { + if len(a) == 0 { + b.StopTimer() + for j := 0; j < n; j++ { + a[i2p[j]] = j + } + k = i + b.StartTimer() + } + delete(a, i2p[i-k]) + } +} + +func runWith(f func(*testing.B, int), v ...int) func(*testing.B) { + return func(b *testing.B) { + for _, n := range v { + b.Run(strconv.Itoa(n), func(b *testing.B) { f(b, n) }) + } + } +} + +func BenchmarkMapAssign(b *testing.B) { + b.Run("Int32", runWith(benchmarkMapAssignInt32, 1<<8, 1<<16)) + b.Run("Int64", runWith(benchmarkMapAssignInt64, 1<<8, 1<<16)) + b.Run("Str", runWith(benchmarkMapAssignStr, 1<<8, 1<<16)) +} + +func BenchmarkMapOperatorAssign(b *testing.B) { + b.Run("Int32", runWith(benchmarkMapOperatorAssignInt32, 1<<8, 1<<16)) + b.Run("Int64", runWith(benchmarkMapOperatorAssignInt64, 1<<8, 1<<16)) + b.Run("Str", runWith(benchmarkMapOperatorAssignStr, 1<<8, 1<<16)) +} + +func BenchmarkMapAppendAssign(b *testing.B) { + b.Run("Int32", runWith(benchmarkMapAppendAssignInt32, 1<<8, 1<<16)) + b.Run("Int64", runWith(benchmarkMapAppendAssignInt64, 1<<8, 1<<16)) + b.Run("Str", runWith(benchmarkMapAppendAssignStr, 1<<8, 1<<16)) +} + +func BenchmarkMapDelete(b *testing.B) { + b.Run("Int32", runWith(benchmarkMapDeleteInt32, 100, 1000, 10000)) + b.Run("Int64", runWith(benchmarkMapDeleteInt64, 100, 1000, 10000)) + b.Run("Str", runWith(benchmarkMapDeleteStr, 100, 1000, 10000)) + b.Run("Pointer", runWith(benchmarkMapDeletePointer, 100, 1000, 10000)) +} + +func TestDeferDeleteSlow(t *testing.T) { + ks := []complex128{0, 1, 2, 3} + + m := make(map[any]int) + for i, k := range ks { + m[k] = i + } + if len(m) != len(ks) { + t.Errorf("want %d elements, got %d", len(ks), len(m)) + } + + func() { + for _, k := range ks { + defer delete(m, k) + } + }() + if len(m) != 0 { + t.Errorf("want 0 elements, got %d", len(m)) + } +} + +// TestIncrementAfterDeleteValueInt and other test Issue 25936. +// Value types int, int32, int64 are affected. Value type string +// works as expected. 
+func TestIncrementAfterDeleteValueInt(t *testing.T) { + const key1 = 12 + const key2 = 13 + + m := make(map[int]int) + m[key1] = 99 + delete(m, key1) + m[key2]++ + if n2 := m[key2]; n2 != 1 { + t.Errorf("incremented 0 to %d", n2) + } +} + +func TestIncrementAfterDeleteValueInt32(t *testing.T) { + const key1 = 12 + const key2 = 13 + + m := make(map[int]int32) + m[key1] = 99 + delete(m, key1) + m[key2]++ + if n2 := m[key2]; n2 != 1 { + t.Errorf("incremented 0 to %d", n2) + } +} + +func TestIncrementAfterDeleteValueInt64(t *testing.T) { + const key1 = 12 + const key2 = 13 + + m := make(map[int]int64) + m[key1] = 99 + delete(m, key1) + m[key2]++ + if n2 := m[key2]; n2 != 1 { + t.Errorf("incremented 0 to %d", n2) + } +} + +func TestIncrementAfterDeleteKeyStringValueInt(t *testing.T) { + const key1 = "" + const key2 = "x" + + m := make(map[string]int) + m[key1] = 99 + delete(m, key1) + m[key2] += 1 + if n2 := m[key2]; n2 != 1 { + t.Errorf("incremented 0 to %d", n2) + } +} + +func TestIncrementAfterDeleteKeyValueString(t *testing.T) { + const key1 = "" + const key2 = "x" + + m := make(map[string]string) + m[key1] = "99" + delete(m, key1) + m[key2] += "1" + if n2 := m[key2]; n2 != "1" { + t.Errorf("appended '1' to empty (nil) string, got %s", n2) + } +} + +// TestIncrementAfterBulkClearKeyStringValueInt tests that map bulk +// deletion (mapclear) still works as expected. Note that it was not +// affected by Issue 25936. +func TestIncrementAfterBulkClearKeyStringValueInt(t *testing.T) { + const key1 = "" + const key2 = "x" + + m := make(map[string]int) + m[key1] = 99 + for k := range m { + delete(m, k) + } + m[key2]++ + if n2 := m[key2]; n2 != 1 { + t.Errorf("incremented 0 to %d", n2) + } +} + +func TestMapTombstones(t *testing.T) { + m := map[int]int{} + const N = 10000 + // Fill a map. + for i := 0; i < N; i++ { + m[i] = i + } + runtime.MapTombstoneCheck(m) + // Delete half of the entries. + for i := 0; i < N; i += 2 { + delete(m, i) + } + runtime.MapTombstoneCheck(m) + // Add new entries to fill in holes. + for i := N; i < 3*N/2; i++ { + m[i] = i + } + runtime.MapTombstoneCheck(m) + // Delete everything. + for i := 0; i < 3*N/2; i++ { + delete(m, i) + } + runtime.MapTombstoneCheck(m) +} + +type canString int + +func (c canString) String() string { + return fmt.Sprintf("%d", int(c)) +} + +func TestMapInterfaceKey(t *testing.T) { + // Test all the special cases in runtime.typehash. + type GrabBag struct { + f32 float32 + f64 float64 + c64 complex64 + c128 complex128 + s string + i0 any + i1 interface { + String() string + } + a [4]string + } + + m := map[any]bool{} + // Put a bunch of data in m, so that a bad hash is likely to + // lead to a bad bucket, which will lead to a missed lookup. 
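+	// Interface keys are hashed via runtime.typehash on the dynamic type,
+	// so each GrabBag field below exercises a different typehash case.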
+ for i := 0; i < 1000; i++ { + m[i] = true + } + m[GrabBag{f32: 1.0}] = true + if !m[GrabBag{f32: 1.0}] { + panic("f32 not found") + } + m[GrabBag{f64: 1.0}] = true + if !m[GrabBag{f64: 1.0}] { + panic("f64 not found") + } + m[GrabBag{c64: 1.0i}] = true + if !m[GrabBag{c64: 1.0i}] { + panic("c64 not found") + } + m[GrabBag{c128: 1.0i}] = true + if !m[GrabBag{c128: 1.0i}] { + panic("c128 not found") + } + m[GrabBag{s: "foo"}] = true + if !m[GrabBag{s: "foo"}] { + panic("string not found") + } + m[GrabBag{i0: "foo"}] = true + if !m[GrabBag{i0: "foo"}] { + panic("interface{} not found") + } + m[GrabBag{i1: canString(5)}] = true + if !m[GrabBag{i1: canString(5)}] { + panic("interface{String() string} not found") + } + m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] = true + if !m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] { + panic("array not found") + } +} + +type panicStructKey struct { + sli []int +} + +func (p panicStructKey) String() string { + return "panic" +} + +type structKey struct { +} + +func (structKey) String() string { + return "structKey" +} + +func TestEmptyMapWithInterfaceKey(t *testing.T) { + var ( + b bool + i int + i8 int8 + i16 int16 + i32 int32 + i64 int64 + ui uint + ui8 uint8 + ui16 uint16 + ui32 uint32 + ui64 uint64 + uipt uintptr + f32 float32 + f64 float64 + c64 complex64 + c128 complex128 + a [4]string + s string + p *int + up unsafe.Pointer + ch chan int + i0 any + i1 interface { + String() string + } + structKey structKey + i0Panic any = []int{} + i1Panic interface { + String() string + } = panicStructKey{} + panicStructKey = panicStructKey{} + sli []int + me = map[any]struct{}{} + mi = map[interface { + String() string + }]struct{}{} + ) + mustNotPanic := func(f func()) { + f() + } + mustPanic := func(f func()) { + defer func() { + r := recover() + if r == nil { + t.Errorf("didn't panic") + } + }() + f() + } + mustNotPanic(func() { + _ = me[b] + }) + mustNotPanic(func() { + _ = me[i] + }) + mustNotPanic(func() { + _ = me[i8] + }) + mustNotPanic(func() { + _ = me[i16] + }) + mustNotPanic(func() { + _ = me[i32] + }) + mustNotPanic(func() { + _ = me[i64] + }) + mustNotPanic(func() { + _ = me[ui] + }) + mustNotPanic(func() { + _ = me[ui8] + }) + mustNotPanic(func() { + _ = me[ui16] + }) + mustNotPanic(func() { + _ = me[ui32] + }) + mustNotPanic(func() { + _ = me[ui64] + }) + mustNotPanic(func() { + _ = me[uipt] + }) + mustNotPanic(func() { + _ = me[f32] + }) + mustNotPanic(func() { + _ = me[f64] + }) + mustNotPanic(func() { + _ = me[c64] + }) + mustNotPanic(func() { + _ = me[c128] + }) + mustNotPanic(func() { + _ = me[a] + }) + mustNotPanic(func() { + _ = me[s] + }) + mustNotPanic(func() { + _ = me[p] + }) + mustNotPanic(func() { + _ = me[up] + }) + mustNotPanic(func() { + _ = me[ch] + }) + mustNotPanic(func() { + _ = me[i0] + }) + mustNotPanic(func() { + _ = me[i1] + }) + mustNotPanic(func() { + _ = me[structKey] + }) + mustPanic(func() { + _ = me[i0Panic] + }) + mustPanic(func() { + _ = me[i1Panic] + }) + mustPanic(func() { + _ = me[panicStructKey] + }) + mustPanic(func() { + _ = me[sli] + }) + mustPanic(func() { + _ = me[me] + }) + + mustNotPanic(func() { + _ = mi[structKey] + }) + mustPanic(func() { + _ = mi[panicStructKey] + }) +} + +func TestLoadFactor(t *testing.T) { + for b := uint8(0); b < 20; b++ { + count := 13 * (1 << b) / 2 // 6.5 + if b == 0 { + count = 8 + } + if runtime.OverLoadFactor(count, b) { + t.Errorf("OverLoadFactor(%d,%d)=true, want false", count, b) + } + if !runtime.OverLoadFactor(count+1, b) { + 
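+			// count is the largest element count that fits 1<<b buckets
+			// at the 13/2 = 6.5 average load-factor bound, so one more
+			// element must trip it.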
t.Errorf("OverLoadFactor(%d,%d)=false, want true", count+1, b) + } + } +} + +func TestMapKeys(t *testing.T) { + type key struct { + s string + pad [128]byte // sizeof(key) > abi.MapMaxKeyBytes + } + m := map[key]int{{s: "a"}: 1, {s: "b"}: 2} + keys := make([]key, 0, len(m)) + runtime.MapKeys(m, unsafe.Pointer(&keys)) + for _, k := range keys { + if len(k.s) != 1 { + t.Errorf("len(k.s) == %d, want 1", len(k.s)) + } + } +} + +func TestMapValues(t *testing.T) { + type val struct { + s string + pad [128]byte // sizeof(val) > abi.MapMaxElemBytes + } + m := map[int]val{1: {s: "a"}, 2: {s: "b"}} + vals := make([]val, 0, len(m)) + runtime.MapValues(m, unsafe.Pointer(&vals)) + for _, v := range vals { + if len(v.s) != 1 { + t.Errorf("len(v.s) == %d, want 1", len(v.s)) + } + } +} + +func computeHash() uintptr { + var v struct{} + return runtime.MemHash(unsafe.Pointer(&v), 0, unsafe.Sizeof(v)) +} + +func subprocessHash(t *testing.T, env string) uintptr { + t.Helper() + + cmd := testenv.CleanCmdEnv(testenv.Command(t, os.Args[0], "-test.run=^TestMemHashGlobalSeed$")) + cmd.Env = append(cmd.Env, "GO_TEST_SUBPROCESS_HASH=1") + if env != "" { + cmd.Env = append(cmd.Env, env) + } + + out, err := cmd.Output() + if err != nil { + t.Fatalf("cmd.Output got err %v want nil", err) + } + + s := strings.TrimSpace(string(out)) + h, err := strconv.ParseUint(s, 10, 64) + if err != nil { + t.Fatalf("Parse output %q got err %v want nil", s, err) + } + return uintptr(h) +} + +// memhash has unique per-process seeds, so hashes should differ across +// processes. +// +// Regression test for https://go.dev/issue/66885. +func TestMemHashGlobalSeed(t *testing.T) { + if os.Getenv("GO_TEST_SUBPROCESS_HASH") != "" { + fmt.Println(computeHash()) + os.Exit(0) + return + } + + testenv.MustHaveExec(t) + + // aeshash and memhashFallback use separate per-process seeds, so test + // both. + t.Run("aes", func(t *testing.T) { + if !*runtime.UseAeshash { + t.Skip("No AES") + } + + h1 := subprocessHash(t, "") + t.Logf("%d", h1) + h2 := subprocessHash(t, "") + t.Logf("%d", h2) + h3 := subprocessHash(t, "") + t.Logf("%d", h3) + + if h1 == h2 && h2 == h3 { + t.Errorf("got duplicate hash %d want unique", h1) + } + }) + + t.Run("noaes", func(t *testing.T) { + env := "" + if *runtime.UseAeshash { + env = "GODEBUG=cpu.aes=off" + } + + h1 := subprocessHash(t, env) + t.Logf("%d", h1) + h2 := subprocessHash(t, env) + t.Logf("%d", h2) + h3 := subprocessHash(t, env) + t.Logf("%d", h3) + + if h1 == h2 && h2 == h3 { + t.Errorf("got duplicate hash %d want unique", h1) + } + }) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mbarrier.go b/platform/dbops/binaries/go/go/src/runtime/mbarrier.go new file mode 100644 index 0000000000000000000000000000000000000000..c4b6c2a789cd165ef4d39f421fe4faa084cf7564 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mbarrier.go @@ -0,0 +1,373 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Garbage collector: write barriers. +// +// For the concurrent garbage collector, the Go compiler implements +// updates to pointer-valued fields that may be in heap objects by +// emitting calls to write barriers. The main write barrier for +// individual pointer writes is gcWriteBarrier and is implemented in +// assembly. This file contains write barrier entry points for bulk +// operations. See also mwbbuf.go. 
+ +package runtime + +import ( + "internal/abi" + "internal/goarch" + "internal/goexperiment" + "unsafe" +) + +// Go uses a hybrid barrier that combines a Yuasa-style deletion +// barrier—which shades the object whose reference is being +// overwritten—with Dijkstra insertion barrier—which shades the object +// whose reference is being written. The insertion part of the barrier +// is necessary while the calling goroutine's stack is grey. In +// pseudocode, the barrier is: +// +// writePointer(slot, ptr): +// shade(*slot) +// if current stack is grey: +// shade(ptr) +// *slot = ptr +// +// slot is the destination in Go code. +// ptr is the value that goes into the slot in Go code. +// +// Shade indicates that it has seen a white pointer by adding the referent +// to wbuf as well as marking it. +// +// The two shades and the condition work together to prevent a mutator +// from hiding an object from the garbage collector: +// +// 1. shade(*slot) prevents a mutator from hiding an object by moving +// the sole pointer to it from the heap to its stack. If it attempts +// to unlink an object from the heap, this will shade it. +// +// 2. shade(ptr) prevents a mutator from hiding an object by moving +// the sole pointer to it from its stack into a black object in the +// heap. If it attempts to install the pointer into a black object, +// this will shade it. +// +// 3. Once a goroutine's stack is black, the shade(ptr) becomes +// unnecessary. shade(ptr) prevents hiding an object by moving it from +// the stack to the heap, but this requires first having a pointer +// hidden on the stack. Immediately after a stack is scanned, it only +// points to shaded objects, so it's not hiding anything, and the +// shade(*slot) prevents it from hiding any other pointers on its +// stack. +// +// For a detailed description of this barrier and proof of +// correctness, see https://github.com/golang/proposal/blob/master/design/17503-eliminate-rescan.md +// +// +// +// Dealing with memory ordering: +// +// Both the Yuasa and Dijkstra barriers can be made conditional on the +// color of the object containing the slot. We chose not to make these +// conditional because the cost of ensuring that the object holding +// the slot doesn't concurrently change color without the mutator +// noticing seems prohibitive. +// +// Consider the following example where the mutator writes into +// a slot and then loads the slot's mark bit while the GC thread +// writes to the slot's mark bit and then as part of scanning reads +// the slot. +// +// Initially both [slot] and [slotmark] are 0 (nil) +// Mutator thread GC thread +// st [slot], ptr st [slotmark], 1 +// +// ld r1, [slotmark] ld r2, [slot] +// +// Without an expensive memory barrier between the st and the ld, the final +// result on most HW (including 386/amd64) can be r1==r2==0. This is a classic +// example of what can happen when loads are allowed to be reordered with older +// stores (avoiding such reorderings lies at the heart of the classic +// Peterson/Dekker algorithms for mutual exclusion). Rather than require memory +// barriers, which will slow down both the mutator and the GC, we always grey +// the ptr object regardless of the slot's color. +// +// Another place where we intentionally omit memory barriers is when +// accessing mheap_.arena_used to check if a pointer points into the +// heap. 
On relaxed memory machines, it's possible for a mutator to
+// extend the size of the heap by updating arena_used, allocate an
+// object from this new region, and publish a pointer to that object,
+// but for tracing running on another processor to observe the pointer
+// but use the old value of arena_used. In this case, tracing will not
+// mark the object, even though it's reachable. However, the mutator
+// is guaranteed to execute a write barrier when it publishes the
+// pointer, so it will take care of marking the object. A general
+// consequence of this is that the garbage collector may cache the
+// value of mheap_.arena_used. (See issue #9984.)
+//
+//
+// Stack writes:
+//
+// The compiler omits write barriers for writes to the current frame,
+// but if a stack pointer has been passed down the call stack, the
+// compiler will generate a write barrier for writes through that
+// pointer (because it doesn't know it's not a heap pointer).
+//
+//
+// Global writes:
+//
+// The Go garbage collector requires write barriers when heap pointers
+// are stored in globals. Many garbage collectors ignore writes to
+// globals and instead pick up global -> heap pointers during
+// termination. This increases pause time, so we instead rely on write
+// barriers for writes to globals so that we don't have to rescan
+// globals during mark termination.
+//
+//
+// Publication ordering:
+//
+// The write barrier is *pre-publication*, meaning that the write
+// barrier happens prior to the *slot = ptr write that may make ptr
+// reachable by some goroutine that currently cannot reach it.
+//
+//
+// Signal handler pointer writes:
+//
+// In general, the signal handler cannot safely invoke the write
+// barrier because it may run without a P or even during the write
+// barrier.
+//
+// There is exactly one exception: profbuf.go omits a barrier during
+// signal handler profile logging. That's safe only because of the
+// deletion barrier. See profbuf.go for a detailed argument. If we
+// remove the deletion barrier, we'll have to work out a new way to
+// handle the profile logging.
+
+// typedmemmove copies a value of type typ to dst from src.
+// Must be nosplit, see #16026.
+//
+// TODO: Perfect for go:nosplitrec since we can't have a safe point
+// anywhere in the bulk barrier or memmove.
+//
+//go:nosplit
+func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer) {
+	if dst == src {
+		return
+	}
+	if writeBarrier.enabled && typ.PtrBytes != 0 {
+		// This always copies a full value of type typ so it's safe
+		// to pass typ along as an optimization. See the comment on
+		// bulkBarrierPreWrite.
+		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
+	}
+	// There's a race here: if some other goroutine can write to
+	// src, it may change some pointer in src after we've
+	// performed the write barrier but before we perform the
+	// memory copy. This is safe because the write performed by that
+	// other goroutine must also be accompanied by a write
+	// barrier, so at worst we've unnecessarily greyed the old
+	// pointer that was in src.
+	memmove(dst, src, typ.Size_)
+	if goexperiment.CgoCheck2 {
+		cgoCheckMemmove2(typ, dst, src, 0, typ.Size_)
+	}
+}
+
+// wbZero performs the write barrier operations necessary before
+// zeroing a region of memory at address dst of type typ.
+// Does not actually do the zeroing.
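+//
+// An illustrative pairing (compare typedmemclr below, which uses
+// bulkBarrierPreWrite the same way): run the barrier first, then zero,
+// e.g.
+//
+//	wbZero(typ, dst)
+//	memclrNoHeapPointers(dst, typ.Size_)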
+// +//go:nowritebarrierrec +//go:nosplit +func wbZero(typ *_type, dst unsafe.Pointer) { + // This always copies a full value of type typ so it's safe + // to pass typ along as an optimization. See the comment on + // bulkBarrierPreWrite. + bulkBarrierPreWrite(uintptr(dst), 0, typ.PtrBytes, typ) +} + +// wbMove performs the write barrier operations necessary before +// copying a region of memory from src to dst of type typ. +// Does not actually do the copying. +// +//go:nowritebarrierrec +//go:nosplit +func wbMove(typ *_type, dst, src unsafe.Pointer) { + // This always copies a full value of type typ so it's safe to + // pass a type here. + // + // See the comment on bulkBarrierPreWrite. + bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ) +} + +//go:linkname reflect_typedmemmove reflect.typedmemmove +func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) { + if raceenabled { + raceWriteObjectPC(typ, dst, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove)) + raceReadObjectPC(typ, src, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove)) + } + if msanenabled { + msanwrite(dst, typ.Size_) + msanread(src, typ.Size_) + } + if asanenabled { + asanwrite(dst, typ.Size_) + asanread(src, typ.Size_) + } + typedmemmove(typ, dst, src) +} + +//go:linkname reflectlite_typedmemmove internal/reflectlite.typedmemmove +func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) { + reflect_typedmemmove(typ, dst, src) +} + +// reflectcallmove is invoked by reflectcall to copy the return values +// out of the stack and into the heap, invoking the necessary write +// barriers. dst, src, and size describe the return value area to +// copy. typ describes the entire frame (not just the return values). +// typ may be nil, which indicates write barriers are not needed. +// +// It must be nosplit and must only call nosplit functions because the +// stack map of reflectcall is wrong. +// +//go:nosplit +func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) { + if writeBarrier.enabled && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize { + // Pass nil for the type. dst does not point to value of type typ, + // but rather points into one, so applying the optimization is not + // safe. See the comment on this function. + bulkBarrierPreWrite(uintptr(dst), uintptr(src), size, nil) + } + memmove(dst, src, size) + + // Move pointers returned in registers to a place where the GC can see them. + for i := range regs.Ints { + if regs.ReturnIsPtr.Get(i) { + regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i]) + } + } +} + +//go:nosplit +func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int { + n := dstLen + if n > srcLen { + n = srcLen + } + if n == 0 { + return 0 + } + + // The compiler emits calls to typedslicecopy before + // instrumentation runs, so unlike the other copying and + // assignment operations, it's not instrumented in the calling + // code and needs its own instrumentation. 
+ if raceenabled { + callerpc := getcallerpc() + pc := abi.FuncPCABIInternal(slicecopy) + racewriterangepc(dstPtr, uintptr(n)*typ.Size_, callerpc, pc) + racereadrangepc(srcPtr, uintptr(n)*typ.Size_, callerpc, pc) + } + if msanenabled { + msanwrite(dstPtr, uintptr(n)*typ.Size_) + msanread(srcPtr, uintptr(n)*typ.Size_) + } + if asanenabled { + asanwrite(dstPtr, uintptr(n)*typ.Size_) + asanread(srcPtr, uintptr(n)*typ.Size_) + } + + if goexperiment.CgoCheck2 { + cgoCheckSliceCopy(typ, dstPtr, srcPtr, n) + } + + if dstPtr == srcPtr { + return n + } + + // Note: No point in checking typ.PtrBytes here: + // compiler only emits calls to typedslicecopy for types with pointers, + // and growslice and reflect_typedslicecopy check for pointers + // before calling typedslicecopy. + size := uintptr(n) * typ.Size_ + if writeBarrier.enabled { + // This always copies one or more full values of type typ so + // it's safe to pass typ along as an optimization. See the comment on + // bulkBarrierPreWrite. + pwsize := size - typ.Size_ + typ.PtrBytes + bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize, typ) + } + // See typedmemmove for a discussion of the race between the + // barrier and memmove. + memmove(dstPtr, srcPtr, size) + return n +} + +//go:linkname reflect_typedslicecopy reflect.typedslicecopy +func reflect_typedslicecopy(elemType *_type, dst, src slice) int { + if elemType.PtrBytes == 0 { + return slicecopy(dst.array, dst.len, src.array, src.len, elemType.Size_) + } + return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len) +} + +// typedmemclr clears the typed memory at ptr with type typ. The +// memory at ptr must already be initialized (and hence in type-safe +// state). If the memory is being initialized for the first time, see +// memclrNoHeapPointers. +// +// If the caller knows that typ has pointers, it can alternatively +// call memclrHasPointers. +// +// TODO: A "go:nosplitrec" annotation would be perfect for this. +// +//go:nosplit +func typedmemclr(typ *_type, ptr unsafe.Pointer) { + if writeBarrier.enabled && typ.PtrBytes != 0 { + // This always clears a whole value of type typ, so it's + // safe to pass a type here and apply the optimization. + // See the comment on bulkBarrierPreWrite. + bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes, typ) + } + memclrNoHeapPointers(ptr, typ.Size_) +} + +//go:linkname reflect_typedmemclr reflect.typedmemclr +func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) { + typedmemclr(typ, ptr) +} + +//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial +func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) { + if writeBarrier.enabled && typ.PtrBytes != 0 { + // Pass nil for the type. ptr does not point to value of type typ, + // but rather points into one so it's not safe to apply the optimization. + // See the comment on this function in the reflect package and the + // comment on bulkBarrierPreWrite. + bulkBarrierPreWrite(uintptr(ptr), 0, size, nil) + } + memclrNoHeapPointers(ptr, size) +} + +//go:linkname reflect_typedarrayclear reflect.typedarrayclear +func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) { + size := typ.Size_ * uintptr(len) + if writeBarrier.enabled && typ.PtrBytes != 0 { + // This always clears whole elements of an array, so it's + // safe to pass a type here. See the comment on bulkBarrierPreWrite. 
+ bulkBarrierPreWrite(uintptr(ptr), 0, size, typ) + } + memclrNoHeapPointers(ptr, size) +} + +// memclrHasPointers clears n bytes of typed memory starting at ptr. +// The caller must ensure that the type of the object at ptr has +// pointers, usually by checking typ.PtrBytes. However, ptr +// does not have to point to the start of the allocation. +// +//go:nosplit +func memclrHasPointers(ptr unsafe.Pointer, n uintptr) { + // Pass nil for the type since we don't have one here anyway. + bulkBarrierPreWrite(uintptr(ptr), 0, n, nil) + memclrNoHeapPointers(ptr, n) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mbitmap.go b/platform/dbops/binaries/go/go/src/runtime/mbitmap.go new file mode 100644 index 0000000000000000000000000000000000000000..cdd1c5fc3b5b24fcd110f9e876bf7592bee3c42e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mbitmap.go @@ -0,0 +1,775 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/goarch" + "runtime/internal/atomic" + "runtime/internal/sys" + "unsafe" +) + +// addb returns the byte pointer p+n. +// +//go:nowritebarrier +//go:nosplit +func addb(p *byte, n uintptr) *byte { + // Note: wrote out full expression instead of calling add(p, n) + // to reduce the number of temporaries generated by the + // compiler for this trivial expression during inlining. + return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n)) +} + +// subtractb returns the byte pointer p-n. +// +//go:nowritebarrier +//go:nosplit +func subtractb(p *byte, n uintptr) *byte { + // Note: wrote out full expression instead of calling add(p, -n) + // to reduce the number of temporaries generated by the + // compiler for this trivial expression during inlining. + return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n)) +} + +// add1 returns the byte pointer p+1. +// +//go:nowritebarrier +//go:nosplit +func add1(p *byte) *byte { + // Note: wrote out full expression instead of calling addb(p, 1) + // to reduce the number of temporaries generated by the + // compiler for this trivial expression during inlining. + return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1)) +} + +// subtract1 returns the byte pointer p-1. +// +// nosplit because it is used during write barriers and must not be preempted. +// +//go:nowritebarrier +//go:nosplit +func subtract1(p *byte) *byte { + // Note: wrote out full expression instead of calling subtractb(p, 1) + // to reduce the number of temporaries generated by the + // compiler for this trivial expression during inlining. + return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1)) +} + +// markBits provides access to the mark bit for an object in the heap. +// bytep points to the byte holding the mark bit. +// mask is a byte with a single bit set that can be &ed with *bytep +// to see if the bit has been set. +// *m.byte&m.mask != 0 indicates the mark bit is set. +// index can be used along with span information to generate +// the address of the object in the heap. +// We maintain one set of mark bits for allocation and one for +// marking purposes. 
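+// For example (illustrative), markBitsForAddr(p).isMarked() reports
+// whether the object containing address p is marked in the current
+// cycle's gcmarkBits.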
+type markBits struct {
+	bytep *uint8
+	mask  uint8
+	index uintptr
+}
+
+//go:nosplit
+func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
+	bytep, mask := s.allocBits.bitp(allocBitIndex)
+	return markBits{bytep, mask, allocBitIndex}
+}
+
+// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
+// and negates them so that ctz (count trailing zeros) instructions
+// can be used. It then places these 8 bytes into the cached 64-bit
+// s.allocCache.
+func (s *mspan) refillAllocCache(whichByte uint16) {
+	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte))))
+	aCache := uint64(0)
+	aCache |= uint64(bytes[0])
+	aCache |= uint64(bytes[1]) << (1 * 8)
+	aCache |= uint64(bytes[2]) << (2 * 8)
+	aCache |= uint64(bytes[3]) << (3 * 8)
+	aCache |= uint64(bytes[4]) << (4 * 8)
+	aCache |= uint64(bytes[5]) << (5 * 8)
+	aCache |= uint64(bytes[6]) << (6 * 8)
+	aCache |= uint64(bytes[7]) << (7 * 8)
+	s.allocCache = ^aCache
+}
+
+// nextFreeIndex returns the index of the next free object in s at
+// or after s.freeindex.
+// There are hardware instructions that can be used to make this
+// faster if profiling warrants it.
+func (s *mspan) nextFreeIndex() uint16 {
+	sfreeindex := s.freeindex
+	snelems := s.nelems
+	if sfreeindex == snelems {
+		return sfreeindex
+	}
+	if sfreeindex > snelems {
+		throw("s.freeindex > s.nelems")
+	}
+
+	aCache := s.allocCache
+
+	bitIndex := sys.TrailingZeros64(aCache)
+	for bitIndex == 64 {
+		// Move index to start of next cached bits.
+		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
+		if sfreeindex >= snelems {
+			s.freeindex = snelems
+			return snelems
+		}
+		whichByte := sfreeindex / 8
+		// Refill s.allocCache with the next 64 alloc bits.
+		s.refillAllocCache(whichByte)
+		aCache = s.allocCache
+		bitIndex = sys.TrailingZeros64(aCache)
+		// nothing available in cached bits
+		// grab the next 8 bytes and try again.
+	}
+	result := sfreeindex + uint16(bitIndex)
+	if result >= snelems {
+		s.freeindex = snelems
+		return snelems
+	}
+
+	s.allocCache >>= uint(bitIndex + 1)
+	sfreeindex = result + 1
+
+	if sfreeindex%64 == 0 && sfreeindex != snelems {
+		// We just incremented s.freeindex so it isn't 0.
+		// As each 1 in s.allocCache was encountered and used for allocation
+		// it was shifted away. At this point s.allocCache contains all 0s.
+		// Refill s.allocCache so that it corresponds
+		// to the bits at s.allocBits starting at s.freeindex.
+		whichByte := sfreeindex / 8
+		s.refillAllocCache(whichByte)
+	}
+	s.freeindex = sfreeindex
+	return result
+}
+
+// isFree reports whether the index'th object in s is unallocated.
+//
+// The caller must ensure s.state is mSpanInUse, and there must have
+// been no preemption points since ensuring this (which could allow a
+// GC transition, which would allow the state to change).
+func (s *mspan) isFree(index uintptr) bool {
+	if index < uintptr(s.freeIndexForScan) {
+		return false
+	}
+	bytep, mask := s.allocBits.bitp(index)
+	return *bytep&mask == 0
+}
+
+// divideByElemSize returns n/s.elemsize.
+// n must be within [0, s.npages*_PageSize),
+// or may be exactly s.npages*_PageSize
+// if s.elemsize is from sizeclasses.go.
+//
+// nosplit, because it is called by objIndex, which is nosplit
+//
+//go:nosplit
+func (s *mspan) divideByElemSize(n uintptr) uintptr {
+	const doubleCheck = false
+
+	// See explanation in mksizeclasses.go's computeDivMagic.
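+	// In short: s.divMul is precomputed so that, for every valid n,
+	// (n*divMul)>>32 == n/s.elemsize, replacing the division with a
+	// multiply and a shift. The doubleCheck branch below verifies this.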
+ q := uintptr((uint64(n) * uint64(s.divMul)) >> 32) + + if doubleCheck && q != n/s.elemsize { + println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q) + throw("bad magic division") + } + return q +} + +// nosplit, because it is called by other nosplit code like findObject +// +//go:nosplit +func (s *mspan) objIndex(p uintptr) uintptr { + return s.divideByElemSize(p - s.base()) +} + +func markBitsForAddr(p uintptr) markBits { + s := spanOf(p) + objIndex := s.objIndex(p) + return s.markBitsForIndex(objIndex) +} + +func (s *mspan) markBitsForIndex(objIndex uintptr) markBits { + bytep, mask := s.gcmarkBits.bitp(objIndex) + return markBits{bytep, mask, objIndex} +} + +func (s *mspan) markBitsForBase() markBits { + return markBits{&s.gcmarkBits.x, uint8(1), 0} +} + +// isMarked reports whether mark bit m is set. +func (m markBits) isMarked() bool { + return *m.bytep&m.mask != 0 +} + +// setMarked sets the marked bit in the markbits, atomically. +func (m markBits) setMarked() { + // Might be racing with other updates, so use atomic update always. + // We used to be clever here and use a non-atomic update in certain + // cases, but it's not worth the risk. + atomic.Or8(m.bytep, m.mask) +} + +// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically. +func (m markBits) setMarkedNonAtomic() { + *m.bytep |= m.mask +} + +// clearMarked clears the marked bit in the markbits, atomically. +func (m markBits) clearMarked() { + // Might be racing with other updates, so use atomic update always. + // We used to be clever here and use a non-atomic update in certain + // cases, but it's not worth the risk. + atomic.And8(m.bytep, ^m.mask) +} + +// markBitsForSpan returns the markBits for the span base address base. +func markBitsForSpan(base uintptr) (mbits markBits) { + mbits = markBitsForAddr(base) + if mbits.mask != 1 { + throw("markBitsForSpan: unaligned start") + } + return mbits +} + +// advance advances the markBits to the next object in the span. +func (m *markBits) advance() { + if m.mask == 1<<7 { + m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1)) + m.mask = 1 + } else { + m.mask = m.mask << 1 + } + m.index++ +} + +// clobberdeadPtr is a special value that is used by the compiler to +// clobber dead stack slots, when -clobberdead flag is set. +const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32)) + +// badPointer throws bad pointer in heap panic. +func badPointer(s *mspan, p, refBase, refOff uintptr) { + // Typically this indicates an incorrect use + // of unsafe or cgo to store a bad pointer in + // the Go heap. It may also indicate a runtime + // bug. + // + // TODO(austin): We could be more aggressive + // and detect pointers to unallocated objects + // in allocated spans. + printlock() + print("runtime: pointer ", hex(p)) + if s != nil { + state := s.state.get() + if state != mSpanInUse { + print(" to unallocated span") + } else { + print(" to unused region of span") + } + print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state) + } + print("\n") + if refBase != 0 { + print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n") + gcDumpObject("object", refBase, refOff) + } + getg().m.traceback = 2 + throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)") +} + +// findObject returns the base address for the heap object containing +// the address p, the object's span, and the index of the object in s. 
+// If p does not point into a heap object, it returns base == 0.
+//
+// If p points to an invalid heap pointer and debug.invalidptr != 0,
+// findObject panics.
+//
+// refBase and refOff optionally give the base address of the object
+// in which the pointer p was found and the byte offset at which it
+// was found. These are used for error reporting.
+//
+// It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
+// Since p is a uintptr, it would not be adjusted if the stack were to move.
+//
+//go:nosplit
+func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
+	s = spanOf(p)
+	// If s is nil, the virtual address has never been part of the heap.
+	// This pointer may be to some mmap'd region, so we allow it.
+	if s == nil {
+		if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
+			// Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
+			// as they are the only platform where compiler's clobberdead mode is
+			// implemented. On these platforms clobberdeadPtr cannot be a valid address.
+			badPointer(s, p, refBase, refOff)
+		}
+		return
+	}
+	// If p is a bad pointer, it may not be in s's bounds.
+	//
+	// Check s.state to synchronize with span initialization
+	// before checking other fields. See also spanOfHeap.
+	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
+		// Pointers into stacks are also ok, the runtime manages these explicitly.
+		if state == mSpanManual {
+			return
+		}
+		// The following ensures that we are rigorous about what data
+		// structures hold valid pointers.
+		if debug.invalidptr != 0 {
+			badPointer(s, p, refBase, refOff)
+		}
+		return
+	}
+
+	objIndex = s.objIndex(p)
+	base = s.base() + objIndex*s.elemsize
+	return
+}
+
+// reflect_verifyNotInHeapPtr reports whether converting the not-in-heap pointer into a unsafe.Pointer is ok.
+//
+//go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
+func reflect_verifyNotInHeapPtr(p uintptr) bool {
+	// Conversion to a pointer is ok as long as findObject above does not call badPointer.
+	// Since we're already promised that p doesn't point into the heap, just disallow heap
+	// pointers and the special clobbered pointer.
+	return spanOf(p) == nil && p != clobberdeadPtr
+}
+
+const ptrBits = 8 * goarch.PtrSize
+
+// bulkBarrierBitmap executes write barriers for copying from [src,
+// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
+// assumed to start maskOffset bytes into the data covered by the
+// bitmap in bits (which may not be a multiple of 8).
+//
+// This is used by bulkBarrierPreWrite for writes to data and BSS.
+//
+//go:nosplit
+func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
+	word := maskOffset / goarch.PtrSize
+	bits = addb(bits, word/8)
+	mask := uint8(1) << (word % 8)
+
+	buf := &getg().m.p.ptr().wbBuf
+	for i := uintptr(0); i < size; i += goarch.PtrSize {
+		if mask == 0 {
+			bits = addb(bits, 1)
+			if *bits == 0 {
+				// Skip 8 words.
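+				// (Advance by 7 words here, not 8: the loop's own
+				// i += goarch.PtrSize supplies the eighth.)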
+ i += 7 * goarch.PtrSize + continue + } + mask = 1 + } + if *bits&mask != 0 { + dstx := (*uintptr)(unsafe.Pointer(dst + i)) + if src == 0 { + p := buf.get1() + p[0] = *dstx + } else { + srcx := (*uintptr)(unsafe.Pointer(src + i)) + p := buf.get2() + p[0] = *dstx + p[1] = *srcx + } + } + mask <<= 1 + } +} + +// typeBitsBulkBarrier executes a write barrier for every +// pointer that would be copied from [src, src+size) to [dst, +// dst+size) by a memmove using the type bitmap to locate those +// pointer slots. +// +// The type typ must correspond exactly to [src, src+size) and [dst, dst+size). +// dst, src, and size must be pointer-aligned. +// The type typ must have a plain bitmap, not a GC program. +// The only use of this function is in channel sends, and the +// 64 kB channel element limit takes care of this for us. +// +// Must not be preempted because it typically runs right before memmove, +// and the GC must observe them as an atomic action. +// +// Callers must perform cgo checks if goexperiment.CgoCheck2. +// +//go:nosplit +func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) { + if typ == nil { + throw("runtime: typeBitsBulkBarrier without type") + } + if typ.Size_ != size { + println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size) + throw("runtime: invalid typeBitsBulkBarrier") + } + if typ.Kind_&kindGCProg != 0 { + println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " with GC prog") + throw("runtime: invalid typeBitsBulkBarrier") + } + if !writeBarrier.enabled { + return + } + ptrmask := typ.GCData + buf := &getg().m.p.ptr().wbBuf + var bits uint32 + for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize { + if i&(goarch.PtrSize*8-1) == 0 { + bits = uint32(*ptrmask) + ptrmask = addb(ptrmask, 1) + } else { + bits = bits >> 1 + } + if bits&1 != 0 { + dstx := (*uintptr)(unsafe.Pointer(dst + i)) + srcx := (*uintptr)(unsafe.Pointer(src + i)) + p := buf.get2() + p[0] = *dstx + p[1] = *srcx + } + } +} + +// countAlloc returns the number of objects allocated in span s by +// scanning the mark bitmap. +func (s *mspan) countAlloc() int { + count := 0 + bytes := divRoundUp(uintptr(s.nelems), 8) + // Iterate over each 8-byte chunk and count allocations + // with an intrinsic. Note that newMarkBits guarantees that + // gcmarkBits will be 8-byte aligned, so we don't have to + // worry about edge cases, irrelevant bits will simply be zero. + for i := uintptr(0); i < bytes; i += 8 { + // Extract 64 bits from the byte pointer and get a OnesCount. + // Note that the unsafe cast here doesn't preserve endianness, + // but that's OK. We only care about how many bits are 1, not + // about the order we discover them in. + mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i))) + count += sys.OnesCount64(mrkBits) + } + return count +} + +// Read the bytes starting at the aligned pointer p into a uintptr. +// Read is little-endian. +func readUintptr(p *byte) uintptr { + x := *(*uintptr)(unsafe.Pointer(p)) + if goarch.BigEndian { + if goarch.PtrSize == 8 { + return uintptr(sys.Bswap64(uint64(x))) + } + return uintptr(sys.Bswap32(uint32(x))) + } + return x +} + +var debugPtrmask struct { + lock mutex + data *byte +} + +// progToPointerMask returns the 1-bit pointer mask output by the GC program prog. +// size the size of the region described by prog, in bytes. +// The resulting bitvector will have no more than size/goarch.PtrSize bits. 
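+//
+// As a worked example of the encoding documented after this function
+// (an illustrative program, not one the compiler necessarily emits):
+// {0x02, 0x01, 0x00} means "emit 2 literal bits from the next byte"
+// (0x02), those bits being 1 then 0 from the low bits of 0x01, then
+// "stop" (0x00). The resulting mask marks word 0 as a pointer and
+// word 1 as a scalar.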
+func progToPointerMask(prog *byte, size uintptr) bitvector { + n := (size/goarch.PtrSize + 7) / 8 + x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1] + x[len(x)-1] = 0xa1 // overflow check sentinel + n = runGCProg(prog, &x[0]) + if x[len(x)-1] != 0xa1 { + throw("progToPointerMask: overflow") + } + return bitvector{int32(n), &x[0]} +} + +// Packed GC pointer bitmaps, aka GC programs. +// +// For large types containing arrays, the type information has a +// natural repetition that can be encoded to save space in the +// binary and in the memory representation of the type information. +// +// The encoding is a simple Lempel-Ziv style bytecode machine +// with the following instructions: +// +// 00000000: stop +// 0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes +// 10000000 n c: repeat the previous n bits c times; n, c are varints +// 1nnnnnnn c: repeat the previous n bits c times; c is a varint + +// runGCProg returns the number of 1-bit entries written to memory. +func runGCProg(prog, dst *byte) uintptr { + dstStart := dst + + // Bits waiting to be written to memory. + var bits uintptr + var nbits uintptr + + p := prog +Run: + for { + // Flush accumulated full bytes. + // The rest of the loop assumes that nbits <= 7. + for ; nbits >= 8; nbits -= 8 { + *dst = uint8(bits) + dst = add1(dst) + bits >>= 8 + } + + // Process one instruction. + inst := uintptr(*p) + p = add1(p) + n := inst & 0x7F + if inst&0x80 == 0 { + // Literal bits; n == 0 means end of program. + if n == 0 { + // Program is over. + break Run + } + nbyte := n / 8 + for i := uintptr(0); i < nbyte; i++ { + bits |= uintptr(*p) << nbits + p = add1(p) + *dst = uint8(bits) + dst = add1(dst) + bits >>= 8 + } + if n %= 8; n > 0 { + bits |= uintptr(*p) << nbits + p = add1(p) + nbits += n + } + continue Run + } + + // Repeat. If n == 0, it is encoded in a varint in the next bytes. + if n == 0 { + for off := uint(0); ; off += 7 { + x := uintptr(*p) + p = add1(p) + n |= (x & 0x7F) << off + if x&0x80 == 0 { + break + } + } + } + + // Count is encoded in a varint in the next bytes. + c := uintptr(0) + for off := uint(0); ; off += 7 { + x := uintptr(*p) + p = add1(p) + c |= (x & 0x7F) << off + if x&0x80 == 0 { + break + } + } + c *= n // now total number of bits to copy + + // If the number of bits being repeated is small, load them + // into a register and use that register for the entire loop + // instead of repeatedly reading from memory. + // Handling fewer than 8 bits here makes the general loop simpler. + // The cutoff is goarch.PtrSize*8 - 7 to guarantee that when we add + // the pattern to a bit buffer holding at most 7 bits (a partial byte) + // it will not overflow. + src := dst + const maxBits = goarch.PtrSize*8 - 7 + if n <= maxBits { + // Start with bits in output buffer. + pattern := bits + npattern := nbits + + // If we need more bits, fetch them from memory. + src = subtract1(src) + for npattern < n { + pattern <<= 8 + pattern |= uintptr(*src) + src = subtract1(src) + npattern += 8 + } + + // We started with the whole bit output buffer, + // and then we loaded bits from whole bytes. + // Either way, we might now have too many instead of too few. + // Discard the extra. + if npattern > n { + pattern >>= npattern - n + npattern = n + } + + // Replicate pattern to at most maxBits. + if npattern == 1 { + // One bit being repeated. + // If the bit is 1, make the pattern all 1s. 
+				// If the bit is 0, the pattern is already all 0s,
+				// but we can claim that the number of bits
+				// in the word is equal to the number we need (c),
+				// because right shift of bits will zero fill.
+				if pattern == 1 {
+					pattern = 1<<maxBits - 1
+					npattern = maxBits
+				}
+			} else {
+				b := pattern
+				nb := npattern
+				if nb+nb <= maxBits {
+					// Double pattern until the whole uintptr is filled.
+					for nb <= goarch.PtrSize*8 {
+						b |= b << nb
+						nb += nb
+					}
+					// Trim away incomplete copy of original pattern in high bits.
+					// TODO(rsc): Replace with table lookup or loop on systems without divide?
+					nb = maxBits / npattern * npattern
+					b &= 1<<nb - 1
+					pattern = b
+					npattern = nb
+				}
+			}
+
+			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
+			// Since pattern contains >8 bits, there will be full bytes to flush
+			// on each iteration.
+			for ; c >= npattern; c -= npattern {
+				bits |= pattern << nbits
+				nbits += npattern
+				for nbits >= 8 {
+					*dst = uint8(bits)
+					dst = add1(dst)
+					bits >>= 8
+					nbits -= 8
+				}
+			}
+
+			// Add final fragment to bit buffer.
+			if c > 0 {
+				pattern &= 1<<c - 1
+				bits |= pattern << nbits
+				nbits += c
+			}
+			continue Run
+		}
+
+		// Repeat; n too large to fit in a register.
+		// Since nbits <= 7, we know the first few bytes of repeated data
+		// are already written to memory.
+		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
+		// Leading src fragment.
+		src = subtractb(src, (off+7)/8)
+		if frag := off & 7; frag != 0 {
+			bits |= uintptr(*src) >> (8 - frag) << nbits
+			src = add1(src)
+			nbits += frag
+			c -= frag
+		}
+		// Main loop: load one byte, write another.
+		// The bits are rotating through the bit buffer.
+		for i := c / 8; i > 0; i-- {
+			bits |= uintptr(*src) << nbits
+			src = add1(src)
+			*dst = uint8(bits)
+			dst = add1(dst)
+			bits >>= 8
+		}
+		// Final src fragment.
+		if c %= 8; c > 0 {
+			bits |= (uintptr(*src) & (1<<c - 1)) << nbits
+			nbits += c
+		}
+	}
+
+	// Write any final bits out, using full-byte writes, even for the final byte.
+	totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
+	nbits += -nbits & 7
+	for ; nbits > 0; nbits -= 8 {
+		*dst = uint8(bits)
+		dst = add1(dst)
+		bits >>= 8
+	}
+	return totalBits
+}
+
+// materializeGCProg allocates space for the (1-bit) pointer bitmask
+// for an object of size ptrdata. Then it fills that space with the
+// pointer bitmask specified by the program prog.
+// The bitmask starts at s.startAddr.
+// The result must be deallocated with dematerializeGCProg.
+func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
+	// Each word of ptrdata needs one bit in the bitmap.
+	bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
+	// Compute the number of pages needed for bitmapBytes.
+	pages := divRoundUp(bitmapBytes, pageSize)
+	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
+	runGCProg(addb(prog, 4), (*byte)(unsafe.Pointer(s.startAddr)))
+	return s
+}
+func dematerializeGCProg(s *mspan) {
+	mheap_.freeManual(s, spanAllocPtrScalarBits)
+}
+
+func dumpGCProg(p *byte) {
+	nptr := 0
+	for {
+		x := *p
+		p = add1(p)
+		if x == 0 {
+			print("\t", nptr, " end\n")
+			break
+		}
+		if x&0x80 == 0 {
+			print("\t", nptr, " lit ", x, ":")
+			n := int(x+7) / 8
+			for i := 0; i < n; i++ {
+				print(" ", hex(*p))
+				p = add1(p)
+			}
+			print("\n")
+			nptr += int(x)
+		} else {
+			nbit := int(x &^ 0x80)
+			if nbit == 0 {
+				for nb := uint(0); ; nb += 7 {
+					x := *p
+					p = add1(p)
+					nbit |= int(x&0x7f) << nb
+					if x&0x80 == 0 {
+						break
+					}
+				}
+			}
+			count := 0
+			for nb := uint(0); ; nb += 7 {
+				x := *p
+				p = add1(p)
+				count |= int(x&0x7f) << nb
+				if x&0x80 == 0 {
+					break
+				}
+			}
+			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
+			nptr += nbit * count
+		}
+	}
+}
+
+// Testing.
+
+// reflect_gcbits returns the GC type info for x, for testing.
+// The result is the bitmap entries (0 or 1), one entry per byte.
+//
+//go:linkname reflect_gcbits reflect.gcbits
+func reflect_gcbits(x any) []byte {
+	return getgcmask(x)
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/mbitmap_allocheaders.go b/platform/dbops/binaries/go/go/src/runtime/mbitmap_allocheaders.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ec055352e788b2974fc48c35b0e7506644956f3
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/mbitmap_allocheaders.go
@@ -0,0 +1,1376 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +//go:build goexperiment.allocheaders + +// Garbage collector: type and heap bitmaps. +// +// Stack, data, and bss bitmaps +// +// Stack frames and global variables in the data and bss sections are +// described by bitmaps with 1 bit per pointer-sized word. A "1" bit +// means the word is a live pointer to be visited by the GC (referred to +// as "pointer"). A "0" bit means the word should be ignored by GC +// (referred to as "scalar", though it could be a dead pointer value). +// +// Heap bitmaps +// +// The heap bitmap comprises 1 bit for each pointer-sized word in the heap, +// recording whether a pointer is stored in that word or not. This bitmap +// is stored at the end of a span for small objects and is unrolled at +// runtime from type metadata for all larger objects. Objects without +// pointers have neither a bitmap nor associated type metadata. +// +// Bits in all cases correspond to words in little-endian order. +// +// For small objects, if s is the mspan for the span starting at "start", +// then s.heapBits() returns a slice containing the bitmap for the whole span. +// That is, s.heapBits()[0] holds the goarch.PtrSize*8 bits for the first +// goarch.PtrSize*8 words from "start" through "start+63*ptrSize" in the span. +// On a related note, small objects are always small enough that their bitmap +// fits in goarch.PtrSize*8 bits, so writing out bitmap data takes two bitmap +// writes at most (because object boundaries don't generally lie on +// s.heapBits()[i] boundaries). +// +// For larger objects, if t is the type for the object starting at "start", +// within some span whose mspan is s, then the bitmap at t.GCData is "tiled" +// from "start" through "start+s.elemsize". +// Specifically, the first bit of t.GCData corresponds to the word at "start", +// the second to the word after "start", and so on up to t.PtrBytes. At t.PtrBytes, +// we skip to "start+t.Size_" and begin again from there. This process is +// repeated until we hit "start+s.elemsize". +// This tiling algorithm supports array data, since the type always refers to +// the element type of the array. Single objects are considered the same as +// single-element arrays. +// The tiling algorithm may scan data past the end of the compiler-recognized +// object, but any unused data within the allocation slot (i.e. within s.elemsize) +// is zeroed, so the GC just observes nil pointers. +// Note that this "tiled" bitmap isn't stored anywhere; it is generated on-the-fly. +// +// For objects without their own span, the type metadata is stored in the first +// word before the object at the beginning of the allocation slot. For objects +// with their own span, the type metadata is stored in the mspan. +// +// The bitmap for small unallocated objects in scannable spans is not maintained +// (can be junk). + +package runtime + +import ( + "internal/abi" + "internal/goarch" + "runtime/internal/sys" + "unsafe" +) + +const ( + // A malloc header is functionally a single type pointer, but + // we need to use 8 here to ensure 8-byte alignment of allocations + // on 32-bit platforms. It's wasteful, but a lot of code relies on + // 8-byte alignment for 8-byte atomics. + mallocHeaderSize = 8 + + // The minimum object size that has a malloc header, exclusive. + // + // The size of this value controls overheads from the malloc header. + // The minimum size is bound by writeHeapBitsSmall, which assumes that the + // pointer bitmap for objects of a size smaller than this doesn't cross + // more than one pointer-word boundary. 
This sets an upper-bound on this
+	// value at the number of bits in a uintptr, multiplied by the pointer
+	// size in bytes.
+	//
+	// We choose a value here that has a natural cutover point in terms of memory
+	// overheads. This value just happens to be the maximum possible value this
+	// can be.
+	//
+	// A span with heap bits in it will have 128 bytes of heap bits on 64-bit
+	// platforms, and 256 bytes of heap bits on 32-bit platforms. The first size
+	// class where malloc headers match this overhead for 64-bit platforms is
+	// 512 bytes (8 KiB / 512 bytes * 8 bytes-per-header = 128 bytes of overhead).
+	// On 32-bit platforms, this same point is the 256 byte size class
+	// (8 KiB / 256 bytes * 8 bytes-per-header = 256 bytes of overhead).
+	//
+	// Guaranteed to be exactly at a size class boundary. The reason this value is
+	// an exclusive minimum is subtle. Suppose we're allocating a 504-byte object
+	// and it's rounded up to 512 bytes for the size class. If minSizeForMallocHeader
+	// is 512 and an inclusive minimum, then comparing the two values against
+	// minSizeForMallocHeader would produce different results. In other words, the
+	// comparison would not be invariant to size-class rounding. Eschewing this
+	// property means a more complex check or possibly storing additional state to
+	// determine whether a span has malloc headers.
+	minSizeForMallocHeader = goarch.PtrSize * ptrBits
+)
+
+// heapBitsInSpan returns true if the size of an object implies its ptr/scalar
+// data is stored at the end of the span, and is accessible via span.heapBits.
+//
+// Note: this works for both rounded-up sizes (span.elemsize) and unrounded
+// type sizes because minSizeForMallocHeader is guaranteed to be at a size
+// class boundary.
+//
+//go:nosplit
+func heapBitsInSpan(userSize uintptr) bool {
+	// N.B. minSizeForMallocHeader is an exclusive minimum so that this function is
+	// invariant under size-class rounding on its input.
+	return userSize <= minSizeForMallocHeader
+}
+
+// heapArenaPtrScalar contains the per-heapArena pointer/scalar metadata for the GC.
+type heapArenaPtrScalar struct {
+	// N.B. This is no longer necessary with allocation headers.
+}
+
+// typePointers is an iterator over the pointers in a heap object.
+//
+// Iteration through this type implements the tiling algorithm described at the
+// top of this file.
+type typePointers struct {
+	// elem is the address of the current array element of type typ being iterated over.
+	// Objects that are not arrays are treated as single-element arrays, in which case
+	// this value does not change.
+	elem uintptr
+
+	// addr is the address the iterator is currently working from and describes
+	// the address of the first word referenced by mask.
+	addr uintptr
+
+	// mask is a bitmask where each bit corresponds to pointer-words after addr.
+	// Bit 0 is the pointer-word at addr, Bit 1 is the next word, and so on.
+	// If a bit is 1, then there is a pointer at that word.
+	// nextFast and next mask out bits in this mask as their pointers are processed.
+	mask uintptr
+
+	// typ is a pointer to the type information for the heap object's type.
+	// This may be nil if the object is in a span where heapBitsInSpan(span.elemsize) is true.
+	typ *_type
+}
+
+// typePointersOf returns an iterator over all heap pointers in the range [addr, addr+size).
+//
+// addr and addr+size must be in the range [span.base(), span.limit).
+// +// Note: addr+size must be passed as the limit argument to the iterator's next method on +// each iteration. This slightly awkward API is to allow typePointers to be destructured +// by the compiler. +// +// nosplit because it is used during write barriers and must not be preempted. +// +//go:nosplit +func (span *mspan) typePointersOf(addr, size uintptr) typePointers { + base := span.objBase(addr) + tp := span.typePointersOfUnchecked(base) + if base == addr && size == span.elemsize { + return tp + } + return tp.fastForward(addr-tp.addr, addr+size) +} + +// typePointersOfUnchecked is like typePointersOf, but assumes addr is the base +// of an allocation slot in a span (the start of the object if no header, the +// header otherwise). It returns an iterator that generates all pointers +// in the range [addr, addr+span.elemsize). +// +// nosplit because it is used during write barriers and must not be preempted. +// +//go:nosplit +func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers { + const doubleCheck = false + if doubleCheck && span.objBase(addr) != addr { + print("runtime: addr=", addr, " base=", span.objBase(addr), "\n") + throw("typePointersOfUnchecked consisting of non-base-address for object") + } + + spc := span.spanclass + if spc.noscan() { + return typePointers{} + } + if heapBitsInSpan(span.elemsize) { + // Handle header-less objects. + return typePointers{elem: addr, addr: addr, mask: span.heapBitsSmallForAddr(addr)} + } + + // All of these objects have a header. + var typ *_type + if spc.sizeclass() != 0 { + // Pull the allocation header from the first word of the object. + typ = *(**_type)(unsafe.Pointer(addr)) + addr += mallocHeaderSize + } else { + typ = span.largeType + } + gcdata := typ.GCData + return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ} +} + +// typePointersOfType is like typePointersOf, but assumes addr points to one or more +// contiguous instances of the provided type. The provided type must not be nil and +// it must not have its type metadata encoded as a gcprog. +// +// It returns an iterator that tiles typ.GCData starting from addr. It's the caller's +// responsibility to limit iteration. +// +// nosplit because its callers are nosplit and require all their callees to be nosplit. +// +//go:nosplit +func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers { + const doubleCheck = false + if doubleCheck && (typ == nil || typ.Kind_&kindGCProg != 0) { + throw("bad type passed to typePointersOfType") + } + if span.spanclass.noscan() { + return typePointers{} + } + // Since we have the type, pretend we have a header. + gcdata := typ.GCData + return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ} +} + +// nextFast is the fast path of next. nextFast is written to be inlineable and, +// as the name implies, fast. +// +// Callers that are performance-critical should iterate using the following +// pattern: +// +// for { +// var addr uintptr +// if tp, addr = tp.nextFast(); addr == 0 { +// if tp, addr = tp.next(limit); addr == 0 { +// break +// } +// } +// // Use addr. +// ... +// } +// +// nosplit because it is used during write barriers and must not be preempted. 
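+//
+// The mnemonic comments in the body (TESTQ/JEQ, BSFQ, BTCQ, LEAQ) name
+// the amd64 instructions each step is expected to compile down to.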
+// +//go:nosplit +func (tp typePointers) nextFast() (typePointers, uintptr) { + // TESTQ/JEQ + if tp.mask == 0 { + return tp, 0 + } + // BSFQ + var i int + if goarch.PtrSize == 8 { + i = sys.TrailingZeros64(uint64(tp.mask)) + } else { + i = sys.TrailingZeros32(uint32(tp.mask)) + } + // BTCQ + tp.mask ^= uintptr(1) << (i & (ptrBits - 1)) + // LEAQ (XX)(XX*8) + return tp, tp.addr + uintptr(i)*goarch.PtrSize +} + +// next advances the pointers iterator, returning the updated iterator and +// the address of the next pointer. +// +// limit must be the same each time it is passed to next. +// +// nosplit because it is used during write barriers and must not be preempted. +// +//go:nosplit +func (tp typePointers) next(limit uintptr) (typePointers, uintptr) { + for { + if tp.mask != 0 { + return tp.nextFast() + } + + // Stop if we don't actually have type information. + if tp.typ == nil { + return typePointers{}, 0 + } + + // Advance to the next element if necessary. + if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes { + tp.elem += tp.typ.Size_ + tp.addr = tp.elem + } else { + tp.addr += ptrBits * goarch.PtrSize + } + + // Check if we've exceeded the limit with the last update. + if tp.addr >= limit { + return typePointers{}, 0 + } + + // Grab more bits and try again. + tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8)) + if tp.addr+goarch.PtrSize*ptrBits > limit { + bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize + tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits) + } + } +} + +// fastForward moves the iterator forward by n bytes. n must be a multiple +// of goarch.PtrSize. limit must be the same limit passed to next for this +// iterator. +// +// nosplit because it is used during write barriers and must not be preempted. +// +//go:nosplit +func (tp typePointers) fastForward(n, limit uintptr) typePointers { + // Basic bounds check. + target := tp.addr + n + if target >= limit { + return typePointers{} + } + if tp.typ == nil { + // Handle small objects. + // Clear any bits before the target address. + tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1 + // Clear any bits past the limit. + if tp.addr+goarch.PtrSize*ptrBits > limit { + bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize + tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits) + } + return tp + } + + // Move up elem and addr. + // Offsets within an element are always at a ptrBits*goarch.PtrSize boundary. + if n >= tp.typ.Size_ { + // elem needs to be moved to the element containing + // tp.addr + n. + oldelem := tp.elem + tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_ + tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize) + } else { + tp.addr += alignDown(n, ptrBits*goarch.PtrSize) + } + + if tp.addr-tp.elem >= tp.typ.PtrBytes { + // We're starting in the non-pointer area of an array. + // Move up to the next element. + tp.elem += tp.typ.Size_ + tp.addr = tp.elem + tp.mask = readUintptr(tp.typ.GCData) + + // We may have exceeded the limit after this. Bail just like next does. + if tp.addr >= limit { + return typePointers{} + } + } else { + // Grab the mask, but then clear any bits before the target address and any + // bits over the limit. 
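+		// For example, if target sits two pointer-words past tp.addr,
+		// the low two bits of the freshly loaded mask are cleared.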
+ tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8)) + tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1 + } + if tp.addr+goarch.PtrSize*ptrBits > limit { + bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize + tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits) + } + return tp +} + +// objBase returns the base pointer for the object containing addr in span. +// +// Assumes that addr points into a valid part of span (span.base() <= addr < span.limit). +// +//go:nosplit +func (span *mspan) objBase(addr uintptr) uintptr { + return span.base() + span.objIndex(addr)*span.elemsize +} + +// bulkBarrierPreWrite executes a write barrier +// for every pointer slot in the memory range [src, src+size), +// using pointer/scalar information from [dst, dst+size). +// This executes the write barriers necessary before a memmove. +// src, dst, and size must be pointer-aligned. +// The range [dst, dst+size) must lie within a single object. +// It does not perform the actual writes. +// +// As a special case, src == 0 indicates that this is being used for a +// memclr. bulkBarrierPreWrite will pass 0 for the src of each write +// barrier. +// +// Callers should call bulkBarrierPreWrite immediately before +// calling memmove(dst, src, size). This function is marked nosplit +// to avoid being preempted; the GC must not stop the goroutine +// between the memmove and the execution of the barriers. +// The caller is also responsible for cgo pointer checks if this +// may be writing Go pointers into non-Go memory. +// +// Pointer data is not maintained for allocations containing +// no pointers at all; any caller of bulkBarrierPreWrite must first +// make sure the underlying allocation contains pointers, usually +// by checking typ.PtrBytes. +// +// The typ argument is the type of the space at src and dst (and the +// element type if src and dst refer to arrays) and it is optional. +// If typ is nil, the barrier will still behave as expected and typ +// is used purely as an optimization. However, it must be used with +// care. +// +// If typ is not nil, then src and dst must point to one or more values +// of type typ. The caller must ensure that the ranges [src, src+size) +// and [dst, dst+size) refer to one or more whole values of type src and +// dst (leaving off the pointerless tail of the space is OK). If this +// precondition is not followed, this function will fail to scan the +// right pointers. +// +// When in doubt, pass nil for typ. That is safe and will always work. +// +// Callers must perform cgo checks if goexperiment.CgoCheck2. +// +//go:nosplit +func bulkBarrierPreWrite(dst, src, size uintptr, typ *abi.Type) { + if (dst|src|size)&(goarch.PtrSize-1) != 0 { + throw("bulkBarrierPreWrite: unaligned arguments") + } + if !writeBarrier.enabled { + return + } + s := spanOf(dst) + if s == nil { + // If dst is a global, use the data or BSS bitmaps to + // execute write barriers. + for _, datap := range activeModules() { + if datap.data <= dst && dst < datap.edata { + bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata) + return + } + } + for _, datap := range activeModules() { + if datap.bss <= dst && dst < datap.ebss { + bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata) + return + } + } + return + } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst { + // dst was heap memory at some point, but isn't now. + // It can't be a global. 
It must be either our stack,
+		// or in the case of direct channel sends, it could be
+		// another stack. Either way, no need for barriers.
+		// This will also catch if dst is in a freed span,
+		// though that should never happen.
+		return
+	}
+	buf := &getg().m.p.ptr().wbBuf
+
+	// Double-check that the bitmaps generated in the two possible paths match.
+	const doubleCheck = false
+	if doubleCheck {
+		doubleCheckTypePointersOfType(s, typ, dst, size)
+	}
+
+	var tp typePointers
+	if typ != nil && typ.Kind_&kindGCProg == 0 {
+		tp = s.typePointersOfType(typ, dst)
+	} else {
+		tp = s.typePointersOf(dst, size)
+	}
+	if src == 0 {
+		for {
+			var addr uintptr
+			if tp, addr = tp.next(dst + size); addr == 0 {
+				break
+			}
+			dstx := (*uintptr)(unsafe.Pointer(addr))
+			p := buf.get1()
+			p[0] = *dstx
+		}
+	} else {
+		for {
+			var addr uintptr
+			if tp, addr = tp.next(dst + size); addr == 0 {
+				break
+			}
+			dstx := (*uintptr)(unsafe.Pointer(addr))
+			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
+			p := buf.get2()
+			p[0] = *dstx
+			p[1] = *srcx
+		}
+	}
+}
+
+// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
+// does not execute write barriers for [dst, dst+size).
+//
+// In addition to the requirements of bulkBarrierPreWrite
+// callers need to ensure [dst, dst+size) is zeroed.
+//
+// This is used for special cases where e.g. dst was just
+// created and zeroed with malloc.
+//
+// The type of the space can be provided purely as an optimization.
+// See bulkBarrierPreWrite's comment for more details -- use this
+// optimization with great care.
+//
+//go:nosplit
+func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, typ *abi.Type) {
+	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
+		throw("bulkBarrierPreWrite: unaligned arguments")
+	}
+	if !writeBarrier.enabled {
+		return
+	}
+	buf := &getg().m.p.ptr().wbBuf
+	s := spanOf(dst)
+
+	// Double-check that the bitmaps generated in the two possible paths match.
+	const doubleCheck = false
+	if doubleCheck {
+		doubleCheckTypePointersOfType(s, typ, dst, size)
+	}
+
+	var tp typePointers
+	if typ != nil && typ.Kind_&kindGCProg == 0 {
+		tp = s.typePointersOfType(typ, dst)
+	} else {
+		tp = s.typePointersOf(dst, size)
+	}
+	for {
+		var addr uintptr
+		if tp, addr = tp.next(dst + size); addr == 0 {
+			break
+		}
+		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
+		p := buf.get1()
+		p[0] = *srcx
+	}
+}
+
+// initHeapBits initializes the heap bitmap for a span.
+//
+// TODO(mknyszek): This should set the heap bits for single pointer
+// allocations eagerly to avoid calling heapSetType at allocation time,
+// just to write one bit.
+func (s *mspan) initHeapBits(forceClear bool) {
+	if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk {
+		b := s.heapBits()
+		for i := range b {
+			b[i] = 0
+		}
+	}
+}
+
+// bswapIfBigEndian swaps the byte order of the uintptr on goarch.BigEndian platforms,
+// and leaves it alone elsewhere.
+func bswapIfBigEndian(x uintptr) uintptr {
+	if goarch.BigEndian {
+		if goarch.PtrSize == 8 {
+			return uintptr(sys.Bswap64(uint64(x)))
+		}
+		return uintptr(sys.Bswap32(uint32(x)))
+	}
+	return x
+}
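+
+// (Illustrative sketch, not part of the original change: bswapIfBigEndian
+// exists so the user-arena bitmap can be stored in little-endian order
+// regardless of host byte order. On a little-endian machine it compiles
+// away entirely:
+//
+//	x := bswapIfBigEndian(bitmap[idx]) // load in little-endian bit order
+//	x |= newBits                       // operate in a platform-neutral way
+//	bitmap[idx] = bswapIfBigEndian(x)  // store back in little-endian order
+//
+// bitmap, idx, and newBits are hypothetical names used only for this
+// illustration; the real read-modify-write sites appear in
+// writeUserArenaHeapBits.write and .flush below.)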
+
+type writeUserArenaHeapBits struct {
+	offset uintptr // offset in span that the low bit of mask represents the pointer state of.
+	mask   uintptr // some pointer bits starting at the address addr.
+	valid  uintptr // number of bits in buf that are valid (including low)
+	low    uintptr // number of low-order bits to not overwrite
+}
+
+func (s *mspan) writeUserArenaHeapBits(addr uintptr) (h writeUserArenaHeapBits) {
+	offset := addr - s.base()
+
+	// We start writing bits maybe in the middle of a heap bitmap word.
+	// Remember how many bits into the word we started, so we can be sure
+	// not to overwrite the previous bits.
+	h.low = offset / goarch.PtrSize % ptrBits
+
+	// round down to heap word that starts the bitmap word.
+	h.offset = offset - h.low*goarch.PtrSize
+
+	// We don't have any bits yet.
+	h.mask = 0
+	h.valid = h.low
+
+	return
+}
+
+// write appends the pointerness of the next valid pointer slots
+// using the low valid bits of bits. 1=pointer, 0=scalar.
+func (h writeUserArenaHeapBits) write(s *mspan, bits, valid uintptr) writeUserArenaHeapBits {
+	if h.valid+valid <= ptrBits {
+		// Fast path - just accumulate the bits.
+		h.mask |= bits << h.valid
+		h.valid += valid
+		return h
+	}
+	// Too many bits to fit in this word. Write the current word
+	// out and move on to the next word.
+
+	data := h.mask | bits<<h.valid       // mask for this word
+	h.mask = bits >> (ptrBits - h.valid) // leftover for next word
+	h.valid += valid - ptrBits           // have h.valid+valid bits, writing ptrBits of them
+
+	// Flush mask to the memory bitmap.
+	idx := h.offset / (ptrBits * goarch.PtrSize)
+	m := uintptr(1)<<h.low - 1
+	bitmap := s.heapBits()
+	bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx])&m | data)
+	// Note: no synchronization required for this write because
+	// the allocator has exclusive access to the page, and the bitmap
+	// entries are all for a single page. Also, visibility of these
+	// writes is guaranteed by the publication barrier in mallocgc.
+
+	// Move to next word of bitmap.
+	h.offset += ptrBits * goarch.PtrSize
+	h.low = 0
+	return h
+}
+
+// Add padding of size bytes.
+func (h writeUserArenaHeapBits) pad(s *mspan, size uintptr) writeUserArenaHeapBits {
+	if size == 0 {
+		return h
+	}
+	words := size / goarch.PtrSize
+	for words > ptrBits {
+		h = h.write(s, 0, ptrBits)
+		words -= ptrBits
+	}
+	return h.write(s, 0, words)
+}
+
+// Flush the bits that have been written, and add zeros as needed
+// to cover the full object [addr, addr+size).
+func (h writeUserArenaHeapBits) flush(s *mspan, addr, size uintptr) {
+	offset := addr - s.base()
+
+	// zeros counts the number of bits needed to represent the object minus the
+	// number of bits we've already written. This is the number of 0 bits
+	// that need to be added.
+	zeros := (offset+size-h.offset)/goarch.PtrSize - h.valid
+
+	// Add zero bits up to the bitmap word boundary
+	if zeros > 0 {
+		z := ptrBits - h.valid
+		if z > zeros {
+			z = zeros
+		}
+		h.valid += z
+		zeros -= z
+	}
+
+	// Find word in bitmap that we're going to write.
+	bitmap := s.heapBits()
+	idx := h.offset / (ptrBits * goarch.PtrSize)
+
+	// Write remaining bits.
+	if h.valid != h.low {
+		m := uintptr(1)<<h.low - 1      // don't clear existing bits below "low"
+		m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
+		bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx])&m | h.mask)
+	}
+	if zeros == 0 {
+		return
+	}
+
+	// Advance to next bitmap word.
+	h.offset += ptrBits * goarch.PtrSize
+
+	// Continue on writing zeros for the rest of the object.
+	// For standard use of the ptr bits this is not required, as
+	// the bits are read from the beginning of the object. Some uses,
+	// like noscan spans, oblets, bulk write barriers, and cgocheck, might
+	// start mid-object, so these writes are still required.
+	for {
+		// Write zero bits.
+		idx := h.offset / (ptrBits * goarch.PtrSize)
+		if zeros < ptrBits {
+			bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx]) &^ (uintptr(1)<<zeros - 1))
+			break
+		} else if zeros == ptrBits {
+			bitmap[idx] = 0
+			break
+		} else {
+			bitmap[idx] = 0
+			zeros -= ptrBits
+		}
+		h.offset += ptrBits * goarch.PtrSize
+	}
+}
+
+// heapBits returns the heap ptr/scalar bitmap stored at the end of the span for
+// small object spans and heap arena spans.
+//
+// Note that the uintptr of each element means something different for small object
+// spans and for heap arena spans. Small object spans are easy: they're never interpreted
+// as anything but uintptr, so they're immune to differences in endianness. However, the
+// heapBits for user arena spans is exposed through a dummy type descriptor, so the byte
+// order of the data matters.
+//
+//go:nosplit
+func (span *mspan) heapBits() []uintptr {
+	const doubleCheck = false
+
+	if doubleCheck && !span.isUserArenaChunk {
+		if span.spanclass.noscan() {
+			throw("heapBits called for noscan")
+		}
+		if span.elemsize > minSizeForMallocHeader {
+			throw("heapBits called for span class that should have a malloc header")
+		}
+	}
+	// Find the bitmap at the end of the span.
+	//
+	// Nearly every span with heap bits is exactly one page in size. Arenas are the only exception.
+	if span.npages == 1 {
+		// This will be inlined and constant-folded down.
+		return heapBitsSlice(span.base(), pageSize)
+	}
+	return heapBitsSlice(span.base(), span.npages*pageSize)
+}
+
+// Helper for constructing a slice for the span's heap bits.
+//
+//go:nosplit
+func heapBitsSlice(spanBase, spanSize uintptr) []uintptr {
+	bitmapSize := spanSize / goarch.PtrSize / 8
+	elems := int(bitmapSize / goarch.PtrSize)
+	var sl notInHeapSlice
+	sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(spanBase + spanSize - bitmapSize)), elems, elems}
+	return *(*[]uintptr)(unsafe.Pointer(&sl))
+}
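+
+// (Illustrative arithmetic, not from the original source: the layout math in
+// heapBitsSlice can be sanity-checked with concrete numbers. For an 8 KiB
+// span on a 64-bit platform:
+//
+//	bitmapSize = 8192 / 8 / 8 = 128 bytes (one bit per word in the span)
+//	elems      = 128 / 8      = 16 uintptr words of bitmap
+//
+// so the last 128 bytes of the span hold its ptr/scalar bitmap, and the
+// returned slice aliases exactly that tail.)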
+
+// heapBitsSmallForAddr loads the heap bits for the object stored at addr from span.heapBits.
+//
+// addr must be the base pointer of an object in the span. heapBitsInSpan(span.elemsize)
+// must be true.
+//
+//go:nosplit
+func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr {
+	spanSize := span.npages * pageSize
+	bitmapSize := spanSize / goarch.PtrSize / 8
+	hbits := (*byte)(unsafe.Pointer(span.base() + spanSize - bitmapSize))
+
+	// These objects are always small enough that their bitmaps
+	// fit in a single word, so just load the word or two we need.
+	//
+	// Mirrors mspan.writeHeapBitsSmall.
+	//
+	// We should be using heapBits(), but unfortunately it introduces
+	// both bounds-check panics and a throw, which causes us to exceed
+	// the nosplit limit in quite a few cases.
+	i := (addr - span.base()) / goarch.PtrSize / ptrBits
+	j := (addr - span.base()) / goarch.PtrSize % ptrBits
+	bits := span.elemsize / goarch.PtrSize
+	word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
+	word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))
+
+	var read uintptr
+	if j+bits > ptrBits {
+		// Two reads.
+		bits0 := ptrBits - j
+		bits1 := bits - bits0
+		read = *word0 >> j
+		read |= (*word1 & ((1 << bits1) - 1)) << bits0
+	} else {
+		// One read.
+		read = (*word0 >> j) & ((1 << bits) - 1)
+	}
+	return read
+}
+
+// writeHeapBitsSmall writes the heap bits for small objects whose ptr/scalar data is
+// stored as a bitmap at the end of the span.
+//
+// Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span.
+// heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_.
+//
+//go:nosplit
+func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
+	// The objects here are always really small, so a single load is sufficient.
+	src0 := readUintptr(typ.GCData)
+
+	// Create repetitions of the bitmap if we have a small array.
+	bits := span.elemsize / goarch.PtrSize
+	scanSize = typ.PtrBytes
+	src := src0
+	switch typ.Size_ {
+	case goarch.PtrSize:
+		src = (1 << (dataSize / goarch.PtrSize)) - 1
+	default:
+		for i := typ.Size_; i < dataSize; i += typ.Size_ {
+			src |= src0 << (i / goarch.PtrSize)
+			scanSize += typ.Size_
+		}
+	}
+
+	// Since we're never writing more than one uintptr's worth of bits, we're either going
+	// to do one or two writes.
+	dst := span.heapBits()
+	o := (x - span.base()) / goarch.PtrSize
+	i := o / ptrBits
+	j := o % ptrBits
+	if j+bits > ptrBits {
+		// Two writes.
+		bits0 := ptrBits - j
+		bits1 := bits - bits0
+		dst[i+0] = dst[i+0]&(^uintptr(0)>>bits0) | (src << j)
+		dst[i+1] = dst[i+1]&^((1<<bits1)-1) | (src >> bits0)
+	} else {
+		// One write.
+		dst[i] = (dst[i] &^ (((1 << bits) - 1) << j)) | (src << j)
+	}
+
+	const doubleCheck = false
+	if doubleCheck {
+		srcRead := span.heapBitsSmallForAddr(x)
+		if srcRead != src {
+			print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
+			print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
+			print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
+			throw("bad pointer bits written for small object")
+		}
+	}
+	return
+}
+
+// For !goexperiment.AllocHeaders.
+func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
+}
+
+// heapSetType records that the new allocation [x, x+size)
+// holds in [x, x+dataSize) one or more values of type typ.
+// (The number of values is given by dataSize / typ.Size.)
+// If dataSize < size, the fragment [x+dataSize, x+size) is
+// recorded as non-pointer data.
+// It is known that the type has pointers somewhere;
+// malloc does not call heapSetType when there are no pointers.
+// +// There can be read-write races between heapSetType and things +// that read the heap metadata like scanobject. However, since +// heapSetType is only used for objects that have not yet been +// made reachable, readers will ignore bits being modified by this +// function. This does mean this function cannot transiently modify +// shared memory that belongs to neighboring objects. Also, on weakly-ordered +// machines, callers must execute a store/store (publication) barrier +// between calling this function and making the object reachable. +func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) (scanSize uintptr) { + const doubleCheck = false + + gctyp := typ + if header == nil { + if doubleCheck && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) { + throw("tried to write heap bits, but no heap bits in span") + } + // Handle the case where we have no malloc header. + scanSize = span.writeHeapBitsSmall(x, dataSize, typ) + } else { + if typ.Kind_&kindGCProg != 0 { + // Allocate space to unroll the gcprog. This space will consist of + // a dummy _type value and the unrolled gcprog. The dummy _type will + // refer to the bitmap, and the mspan will refer to the dummy _type. + if span.spanclass.sizeclass() != 0 { + throw("GCProg for type that isn't large") + } + spaceNeeded := alignUp(unsafe.Sizeof(_type{}), goarch.PtrSize) + heapBitsOff := spaceNeeded + spaceNeeded += alignUp(typ.PtrBytes/goarch.PtrSize/8, goarch.PtrSize) + npages := alignUp(spaceNeeded, pageSize) / pageSize + var progSpan *mspan + systemstack(func() { + progSpan = mheap_.allocManual(npages, spanAllocPtrScalarBits) + memclrNoHeapPointers(unsafe.Pointer(progSpan.base()), progSpan.npages*pageSize) + }) + // Write a dummy _type in the new space. + // + // We only need to write size, PtrBytes, and GCData, since that's all + // the GC cares about. + gctyp = (*_type)(unsafe.Pointer(progSpan.base())) + gctyp.Size_ = typ.Size_ + gctyp.PtrBytes = typ.PtrBytes + gctyp.GCData = (*byte)(add(unsafe.Pointer(progSpan.base()), heapBitsOff)) + gctyp.TFlag = abi.TFlagUnrolledBitmap + + // Expand the GC program into space reserved at the end of the new span. + runGCProg(addb(typ.GCData, 4), gctyp.GCData) + } + + // Write out the header. + *header = gctyp + scanSize = span.elemsize + } + + if doubleCheck { + doubleCheckHeapPointers(x, dataSize, gctyp, header, span) + + // To exercise the less common path more often, generate + // a random interior pointer and make sure iterating from + // that point works correctly too. + maxIterBytes := span.elemsize + if header == nil { + maxIterBytes = dataSize + } + off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize) + size := dataSize - off + if size == 0 { + off -= goarch.PtrSize + size += goarch.PtrSize + } + interior := x + off + size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize) + if size == 0 { + size = goarch.PtrSize + } + // Round up the type to the size of the type. + size = (size + gctyp.Size_ - 1) / gctyp.Size_ * gctyp.Size_ + if interior+size > x+maxIterBytes { + size = x + maxIterBytes - interior + } + doubleCheckHeapPointersInterior(x, interior, size, dataSize, gctyp, header, span) + } + return +} + +func doubleCheckHeapPointers(x, dataSize uintptr, typ *_type, header **_type, span *mspan) { + // Check that scanning the full object works. 
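+	// (Illustrative note, expanding on the check below: for each word
+	// offset i the loop recomputes the expected pointer bit directly from
+	// typ.GCData (word index j = (i % typ.Size_) / goarch.PtrSize lives in
+	// byte j/8 at bit j%8) and then demands that the iterator yield
+	// exactly the addresses whose expected bit is 1, in order.)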
+ tp := span.typePointersOfUnchecked(span.objBase(x)) + maxIterBytes := span.elemsize + if header == nil { + maxIterBytes = dataSize + } + bad := false + for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize { + // Compute the pointer bit we want at offset i. + want := false + if i < span.elemsize { + off := i % typ.Size_ + if off < typ.PtrBytes { + j := off / goarch.PtrSize + want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0 + } + } + if want { + var addr uintptr + tp, addr = tp.next(x + span.elemsize) + if addr == 0 { + println("runtime: found bad iterator") + } + if addr != x+i { + print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n") + bad = true + } + } + } + if !bad { + var addr uintptr + tp, addr = tp.next(x + span.elemsize) + if addr == 0 { + return + } + println("runtime: extra pointer:", hex(addr)) + } + print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, " hasGCProg=", typ.Kind_&kindGCProg != 0, "\n") + print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, "\n") + print("runtime: typ=", unsafe.Pointer(typ), " typ.PtrBytes=", typ.PtrBytes, "\n") + print("runtime: limit=", hex(x+span.elemsize), "\n") + tp = span.typePointersOfUnchecked(x) + dumpTypePointers(tp) + for { + var addr uintptr + if tp, addr = tp.next(x + span.elemsize); addr == 0 { + println("runtime: would've stopped here") + dumpTypePointers(tp) + break + } + print("runtime: addr=", hex(addr), "\n") + dumpTypePointers(tp) + } + throw("heapSetType: pointer entry not correct") +} + +func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_type, header **_type, span *mspan) { + bad := false + if interior < x { + print("runtime: interior=", hex(interior), " x=", hex(x), "\n") + throw("found bad interior pointer") + } + off := interior - x + tp := span.typePointersOf(interior, size) + for i := off; i < off+size; i += goarch.PtrSize { + // Compute the pointer bit we want at offset i. + want := false + if i < span.elemsize { + off := i % typ.Size_ + if off < typ.PtrBytes { + j := off / goarch.PtrSize + want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0 + } + } + if want { + var addr uintptr + tp, addr = tp.next(interior + size) + if addr == 0 { + println("runtime: found bad iterator") + bad = true + } + if addr != x+i { + print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n") + bad = true + } + } + } + if !bad { + var addr uintptr + tp, addr = tp.next(interior + size) + if addr == 0 { + return + } + println("runtime: extra pointer:", hex(addr)) + } + print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, "\n") + print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, " interior=", hex(interior), " size=", size, "\n") + print("runtime: limit=", hex(interior+size), "\n") + tp = span.typePointersOf(interior, size) + dumpTypePointers(tp) + for { + var addr uintptr + if tp, addr = tp.next(interior + size); addr == 0 { + println("runtime: would've stopped here") + dumpTypePointers(tp) + break + } + print("runtime: addr=", hex(addr), "\n") + dumpTypePointers(tp) + } + + print("runtime: want: ") + for i := off; i < off+size; i += goarch.PtrSize { + // Compute the pointer bit we want at offset i. 
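+		// (Worked example with illustrative numbers: on a 64-bit system a
+		// slot at byte offset 40 within its element is word j = 5, so its
+		// pointer bit is GCData byte 5/8 = 0, bit 5%8 = 5.)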
+		want := false
+		if i < dataSize {
+			off := i % typ.Size_
+			if off < typ.PtrBytes {
+				j := off / goarch.PtrSize
+				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
+			}
+		}
+		if want {
+			print("1")
+		} else {
+			print("0")
+		}
+	}
+	println()
+
+	throw("heapSetType: pointer entry not correct")
+}
+
+//go:nosplit
+func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) {
+	if typ == nil || typ.Kind_&kindGCProg != 0 {
+		return
+	}
+	if typ.Kind_&kindMask == kindInterface {
+		// Interfaces are unfortunately inconsistently handled
+		// when it comes to the type pointer, so it's easy to
+		// produce a lot of false positives here.
+		return
+	}
+	tp0 := s.typePointersOfType(typ, addr)
+	tp1 := s.typePointersOf(addr, size)
+	failed := false
+	for {
+		var addr0, addr1 uintptr
+		tp0, addr0 = tp0.next(addr + size)
+		tp1, addr1 = tp1.next(addr + size)
+		if addr0 != addr1 {
+			failed = true
+			break
+		}
+		if addr0 == 0 {
+			break
+		}
+	}
+	if failed {
+		tp0 := s.typePointersOfType(typ, addr)
+		tp1 := s.typePointersOf(addr, size)
+		print("runtime: addr=", hex(addr), " size=", size, "\n")
+		print("runtime: type=", toRType(typ).string(), "\n")
+		dumpTypePointers(tp0)
+		dumpTypePointers(tp1)
+		for {
+			var addr0, addr1 uintptr
+			tp0, addr0 = tp0.next(addr + size)
+			tp1, addr1 = tp1.next(addr + size)
+			print("runtime: ", hex(addr0), " ", hex(addr1), "\n")
+			if addr0 == 0 && addr1 == 0 {
+				break
+			}
+		}
+		throw("mismatch between typePointersOfType and typePointersOf")
+	}
+}
+
+func dumpTypePointers(tp typePointers) {
+	print("runtime: tp.elem=", hex(tp.elem), " tp.typ=", unsafe.Pointer(tp.typ), "\n")
+	print("runtime: tp.addr=", hex(tp.addr), " tp.mask=")
+	for i := uintptr(0); i < ptrBits; i++ {
+		if tp.mask&(uintptr(1)<<i) != 0 {
+			print("1")
+		} else {
+			print("0")
+		}
+	}
+	println()
+}
+
+// Testing.
+
+// Returns GC type info for the pointer stored in ep for testing.
+// If ep points to the stack, only static live information will be
+// returned (i.e. not for objects which are only dynamically live
+// stack objects).
+func getgcmask(ep any) (mask []byte) {
+	e := *efaceOf(&ep)
+	p := e.data
+	t := e._type
+
+	var et *_type
+	if t.Kind_&kindMask != kindPtr {
+		throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried")
+	}
+	et = (*ptrtype)(unsafe.Pointer(t)).Elem
+
+	// data or bss
+	for _, datap := range activeModules() {
+		// data
+		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
+			bitmap := datap.gcdatamask.bytedata
+			n := et.Size_
+			mask = make([]byte, n/goarch.PtrSize)
+			for i := uintptr(0); i < n; i += goarch.PtrSize {
+				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
+				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+			}
+			return
+		}
+
+		// bss
+		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
+			bitmap := datap.gcbssmask.bytedata
+			n := et.Size_
+			mask = make([]byte, n/goarch.PtrSize)
+			for i := uintptr(0); i < n; i += goarch.PtrSize {
+				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
+				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+			}
+			return
+		}
+	}
+
+	// heap
+	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
+		if s.spanclass.noscan() {
+			return nil
+		}
+		limit := base + s.elemsize
+
+		// Move the base up to the iterator's start, because
+		// we want to hide evidence of a malloc header from the
+		// caller.
+		tp := s.typePointersOfUnchecked(base)
+		base = tp.addr
+
+		// Unroll the full bitmap the GC would actually observe.
+		maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
+		for {
+			var addr uintptr
+			if tp, addr = tp.next(limit); addr == 0 {
+				break
+			}
+			maskFromHeap[(addr-base)/goarch.PtrSize] = 1
+		}
+
+		// Double-check that every part of the ptr/scalar we're not
+		// showing the caller is zeroed. This keeps us honest that
+		// that information is actually irrelevant.
+		for i := limit; i < s.elemsize; i++ {
+			if *(*byte)(unsafe.Pointer(i)) != 0 {
+				throw("found non-zeroed tail of allocation")
+			}
+		}
+
+		// Callers (and a check we're about to run) expect this mask
+		// to end at the last pointer.
+		for len(maskFromHeap) > 0 && maskFromHeap[len(maskFromHeap)-1] == 0 {
+			maskFromHeap = maskFromHeap[:len(maskFromHeap)-1]
+		}
+
+		if et.Kind_&kindGCProg == 0 {
+			// Unroll again, but this time from the type information.
+			maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
+			tp = s.typePointersOfType(et, base)
+			for {
+				var addr uintptr
+				if tp, addr = tp.next(limit); addr == 0 {
+					break
+				}
+				maskFromType[(addr-base)/goarch.PtrSize] = 1
+			}
+
+			// Validate that the prefix of maskFromType is equal to
+			// maskFromHeap. maskFromType may contain more pointers than
+			// maskFromHeap produces because maskFromHeap may be able to
+			// get exact type information for certain classes of objects.
+			// With maskFromType, we're always just tiling the type bitmap
+			// through to the elemsize.
+			//
+			// It's OK if maskFromType has pointers in elemsize that extend
+			// past the actual populated space; we checked above that all
+			// that space is zeroed, so the GC will just see nil pointers.
+			differs := false
+			for i := range maskFromHeap {
+				if maskFromHeap[i] != maskFromType[i] {
+					differs = true
+					break
+				}
+			}
+
+			if differs {
+				print("runtime: heap mask=")
+				for _, b := range maskFromHeap {
+					print(b)
+				}
+				println()
+				print("runtime: type mask=")
+				for _, b := range maskFromType {
+					print(b)
+				}
+				println()
+				print("runtime: type=", toRType(et).string(), "\n")
+				throw("found two different masks from two different methods")
+			}
+		}
+
+		// Select the heap mask to return. We may not have a type mask.
+		mask = maskFromHeap
+
+		// Make sure we keep ep alive. We may have stopped referencing
+		// ep's data pointer sometime before this point and it's possible
+		// for that memory to get freed.
+		KeepAlive(ep)
+		return
+	}
+
+	// stack
+	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
+		found := false
+		var u unwinder
+		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
+			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
+				found = true
+				break
+			}
+		}
+		if found {
+			locals, _, _ := u.frame.getStackMap(false)
+			if locals.n == 0 {
+				return
+			}
+			size := uintptr(locals.n) * goarch.PtrSize
+			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
+			mask = make([]byte, n/goarch.PtrSize)
+			for i := uintptr(0); i < n; i += goarch.PtrSize {
+				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
+				mask[i/goarch.PtrSize] = locals.ptrbit(off)
+			}
+		}
+		return
+	}
+
+	// otherwise, not something the GC knows about.
+	// possibly read-only data, like malloc(0).
+	// must not have pointers
+	return
+}
+
+// userArenaHeapBitsSetType is the equivalent of heapSetType but for
+// non-slice-backing-store Go values allocated in a user arena chunk. It
+// sets up the type metadata for the value with type typ allocated at address ptr.
+// base is the base address of the arena chunk.
+func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan) {
+	base := s.base()
+	h := s.writeUserArenaHeapBits(uintptr(ptr))
+
+	p := typ.GCData // start of 1-bit pointer mask (or GC program)
+	var gcProgBits uintptr
+	if typ.Kind_&kindGCProg != 0 {
+		// Expand gc program, using the object itself for storage.
+		gcProgBits = runGCProg(addb(p, 4), (*byte)(ptr))
+		p = (*byte)(ptr)
+	}
+	nb := typ.PtrBytes / goarch.PtrSize
+
+	for i := uintptr(0); i < nb; i += ptrBits {
+		k := nb - i
+		if k > ptrBits {
+			k = ptrBits
+		}
+		// N.B. On big endian platforms we byte swap the data that we
+		// read from GCData, which is always stored in little-endian order
+		// by the compiler.
writeUserArenaHeapBits handles data in + // a platform-ordered way for efficiency, but stores back the + // data in little endian order, since we expose the bitmap through + // a dummy type. + h = h.write(s, readUintptr(addb(p, i/8)), k) + } + // Note: we call pad here to ensure we emit explicit 0 bits + // for the pointerless tail of the object. This ensures that + // there's only a single noMorePtrs mark for the next object + // to clear. We don't need to do this to clear stale noMorePtrs + // markers from previous uses because arena chunk pointer bitmaps + // are always fully cleared when reused. + h = h.pad(s, typ.Size_-typ.PtrBytes) + h.flush(s, uintptr(ptr), typ.Size_) + + if typ.Kind_&kindGCProg != 0 { + // Zero out temporary ptrmask buffer inside object. + memclrNoHeapPointers(ptr, (gcProgBits+7)/8) + } + + // Update the PtrBytes value in the type information. After this + // point, the GC will observe the new bitmap. + s.largeType.PtrBytes = uintptr(ptr) - base + typ.PtrBytes + + // Double-check that the bitmap was written out correctly. + const doubleCheck = false + if doubleCheck { + doubleCheckHeapPointersInterior(uintptr(ptr), uintptr(ptr), typ.Size_, typ.Size_, typ, &s.largeType, s) + } +} + +// For !goexperiment.AllocHeaders, to pass TestIntendedInlining. +func writeHeapBitsForAddr() { + panic("not implemented") +} + +// For !goexperiment.AllocHeaders. +type heapBits struct { +} + +// For !goexperiment.AllocHeaders. +// +//go:nosplit +func heapBitsForAddr(addr, size uintptr) heapBits { + panic("not implemented") +} + +// For !goexperiment.AllocHeaders. +// +//go:nosplit +func (h heapBits) next() (heapBits, uintptr) { + panic("not implemented") +} + +// For !goexperiment.AllocHeaders. +// +//go:nosplit +func (h heapBits) nextFast() (heapBits, uintptr) { + panic("not implemented") +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mbitmap_noallocheaders.go b/platform/dbops/binaries/go/go/src/runtime/mbitmap_noallocheaders.go new file mode 100644 index 0000000000000000000000000000000000000000..383993aa1ef7d5d08a24115c912cce9381e85868 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mbitmap_noallocheaders.go @@ -0,0 +1,938 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !goexperiment.allocheaders + +// Garbage collector: type and heap bitmaps. +// +// Stack, data, and bss bitmaps +// +// Stack frames and global variables in the data and bss sections are +// described by bitmaps with 1 bit per pointer-sized word. A "1" bit +// means the word is a live pointer to be visited by the GC (referred to +// as "pointer"). A "0" bit means the word should be ignored by GC +// (referred to as "scalar", though it could be a dead pointer value). +// +// Heap bitmap +// +// The heap bitmap comprises 1 bit for each pointer-sized word in the heap, +// recording whether a pointer is stored in that word or not. This bitmap +// is stored in the heapArena metadata backing each heap arena. +// That is, if ha is the heapArena for the arena starting at "start", +// then ha.bitmap[0] holds the 64 bits for the 64 words "start" +// through start+63*ptrSize, ha.bitmap[1] holds the entries for +// start+64*ptrSize through start+127*ptrSize, and so on. +// Bits correspond to words in little-endian order. ha.bitmap[0]&1 represents +// the word at "start", ha.bitmap[0]>>1&1 represents the word at start+8, etc. +// (For 32-bit platforms, s/64/32/.) 
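+//
+// (Illustrative example, not from the original comment: on a 64-bit system,
+// the pointer bit for an address p in the arena starting at "start" is
+//
+//	word := (p - start) / 8   // word index within the arena
+//	bit  := ha.bitmap[word/64] >> (word % 64) & 1
+//
+// so p = start+0x1000 gives word = 512, i.e. bit 0 of ha.bitmap[8].)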
+//
+// We also keep a noMorePtrs bitmap which allows us to stop scanning
+// the heap bitmap early in certain situations. If ha.noMorePtrs[i]>>j&1
+// is 1, then the object containing the last word described by ha.bitmap[8*i+j]
+// has no more pointers beyond those described by ha.bitmap[8*i+j].
+// If ha.noMorePtrs[i]>>j&1 is set, the entries in ha.bitmap[8*i+j+1] and
+// beyond must all be zero until the start of the next object.
+//
+// The bitmap for noscan spans is set to all zero at span allocation time.
+//
+// The bitmap for unallocated objects in scannable spans is not maintained
+// (can be junk).
+
+package runtime
+
+import (
+	"internal/abi"
+	"internal/goarch"
+	"runtime/internal/sys"
+	"unsafe"
+)
+
+const (
+	// For compatibility with the allocheaders GOEXPERIMENT.
+	mallocHeaderSize       = 0
+	minSizeForMallocHeader = ^uintptr(0)
+)
+
+// For compatibility with the allocheaders GOEXPERIMENT.
+//
+//go:nosplit
+func heapBitsInSpan(_ uintptr) bool {
+	return false
+}
+
+// heapArenaPtrScalar contains the per-heapArena pointer/scalar metadata for the GC.
+type heapArenaPtrScalar struct {
+	// bitmap stores the pointer/scalar bitmap for the words in
+	// this arena. See mbitmap.go for a description.
+	// This array uses 1 bit per word of heap, or 1.6% of the heap size (for 64-bit).
+	bitmap [heapArenaBitmapWords]uintptr
+
+	// If the ith bit of noMorePtrs is true, then there are no more
+	// pointers for the object containing the word described by the
+	// high bit of bitmap[i].
+	// In that case, bitmap[i+1], ... must be zero until the start
+	// of the next object.
+	// We never operate on these entries using bit-parallel techniques,
+	// so it is ok if they are small. Also, they can't be bigger than
+	// uint16 because at that size a single noMorePtrs entry
+	// represents 8K of memory, the minimum size of a span. Any larger
+	// and we'd have to worry about concurrent updates.
+	// This array uses 1 bit per word of bitmap, or .024% of the heap size (for 64-bit).
+	noMorePtrs [heapArenaBitmapWords / 8]uint8
+}
+
+// heapBits provides access to the bitmap bits for a single heap word.
+// The methods on heapBits take value receivers so that the compiler
+// can more easily inline calls to those methods and registerize the
+// struct fields independently.
+type heapBits struct {
+	// heapBits will report on pointers in the range [addr,addr+size).
+	// The low bit of mask contains the pointerness of the word at addr
+	// (assuming valid>0).
+	addr, size uintptr
+
+	// The next few pointer bits representing words starting at addr.
+	// Those bits already returned by next() are zeroed.
+	mask uintptr
+	// Number of bits in mask that are valid. mask is always less than 1<<valid.
+	valid uintptr
+}
+
+// heapBitsForAddr returns the heapBits for the address addr.
+// The caller must ensure [addr, addr+size) is in an allocated span.
+// In particular, be careful not to point past the end of an object.
+//
+// nosplit because it is used during write barriers and must not be preempted.
+//
+//go:nosplit
+func heapBitsForAddr(addr, size uintptr) heapBits {
+	// Find arena
+	ai := arenaIndex(addr)
+	ha := mheap_.arenas[ai.l1()][ai.l2()]
+
+	// Word index in arena.
+	word := addr / goarch.PtrSize % heapArenaWords
+
+	// Word index and bit offset in bitmap array.
+	idx := word / ptrBits
+	off := word % ptrBits
+
+	// Grab relevant bits of bitmap.
+	mask := ha.bitmap[idx] >> off
+	valid := ptrBits - off
+
+	// Process depending on where the object ends.
+	nptr := size / goarch.PtrSize
+	if nptr < valid {
+		// Bits for this object end before the end of this bitmap word.
+		// Squash bits for the following objects.
+		mask &= 1<<(nptr&(ptrBits-1)) - 1
+		valid = nptr
+	} else if nptr == valid {
+		// Bits for this object end at exactly the end of this bitmap word.
+		// All good.
+	} else {
+		// Bits for this object extend into the next bitmap word. See if there
+		// may be any pointers recorded there.
+		if uintptr(ha.noMorePtrs[idx/8])>>(idx%8)&1 != 0 {
+			// No more pointers in this object after this bitmap word.
+			// Update size so we know not to look there.
+			size = valid * goarch.PtrSize
+		}
+	}
+
+	return heapBits{addr: addr, size: size, mask: mask, valid: valid}
+}
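+
+// (Illustrative sketch, not from the original source, of how heapBitsForAddr
+// and next combine; scanRange is a hypothetical name used only here:
+//
+//	func scanRange(addr, size uintptr) {
+//		h := heapBitsForAddr(addr, size)
+//		for {
+//			var p uintptr
+//			if h, p = h.next(); p == 0 {
+//				break
+//			}
+//			// p is the address of a pointer slot in [addr, addr+size).
+//		}
+//	}
+//
+// This is the same loop shape bulkBarrierPreWrite uses below.)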
+
+// Returns the (absolute) address of the next known pointer and
+// a heapBits iterator representing any remaining pointers.
+// If there are no more pointers, returns address 0.
+// Note that next does not modify h. The caller must record the result.
+//
+// nosplit because it is used during write barriers and must not be preempted.
+//
+//go:nosplit
+func (h heapBits) next() (heapBits, uintptr) {
+	for {
+		if h.mask != 0 {
+			var i int
+			if goarch.PtrSize == 8 {
+				i = sys.TrailingZeros64(uint64(h.mask))
+			} else {
+				i = sys.TrailingZeros32(uint32(h.mask))
+			}
+			h.mask ^= uintptr(1) << (i & (ptrBits - 1))
+			return h, h.addr + uintptr(i)*goarch.PtrSize
+		}
+
+		// Skip words that we've already processed.
+		h.addr += h.valid * goarch.PtrSize
+		h.size -= h.valid * goarch.PtrSize
+		if h.size == 0 {
+			return h, 0 // no more pointers
+		}
+
+		// Grab more bits and try again.
+		h = heapBitsForAddr(h.addr, h.size)
+	}
+}
+
+// nextFast is like next, but can return 0 even when there are more pointers
+// to be found. Callers should call next if nextFast returns 0 as its second
+// return value.
+//
+//	if h, addr = h.nextFast(); addr == 0 {
+//		if h, addr = h.next(); addr == 0 {
+//			... no more pointers ...
+//		}
+//	}
+//	... process pointer at addr ...
+//
+// nextFast is designed to be inlineable.
+//
+//go:nosplit
+func (h heapBits) nextFast() (heapBits, uintptr) {
+	// TESTQ/JEQ
+	if h.mask == 0 {
+		return h, 0
+	}
+	// BSFQ
+	var i int
+	if goarch.PtrSize == 8 {
+		i = sys.TrailingZeros64(uint64(h.mask))
+	} else {
+		i = sys.TrailingZeros32(uint32(h.mask))
+	}
+	// BTCQ
+	h.mask ^= uintptr(1) << (i & (ptrBits - 1))
+	// LEAQ (XX)(XX*8)
+	return h, h.addr + uintptr(i)*goarch.PtrSize
+}
+
+// bulkBarrierPreWrite executes a write barrier
+// for every pointer slot in the memory range [src, src+size),
+// using pointer/scalar information from [dst, dst+size).
+// This executes the write barriers necessary before a memmove.
+// src, dst, and size must be pointer-aligned.
+// The range [dst, dst+size) must lie within a single object.
+// It does not perform the actual writes.
+//
+// As a special case, src == 0 indicates that this is being used for a
+// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
+// barrier.
+//
+// Callers should call bulkBarrierPreWrite immediately before
+// calling memmove(dst, src, size). This function is marked nosplit
+// to avoid being preempted; the GC must not stop the goroutine
+// between the memmove and the execution of the barriers.
+// The caller is also responsible for cgo pointer checks if this
+// may be writing Go pointers into non-Go memory.
+//
+// The pointer bitmap is not maintained for allocations containing
+// no pointers at all; any caller of bulkBarrierPreWrite must first
+// make sure the underlying allocation contains pointers, usually
+// by checking typ.PtrBytes.
+//
+// The type of the space can be provided purely as an optimization,
+// however it is not used with GOEXPERIMENT=noallocheaders.
+//
+// Callers must perform cgo checks if goexperiment.CgoCheck2.
+//
+//go:nosplit
+func bulkBarrierPreWrite(dst, src, size uintptr, _ *abi.Type) {
+	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
+		throw("bulkBarrierPreWrite: unaligned arguments")
+	}
+	if !writeBarrier.enabled {
+		return
+	}
+	if s := spanOf(dst); s == nil {
+		// If dst is a global, use the data or BSS bitmaps to
+		// execute write barriers.
+		for _, datap := range activeModules() {
+			if datap.data <= dst && dst < datap.edata {
+				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
+				return
+			}
+		}
+		for _, datap := range activeModules() {
+			if datap.bss <= dst && dst < datap.ebss {
+				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
+				return
+			}
+		}
+		return
+	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
+		// dst was heap memory at some point, but isn't now.
+		// It can't be a global. It must be either our stack,
+		// or in the case of direct channel sends, it could be
+		// another stack. Either way, no need for barriers.
+		// This will also catch if dst is in a freed span,
+		// though that should never happen.
+		return
+	}
+
+	buf := &getg().m.p.ptr().wbBuf
+	h := heapBitsForAddr(dst, size)
+	if src == 0 {
+		for {
+			var addr uintptr
+			if h, addr = h.next(); addr == 0 {
+				break
+			}
+			dstx := (*uintptr)(unsafe.Pointer(addr))
+			p := buf.get1()
+			p[0] = *dstx
+		}
+	} else {
+		for {
+			var addr uintptr
+			if h, addr = h.next(); addr == 0 {
+				break
+			}
+			dstx := (*uintptr)(unsafe.Pointer(addr))
+			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
+			p := buf.get2()
+			p[0] = *dstx
+			p[1] = *srcx
+		}
+	}
+}
+
+// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
+// does not execute write barriers for [dst, dst+size).
+//
+// In addition to the requirements of bulkBarrierPreWrite
+// callers need to ensure [dst, dst+size) is zeroed.
+//
+// This is used for special cases where e.g. dst was just
+// created and zeroed with malloc.
+//
+// The type of the space can be provided purely as an optimization,
+// however it is not used with GOEXPERIMENT=noallocheaders.
+//
+//go:nosplit
+func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, _ *abi.Type) {
+	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
+		throw("bulkBarrierPreWrite: unaligned arguments")
+	}
+	if !writeBarrier.enabled {
+		return
+	}
+	buf := &getg().m.p.ptr().wbBuf
+	h := heapBitsForAddr(dst, size)
+	for {
+		var addr uintptr
+		if h, addr = h.next(); addr == 0 {
+			break
+		}
+		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
+		p := buf.get1()
+		p[0] = *srcx
+	}
+}
+
+// initHeapBits initializes the heap bitmap for a span.
+// If this is a span of single pointer allocations, it initializes all
+// words to pointer. If forceClear is true, clears all bits.
+func (s *mspan) initHeapBits(forceClear bool) {
+	if forceClear || s.spanclass.noscan() {
+		// Set all the pointer bits to zero. We do this once
+		// when the span is allocated so we don't have to do it
+		// for each object allocation.
+		base := s.base()
+		size := s.npages * pageSize
+		h := writeHeapBitsForAddr(base)
+		h.flush(base, size)
+		return
+	}
+	isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
+	if !isPtrs {
+		return // nothing to do
+	}
+	h := writeHeapBitsForAddr(s.base())
+	size := s.npages * pageSize
+	nptrs := size / goarch.PtrSize
+	for i := uintptr(0); i < nptrs; i += ptrBits {
+		h = h.write(^uintptr(0), ptrBits)
+	}
+	h.flush(s.base(), size)
+}
+
+type writeHeapBits struct {
+	addr  uintptr // address that the low bit of mask represents the pointer state of.
+	mask  uintptr // some pointer bits starting at the address addr.
+	valid uintptr // number of bits in buf that are valid (including low)
+	low   uintptr // number of low-order bits to not overwrite
+}
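+
+// (Illustrative example of the low/valid bookkeeping, with made-up numbers:
+// starting a write at an address 136 bytes past a bitmap-word-aligned
+// boundary on a 64-bit system means heap word 17, which is bit 17 of a
+// 64-bit bitmap word. The constructor below therefore sets h.low = 17 and
+// h.valid = 17, so the low 17 bits of the first flushed word are preserved
+// rather than overwritten.)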
+
+func writeHeapBitsForAddr(addr uintptr) (h writeHeapBits) {
+	// We start writing bits maybe in the middle of a heap bitmap word.
+	// Remember how many bits into the word we started, so we can be sure
+	// not to overwrite the previous bits.
+	h.low = addr / goarch.PtrSize % ptrBits
+
+	// round down to heap word that starts the bitmap word.
+	h.addr = addr - h.low*goarch.PtrSize
+
+	// We don't have any bits yet.
+	h.mask = 0
+	h.valid = h.low
+
+	return
+}
+
+// write appends the pointerness of the next valid pointer slots
+// using the low valid bits of bits. 1=pointer, 0=scalar.
+func (h writeHeapBits) write(bits, valid uintptr) writeHeapBits {
+	if h.valid+valid <= ptrBits {
+		// Fast path - just accumulate the bits.
+		h.mask |= bits << h.valid
+		h.valid += valid
+		return h
+	}
+	// Too many bits to fit in this word. Write the current word
+	// out and move on to the next word.
+
+	data := h.mask | bits<<h.valid       // mask for this word
+	h.mask = bits >> (ptrBits - h.valid) // leftover for next word
+	h.valid += valid - ptrBits           // have h.valid+valid bits, writing ptrBits of them
+
+	// Flush mask to the memory bitmap.
+	// TODO: figure out how to cache arena lookup.
+	ai := arenaIndex(h.addr)
+	ha := mheap_.arenas[ai.l1()][ai.l2()]
+	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
+	m := uintptr(1)<<h.low - 1
+	ha.bitmap[idx] = ha.bitmap[idx]&m | data
+	// Note: no synchronization required for this write because
+	// the allocator has exclusive access to the page, and the bitmap
+	// entries are all for a single page. Also, visibility of these
+	// writes is guaranteed by the publication barrier in mallocgc.
+
+	// Move to next word of bitmap.
+	h.addr += ptrBits * goarch.PtrSize
+	h.low = 0
+	return h
+}
+
+// Add padding of size bytes.
+func (h writeHeapBits) pad(size uintptr) writeHeapBits {
+	if size == 0 {
+		return h
+	}
+	words := size / goarch.PtrSize
+	for words > ptrBits {
+		h = h.write(0, ptrBits)
+		words -= ptrBits
+	}
+	return h.write(0, words)
+}
+
+// Flush the bits that have been written, and add zeros as needed
+// to cover the full object [addr, addr+size).
+func (h writeHeapBits) flush(addr, size uintptr) {
+	// zeros counts the number of bits needed to represent the object minus the
+	// number of bits we've already written. This is the number of 0 bits
+	// that need to be added.
+	zeros := (addr+size-h.addr)/goarch.PtrSize - h.valid
+
+	// Add zero bits up to the bitmap word boundary
+	if zeros > 0 {
+		z := ptrBits - h.valid
+		if z > zeros {
+			z = zeros
+		}
+		h.valid += z
+		zeros -= z
+	}
+
+	// Find word in bitmap that we're going to write.
+	ai := arenaIndex(h.addr)
+	ha := mheap_.arenas[ai.l1()][ai.l2()]
+	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
+
+	// Write remaining bits.
+	if h.valid != h.low {
+		m := uintptr(1)<<h.low - 1      // don't clear existing bits below "low"
+		m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
+		ha.bitmap[idx] = ha.bitmap[idx]&m | h.mask
+	}
+	if zeros == 0 {
+		return
+	}
+
+	// Record in the noMorePtrs map that there won't be any more 1 bits,
+	// so readers can stop early.
+	ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
+
+	// Advance to next bitmap word.
+	h.addr += ptrBits * goarch.PtrSize
+
+	// Continue on writing zeros for the rest of the object.
+	// For standard use of the ptr bits this is not required, as
+	// the bits are read from the beginning of the object. Some uses,
+	// like noscan spans, oblets, bulk write barriers, and cgocheck, might
+	// start mid-object, so these writes are still required.
+	for {
+		// Write zero bits.
+		ai := arenaIndex(h.addr)
+		ha := mheap_.arenas[ai.l1()][ai.l2()]
+		idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
+		if zeros < ptrBits {
+			ha.bitmap[idx] &^= uintptr(1)<<zeros - 1
+			break
+		} else if zeros == ptrBits {
+			ha.bitmap[idx] = 0
+			break
+		} else {
+			ha.bitmap[idx] = 0
+			zeros -= ptrBits
+		}
+		ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
+		h.addr += ptrBits * goarch.PtrSize
+	}
+}
+
+// heapBitsSetType records that the new allocation [x, x+size)
+// holds in [x, x+dataSize) one or more values of type typ.
+// (The number of values is given by dataSize / typ.Size.)
+// If dataSize < size, the fragment [x+dataSize, x+size) is
+// recorded as non-pointer data.
+// It is known that the type has pointers somewhere;
+// malloc does not call heapBitsSetType when there are no pointers.
+//
+// There can be read-write races between heapBitsSetType and things
+// that read the heap bitmap like scanobject. However, since
+// heapBitsSetType is only used for objects that have not yet been
+// made reachable, readers will ignore bits being modified by this
+// function. This does mean this function cannot transiently modify
+// bits that belong to neighboring objects. Also, on weakly-ordered
+// machines, callers must execute a store/store (publication) barrier
+// between calling this function and making the object reachable.
+func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
+	const doubleCheck = false // slow but helpful; enable to test modifications to this code
+
+	if doubleCheck && dataSize%typ.Size_ != 0 {
+		throw("heapBitsSetType: dataSize not a multiple of typ.Size")
+	}
+
+	if goarch.PtrSize == 8 && size == goarch.PtrSize {
+		// It's one word and it has pointers, it must be a pointer.
+		// Since all allocated one-word objects are pointers
+		// (non-pointers are aggregated into tinySize allocations),
+		// (*mspan).initHeapBits sets the pointer bits for us.
+		// Nothing to do here.
+		return
+	}
+
+	h := writeHeapBitsForAddr(x)
+
+	// Handle GC program.
+	if typ.Kind_&kindGCProg != 0 {
+		// Expand the gc program into the storage we're going to use for the actual object.
+		obj := (*uint8)(unsafe.Pointer(x))
+		n := runGCProg(addb(typ.GCData, 4), obj)
+
+		// Use the expanded program to set the heap bitmap.
+		for i := uintptr(0); true; i += typ.Size_ {
+			// Copy expanded program to heap bitmap.
+			p := obj
+			j := n
+			for j > 8 {
+				h = h.write(uintptr(*p), 8)
+				p = add1(p)
+				j -= 8
+			}
+			h = h.write(uintptr(*p), j)
+
+			if i+typ.Size_ == dataSize {
+				break // no padding after last element
+			}
+
+			// Pad with zeros to the start of the next element.
+			h = h.pad(typ.Size_ - n*goarch.PtrSize)
+		}
+
+		h.flush(x, size)
+
+		// Erase the expanded GC program.
+		memclrNoHeapPointers(unsafe.Pointer(obj), (n+7)/8)
+		return
+	}
+
+	// Note about sizes:
+	//
+	// typ.Size is the number of words in the object,
+	// and typ.PtrBytes is the number of words in the prefix
+	// of the object that contains pointers. That is, the final
+	// typ.Size - typ.PtrBytes words contain no pointers.
+	// This allows optimization of a common pattern where
+	// an object has a small header followed by a large scalar
+	// buffer. If we know the pointers are over, we don't have
+	// to scan the buffer's heap bitmap at all.
+	// The 1-bit ptrmasks are sized to contain only bits for
+	// the typ.PtrBytes prefix, zero padded out to a full byte
+	// of bitmap. If there is more room in the allocated object,
+	// that space is pointerless. The noMorePtrs bitmap will prevent
+	// scanning large pointerless tails of an object.
+	//
+	// Replicated copies are not as nice: if there is an array of
+	// objects with scalar tails, all but the last tail does have to
+	// be initialized, because there is no way to say "skip forward".
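+
+	// (Illustrative walk-through, not from the original source: take a type
+	// with Size_ = 24 and PtrBytes = 8 on a 64-bit system, i.e. one pointer
+	// word followed by two scalar words, allocated as a three-element array
+	// (dataSize = 72). Then words = 3, so the "repeated small element" path
+	// below applies: it doubles the repetition unit (m |= m << words) until
+	// one write covers several elements, and the final h.flush zero-fills
+	// the scalar tail. The net bitmap has a 1 for the first word of each
+	// element and 0 elsewhere.)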
+ + ptrs := typ.PtrBytes / goarch.PtrSize + if typ.Size_ == dataSize { // Single element + if ptrs <= ptrBits { // Single small element + m := readUintptr(typ.GCData) + h = h.write(m, ptrs) + } else { // Single large element + p := typ.GCData + for { + h = h.write(readUintptr(p), ptrBits) + p = addb(p, ptrBits/8) + ptrs -= ptrBits + if ptrs <= ptrBits { + break + } + } + m := readUintptr(p) + h = h.write(m, ptrs) + } + } else { // Repeated element + words := typ.Size_ / goarch.PtrSize // total words, including scalar tail + if words <= ptrBits { // Repeated small element + n := dataSize / typ.Size_ + m := readUintptr(typ.GCData) + // Make larger unit to repeat + for words <= ptrBits/2 { + if n&1 != 0 { + h = h.write(m, words) + } + n /= 2 + m |= m << words + ptrs += words + words *= 2 + if n == 1 { + break + } + } + for n > 1 { + h = h.write(m, words) + n-- + } + h = h.write(m, ptrs) + } else { // Repeated large element + for i := uintptr(0); true; i += typ.Size_ { + p := typ.GCData + j := ptrs + for j > ptrBits { + h = h.write(readUintptr(p), ptrBits) + p = addb(p, ptrBits/8) + j -= ptrBits + } + m := readUintptr(p) + h = h.write(m, j) + if i+typ.Size_ == dataSize { + break // don't need the trailing nonptr bits on the last element. + } + // Pad with zeros to the start of the next element. + h = h.pad(typ.Size_ - typ.PtrBytes) + } + } + } + h.flush(x, size) + + if doubleCheck { + h := heapBitsForAddr(x, size) + for i := uintptr(0); i < size; i += goarch.PtrSize { + // Compute the pointer bit we want at offset i. + want := false + if i < dataSize { + off := i % typ.Size_ + if off < typ.PtrBytes { + j := off / goarch.PtrSize + want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0 + } + } + if want { + var addr uintptr + h, addr = h.next() + if addr != x+i { + throw("heapBitsSetType: pointer entry not correct") + } + } + } + if _, addr := h.next(); addr != 0 { + throw("heapBitsSetType: extra pointer") + } + } +} + +// For goexperiment.AllocHeaders +func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) (scanSize uintptr) { + return 0 +} + +// Testing. + +// Returns GC type info for the pointer stored in ep for testing. +// If ep points to the stack, only static live information will be returned +// (i.e. not for objects which are only dynamically live stack objects). 
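+//
+// (Illustrative usage sketch from inside the runtime, with a hypothetical
+// global; a zero-initialized package-level variable lands in the data/bss
+// branch below:
+//
+//	var g struct {
+//		p *int
+//		n int
+//	}
+//	mask := getgcmask(&g) // [1 0] on 64-bit: word 0 pointer, word 1 scalar
+//
+// The heap branch additionally trims trailing zeros from the mask.)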
+func getgcmask(ep any) (mask []byte) { + e := *efaceOf(&ep) + p := e.data + t := e._type + // data or bss + for _, datap := range activeModules() { + // data + if datap.data <= uintptr(p) && uintptr(p) < datap.edata { + bitmap := datap.gcdatamask.bytedata + n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_ + mask = make([]byte, n/goarch.PtrSize) + for i := uintptr(0); i < n; i += goarch.PtrSize { + off := (uintptr(p) + i - datap.data) / goarch.PtrSize + mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1 + } + return + } + + // bss + if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss { + bitmap := datap.gcbssmask.bytedata + n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_ + mask = make([]byte, n/goarch.PtrSize) + for i := uintptr(0); i < n; i += goarch.PtrSize { + off := (uintptr(p) + i - datap.bss) / goarch.PtrSize + mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1 + } + return + } + } + + // heap + if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 { + if s.spanclass.noscan() { + return nil + } + n := s.elemsize + hbits := heapBitsForAddr(base, n) + mask = make([]byte, n/goarch.PtrSize) + for { + var addr uintptr + if hbits, addr = hbits.next(); addr == 0 { + break + } + mask[(addr-base)/goarch.PtrSize] = 1 + } + // Callers expect this mask to end at the last pointer. + for len(mask) > 0 && mask[len(mask)-1] == 0 { + mask = mask[:len(mask)-1] + } + + // Make sure we keep ep alive. We may have stopped referencing + // ep's data pointer sometime before this point and it's possible + // for that memory to get freed. + KeepAlive(ep) + return + } + + // stack + if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi { + found := false + var u unwinder + for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() { + if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp { + found = true + break + } + } + if found { + locals, _, _ := u.frame.getStackMap(false) + if locals.n == 0 { + return + } + size := uintptr(locals.n) * goarch.PtrSize + n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_ + mask = make([]byte, n/goarch.PtrSize) + for i := uintptr(0); i < n; i += goarch.PtrSize { + off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize + mask[i/goarch.PtrSize] = locals.ptrbit(off) + } + } + return + } + + // otherwise, not something the GC knows about. + // possibly read-only data, like malloc(0). + // must not have pointers + return +} + +// userArenaHeapBitsSetType is the equivalent of heapBitsSetType but for +// non-slice-backing-store Go values allocated in a user arena chunk. It +// sets up the heap bitmap for the value with type typ allocated at address ptr. +// base is the base address of the arena chunk. +func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan) { + base := s.base() + h := writeHeapBitsForAddr(uintptr(ptr)) + + // Our last allocation might have ended right at a noMorePtrs mark, + // which we would not have erased. We need to erase that mark here, + // because we're going to start adding new heap bitmap bits. + // We only need to clear one mark, because below we make sure to + // pad out the bits with zeroes and only write one noMorePtrs bit + // for each new object. + // (This is only necessary at noMorePtrs boundaries, as noMorePtrs + // marks within an object allocated with newAt will be erased by + // the normal writeHeapBitsForAddr mechanism.) 
+ // + // Note that we skip this if this is the first allocation in the + // arena because there's definitely no previous noMorePtrs mark + // (in fact, we *must* do this, because we're going to try to back + // up a pointer to fix this up). + if uintptr(ptr)%(8*goarch.PtrSize*goarch.PtrSize) == 0 && uintptr(ptr) != base { + // Back up one pointer and rewrite that pointer. That will + // cause the writeHeapBits implementation to clear the + // noMorePtrs bit we need to clear. + r := heapBitsForAddr(uintptr(ptr)-goarch.PtrSize, goarch.PtrSize) + _, p := r.next() + b := uintptr(0) + if p == uintptr(ptr)-goarch.PtrSize { + b = 1 + } + h = writeHeapBitsForAddr(uintptr(ptr) - goarch.PtrSize) + h = h.write(b, 1) + } + + p := typ.GCData // start of 1-bit pointer mask (or GC program) + var gcProgBits uintptr + if typ.Kind_&kindGCProg != 0 { + // Expand gc program, using the object itself for storage. + gcProgBits = runGCProg(addb(p, 4), (*byte)(ptr)) + p = (*byte)(ptr) + } + nb := typ.PtrBytes / goarch.PtrSize + + for i := uintptr(0); i < nb; i += ptrBits { + k := nb - i + if k > ptrBits { + k = ptrBits + } + h = h.write(readUintptr(addb(p, i/8)), k) + } + // Note: we call pad here to ensure we emit explicit 0 bits + // for the pointerless tail of the object. This ensures that + // there's only a single noMorePtrs mark for the next object + // to clear. We don't need to do this to clear stale noMorePtrs + // markers from previous uses because arena chunk pointer bitmaps + // are always fully cleared when reused. + h = h.pad(typ.Size_ - typ.PtrBytes) + h.flush(uintptr(ptr), typ.Size_) + + if typ.Kind_&kindGCProg != 0 { + // Zero out temporary ptrmask buffer inside object. + memclrNoHeapPointers(ptr, (gcProgBits+7)/8) + } + + // Double-check that the bitmap was written out correctly. + // + // Derived from heapBitsSetType. + const doubleCheck = false + if doubleCheck { + size := typ.Size_ + x := uintptr(ptr) + h := heapBitsForAddr(x, size) + for i := uintptr(0); i < size; i += goarch.PtrSize { + // Compute the pointer bit we want at offset i. + want := false + off := i % typ.Size_ + if off < typ.PtrBytes { + j := off / goarch.PtrSize + want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0 + } + if want { + var addr uintptr + h, addr = h.next() + if addr != x+i { + throw("userArenaHeapBitsSetType: pointer entry not correct") + } + } + } + if _, addr := h.next(); addr != 0 { + throw("userArenaHeapBitsSetType: extra pointer") + } + } +} + +// For goexperiment.AllocHeaders. +type typePointers struct { + addr uintptr +} + +// For goexperiment.AllocHeaders. +// +//go:nosplit +func (span *mspan) typePointersOf(addr, size uintptr) typePointers { + panic("not implemented") +} + +// For goexperiment.AllocHeaders. +// +//go:nosplit +func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers { + panic("not implemented") +} + +// For goexperiment.AllocHeaders. +// +//go:nosplit +func (tp typePointers) nextFast() (typePointers, uintptr) { + panic("not implemented") +} + +// For goexperiment.AllocHeaders. +// +//go:nosplit +func (tp typePointers) next(limit uintptr) (typePointers, uintptr) { + panic("not implemented") +} + +// For goexperiment.AllocHeaders. +// +//go:nosplit +func (tp typePointers) fastForward(n, limit uintptr) typePointers { + panic("not implemented") +} + +// For goexperiment.AllocHeaders, to pass TestIntendedInlining. +func (s *mspan) writeUserArenaHeapBits() { + panic("not implemented") +} + +// For goexperiment.AllocHeaders, to pass TestIntendedInlining. 
+func heapBitsSlice() {
+	panic("not implemented")
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/mcache.go b/platform/dbops/binaries/go/go/src/runtime/mcache.go
new file mode 100644
index 0000000000000000000000000000000000000000..d4b6eef13ac41fb5b84c893e1eeb1b20fe8fd034
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/mcache.go
@@ -0,0 +1,331 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+	"runtime/internal/atomic"
+	"runtime/internal/sys"
+	"unsafe"
+)
+
+// Per-thread (in Go, per-P) cache for small objects.
+// This includes a small object cache and local allocation stats.
+// No locking needed because it is per-thread (per-P).
+//
+// mcaches are allocated from non-GC'd memory, so any heap pointers
+// must be specially handled.
+type mcache struct {
+	_ sys.NotInHeap
+
+	// The following members are accessed on every malloc,
+	// so they are grouped here for better caching.
+	nextSample uintptr // trigger heap sample after allocating this many bytes
+	scanAlloc  uintptr // bytes of scannable heap allocated
+
+	// Allocator cache for tiny objects w/o pointers.
+	// See "Tiny allocator" comment in malloc.go.
+
+	// tiny points to the beginning of the current tiny block, or
+	// nil if there is no current tiny block.
+	//
+	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
+	// we handle it by clearing it in releaseAll during mark
+	// termination.
+	//
+	// tinyAllocs is the number of tiny allocations performed
+	// by the P that owns this mcache.
+	tiny       uintptr
+	tinyoffset uintptr
+	tinyAllocs uintptr
+
+	// The rest is not accessed on every malloc.
+
+	alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass
+
+	stackcache [_NumStackOrders]stackfreelist
+
+	// flushGen indicates the sweepgen during which this mcache
+	// was last flushed. If flushGen != mheap_.sweepgen, the spans
+	// in this mcache are stale and need to be flushed so they
+	// can be swept. This is done in acquirep.
+	flushGen atomic.Uint32
+}
+
+// A gclink is a node in a linked list of blocks, like mlink,
+// but it is opaque to the garbage collector.
+// The GC does not trace the pointers during collection,
+// and the compiler does not emit write barriers for assignments
+// of gclinkptr values. Code should store references to gclinks
+// as gclinkptr, not as *gclink.
+type gclink struct {
+	next gclinkptr
+}
+
+// A gclinkptr is a pointer to a gclink, but it is opaque
+// to the garbage collector.
+type gclinkptr uintptr
+
+// ptr returns the *gclink form of p.
+// The result should be used for accessing fields, not stored
+// in other data structures.
+func (p gclinkptr) ptr() *gclink {
+	return (*gclink)(unsafe.Pointer(p))
+}
+
+type stackfreelist struct {
+	list gclinkptr // linked list of free stacks
+	size uintptr   // total size of stacks in list
+}
+
+// dummy mspan that contains no free objects.
+var emptymspan mspan
+
+func allocmcache() *mcache {
+	var c *mcache
+	systemstack(func() {
+		lock(&mheap_.lock)
+		c = (*mcache)(mheap_.cachealloc.alloc())
+		c.flushGen.Store(mheap_.sweepgen)
+		unlock(&mheap_.lock)
+	})
+	for i := range c.alloc {
+		c.alloc[i] = &emptymspan
+	}
+	c.nextSample = nextSample()
+	return c
+}
+
+// freemcache releases resources associated with this
+// mcache and puts the object onto a free list.
+// +// In some cases there is no way to simply release +// resources, such as statistics, so donate them to +// a different mcache (the recipient). +func freemcache(c *mcache) { + systemstack(func() { + c.releaseAll() + stackcache_clear(c) + + // NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate + // with the stealing of gcworkbufs during garbage collection to avoid + // a race where the workbuf is double-freed. + // gcworkbuffree(c.gcworkbuf) + + lock(&mheap_.lock) + mheap_.cachealloc.free(unsafe.Pointer(c)) + unlock(&mheap_.lock) + }) +} + +// getMCache is a convenience function which tries to obtain an mcache. +// +// Returns nil if we're not bootstrapping or we don't have a P. The caller's +// P must not change, so we must be in a non-preemptible state. +func getMCache(mp *m) *mcache { + // Grab the mcache, since that's where stats live. + pp := mp.p.ptr() + var c *mcache + if pp == nil { + // We will be called without a P while bootstrapping, + // in which case we use mcache0, which is set in mallocinit. + // mcache0 is cleared when bootstrapping is complete, + // by procresize. + c = mcache0 + } else { + c = pp.mcache + } + return c +} + +// refill acquires a new span of span class spc for c. This span will +// have at least one free object. The current span in c must be full. +// +// Must run in a non-preemptible context since otherwise the owner of +// c could change. +func (c *mcache) refill(spc spanClass) { + // Return the current cached span to the central lists. + s := c.alloc[spc] + + if s.allocCount != s.nelems { + throw("refill of span with free space remaining") + } + if s != &emptymspan { + // Mark this span as no longer cached. + if s.sweepgen != mheap_.sweepgen+3 { + throw("bad sweepgen in refill") + } + mheap_.central[spc].mcentral.uncacheSpan(s) + + // Count up how many slots were used and record it. + stats := memstats.heapStats.acquire() + slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache) + atomic.Xadd64(&stats.smallAllocCount[spc.sizeclass()], slotsUsed) + + // Flush tinyAllocs. + if spc == tinySpanClass { + atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs)) + c.tinyAllocs = 0 + } + memstats.heapStats.release() + + // Count the allocs in inconsistent, internal stats. + bytesAllocated := slotsUsed * int64(s.elemsize) + gcController.totalAlloc.Add(bytesAllocated) + + // Clear the second allocCount just to be safe. + s.allocCountBeforeCache = 0 + } + + // Get a new cached span from the central lists. + s = mheap_.central[spc].mcentral.cacheSpan() + if s == nil { + throw("out of memory") + } + + if s.allocCount == s.nelems { + throw("span has no free space") + } + + // Indicate that this span is cached and prevent asynchronous + // sweeping in the next sweep phase. + s.sweepgen = mheap_.sweepgen + 3 + + // Store the current alloc count for accounting later. + s.allocCountBeforeCache = s.allocCount + + // Update heapLive and flush scanAlloc. + // + // We have not yet allocated anything new into the span, but we + // assume that all of its slots will get used, so this makes + // heapLive an overestimate. + // + // When the span gets uncached, we'll fix up this overestimate + // if necessary (see releaseAll). + // + // We pick an overestimate here because an underestimate leads + // the pacer to believe that it's in better shape than it is, + // which appears to lead to more memory used. See #53738 for + // more details. 
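+	//
+	// To make the accounting concrete (illustrative numbers, not
+	// from this code): a one-page span (8192 bytes) of 48-byte
+	// elements holds 170 slots. If 10 are already allocated when the
+	// span is cached, usedBytes is 480 and heapLive grows by
+	// 8192-480 = 7712, as if all 160 remaining slots were live.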
+ usedBytes := uintptr(s.allocCount) * s.elemsize + gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc)) + c.scanAlloc = 0 + + c.alloc[spc] = s +} + +// allocLarge allocates a span for a large object. +func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan { + if size+_PageSize < size { + throw("out of memory") + } + npages := size >> _PageShift + if size&_PageMask != 0 { + npages++ + } + + // Deduct credit for this span allocation and sweep if + // necessary. mHeap_Alloc will also sweep npages, so this only + // pays the debt down to npage pages. + deductSweepCredit(npages*_PageSize, npages) + + spc := makeSpanClass(0, noscan) + s := mheap_.alloc(npages, spc) + if s == nil { + throw("out of memory") + } + + // Count the alloc in consistent, external stats. + stats := memstats.heapStats.acquire() + atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize)) + atomic.Xadd64(&stats.largeAllocCount, 1) + memstats.heapStats.release() + + // Count the alloc in inconsistent, internal stats. + gcController.totalAlloc.Add(int64(npages * pageSize)) + + // Update heapLive. + gcController.update(int64(s.npages*pageSize), 0) + + // Put the large span in the mcentral swept list so that it's + // visible to the background sweeper. + mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s) + s.limit = s.base() + size + s.initHeapBits(false) + return s +} + +func (c *mcache) releaseAll() { + // Take this opportunity to flush scanAlloc. + scanAlloc := int64(c.scanAlloc) + c.scanAlloc = 0 + + sg := mheap_.sweepgen + dHeapLive := int64(0) + for i := range c.alloc { + s := c.alloc[i] + if s != &emptymspan { + slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache) + s.allocCountBeforeCache = 0 + + // Adjust smallAllocCount for whatever was allocated. + stats := memstats.heapStats.acquire() + atomic.Xadd64(&stats.smallAllocCount[spanClass(i).sizeclass()], slotsUsed) + memstats.heapStats.release() + + // Adjust the actual allocs in inconsistent, internal stats. + // We assumed earlier that the full span gets allocated. + gcController.totalAlloc.Add(slotsUsed * int64(s.elemsize)) + + if s.sweepgen != sg+1 { + // refill conservatively counted unallocated slots in gcController.heapLive. + // Undo this. + // + // If this span was cached before sweep, then gcController.heapLive was totally + // recomputed since caching this span, so we don't do this for stale spans. + dHeapLive -= int64(s.nelems-s.allocCount) * int64(s.elemsize) + } + + // Release the span to the mcentral. + mheap_.central[i].mcentral.uncacheSpan(s) + c.alloc[i] = &emptymspan + } + } + // Clear tinyalloc pool. + c.tiny = 0 + c.tinyoffset = 0 + + // Flush tinyAllocs. + stats := memstats.heapStats.acquire() + atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs)) + c.tinyAllocs = 0 + memstats.heapStats.release() + + // Update heapLive and heapScan. + gcController.update(dHeapLive, scanAlloc) +} + +// prepareForSweep flushes c if the system has entered a new sweep phase +// since c was populated. This must happen between the sweep phase +// starting and the first allocation from c. +func (c *mcache) prepareForSweep() { + // Alternatively, instead of making sure we do this on every P + // between starting the world and allocating on that P, we + // could leave allocate-black on, allow allocation to continue + // as usual, use a ragged barrier at the beginning of sweep to + // ensure all cached spans are swept, and then disable + // allocate-black. 
However, with this approach it's difficult + // to avoid spilling mark bits into the *next* GC cycle. + sg := mheap_.sweepgen + flushGen := c.flushGen.Load() + if flushGen == sg { + return + } else if flushGen != sg-2 { + println("bad flushGen", flushGen, "in prepareForSweep; sweepgen", sg) + throw("bad flushGen") + } + c.releaseAll() + stackcache_clear(c) + c.flushGen.Store(mheap_.sweepgen) // Synchronizes with gcStart +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mcentral.go b/platform/dbops/binaries/go/go/src/runtime/mcentral.go new file mode 100644 index 0000000000000000000000000000000000000000..e190b56c86b3ef4c0de2569056e94a1c0dc896b6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mcentral.go @@ -0,0 +1,265 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Central free lists. +// +// See malloc.go for an overview. +// +// The mcentral doesn't actually contain the list of free objects; the mspan does. +// Each mcentral is two lists of mspans: those with free objects (c->nonempty) +// and those that are completely allocated (c->empty). + +package runtime + +import ( + "runtime/internal/atomic" + "runtime/internal/sys" +) + +// Central list of free objects of a given size. +type mcentral struct { + _ sys.NotInHeap + spanclass spanClass + + // partial and full contain two mspan sets: one of swept in-use + // spans, and one of unswept in-use spans. These two trade + // roles on each GC cycle. The unswept set is drained either by + // allocation or by the background sweeper in every GC cycle, + // so only two roles are necessary. + // + // sweepgen is increased by 2 on each GC cycle, so the swept + // spans are in partial[sweepgen/2%2] and the unswept spans are in + // partial[1-sweepgen/2%2]. Sweeping pops spans from the + // unswept set and pushes spans that are still in-use on the + // swept set. Likewise, allocating an in-use span pushes it + // on the swept set. + // + // Some parts of the sweeper can sweep arbitrary spans, and hence + // can't remove them from the unswept set, but will add the span + // to the appropriate swept list. As a result, the parts of the + // sweeper and mcentral that do consume from the unswept list may + // encounter swept spans, and these should be ignored. + partial [2]spanSet // list of spans with a free object + full [2]spanSet // list of spans with no free objects +} + +// Initialize a single central free list. +func (c *mcentral) init(spc spanClass) { + c.spanclass = spc + lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine) + lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine) + lockInit(&c.full[0].spineLock, lockRankSpanSetSpine) + lockInit(&c.full[1].spineLock, lockRankSpanSetSpine) +} + +// partialUnswept returns the spanSet which holds partially-filled +// unswept spans for this sweepgen. +func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet { + return &c.partial[1-sweepgen/2%2] +} + +// partialSwept returns the spanSet which holds partially-filled +// swept spans for this sweepgen. +func (c *mcentral) partialSwept(sweepgen uint32) *spanSet { + return &c.partial[sweepgen/2%2] +} + +// fullUnswept returns the spanSet which holds unswept spans without any +// free slots for this sweepgen. +func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet { + return &c.full[1-sweepgen/2%2] +} + +// fullSwept returns the spanSet which holds swept spans without any +// free slots for this sweepgen. 
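+//
+// To make the parity arithmetic concrete: with sweepgen == 4 (an
+// illustrative value), fullSwept returns &c.full[0] and fullUnswept
+// returns &c.full[1]; once the next cycle bumps sweepgen to 6 the two
+// sets trade roles without moving any spans.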
+func (c *mcentral) fullSwept(sweepgen uint32) *spanSet { + return &c.full[sweepgen/2%2] +} + +// Allocate a span to use in an mcache. +func (c *mcentral) cacheSpan() *mspan { + // Deduct credit for this span allocation and sweep if necessary. + spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize + deductSweepCredit(spanBytes, 0) + + traceDone := false + trace := traceAcquire() + if trace.ok() { + trace.GCSweepStart() + traceRelease(trace) + } + + // If we sweep spanBudget spans without finding any free + // space, just allocate a fresh span. This limits the amount + // of time we can spend trying to find free space and + // amortizes the cost of small object sweeping over the + // benefit of having a full free span to allocate from. By + // setting this to 100, we limit the space overhead to 1%. + // + // TODO(austin,mknyszek): This still has bad worst-case + // throughput. For example, this could find just one free slot + // on the 100th swept span. That limits allocation latency, but + // still has very poor throughput. We could instead keep a + // running free-to-used budget and switch to fresh span + // allocation if the budget runs low. + spanBudget := 100 + + var s *mspan + var sl sweepLocker + + // Try partial swept spans first. + sg := mheap_.sweepgen + if s = c.partialSwept(sg).pop(); s != nil { + goto havespan + } + + sl = sweep.active.begin() + if sl.valid { + // Now try partial unswept spans. + for ; spanBudget >= 0; spanBudget-- { + s = c.partialUnswept(sg).pop() + if s == nil { + break + } + if s, ok := sl.tryAcquire(s); ok { + // We got ownership of the span, so let's sweep it and use it. + s.sweep(true) + sweep.active.end(sl) + goto havespan + } + // We failed to get ownership of the span, which means it's being or + // has been swept by an asynchronous sweeper that just couldn't remove it + // from the unswept list. That sweeper took ownership of the span and + // responsibility for either freeing it to the heap or putting it on the + // right swept list. Either way, we should just ignore it (and it's unsafe + // for us to do anything else). + } + // Now try full unswept spans, sweeping them and putting them into the + // right list if we fail to get a span. + for ; spanBudget >= 0; spanBudget-- { + s = c.fullUnswept(sg).pop() + if s == nil { + break + } + if s, ok := sl.tryAcquire(s); ok { + // We got ownership of the span, so let's sweep it. + s.sweep(true) + // Check if there's any free space. + freeIndex := s.nextFreeIndex() + if freeIndex != s.nelems { + s.freeindex = freeIndex + sweep.active.end(sl) + goto havespan + } + // Add it to the swept list, because sweeping didn't give us any free space. + c.fullSwept(sg).push(s.mspan) + } + // See comment for partial unswept spans. + } + sweep.active.end(sl) + } + trace = traceAcquire() + if trace.ok() { + trace.GCSweepDone() + traceDone = true + traceRelease(trace) + } + + // We failed to get a span from the mcentral so get one from mheap. + s = c.grow() + if s == nil { + return nil + } + + // At this point s is a span that should have free slots. +havespan: + if !traceDone { + trace := traceAcquire() + if trace.ok() { + trace.GCSweepDone() + traceRelease(trace) + } + } + n := int(s.nelems) - int(s.allocCount) + if n == 0 || s.freeindex == s.nelems || s.allocCount == s.nelems { + throw("span has no free objects") + } + freeByteBase := s.freeindex &^ (64 - 1) + whichByte := freeByteBase / 8 + // Init alloc bits cache. 
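+	// freeByteBase is s.freeindex rounded down to a multiple of 64,
+	// so whichByte is the byte offset of the 64-bit word of the
+	// allocBits that holds s.freeindex. For example (illustrative),
+	// freeindex 70 gives freeByteBase 64 and whichByte 8.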
+ s.refillAllocCache(whichByte) + + // Adjust the allocCache so that s.freeindex corresponds to the low bit in + // s.allocCache. + s.allocCache >>= s.freeindex % 64 + + return s +} + +// Return span from an mcache. +// +// s must have a span class corresponding to this +// mcentral and it must not be empty. +func (c *mcentral) uncacheSpan(s *mspan) { + if s.allocCount == 0 { + throw("uncaching span but s.allocCount == 0") + } + + sg := mheap_.sweepgen + stale := s.sweepgen == sg+1 + + // Fix up sweepgen. + if stale { + // Span was cached before sweep began. It's our + // responsibility to sweep it. + // + // Set sweepgen to indicate it's not cached but needs + // sweeping and can't be allocated from. sweep will + // set s.sweepgen to indicate s is swept. + atomic.Store(&s.sweepgen, sg-1) + } else { + // Indicate that s is no longer cached. + atomic.Store(&s.sweepgen, sg) + } + + // Put the span in the appropriate place. + if stale { + // It's stale, so just sweep it. Sweeping will put it on + // the right list. + // + // We don't use a sweepLocker here. Stale cached spans + // aren't in the global sweep lists, so mark termination + // itself holds up sweep completion until all mcaches + // have been swept. + ss := sweepLocked{s} + ss.sweep(false) + } else { + if int(s.nelems)-int(s.allocCount) > 0 { + // Put it back on the partial swept list. + c.partialSwept(sg).push(s) + } else { + // There's no free space and it's not stale, so put it on the + // full swept list. + c.fullSwept(sg).push(s) + } + } +} + +// grow allocates a new empty span from the heap and initializes it for c's size class. +func (c *mcentral) grow() *mspan { + npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) + size := uintptr(class_to_size[c.spanclass.sizeclass()]) + + s := mheap_.alloc(npages, c.spanclass) + if s == nil { + return nil + } + + // Use division by multiplication and shifts to quickly compute: + // n := (npages << _PageShift) / size + n := s.divideByElemSize(npages << _PageShift) + s.limit = s.base() + size*n + s.initHeapBits(false) + return s +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mcheckmark.go b/platform/dbops/binaries/go/go/src/runtime/mcheckmark.go new file mode 100644 index 0000000000000000000000000000000000000000..73c1a10d23bc7d76c2ead06acf2f0a117588e075 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mcheckmark.go @@ -0,0 +1,104 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// GC checkmarks +// +// In a concurrent garbage collector, one worries about failing to mark +// a live object due to mutations without write barriers or bugs in the +// collector implementation. As a sanity check, the GC has a 'checkmark' +// mode that retraverses the object graph with the world stopped, to make +// sure that everything that should be marked is marked. + +package runtime + +import ( + "internal/goarch" + "runtime/internal/atomic" + "runtime/internal/sys" + "unsafe" +) + +// A checkmarksMap stores the GC marks in "checkmarks" mode. It is a +// per-arena bitmap with a bit for every word in the arena. The mark +// is stored on the bit corresponding to the first word of the marked +// allocation. +type checkmarksMap struct { + _ sys.NotInHeap + b [heapArenaBytes / goarch.PtrSize / 8]uint8 +} + +// If useCheckmark is true, marking of an object uses the checkmark +// bits instead of the standard mark bits. 
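+// The flag is set in startCheckmarks and cleared in endCheckmarks
+// below; as the comment at the top of this file notes, the whole
+// checkmark pass runs with the world stopped.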
+var useCheckmark = false
+
+// startCheckmarks prepares for the checkmarks phase.
+//
+// The world must be stopped.
func startCheckmarks() {
+	assertWorldStopped()
+
+	// Clear all checkmarks.
+	for _, ai := range mheap_.allArenas {
+		arena := mheap_.arenas[ai.l1()][ai.l2()]
+		bitmap := arena.checkmarks
+
+		if bitmap == nil {
+			// Allocate bitmap on first use.
+			bitmap = (*checkmarksMap)(persistentalloc(unsafe.Sizeof(*bitmap), 0, &memstats.gcMiscSys))
+			if bitmap == nil {
+				throw("out of memory allocating checkmarks bitmap")
+			}
+			arena.checkmarks = bitmap
+		} else {
+			// Otherwise clear the existing bitmap.
+			for i := range bitmap.b {
+				bitmap.b[i] = 0
+			}
+		}
+	}
+	// Enable checkmarking.
+	useCheckmark = true
+}
+
+// endCheckmarks ends the checkmarks phase.
+func endCheckmarks() {
+	if gcMarkWorkAvailable(nil) {
+		throw("GC work not flushed")
+	}
+	useCheckmark = false
+}
+
+// setCheckmark throws if marking obj is a checkmarks violation,
+// and otherwise sets obj's checkmark. It returns true if obj was
+// already checkmarked.
+func setCheckmark(obj, base, off uintptr, mbits markBits) bool {
+	if !mbits.isMarked() {
+		printlock()
+		print("runtime: checkmarks found unexpected unmarked object obj=", hex(obj), "\n")
+		print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")
+
+		// Dump the source (base) object
+		gcDumpObject("base", base, off)
+
+		// Dump the object
+		gcDumpObject("obj", obj, ^uintptr(0))
+
+		getg().m.traceback = 2
+		throw("checkmark found unmarked object")
+	}
+
+	// The checkmarks bitmap holds one bit per word of the arena, so
+	// index it by obj's word number, not by heapArenaBytes.
+	ai := arenaIndex(obj)
+	arena := mheap_.arenas[ai.l1()][ai.l2()]
+	arenaWord := (obj / goarch.PtrSize / 8) % uintptr(len(arena.checkmarks.b))
+	mask := byte(1 << ((obj / goarch.PtrSize) % 8))
+	bytep := &arena.checkmarks.b[arenaWord]
+
+	if atomic.Load8(bytep)&mask != 0 {
+		// Already checkmarked.
+		return true
+	}
+
+	atomic.Or8(bytep, mask)
+	return false
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/mem.go b/platform/dbops/binaries/go/go/src/runtime/mem.go
new file mode 100644
index 0000000000000000000000000000000000000000..22688d51d5e3fe7b618b9669ea6c31e8661cce5d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/mem.go
@@ -0,0 +1,156 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// OS memory management abstraction layer
+//
+// Regions of the address space managed by the runtime may be in one of four
+// states at any given time:
+// 1) None - Unreserved and unmapped, the default state of any region.
+// 2) Reserved - Owned by the runtime, but accessing it would cause a fault.
+//               Does not count against the process' memory footprint.
+// 3) Prepared - Reserved, intended not to be backed by physical memory (though
+//               an OS may implement this lazily). Can transition efficiently to
+//               Ready. Accessing memory in such a region is undefined (may
+//               fault, may give back unexpected zeroes, etc.).
+// 4) Ready - may be accessed safely.
+//
+// This set of states is more than is strictly necessary to support all the
+// currently supported platforms. One could get by with just None, Reserved, and
+// Ready. However, the Prepared state gives us flexibility for performance
+// purposes. For example, on POSIX-y operating systems, Reserved is usually a
+// private anonymous mmap'd region with PROT_NONE set, and to transition
+// to Ready would require setting PROT_READ|PROT_WRITE.
However the +// underspecification of Prepared lets us use just MADV_FREE to transition from +// Ready to Prepared. Thus with the Prepared state we can set the permission +// bits just once early on, we can efficiently tell the OS that it's free to +// take pages away from us when we don't strictly need them. +// +// This file defines a cross-OS interface for a common set of helpers +// that transition memory regions between these states. The helpers call into +// OS-specific implementations that handle errors, while the interface boundary +// implements cross-OS functionality, like updating runtime accounting. + +// sysAlloc transitions an OS-chosen region of memory from None to Ready. +// More specifically, it obtains a large chunk of zeroed memory from the +// operating system, typically on the order of a hundred kilobytes +// or a megabyte. This memory is always immediately available for use. +// +// sysStat must be non-nil. +// +// Don't split the stack as this function may be invoked without a valid G, +// which prevents us from allocating more stack. +// +//go:nosplit +func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer { + sysStat.add(int64(n)) + gcController.mappedReady.Add(int64(n)) + return sysAllocOS(n) +} + +// sysUnused transitions a memory region from Ready to Prepared. It notifies the +// operating system that the physical pages backing this memory region are no +// longer needed and can be reused for other purposes. The contents of a +// sysUnused memory region are considered forfeit and the region must not be +// accessed again until sysUsed is called. +func sysUnused(v unsafe.Pointer, n uintptr) { + gcController.mappedReady.Add(-int64(n)) + sysUnusedOS(v, n) +} + +// sysUsed transitions a memory region from Prepared to Ready. It notifies the +// operating system that the memory region is needed and ensures that the region +// may be safely accessed. This is typically a no-op on systems that don't have +// an explicit commit step and hard over-commit limits, but is critical on +// Windows, for example. +// +// This operation is idempotent for memory already in the Prepared state, so +// it is safe to refer, with v and n, to a range of memory that includes both +// Prepared and Ready memory. However, the caller must provide the exact amount +// of Prepared memory for accounting purposes. +func sysUsed(v unsafe.Pointer, n, prepared uintptr) { + gcController.mappedReady.Add(int64(prepared)) + sysUsedOS(v, n) +} + +// sysHugePage does not transition memory regions, but instead provides a +// hint to the OS that it would be more efficient to back this memory region +// with pages of a larger size transparently. +func sysHugePage(v unsafe.Pointer, n uintptr) { + sysHugePageOS(v, n) +} + +// sysNoHugePage does not transition memory regions, but instead provides a +// hint to the OS that it would be less efficient to back this memory region +// with pages of a larger size transparently. +func sysNoHugePage(v unsafe.Pointer, n uintptr) { + sysNoHugePageOS(v, n) +} + +// sysHugePageCollapse attempts to immediately back the provided memory region +// with huge pages. It is best-effort and may fail silently. +func sysHugePageCollapse(v unsafe.Pointer, n uintptr) { + sysHugePageCollapseOS(v, n) +} + +// sysFree transitions a memory region from any state to None. Therefore, it +// returns memory unconditionally. It is used if an out-of-memory error has been +// detected midway through an allocation or to carve out an aligned section of +// the address space. 
It is okay if sysFree is a no-op only if sysReserve always +// returns a memory region aligned to the heap allocator's alignment +// restrictions. +// +// sysStat must be non-nil. +// +// Don't split the stack as this function may be invoked without a valid G, +// which prevents us from allocating more stack. +// +//go:nosplit +func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(-int64(n)) + gcController.mappedReady.Add(-int64(n)) + sysFreeOS(v, n) +} + +// sysFault transitions a memory region from Ready to Reserved. It +// marks a region such that it will always fault if accessed. Used only for +// debugging the runtime. +// +// TODO(mknyszek): Currently it's true that all uses of sysFault transition +// memory from Ready to Reserved, but this may not be true in the future +// since on every platform the operation is much more general than that. +// If a transition from Prepared is ever introduced, create a new function +// that elides the Ready state accounting. +func sysFault(v unsafe.Pointer, n uintptr) { + gcController.mappedReady.Add(-int64(n)) + sysFaultOS(v, n) +} + +// sysReserve transitions a memory region from None to Reserved. It reserves +// address space in such a way that it would cause a fatal fault upon access +// (either via permissions or not committing the memory). Such a reservation is +// thus never backed by physical memory. +// +// If the pointer passed to it is non-nil, the caller wants the +// reservation there, but sysReserve can still choose another +// location if that one is unavailable. +// +// NOTE: sysReserve returns OS-aligned memory, but the heap allocator +// may use larger alignment, so the caller must be careful to realign the +// memory obtained by sysReserve. +func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer { + return sysReserveOS(v, n) +} + +// sysMap transitions a memory region from Reserved to Prepared. It ensures the +// memory region can be efficiently transitioned to Ready. +// +// sysStat must be non-nil. +func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(int64(n)) + sysMapOS(v, n) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mem_aix.go b/platform/dbops/binaries/go/go/src/runtime/mem_aix.go new file mode 100644 index 0000000000000000000000000000000000000000..dff2756d971ab1d36d948a8246b58d1aa1fd990c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mem_aix.go @@ -0,0 +1,81 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "unsafe" +) + +// Don't split the stack as this method may be invoked without a valid G, which +// prevents us from allocating more stack. 
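+// On failure, EACCES and EAGAIN (typically too much locked memory;
+// see 'ulimit -l') are treated as fatal below, while any other mmap
+// error is reported to the caller as a nil pointer.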
+// +//go:nosplit +func sysAllocOS(n uintptr) unsafe.Pointer { + p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0) + if err != 0 { + if err == _EACCES { + print("runtime: mmap: access denied\n") + exit(2) + } + if err == _EAGAIN { + print("runtime: mmap: too much locked memory (check 'ulimit -l').\n") + exit(2) + } + return nil + } + return p +} + +func sysUnusedOS(v unsafe.Pointer, n uintptr) { + madvise(v, n, _MADV_DONTNEED) +} + +func sysUsedOS(v unsafe.Pointer, n uintptr) { +} + +func sysHugePageOS(v unsafe.Pointer, n uintptr) { +} + +func sysNoHugePageOS(v unsafe.Pointer, n uintptr) { +} + +func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) { +} + +// Don't split the stack as this function may be invoked without a valid G, +// which prevents us from allocating more stack. +// +//go:nosplit +func sysFreeOS(v unsafe.Pointer, n uintptr) { + munmap(v, n) +} + +func sysFaultOS(v unsafe.Pointer, n uintptr) { + mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0) +} + +func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer { + p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0) + if err != 0 { + return nil + } + return p +} + +func sysMapOS(v unsafe.Pointer, n uintptr) { + // AIX does not allow mapping a range that is already mapped. + // So, call mprotect to change permissions. + // Note that sysMap is always called with a non-nil pointer + // since it transitions a Reserved memory region to Prepared, + // so mprotect is always possible. + _, err := mprotect(v, n, _PROT_READ|_PROT_WRITE) + if err == _ENOMEM { + throw("runtime: out of memory") + } + if err != 0 { + print("runtime: mprotect(", v, ", ", n, ") returned ", err, "\n") + throw("runtime: cannot map pages in arena address space") + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mem_bsd.go b/platform/dbops/binaries/go/go/src/runtime/mem_bsd.go new file mode 100644 index 0000000000000000000000000000000000000000..78128aedf7b069e1028a6cc7fec03905cc5abbc1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mem_bsd.go @@ -0,0 +1,87 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build dragonfly || freebsd || netbsd || openbsd || solaris + +package runtime + +import ( + "unsafe" +) + +// Don't split the stack as this function may be invoked without a valid G, +// which prevents us from allocating more stack. +// +//go:nosplit +func sysAllocOS(n uintptr) unsafe.Pointer { + v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0) + if err != 0 { + return nil + } + return v +} + +func sysUnusedOS(v unsafe.Pointer, n uintptr) { + if debug.madvdontneed != 0 { + madvise(v, n, _MADV_DONTNEED) + } else { + madvise(v, n, _MADV_FREE) + } +} + +func sysUsedOS(v unsafe.Pointer, n uintptr) { +} + +func sysHugePageOS(v unsafe.Pointer, n uintptr) { +} + +func sysNoHugePageOS(v unsafe.Pointer, n uintptr) { +} + +func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) { +} + +// Don't split the stack as this function may be invoked without a valid G, +// which prevents us from allocating more stack. +// +//go:nosplit +func sysFreeOS(v unsafe.Pointer, n uintptr) { + munmap(v, n) +} + +func sysFaultOS(v unsafe.Pointer, n uintptr) { + mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0) +} + +// Indicates not to reserve swap space for the mapping. 
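+// (This is the Solaris/illumos MAP_NORESERVE bit; sysReserveOS below
+// passes it so that large PROT_NONE reservations don't eat into swap,
+// which could otherwise make fork fail.)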
+const _sunosMAP_NORESERVE = 0x40 + +func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer { + flags := int32(_MAP_ANON | _MAP_PRIVATE) + if GOOS == "solaris" || GOOS == "illumos" { + // Be explicit that we don't want to reserve swap space + // for PROT_NONE anonymous mappings. This avoids an issue + // wherein large mappings can cause fork to fail. + flags |= _sunosMAP_NORESERVE + } + p, err := mmap(v, n, _PROT_NONE, flags, -1, 0) + if err != 0 { + return nil + } + return p +} + +const _sunosEAGAIN = 11 +const _ENOMEM = 12 + +func sysMapOS(v unsafe.Pointer, n uintptr) { + p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0) + if err == _ENOMEM || ((GOOS == "solaris" || GOOS == "illumos") && err == _sunosEAGAIN) { + throw("runtime: out of memory") + } + if p != v || err != 0 { + print("runtime: mmap(", v, ", ", n, ") returned ", p, ", ", err, "\n") + throw("runtime: cannot map pages in arena address space") + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mem_darwin.go b/platform/dbops/binaries/go/go/src/runtime/mem_darwin.go new file mode 100644 index 0000000000000000000000000000000000000000..ae8487127cfdd7aabd903cc06125795d6c8849fd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mem_darwin.go @@ -0,0 +1,76 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "unsafe" +) + +// Don't split the stack as this function may be invoked without a valid G, +// which prevents us from allocating more stack. +// +//go:nosplit +func sysAllocOS(n uintptr) unsafe.Pointer { + v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0) + if err != 0 { + return nil + } + return v +} + +func sysUnusedOS(v unsafe.Pointer, n uintptr) { + // MADV_FREE_REUSABLE is like MADV_FREE except it also propagates + // accounting information about the process to task_info. + madvise(v, n, _MADV_FREE_REUSABLE) +} + +func sysUsedOS(v unsafe.Pointer, n uintptr) { + // MADV_FREE_REUSE is necessary to keep the kernel's accounting + // accurate. If called on any memory region that hasn't been + // MADV_FREE_REUSABLE'd, it's a no-op. + madvise(v, n, _MADV_FREE_REUSE) +} + +func sysHugePageOS(v unsafe.Pointer, n uintptr) { +} + +func sysNoHugePageOS(v unsafe.Pointer, n uintptr) { +} + +func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) { +} + +// Don't split the stack as this function may be invoked without a valid G, +// which prevents us from allocating more stack. 
+// +//go:nosplit +func sysFreeOS(v unsafe.Pointer, n uintptr) { + munmap(v, n) +} + +func sysFaultOS(v unsafe.Pointer, n uintptr) { + mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0) +} + +func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer { + p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0) + if err != 0 { + return nil + } + return p +} + +const _ENOMEM = 12 + +func sysMapOS(v unsafe.Pointer, n uintptr) { + p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0) + if err == _ENOMEM { + throw("runtime: out of memory") + } + if p != v || err != 0 { + print("runtime: mmap(", v, ", ", n, ") returned ", p, ", ", err, "\n") + throw("runtime: cannot map pages in arena address space") + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mem_js.go b/platform/dbops/binaries/go/go/src/runtime/mem_js.go new file mode 100644 index 0000000000000000000000000000000000000000..080b1abc6701cc85d695e3ce897f8010d75ffd14 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mem_js.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build js + +package runtime + +// resetMemoryDataView signals the JS front-end that WebAssembly's memory.grow instruction has been used. +// This allows the front-end to replace the old DataView object with a new one. +// +//go:wasmimport gojs runtime.resetMemoryDataView +func resetMemoryDataView() diff --git a/platform/dbops/binaries/go/go/src/runtime/mem_linux.go b/platform/dbops/binaries/go/go/src/runtime/mem_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..d63c38c209d881ffcd0b549e0146523dba009c68 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mem_linux.go @@ -0,0 +1,181 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "runtime/internal/atomic" + "unsafe" +) + +const ( + _EACCES = 13 + _EINVAL = 22 +) + +// Don't split the stack as this method may be invoked without a valid G, which +// prevents us from allocating more stack. +// +//go:nosplit +func sysAllocOS(n uintptr) unsafe.Pointer { + p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0) + if err != 0 { + if err == _EACCES { + print("runtime: mmap: access denied\n") + exit(2) + } + if err == _EAGAIN { + print("runtime: mmap: too much locked memory (check 'ulimit -l').\n") + exit(2) + } + return nil + } + return p +} + +var adviseUnused = uint32(_MADV_FREE) + +const madviseUnsupported = 0 + +func sysUnusedOS(v unsafe.Pointer, n uintptr) { + if uintptr(v)&(physPageSize-1) != 0 || n&(physPageSize-1) != 0 { + // madvise will round this to any physical page + // *covered* by this range, so an unaligned madvise + // will release more memory than intended. + throw("unaligned sysUnused") + } + + advise := atomic.Load(&adviseUnused) + if debug.madvdontneed != 0 && advise != madviseUnsupported { + advise = _MADV_DONTNEED + } + switch advise { + case _MADV_FREE: + if madvise(v, n, _MADV_FREE) == 0 { + break + } + atomic.Store(&adviseUnused, _MADV_DONTNEED) + fallthrough + case _MADV_DONTNEED: + // MADV_FREE was added in Linux 4.5. Fall back on MADV_DONTNEED if it's + // not supported. 
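+		// This ladder degrades from MADV_FREE to MADV_DONTNEED to a
+		// plain fixed remap, recording each downgrade in adviseUnused
+		// so that later calls start at the strongest advice known to
+		// work.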
+ if madvise(v, n, _MADV_DONTNEED) == 0 { + break + } + atomic.Store(&adviseUnused, madviseUnsupported) + fallthrough + case madviseUnsupported: + // Since Linux 3.18, support for madvise is optional. + // Fall back on mmap if it's not supported. + // _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE will unmap all the + // pages in the old mapping, and remap the memory region. + mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0) + } + + if debug.harddecommit > 0 { + p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0) + if p != v || err != 0 { + throw("runtime: cannot disable permissions in address space") + } + } +} + +func sysUsedOS(v unsafe.Pointer, n uintptr) { + if debug.harddecommit > 0 { + p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0) + if err == _ENOMEM { + throw("runtime: out of memory") + } + if p != v || err != 0 { + throw("runtime: cannot remap pages in address space") + } + return + } +} + +func sysHugePageOS(v unsafe.Pointer, n uintptr) { + if physHugePageSize != 0 { + // Round v up to a huge page boundary. + beg := alignUp(uintptr(v), physHugePageSize) + // Round v+n down to a huge page boundary. + end := alignDown(uintptr(v)+n, physHugePageSize) + + if beg < end { + madvise(unsafe.Pointer(beg), end-beg, _MADV_HUGEPAGE) + } + } +} + +func sysNoHugePageOS(v unsafe.Pointer, n uintptr) { + if uintptr(v)&(physPageSize-1) != 0 { + // The Linux implementation requires that the address + // addr be page-aligned, and allows length to be zero. + throw("unaligned sysNoHugePageOS") + } + madvise(v, n, _MADV_NOHUGEPAGE) +} + +func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) { + if uintptr(v)&(physPageSize-1) != 0 { + // The Linux implementation requires that the address + // addr be page-aligned, and allows length to be zero. + throw("unaligned sysHugePageCollapseOS") + } + if physHugePageSize == 0 { + return + } + // N.B. If you find yourself debugging this code, note that + // this call can fail with EAGAIN because it's best-effort. + // Also, when it returns an error, it's only for the last + // huge page in the region requested. + // + // It can also sometimes return EINVAL if the corresponding + // region hasn't been backed by physical memory. This is + // difficult to guarantee in general, and it also means + // there's no way to distinguish whether this syscall is + // actually available. Oops. + // + // Anyway, that's why this call just doesn't bother checking + // any errors. + madvise(v, n, _MADV_COLLAPSE) +} + +// Don't split the stack as this function may be invoked without a valid G, +// which prevents us from allocating more stack. +// +//go:nosplit +func sysFreeOS(v unsafe.Pointer, n uintptr) { + munmap(v, n) +} + +func sysFaultOS(v unsafe.Pointer, n uintptr) { + mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0) +} + +func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer { + p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0) + if err != 0 { + return nil + } + return p +} + +func sysMapOS(v unsafe.Pointer, n uintptr) { + p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0) + if err == _ENOMEM { + throw("runtime: out of memory") + } + if p != v || err != 0 { + print("runtime: mmap(", v, ", ", n, ") returned ", p, ", ", err, "\n") + throw("runtime: cannot map pages in arena address space") + } + + // Disable huge pages if the GODEBUG for it is set. 
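+	// (The GODEBUG in question is disablethp; it is surfaced here as
+	// debug.disablethp.)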
+ // + // Note that there are a few sysHugePage calls that can override this, but + // they're all for GC metadata. + if debug.disablethp != 0 { + sysNoHugePageOS(v, n) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mem_plan9.go b/platform/dbops/binaries/go/go/src/runtime/mem_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..9b18a2919d84c805678f32200126549c18ec5661 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mem_plan9.go @@ -0,0 +1,21 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func sbrk(n uintptr) unsafe.Pointer { + // Plan 9 sbrk from /sys/src/libc/9sys/sbrk.c + bl := bloc + n = memRound(n) + if bl+n > blocMax { + if brk_(unsafe.Pointer(bl+n)) < 0 { + return nil + } + blocMax = bl + n + } + bloc += n + return unsafe.Pointer(bl) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mem_sbrk.go b/platform/dbops/binaries/go/go/src/runtime/mem_sbrk.go new file mode 100644 index 0000000000000000000000000000000000000000..dc0a764a2cac86f0c9ce9d6b46880e0ee1be3efe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mem_sbrk.go @@ -0,0 +1,189 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build plan9 || wasm + +package runtime + +import "unsafe" + +const memDebug = false + +var bloc uintptr +var blocMax uintptr +var memlock mutex + +type memHdr struct { + next memHdrPtr + size uintptr +} + +var memFreelist memHdrPtr // sorted in ascending order + +type memHdrPtr uintptr + +func (p memHdrPtr) ptr() *memHdr { return (*memHdr)(unsafe.Pointer(p)) } +func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) } + +func memAlloc(n uintptr) unsafe.Pointer { + n = memRound(n) + var prevp *memHdr + for p := memFreelist.ptr(); p != nil; p = p.next.ptr() { + if p.size >= n { + if p.size == n { + if prevp != nil { + prevp.next = p.next + } else { + memFreelist = p.next + } + } else { + p.size -= n + p = (*memHdr)(add(unsafe.Pointer(p), p.size)) + } + *p = memHdr{} + return unsafe.Pointer(p) + } + prevp = p + } + return sbrk(n) +} + +func memFree(ap unsafe.Pointer, n uintptr) { + n = memRound(n) + memclrNoHeapPointers(ap, n) + bp := (*memHdr)(ap) + bp.size = n + bpn := uintptr(ap) + if memFreelist == 0 { + bp.next = 0 + memFreelist.set(bp) + return + } + p := memFreelist.ptr() + if bpn < uintptr(unsafe.Pointer(p)) { + memFreelist.set(bp) + if bpn+bp.size == uintptr(unsafe.Pointer(p)) { + bp.size += p.size + bp.next = p.next + *p = memHdr{} + } else { + bp.next.set(p) + } + return + } + for ; p.next != 0; p = p.next.ptr() { + if bpn > uintptr(unsafe.Pointer(p)) && bpn < uintptr(unsafe.Pointer(p.next)) { + break + } + } + if bpn+bp.size == uintptr(unsafe.Pointer(p.next)) { + bp.size += p.next.ptr().size + bp.next = p.next.ptr().next + *p.next.ptr() = memHdr{} + } else { + bp.next = p.next + } + if uintptr(unsafe.Pointer(p))+p.size == bpn { + p.size += bp.size + p.next = bp.next + *bp = memHdr{} + } else { + p.next.set(bp) + } +} + +func memCheck() { + if !memDebug { + return + } + for p := memFreelist.ptr(); p != nil && p.next != 0; p = p.next.ptr() { + if uintptr(unsafe.Pointer(p)) == uintptr(unsafe.Pointer(p.next)) { + print("runtime: ", unsafe.Pointer(p), " == ", unsafe.Pointer(p.next), "\n") + throw("mem: infinite loop") + } + if 
uintptr(unsafe.Pointer(p)) > uintptr(unsafe.Pointer(p.next)) { + print("runtime: ", unsafe.Pointer(p), " > ", unsafe.Pointer(p.next), "\n") + throw("mem: unordered list") + } + if uintptr(unsafe.Pointer(p))+p.size > uintptr(unsafe.Pointer(p.next)) { + print("runtime: ", unsafe.Pointer(p), "+", p.size, " > ", unsafe.Pointer(p.next), "\n") + throw("mem: overlapping blocks") + } + for b := add(unsafe.Pointer(p), unsafe.Sizeof(memHdr{})); uintptr(b) < uintptr(unsafe.Pointer(p))+p.size; b = add(b, 1) { + if *(*byte)(b) != 0 { + print("runtime: value at addr ", b, " with offset ", uintptr(b)-uintptr(unsafe.Pointer(p)), " in block ", p, " of size ", p.size, " is not zero\n") + throw("mem: uninitialised memory") + } + } + } +} + +func memRound(p uintptr) uintptr { + return alignUp(p, physPageSize) +} + +func initBloc() { + bloc = memRound(firstmoduledata.end) + blocMax = bloc +} + +func sysAllocOS(n uintptr) unsafe.Pointer { + lock(&memlock) + p := memAlloc(n) + memCheck() + unlock(&memlock) + return p +} + +func sysFreeOS(v unsafe.Pointer, n uintptr) { + lock(&memlock) + if uintptr(v)+n == bloc { + // Address range being freed is at the end of memory, + // so record a new lower value for end of memory. + // Can't actually shrink address space because segment is shared. + memclrNoHeapPointers(v, n) + bloc -= n + } else { + memFree(v, n) + memCheck() + } + unlock(&memlock) +} + +func sysUnusedOS(v unsafe.Pointer, n uintptr) { +} + +func sysUsedOS(v unsafe.Pointer, n uintptr) { +} + +func sysHugePageOS(v unsafe.Pointer, n uintptr) { +} + +func sysNoHugePageOS(v unsafe.Pointer, n uintptr) { +} + +func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) { +} + +func sysMapOS(v unsafe.Pointer, n uintptr) { +} + +func sysFaultOS(v unsafe.Pointer, n uintptr) { +} + +func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer { + lock(&memlock) + var p unsafe.Pointer + if uintptr(v) == bloc { + // Address hint is the current end of memory, + // so try to extend the address space. + p = sbrk(n) + } + if p == nil && v == nil { + p = memAlloc(n) + memCheck() + } + unlock(&memlock) + return p +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mem_wasip1.go b/platform/dbops/binaries/go/go/src/runtime/mem_wasip1.go new file mode 100644 index 0000000000000000000000000000000000000000..41ffa0ddc281303011a74fd907f001971f4cdac6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mem_wasip1.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasip1 + +package runtime + +func resetMemoryDataView() { + // This function is a no-op on WASI, it is only used to notify the browser + // that its view of the WASM memory needs to be updated when compiling for + // GOOS=js. +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mem_wasm.go b/platform/dbops/binaries/go/go/src/runtime/mem_wasm.go new file mode 100644 index 0000000000000000000000000000000000000000..d9d32705bb6a068cef4d93ea5658f4c1f9955c79 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mem_wasm.go @@ -0,0 +1,20 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime + +import "unsafe" + +func sbrk(n uintptr) unsafe.Pointer { + grow := divRoundUp(n, physPageSize) + size := growMemory(int32(grow)) + if size < 0 { + return nil + } + resetMemoryDataView() + return unsafe.Pointer(uintptr(size) * physPageSize) +} + +// Implemented in src/runtime/sys_wasm.s +func growMemory(pages int32) int32 diff --git a/platform/dbops/binaries/go/go/src/runtime/mem_windows.go b/platform/dbops/binaries/go/go/src/runtime/mem_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..477d8988702c67e2b6cb9a705bfd1701b0473e37 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mem_windows.go @@ -0,0 +1,134 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "unsafe" +) + +const ( + _MEM_COMMIT = 0x1000 + _MEM_RESERVE = 0x2000 + _MEM_DECOMMIT = 0x4000 + _MEM_RELEASE = 0x8000 + + _PAGE_READWRITE = 0x0004 + _PAGE_NOACCESS = 0x0001 + + _ERROR_NOT_ENOUGH_MEMORY = 8 + _ERROR_COMMITMENT_LIMIT = 1455 +) + +// Don't split the stack as this function may be invoked without a valid G, +// which prevents us from allocating more stack. +// +//go:nosplit +func sysAllocOS(n uintptr) unsafe.Pointer { + return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE)) +} + +func sysUnusedOS(v unsafe.Pointer, n uintptr) { + r := stdcall3(_VirtualFree, uintptr(v), n, _MEM_DECOMMIT) + if r != 0 { + return + } + + // Decommit failed. Usual reason is that we've merged memory from two different + // VirtualAlloc calls, and Windows will only let each VirtualFree handle pages from + // a single VirtualAlloc. It is okay to specify a subset of the pages from a single alloc, + // just not pages from multiple allocs. This is a rare case, arising only when we're + // trying to give memory back to the operating system, which happens on a time + // scale of minutes. It doesn't have to be terribly fast. Instead of extra bookkeeping + // on all our VirtualAlloc calls, try freeing successively smaller pieces until + // we manage to free something, and then repeat. This ends up being O(n log n) + // in the worst case, but that's fast enough. + for n > 0 { + small := n + for small >= 4096 && stdcall3(_VirtualFree, uintptr(v), small, _MEM_DECOMMIT) == 0 { + small /= 2 + small &^= 4096 - 1 + } + if small < 4096 { + print("runtime: VirtualFree of ", small, " bytes failed with errno=", getlasterror(), "\n") + throw("runtime: failed to decommit pages") + } + v = add(v, small) + n -= small + } +} + +func sysUsedOS(v unsafe.Pointer, n uintptr) { + p := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE) + if p == uintptr(v) { + return + } + + // Commit failed. See SysUnused. + // Hold on to n here so we can give back a better error message + // for certain cases. 
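+	//
+	// The loop below retries progressively smaller prefixes of the
+	// remaining range, halving small and re-aligning it to the
+	// 4096-byte page size after each failure. Once a piece commits,
+	// it advances past it and repeats; the errno is only reported if
+	// even a single page cannot be committed.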
+ k := n + for k > 0 { + small := k + for small >= 4096 && stdcall4(_VirtualAlloc, uintptr(v), small, _MEM_COMMIT, _PAGE_READWRITE) == 0 { + small /= 2 + small &^= 4096 - 1 + } + if small < 4096 { + errno := getlasterror() + switch errno { + case _ERROR_NOT_ENOUGH_MEMORY, _ERROR_COMMITMENT_LIMIT: + print("runtime: VirtualAlloc of ", n, " bytes failed with errno=", errno, "\n") + throw("out of memory") + default: + print("runtime: VirtualAlloc of ", small, " bytes failed with errno=", errno, "\n") + throw("runtime: failed to commit pages") + } + } + v = add(v, small) + k -= small + } +} + +func sysHugePageOS(v unsafe.Pointer, n uintptr) { +} + +func sysNoHugePageOS(v unsafe.Pointer, n uintptr) { +} + +func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) { +} + +// Don't split the stack as this function may be invoked without a valid G, +// which prevents us from allocating more stack. +// +//go:nosplit +func sysFreeOS(v unsafe.Pointer, n uintptr) { + r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE) + if r == 0 { + print("runtime: VirtualFree of ", n, " bytes failed with errno=", getlasterror(), "\n") + throw("runtime: failed to release pages") + } +} + +func sysFaultOS(v unsafe.Pointer, n uintptr) { + // SysUnused makes the memory inaccessible and prevents its reuse + sysUnusedOS(v, n) +} + +func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer { + // v is just a hint. + // First try at v. + // This will fail if any of [v, v+n) is already reserved. + v = unsafe.Pointer(stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_RESERVE, _PAGE_READWRITE)) + if v != nil { + return v + } + + // Next let the kernel choose the address. + return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE)) +} + +func sysMapOS(v unsafe.Pointer, n uintptr) { +} diff --git a/platform/dbops/binaries/go/go/src/runtime/memclr_386.s b/platform/dbops/binaries/go/go/src/runtime/memclr_386.s new file mode 100644 index 0000000000000000000000000000000000000000..a72e5f228dbb6bdf0ee56420439bf588c90e7512 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memclr_386.s @@ -0,0 +1,137 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !plan9 + +#include "go_asm.h" +#include "textflag.h" + +// See memclrNoHeapPointers Go doc for important implementation constraints. + +// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) +TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT, $0-8 + MOVL ptr+0(FP), DI + MOVL n+4(FP), BX + XORL AX, AX + + // MOVOU seems always faster than REP STOSL. +tail: + // BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing. 
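+	// The ladder below dispatches on n; the small cases clear with a
+	// pair of possibly-overlapping stores, e.g. _5through8 writes 4
+	// bytes at the start and 4 bytes ending at ptr+n, which overlap
+	// whenever n < 8.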
+ TESTL BX, BX + JEQ _0 + CMPL BX, $2 + JBE _1or2 + CMPL BX, $4 + JB _3 + JE _4 + CMPL BX, $8 + JBE _5through8 + CMPL BX, $16 + JBE _9through16 +#ifdef GO386_softfloat + JMP nosse2 +#endif + PXOR X0, X0 + CMPL BX, $32 + JBE _17through32 + CMPL BX, $64 + JBE _33through64 + CMPL BX, $128 + JBE _65through128 + CMPL BX, $256 + JBE _129through256 + +loop: + MOVOU X0, 0(DI) + MOVOU X0, 16(DI) + MOVOU X0, 32(DI) + MOVOU X0, 48(DI) + MOVOU X0, 64(DI) + MOVOU X0, 80(DI) + MOVOU X0, 96(DI) + MOVOU X0, 112(DI) + MOVOU X0, 128(DI) + MOVOU X0, 144(DI) + MOVOU X0, 160(DI) + MOVOU X0, 176(DI) + MOVOU X0, 192(DI) + MOVOU X0, 208(DI) + MOVOU X0, 224(DI) + MOVOU X0, 240(DI) + SUBL $256, BX + ADDL $256, DI + CMPL BX, $256 + JAE loop + JMP tail + +_1or2: + MOVB AX, (DI) + MOVB AX, -1(DI)(BX*1) + RET +_0: + RET +_3: + MOVW AX, (DI) + MOVB AX, 2(DI) + RET +_4: + // We need a separate case for 4 to make sure we clear pointers atomically. + MOVL AX, (DI) + RET +_5through8: + MOVL AX, (DI) + MOVL AX, -4(DI)(BX*1) + RET +_9through16: + MOVL AX, (DI) + MOVL AX, 4(DI) + MOVL AX, -8(DI)(BX*1) + MOVL AX, -4(DI)(BX*1) + RET +_17through32: + MOVOU X0, (DI) + MOVOU X0, -16(DI)(BX*1) + RET +_33through64: + MOVOU X0, (DI) + MOVOU X0, 16(DI) + MOVOU X0, -32(DI)(BX*1) + MOVOU X0, -16(DI)(BX*1) + RET +_65through128: + MOVOU X0, (DI) + MOVOU X0, 16(DI) + MOVOU X0, 32(DI) + MOVOU X0, 48(DI) + MOVOU X0, -64(DI)(BX*1) + MOVOU X0, -48(DI)(BX*1) + MOVOU X0, -32(DI)(BX*1) + MOVOU X0, -16(DI)(BX*1) + RET +_129through256: + MOVOU X0, (DI) + MOVOU X0, 16(DI) + MOVOU X0, 32(DI) + MOVOU X0, 48(DI) + MOVOU X0, 64(DI) + MOVOU X0, 80(DI) + MOVOU X0, 96(DI) + MOVOU X0, 112(DI) + MOVOU X0, -128(DI)(BX*1) + MOVOU X0, -112(DI)(BX*1) + MOVOU X0, -96(DI)(BX*1) + MOVOU X0, -80(DI)(BX*1) + MOVOU X0, -64(DI)(BX*1) + MOVOU X0, -48(DI)(BX*1) + MOVOU X0, -32(DI)(BX*1) + MOVOU X0, -16(DI)(BX*1) + RET +nosse2: + MOVL BX, CX + SHRL $2, CX + REP + STOSL + ANDL $3, BX + JNE tail + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memclr_amd64.s b/platform/dbops/binaries/go/go/src/runtime/memclr_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..19bfa6f20d49b5b0eaf27f07f8f402922e83535f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memclr_amd64.s @@ -0,0 +1,218 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !plan9 + +#include "go_asm.h" +#include "textflag.h" +#include "asm_amd64.h" + +// See memclrNoHeapPointers Go doc for important implementation constraints. + +// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) +// ABIInternal for performance. +TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT, $0-16 + // AX = ptr + // BX = n + MOVQ AX, DI // DI = ptr + XORQ AX, AX + + // MOVOU seems always faster than REP STOSQ when Enhanced REP STOSQ is not available. +tail: + // BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing. + TESTQ BX, BX + JEQ _0 + CMPQ BX, $2 + JBE _1or2 + CMPQ BX, $4 + JBE _3or4 + CMPQ BX, $8 + JB _5through7 + JE _8 + CMPQ BX, $16 + JBE _9through16 + CMPQ BX, $32 + JBE _17through32 + CMPQ BX, $64 + JBE _33through64 + CMPQ BX, $128 + JBE _65through128 + CMPQ BX, $256 + JBE _129through256 + + CMPB internal∕cpu·X86+const_offsetX86HasERMS(SB), $1 // enhanced REP MOVSB/STOSB + JNE skip_erms + + // If the size is less than 2kb, do not use ERMS as it has a big start-up cost. + // Table 3-4. 
Relative Performance of Memcpy() Using ERMSB Vs. 128-bit AVX + // in the Intel Optimization Guide shows better performance for ERMSB starting + // from 2KB. Benchmarks show the similar threshold for REP STOS vs AVX. + CMPQ BX, $2048 + JAE loop_preheader_erms + +skip_erms: +#ifndef hasAVX2 + CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1 + JE loop_preheader_avx2 + // TODO: for really big clears, use MOVNTDQ, even without AVX2. + +loop: + MOVOU X15, 0(DI) + MOVOU X15, 16(DI) + MOVOU X15, 32(DI) + MOVOU X15, 48(DI) + MOVOU X15, 64(DI) + MOVOU X15, 80(DI) + MOVOU X15, 96(DI) + MOVOU X15, 112(DI) + MOVOU X15, 128(DI) + MOVOU X15, 144(DI) + MOVOU X15, 160(DI) + MOVOU X15, 176(DI) + MOVOU X15, 192(DI) + MOVOU X15, 208(DI) + MOVOU X15, 224(DI) + MOVOU X15, 240(DI) + SUBQ $256, BX + ADDQ $256, DI + CMPQ BX, $256 + JAE loop + JMP tail +#endif + +loop_preheader_avx2: + VPXOR X0, X0, X0 + // For smaller sizes MOVNTDQ may be faster or slower depending on hardware. + // For larger sizes it is always faster, even on dual Xeons with 30M cache. + // TODO take into account actual LLC size. E. g. glibc uses LLC size/2. + CMPQ BX, $0x2000000 + JAE loop_preheader_avx2_huge + +loop_avx2: + VMOVDQU Y0, 0(DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y0, 64(DI) + VMOVDQU Y0, 96(DI) + SUBQ $128, BX + ADDQ $128, DI + CMPQ BX, $128 + JAE loop_avx2 + VMOVDQU Y0, -32(DI)(BX*1) + VMOVDQU Y0, -64(DI)(BX*1) + VMOVDQU Y0, -96(DI)(BX*1) + VMOVDQU Y0, -128(DI)(BX*1) + VZEROUPPER + RET + +loop_preheader_erms: +#ifndef hasAVX2 + CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1 + JNE loop_erms +#endif + + VPXOR X0, X0, X0 + // At this point both ERMS and AVX2 is supported. While REP STOS can use a no-RFO + // write protocol, ERMS could show the same or slower performance comparing to + // Non-Temporal Stores when the size is bigger than LLC depending on hardware. + CMPQ BX, $0x2000000 + JAE loop_preheader_avx2_huge + +loop_erms: + // STOSQ is used to guarantee that the whole zeroed pointer-sized word is visible + // for a memory subsystem as the GC requires this. + MOVQ BX, CX + SHRQ $3, CX + ANDQ $7, BX + REP; STOSQ + JMP tail + +loop_preheader_avx2_huge: + // Align to 32 byte boundary + VMOVDQU Y0, 0(DI) + MOVQ DI, SI + ADDQ $32, DI + ANDQ $~31, DI + SUBQ DI, SI + ADDQ SI, BX +loop_avx2_huge: + VMOVNTDQ Y0, 0(DI) + VMOVNTDQ Y0, 32(DI) + VMOVNTDQ Y0, 64(DI) + VMOVNTDQ Y0, 96(DI) + SUBQ $128, BX + ADDQ $128, DI + CMPQ BX, $128 + JAE loop_avx2_huge + // In the description of MOVNTDQ in [1] + // "... fencing operation implemented with the SFENCE or MFENCE instruction + // should be used in conjunction with MOVNTDQ instructions..." + // [1] 64-ia-32-architectures-software-developer-manual-325462.pdf + SFENCE + VMOVDQU Y0, -32(DI)(BX*1) + VMOVDQU Y0, -64(DI)(BX*1) + VMOVDQU Y0, -96(DI)(BX*1) + VMOVDQU Y0, -128(DI)(BX*1) + VZEROUPPER + RET + +_1or2: + MOVB AX, (DI) + MOVB AX, -1(DI)(BX*1) + RET +_0: + RET +_3or4: + MOVW AX, (DI) + MOVW AX, -2(DI)(BX*1) + RET +_5through7: + MOVL AX, (DI) + MOVL AX, -4(DI)(BX*1) + RET +_8: + // We need a separate case for 8 to make sure we clear pointers atomically. 
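+	// An aligned 8-byte store is a single atomic write on amd64, so
+	// a concurrent GC never observes a half-cleared pointer word; the
+	// overlapping 4-byte stores used in _5through7 would not give
+	// that guarantee.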
+ MOVQ AX, (DI) + RET +_9through16: + MOVQ AX, (DI) + MOVQ AX, -8(DI)(BX*1) + RET +_17through32: + MOVOU X15, (DI) + MOVOU X15, -16(DI)(BX*1) + RET +_33through64: + MOVOU X15, (DI) + MOVOU X15, 16(DI) + MOVOU X15, -32(DI)(BX*1) + MOVOU X15, -16(DI)(BX*1) + RET +_65through128: + MOVOU X15, (DI) + MOVOU X15, 16(DI) + MOVOU X15, 32(DI) + MOVOU X15, 48(DI) + MOVOU X15, -64(DI)(BX*1) + MOVOU X15, -48(DI)(BX*1) + MOVOU X15, -32(DI)(BX*1) + MOVOU X15, -16(DI)(BX*1) + RET +_129through256: + MOVOU X15, (DI) + MOVOU X15, 16(DI) + MOVOU X15, 32(DI) + MOVOU X15, 48(DI) + MOVOU X15, 64(DI) + MOVOU X15, 80(DI) + MOVOU X15, 96(DI) + MOVOU X15, 112(DI) + MOVOU X15, -128(DI)(BX*1) + MOVOU X15, -112(DI)(BX*1) + MOVOU X15, -96(DI)(BX*1) + MOVOU X15, -80(DI)(BX*1) + MOVOU X15, -64(DI)(BX*1) + MOVOU X15, -48(DI)(BX*1) + MOVOU X15, -32(DI)(BX*1) + MOVOU X15, -16(DI)(BX*1) + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memclr_arm.s b/platform/dbops/binaries/go/go/src/runtime/memclr_arm.s new file mode 100644 index 0000000000000000000000000000000000000000..f02d058ead8b157625464ca2726eeb08c89f8b77 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memclr_arm.s @@ -0,0 +1,91 @@ +// Inferno's libkern/memset-arm.s +// https://bitbucket.org/inferno-os/inferno-os/src/master/libkern/memset-arm.s +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. +// Portions Copyright 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#include "textflag.h" + +#define TO R8 +#define TOE R11 +#define N R12 +#define TMP R12 /* N and TMP don't overlap */ + +// See memclrNoHeapPointers Go doc for important implementation constraints. + +// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) +// Also called from assembly in sys_windows_arm.s without g (but using Go stack convention). 
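Aside: the _4 and _8 cases above exist so that pointer-sized words are cleared with a single store; the GC must never observe a half-zeroed pointer slot. A rough Go-level model of the overall zeroing shape, illustrative only (memclrModel is a made-up name; the real routine is runtime-internal, not a public API):

package main

import (
	"encoding/binary"
	"fmt"
)

// memclrModel sketches the pattern used by the assembly: 8-byte
// stores for the aligned middle, byte stores for the tail. The real
// routines additionally guarantee single-store clearing of
// pointer-sized words, which this model does not enforce.
func memclrModel(b []byte) {
	i := 0
	for ; i+8 <= len(b); i += 8 {
		binary.LittleEndian.PutUint64(b[i:], 0) // one 8-byte store
	}
	for ; i < len(b); i++ { // up to 7 trailing bytes
		b[i] = 0
	}
}

func main() {
	buf := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	memclrModel(buf)
	fmt.Println(buf) // [0 0 0 0 0 0 0 0 0 0]
}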
+TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-8 + MOVW ptr+0(FP), TO + MOVW n+4(FP), N + MOVW $0, R0 + + ADD N, TO, TOE /* to end pointer */ + + CMP $4, N /* need at least 4 bytes to copy */ + BLT _1tail + +_4align: /* align on 4 */ + AND.S $3, TO, TMP + BEQ _4aligned + + MOVBU.P R0, 1(TO) /* implicit write back */ + B _4align + +_4aligned: + SUB $31, TOE, TMP /* do 32-byte chunks if possible */ + CMP TMP, TO + BHS _4tail + + MOVW R0, R1 /* replicate */ + MOVW R0, R2 + MOVW R0, R3 + MOVW R0, R4 + MOVW R0, R5 + MOVW R0, R6 + MOVW R0, R7 + +_f32loop: + CMP TMP, TO + BHS _4tail + + MOVM.IA.W [R0-R7], (TO) + B _f32loop + +_4tail: + SUB $3, TOE, TMP /* do remaining words if possible */ +_4loop: + CMP TMP, TO + BHS _1tail + + MOVW.P R0, 4(TO) /* implicit write back */ + B _4loop + +_1tail: + CMP TO, TOE + BEQ _return + + MOVBU.P R0, 1(TO) /* implicit write back */ + B _1tail + +_return: + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memclr_arm64.s b/platform/dbops/binaries/go/go/src/runtime/memclr_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..1c35dfe0cf258bb3737fed3767d2460d7d686aaf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memclr_arm64.s @@ -0,0 +1,182 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// See memclrNoHeapPointers Go doc for important implementation constraints. + +// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) +// Also called from assembly in sys_windows_arm64.s without g (but using Go stack convention). +TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16 + CMP $16, R1 + // If n is equal to 16 bytes, use zero_exact_16 to zero + BEQ zero_exact_16 + + // If n is greater than 16 bytes, use zero_by_16 to zero + BHI zero_by_16 + + // n is less than 16 bytes + ADD R1, R0, R7 + TBZ $3, R1, less_than_8 + MOVD ZR, (R0) + MOVD ZR, -8(R7) + RET + +less_than_8: + TBZ $2, R1, less_than_4 + MOVW ZR, (R0) + MOVW ZR, -4(R7) + RET + +less_than_4: + CBZ R1, ending + MOVB ZR, (R0) + TBZ $1, R1, ending + MOVH ZR, -2(R7) + +ending: + RET + +zero_exact_16: + // n is exactly 16 bytes + STP (ZR, ZR), (R0) + RET + +zero_by_16: + // n greater than 16 bytes, check if the start address is aligned + NEG R0, R4 + ANDS $15, R4, R4 + // Try zeroing using zva if the start address is aligned with 16 + BEQ try_zva + + // Non-aligned store + STP (ZR, ZR), (R0) + // Make the destination aligned + SUB R4, R1, R1 + ADD R4, R0, R0 + B try_zva + +tail_maybe_long: + CMP $64, R1 + BHS no_zva + +tail63: + ANDS $48, R1, R3 + BEQ last16 + CMPW $32, R3 + BEQ last48 + BLT last32 + STP.P (ZR, ZR), 16(R0) +last48: + STP.P (ZR, ZR), 16(R0) +last32: + STP.P (ZR, ZR), 16(R0) + // The last store length is at most 16, so it is safe to use + // stp to write last 16 bytes +last16: + ANDS $15, R1, R1 + CBZ R1, last_end + ADD R1, R0, R0 + STP (ZR, ZR), -16(R0) +last_end: + RET + +no_zva: + SUB $16, R0, R0 + SUB $64, R1, R1 + +loop_64: + STP (ZR, ZR), 16(R0) + STP (ZR, ZR), 32(R0) + STP (ZR, ZR), 48(R0) + STP.W (ZR, ZR), 64(R0) + SUBS $64, R1, R1 + BGE loop_64 + ANDS $63, R1, ZR + ADD $16, R0, R0 + BNE tail63 + RET + +try_zva: + // Try using the ZVA feature to zero entire cache lines + // It is not meaningful to use ZVA if the block size is less than 64, + // so make sure that n is greater than or equal to 64 + CMP $63, R1 + BLE tail63 + + CMP $128, R1 + // Ensure n is at least 128 bytes, so that there is enough to 
copy after + // alignment. + BLT no_zva + // Check if ZVA is allowed from user code, and if so get the block size + MOVW block_size<>(SB), R5 + TBNZ $31, R5, no_zva + CBNZ R5, zero_by_line + // DCZID_EL0 bit assignments + // [63:5] Reserved + // [4] DZP, if bit set DC ZVA instruction is prohibited, else permitted + // [3:0] log2 of the block size in words, eg. if it returns 0x4 then block size is 16 words + MRS DCZID_EL0, R3 + TBZ $4, R3, init + // ZVA not available + MOVW $~0, R5 + MOVW R5, block_size<>(SB) + B no_zva + +init: + MOVW $4, R9 + ANDW $15, R3, R5 + LSLW R5, R9, R5 + MOVW R5, block_size<>(SB) + + ANDS $63, R5, R9 + // Block size is less than 64. + BNE no_zva + +zero_by_line: + CMP R5, R1 + // Not enough memory to reach alignment + BLO no_zva + SUB $1, R5, R6 + NEG R0, R4 + ANDS R6, R4, R4 + // Already aligned + BEQ aligned + + // check there is enough to copy after alignment + SUB R4, R1, R3 + + // Check that the remaining length to ZVA after alignment + // is greater than 64. + CMP $64, R3 + CCMP GE, R3, R5, $10 // condition code GE, NZCV=0b1010 + BLT no_zva + + // We now have at least 64 bytes to zero, update n + MOVD R3, R1 + +loop_zva_prolog: + STP (ZR, ZR), (R0) + STP (ZR, ZR), 16(R0) + STP (ZR, ZR), 32(R0) + SUBS $64, R4, R4 + STP (ZR, ZR), 48(R0) + ADD $64, R0, R0 + BGE loop_zva_prolog + + ADD R4, R0, R0 + +aligned: + SUB R5, R1, R1 + +loop_zva: + WORD $0xd50b7420 // DC ZVA, R0 + ADD R5, R0, R0 + SUBS R5, R1, R1 + BHS loop_zva + ANDS R6, R1, R1 + BNE tail_maybe_long + RET + +GLOBL block_size<>(SB), NOPTR, $8 diff --git a/platform/dbops/binaries/go/go/src/runtime/memclr_loong64.s b/platform/dbops/binaries/go/go/src/runtime/memclr_loong64.s new file mode 100644 index 0000000000000000000000000000000000000000..313e4d4f3379547920f38f15bb4f6921122dc4d6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memclr_loong64.s @@ -0,0 +1,44 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "textflag.h" + +// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) +TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16 +#ifndef GOEXPERIMENT_regabiargs + MOVV ptr+0(FP), R4 + MOVV n+8(FP), R5 +#endif + ADDV R4, R5, R6 + + // if less than 8 bytes, do one byte at a time + SGTU $8, R5, R8 + BNE R8, out + + // do one byte at a time until 8-aligned + AND $7, R4, R8 + BEQ R8, words + MOVB R0, (R4) + ADDV $1, R4 + JMP -4(PC) + +words: + // do 8 bytes at a time if there is room + ADDV $-7, R6, R5 + + PCALIGN $16 + SGTU R5, R4, R8 + BEQ R8, out + MOVV R0, (R4) + ADDV $8, R4 + JMP -4(PC) + +out: + BEQ R4, R6, done + MOVB R0, (R4) + ADDV $1, R4 + JMP -3(PC) +done: + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memclr_mips64x.s b/platform/dbops/binaries/go/go/src/runtime/memclr_mips64x.s new file mode 100644 index 0000000000000000000000000000000000000000..cf3a9c4ab4fb3696860c153e3eec3b14509c7fe4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memclr_mips64x.s @@ -0,0 +1,99 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le + +#include "go_asm.h" +#include "textflag.h" + +// See memclrNoHeapPointers Go doc for important implementation constraints. 
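For orientation, the byte-at-a-time preludes in the routines below simply walk the pointer forward while ptr&7 != 0. The number of bytes they end up consuming can be written in Go as -p & 7; a sketch with assumed example addresses, not part of the patched files:

package main

import "fmt"

// headBytes returns how many single-byte stores are needed before
// 8-byte stores may begin; the assembly reaches the same boundary by
// looping while the low three address bits are nonzero.
func headBytes(p uintptr) uintptr {
	return -p & 7 // distance to the next 8-byte boundary
}

func main() {
	for _, p := range []uintptr{0x1000, 0x1001, 0x1007} {
		fmt.Printf("%#x needs %d head bytes\n", p, headBytes(p))
	}
	// 0x1000 needs 0, 0x1001 needs 7, 0x1007 needs 1
}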
+ +// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) +TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16 + MOVV ptr+0(FP), R1 + MOVV n+8(FP), R2 + ADDV R1, R2, R4 + + // if less than 16 bytes or no MSA, do words check + SGTU $16, R2, R3 + BNE R3, no_msa + MOVBU internal∕cpu·MIPS64X+const_offsetMIPS64XHasMSA(SB), R3 + BEQ R3, R0, no_msa + + VMOVB $0, W0 + + SGTU $128, R2, R3 + BEQ R3, msa_large + + AND $15, R2, R5 + XOR R2, R5, R6 + ADDVU R1, R6 + +msa_small: + VMOVB W0, (R1) + ADDVU $16, R1 + SGTU R6, R1, R3 + BNE R3, R0, msa_small + BEQ R5, R0, done + VMOVB W0, -16(R4) + JMP done + +msa_large: + AND $127, R2, R5 + XOR R2, R5, R6 + ADDVU R1, R6 + +msa_large_loop: + VMOVB W0, (R1) + VMOVB W0, 16(R1) + VMOVB W0, 32(R1) + VMOVB W0, 48(R1) + VMOVB W0, 64(R1) + VMOVB W0, 80(R1) + VMOVB W0, 96(R1) + VMOVB W0, 112(R1) + + ADDVU $128, R1 + SGTU R6, R1, R3 + BNE R3, R0, msa_large_loop + BEQ R5, R0, done + VMOVB W0, -128(R4) + VMOVB W0, -112(R4) + VMOVB W0, -96(R4) + VMOVB W0, -80(R4) + VMOVB W0, -64(R4) + VMOVB W0, -48(R4) + VMOVB W0, -32(R4) + VMOVB W0, -16(R4) + JMP done + +no_msa: + // if less than 8 bytes, do one byte at a time + SGTU $8, R2, R3 + BNE R3, out + + // do one byte at a time until 8-aligned + AND $7, R1, R3 + BEQ R3, words + MOVB R0, (R1) + ADDV $1, R1 + JMP -4(PC) + +words: + // do 8 bytes at a time if there is room + ADDV $-7, R4, R2 + + SGTU R2, R1, R3 + BEQ R3, out + MOVV R0, (R1) + ADDV $8, R1 + JMP -4(PC) + +out: + BEQ R1, R4, done + MOVB R0, (R1) + ADDV $1, R1 + JMP -3(PC) +done: + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memclr_mipsx.s b/platform/dbops/binaries/go/go/src/runtime/memclr_mipsx.s new file mode 100644 index 0000000000000000000000000000000000000000..ee3009d46b8b6f521eac35a4ec020d7a1044b8dd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memclr_mipsx.s @@ -0,0 +1,73 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips || mipsle + +#include "textflag.h" + +#ifdef GOARCH_mips +#define MOVWHI MOVWL +#define MOVWLO MOVWR +#else +#define MOVWHI MOVWR +#define MOVWLO MOVWL +#endif + +// See memclrNoHeapPointers Go doc for important implementation constraints. 
+ +// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) +TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-8 + MOVW n+4(FP), R2 + MOVW ptr+0(FP), R1 + + SGTU $4, R2, R3 + ADDU R2, R1, R4 + BNE R3, small_zero + +ptr_align: + AND $3, R1, R3 + BEQ R3, setup + SUBU R1, R0, R3 + AND $3, R3 // R3 contains number of bytes needed to align ptr + MOVWHI R0, 0(R1) // MOVWHI will write zeros up to next word boundary + SUBU R3, R2 + ADDU R3, R1 + +setup: + AND $31, R2, R6 + AND $3, R2, R5 + SUBU R6, R4, R6 // end pointer for 32-byte chunks + SUBU R5, R4, R5 // end pointer for 4-byte chunks + +large: + BEQ R1, R6, words + MOVW R0, 0(R1) + MOVW R0, 4(R1) + MOVW R0, 8(R1) + MOVW R0, 12(R1) + MOVW R0, 16(R1) + MOVW R0, 20(R1) + MOVW R0, 24(R1) + MOVW R0, 28(R1) + ADDU $32, R1 + JMP large + +words: + BEQ R1, R5, tail + MOVW R0, 0(R1) + ADDU $4, R1 + JMP words + +tail: + BEQ R1, R4, ret + MOVWLO R0, -1(R4) + +ret: + RET + +small_zero: + BEQ R1, R4, ret + MOVB R0, 0(R1) + ADDU $1, R1 + JMP small_zero diff --git a/platform/dbops/binaries/go/go/src/runtime/memclr_plan9_386.s b/platform/dbops/binaries/go/go/src/runtime/memclr_plan9_386.s new file mode 100644 index 0000000000000000000000000000000000000000..54701a94536e5493cb9005dcfb82a2823a627b95 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memclr_plan9_386.s @@ -0,0 +1,58 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// See memclrNoHeapPointers Go doc for important implementation constraints. + +// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) +TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT, $0-8 + MOVL ptr+0(FP), DI + MOVL n+4(FP), BX + XORL AX, AX + +tail: + TESTL BX, BX + JEQ _0 + CMPL BX, $2 + JBE _1or2 + CMPL BX, $4 + JB _3 + JE _4 + CMPL BX, $8 + JBE _5through8 + CMPL BX, $16 + JBE _9through16 + MOVL BX, CX + SHRL $2, CX + REP + STOSL + ANDL $3, BX + JNE tail + RET + +_1or2: + MOVB AX, (DI) + MOVB AX, -1(DI)(BX*1) + RET +_0: + RET +_3: + MOVW AX, (DI) + MOVB AX, 2(DI) + RET +_4: + // We need a separate case for 4 to make sure we clear pointers atomically. + MOVL AX, (DI) + RET +_5through8: + MOVL AX, (DI) + MOVL AX, -4(DI)(BX*1) + RET +_9through16: + MOVL AX, (DI) + MOVL AX, 4(DI) + MOVL AX, -8(DI)(BX*1) + MOVL AX, -4(DI)(BX*1) + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memclr_plan9_amd64.s b/platform/dbops/binaries/go/go/src/runtime/memclr_plan9_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..8c6a1cc7809ac6a94a81c753009382e53d8842e6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memclr_plan9_amd64.s @@ -0,0 +1,23 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// See memclrNoHeapPointers Go doc for important implementation constraints. 
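The Plan 9 amd64 variant below is the plainest decomposition: n is split into a REP STOSQ qword count and a REP STOSB byte remainder. The same split in Go, as a sketch with an arbitrary example size:

package main

import "fmt"

func main() {
	n := uintptr(1234)
	q, r := n>>3, n&7 // REP STOSQ count, REP STOSB count
	fmt.Println(q, r) // 154 qword stores, then 2 byte stores
}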
+ +// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) +TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16 + MOVQ ptr+0(FP), DI + MOVQ n+8(FP), CX + MOVQ CX, BX + ANDQ $7, BX + SHRQ $3, CX + MOVQ $0, AX + CLD + REP + STOSQ + MOVQ BX, CX + REP + STOSB + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memclr_ppc64x.s b/platform/dbops/binaries/go/go/src/runtime/memclr_ppc64x.s new file mode 100644 index 0000000000000000000000000000000000000000..bc4b3fc2832b7583586fa84540fe25e5fb53bb1c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memclr_ppc64x.s @@ -0,0 +1,190 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +#include "textflag.h" + +// See memclrNoHeapPointers Go doc for important implementation constraints. + +// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) +TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT|NOFRAME, $0-16 + // R3 = ptr + // R4 = n + + // Determine if there are doublewords to clear +check: + ANDCC $7, R4, R5 // R5: leftover bytes to clear + SRD $3, R4, R6 // R6: double words to clear + CMP R6, $0, CR1 // CR1[EQ] set if no double words + + BC 12, 6, nozerolarge // only single bytes + CMP R4, $512 + BLT under512 // special case for < 512 + ANDCC $127, R3, R8 // check for 128 alignment of address + BEQ zero512setup + + ANDCC $7, R3, R15 + BEQ zero512xsetup // at least 8 byte aligned + + // zero bytes up to 8 byte alignment + + ANDCC $1, R3, R15 // check for byte alignment + BEQ byte2 + MOVB R0, 0(R3) // zero 1 byte + ADD $1, R3 // bump ptr by 1 + ADD $-1, R4 + +byte2: + ANDCC $2, R3, R15 // check for 2 byte alignment + BEQ byte4 + MOVH R0, 0(R3) // zero 2 bytes + ADD $2, R3 // bump ptr by 2 + ADD $-2, R4 + +byte4: + ANDCC $4, R3, R15 // check for 4 byte alignment + BEQ zero512xsetup + MOVW R0, 0(R3) // zero 4 bytes + ADD $4, R3 // bump ptr by 4 + ADD $-4, R4 + BR zero512xsetup // ptr should now be 8 byte aligned + +under512: + SRDCC $3, R6, R7 // 64 byte chunks? + XXLXOR VS32, VS32, VS32 // clear VS32 (V0) + BEQ lt64gt8 + + // Prepare to clear 64 bytes at a time. + +zero64setup: + DCBTST (R3) // prepare data cache + MOVD R7, CTR // number of 64 byte chunks + MOVD $16, R8 + MOVD $32, R16 + MOVD $48, R17 + +zero64: + STXVD2X VS32, (R3+R0) // store 16 bytes + STXVD2X VS32, (R3+R8) + STXVD2X VS32, (R3+R16) + STXVD2X VS32, (R3+R17) + ADD $64, R3 + ADD $-64, R4 + BDNZ zero64 // dec ctr, br zero64 if ctr not 0 + SRDCC $3, R4, R6 // remaining doublewords + BEQ nozerolarge + +lt64gt8: + CMP R4, $32 + BLT lt32gt8 + MOVD $16, R8 + STXVD2X VS32, (R3+R0) + STXVD2X VS32, (R3+R8) + ADD $-32, R4 + ADD $32, R3 +lt32gt8: + CMP R4, $16 + BLT lt16gt8 + STXVD2X VS32, (R3+R0) + ADD $16, R3 + ADD $-16, R4 +lt16gt8: +#ifdef GOPPC64_power10 + SLD $56, R4, R7 + STXVL V0, R3, R7 + RET +#else + CMP R4, $8 + BLT nozerolarge + MOVD R0, 0(R3) + ADD $8, R3 + ADD $-8, R4 +#endif +nozerolarge: + ANDCC $7, R4, R5 // any remaining bytes + BC 4, 1, LR // ble lr +#ifdef GOPPC64_power10 + XXLXOR VS32, VS32, VS32 // clear VS32 (V0) + SLD $56, R5, R7 + STXVL V0, R3, R7 + RET +#else + CMP R5, $4 + BLT next2 + MOVW R0, 0(R3) + ADD $4, R3 + ADD $-4, R5 +next2: + CMP R5, $2 + BLT next1 + MOVH R0, 0(R3) + ADD $2, R3 + ADD $-2, R5 +next1: + CMP R5, $0 + BC 12, 2, LR // beqlr + MOVB R0, 0(R3) + RET +#endif + +zero512xsetup: // 512 chunk with extra needed + ANDCC $8, R3, R11 // 8 byte alignment? 
+ BEQ zero512setup16 + MOVD R0, 0(R3) // clear 8 bytes + ADD $8, R3 // update ptr to next 8 + ADD $-8, R4 // dec count by 8 + +zero512setup16: + ANDCC $127, R3, R14 // < 128 byte alignment + BEQ zero512setup // handle 128 byte alignment + MOVD $128, R15 + SUB R14, R15, R14 // find increment to 128 alignment + SRD $4, R14, R15 // number of 16 byte chunks + MOVD R15, CTR // loop counter of 16 bytes + XXLXOR VS32, VS32, VS32 // clear VS32 (V0) + +zero512preloop: // clear up to 128 alignment + STXVD2X VS32, (R3+R0) // clear 16 bytes + ADD $16, R3 // update ptr + ADD $-16, R4 // dec count + BDNZ zero512preloop + +zero512setup: // setup for dcbz loop + CMP R4, $512 // check if at least 512 + BLT remain + SRD $9, R4, R8 // loop count for 512 chunks + MOVD R8, CTR // set up counter + MOVD $128, R9 // index regs for 128 bytes + MOVD $256, R10 + MOVD $384, R11 + PCALIGN $16 +zero512: + DCBZ (R3+R0) // clear first chunk + DCBZ (R3+R9) // clear second chunk + DCBZ (R3+R10) // clear third chunk + DCBZ (R3+R11) // clear fourth chunk + ADD $512, R3 + BDNZ zero512 + ANDCC $511, R4 + +remain: + CMP R4, $128 // check if 128 byte chunks left + BLT smaller + DCBZ (R3+R0) // clear 128 + ADD $128, R3 + ADD $-128, R4 + BR remain + +smaller: + ANDCC $127, R4, R7 // find leftovers + BEQ done + CMP R7, $64 // more than 64, do 64 at a time + XXLXOR VS32, VS32, VS32 + BLT lt64gt8 // less than 64 + SRD $6, R7, R7 // set up counter for 64 + BR zero64setup + +done: + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memclr_riscv64.s b/platform/dbops/binaries/go/go/src/runtime/memclr_riscv64.s new file mode 100644 index 0000000000000000000000000000000000000000..16c511c603aef8100f0101e5614400763afcd032 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memclr_riscv64.s @@ -0,0 +1,104 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// See memclrNoHeapPointers Go doc for important implementation constraints. + +// void runtime·memclrNoHeapPointers(void*, uintptr) +TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16 + // X10 = ptr + // X11 = n + + // If less than 8 bytes, do single byte zeroing. + MOV $8, X9 + BLT X11, X9, check4 + + // Check alignment + AND $7, X10, X5 + BEQZ X5, aligned + + // Zero one byte at a time until we reach 8 byte alignment. 
+ SUB X5, X9, X5 + SUB X5, X11, X11 +align: + SUB $1, X5 + MOVB ZERO, 0(X10) + ADD $1, X10 + BNEZ X5, align + +aligned: + // X9 already contains $8 + BLT X11, X9, check4 + MOV $16, X9 + BLT X11, X9, zero8 + MOV $32, X9 + BLT X11, X9, zero16 + MOV $64, X9 + BLT X11, X9, zero32 +loop64: + MOV ZERO, 0(X10) + MOV ZERO, 8(X10) + MOV ZERO, 16(X10) + MOV ZERO, 24(X10) + MOV ZERO, 32(X10) + MOV ZERO, 40(X10) + MOV ZERO, 48(X10) + MOV ZERO, 56(X10) + ADD $64, X10 + SUB $64, X11 + BGE X11, X9, loop64 + BEQZ X11, done + +check32: + MOV $32, X9 + BLT X11, X9, check16 +zero32: + MOV ZERO, 0(X10) + MOV ZERO, 8(X10) + MOV ZERO, 16(X10) + MOV ZERO, 24(X10) + ADD $32, X10 + SUB $32, X11 + BEQZ X11, done + +check16: + MOV $16, X9 + BLT X11, X9, check8 +zero16: + MOV ZERO, 0(X10) + MOV ZERO, 8(X10) + ADD $16, X10 + SUB $16, X11 + BEQZ X11, done + +check8: + MOV $8, X9 + BLT X11, X9, check4 +zero8: + MOV ZERO, 0(X10) + ADD $8, X10 + SUB $8, X11 + BEQZ X11, done + +check4: + MOV $4, X9 + BLT X11, X9, loop1 +zero4: + MOVB ZERO, 0(X10) + MOVB ZERO, 1(X10) + MOVB ZERO, 2(X10) + MOVB ZERO, 3(X10) + ADD $4, X10 + SUB $4, X11 + +loop1: + BEQZ X11, done + MOVB ZERO, 0(X10) + ADD $1, X10 + SUB $1, X11 + JMP loop1 + +done: + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memclr_s390x.s b/platform/dbops/binaries/go/go/src/runtime/memclr_s390x.s new file mode 100644 index 0000000000000000000000000000000000000000..fa657ef66e6b9545417223164a4d163329fc6e1a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memclr_s390x.s @@ -0,0 +1,124 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// See memclrNoHeapPointers Go doc for important implementation constraints. 
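The s390x version below clears in 256-byte XC chunks and finishes the 1..255-byte remainder through EXRL, whose executed XC carries the length encoded as remainder minus one (hence the ADD $-1 before the EXRL). A sketch of that bookkeeping; chunks256 is a made-up helper, not part of the patched files:

package main

import "fmt"

// chunks256 mirrors the s390x strategy: full 256-byte XC clears,
// then one EXRL-executed XC whose length field holds remainder-1.
func chunks256(n int) (full, encodedTail int, hasTail bool) {
	full = n / 256
	rem := n % 256
	return full, rem - 1, rem > 0
}

func main() {
	full, enc, tail := chunks256(1000)
	fmt.Println(full, tail, enc) // 3 true 231: 3*256=768 cleared, 232-byte tail encoded as 231
}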
+ +// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) +TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT|NOFRAME,$0-16 + MOVD ptr+0(FP), R4 + MOVD n+8(FP), R5 + +start: + CMPBLE R5, $3, clear0to3 + CMPBLE R5, $7, clear4to7 + CMPBLE R5, $11, clear8to11 + CMPBLE R5, $15, clear12to15 + CMP R5, $32 + BGE clearmt32 + MOVD $0, 0(R4) + MOVD $0, 8(R4) + ADD $16, R4 + SUB $16, R5 + BR start + +clear0to3: + CMPBEQ R5, $0, done + CMPBNE R5, $1, clear2 + MOVB $0, 0(R4) + RET +clear2: + CMPBNE R5, $2, clear3 + MOVH $0, 0(R4) + RET +clear3: + MOVH $0, 0(R4) + MOVB $0, 2(R4) + RET + +clear4to7: + CMPBNE R5, $4, clear5 + MOVW $0, 0(R4) + RET +clear5: + CMPBNE R5, $5, clear6 + MOVW $0, 0(R4) + MOVB $0, 4(R4) + RET +clear6: + CMPBNE R5, $6, clear7 + MOVW $0, 0(R4) + MOVH $0, 4(R4) + RET +clear7: + MOVW $0, 0(R4) + MOVH $0, 4(R4) + MOVB $0, 6(R4) + RET + +clear8to11: + CMPBNE R5, $8, clear9 + MOVD $0, 0(R4) + RET +clear9: + CMPBNE R5, $9, clear10 + MOVD $0, 0(R4) + MOVB $0, 8(R4) + RET +clear10: + CMPBNE R5, $10, clear11 + MOVD $0, 0(R4) + MOVH $0, 8(R4) + RET +clear11: + MOVD $0, 0(R4) + MOVH $0, 8(R4) + MOVB $0, 10(R4) + RET + +clear12to15: + CMPBNE R5, $12, clear13 + MOVD $0, 0(R4) + MOVW $0, 8(R4) + RET +clear13: + CMPBNE R5, $13, clear14 + MOVD $0, 0(R4) + MOVW $0, 8(R4) + MOVB $0, 12(R4) + RET +clear14: + CMPBNE R5, $14, clear15 + MOVD $0, 0(R4) + MOVW $0, 8(R4) + MOVH $0, 12(R4) + RET +clear15: + MOVD $0, 0(R4) + MOVW $0, 8(R4) + MOVH $0, 12(R4) + MOVB $0, 14(R4) + RET + +clearmt32: + CMP R5, $256 + BLT clearlt256 + XC $256, 0(R4), 0(R4) + ADD $256, R4 + ADD $-256, R5 + BR clearmt32 +clearlt256: + CMPBEQ R5, $0, done + ADD $-1, R5 + EXRL $memclr_exrl_xc<>(SB), R5 +done: + RET + +// DO NOT CALL - target for exrl (execute relative long) instruction. +TEXT memclr_exrl_xc<>(SB),NOSPLIT|NOFRAME,$0-0 + XC $1, 0(R4), 0(R4) + MOVD $0, 0(R0) + RET + diff --git a/platform/dbops/binaries/go/go/src/runtime/memclr_wasm.s b/platform/dbops/binaries/go/go/src/runtime/memclr_wasm.s new file mode 100644 index 0000000000000000000000000000000000000000..19d08ffbee2ac9ba24fe283defb8fac66da9f81c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memclr_wasm.s @@ -0,0 +1,20 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// See memclrNoHeapPointers Go doc for important implementation constraints. + +// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) +TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT, $0-16 + MOVD ptr+0(FP), R0 + MOVD n+8(FP), R1 + + Get R0 + I32WrapI64 + I32Const $0 + Get R1 + I32WrapI64 + MemoryFill + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_386.s b/platform/dbops/binaries/go/go/src/runtime/memmove_386.s new file mode 100644 index 0000000000000000000000000000000000000000..6d7e17fcbcec879022b2e0e4e9ff02089c3e8408 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_386.s @@ -0,0 +1,204 @@ +// Inferno's libkern/memmove-386.s +// https://bitbucket.org/inferno-os/inferno-os/src/master/libkern/memmove-386.s +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. +// Portions Copyright 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build !plan9 + +#include "go_asm.h" +#include "textflag.h" + +// See memmove Go doc for important implementation constraints. + +// func memmove(to, from unsafe.Pointer, n uintptr) +TEXT runtime·memmove(SB), NOSPLIT, $0-12 + MOVL to+0(FP), DI + MOVL from+4(FP), SI + MOVL n+8(FP), BX + + // REP instructions have a high startup cost, so we handle small sizes + // with some straightline code. The REP MOVSL instruction is really fast + // for large sizes. The cutover is approximately 1K. We implement up to + // 128 because that is the maximum SSE register load (loading all data + // into registers lets us ignore copy direction). +tail: + // BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing. + TESTL BX, BX + JEQ move_0 + CMPL BX, $2 + JBE move_1or2 + CMPL BX, $4 + JB move_3 + JE move_4 + CMPL BX, $8 + JBE move_5through8 + CMPL BX, $16 + JBE move_9through16 +#ifdef GO386_softfloat + JMP nosse2 +#endif + CMPL BX, $32 + JBE move_17through32 + CMPL BX, $64 + JBE move_33through64 + CMPL BX, $128 + JBE move_65through128 + +nosse2: +/* + * check and set for backwards + */ + CMPL SI, DI + JLS back + +/* + * forward copy loop + */ +forward: + // If REP MOVSB isn't fast, don't use it + CMPB internal∕cpu·X86+const_offsetX86HasERMS(SB), $1 // enhanced REP MOVSB/STOSB + JNE fwdBy4 + + // Check alignment + MOVL SI, AX + ORL DI, AX + TESTL $3, AX + JEQ fwdBy4 + + // Do 1 byte at a time + MOVL BX, CX + REP; MOVSB + RET + +fwdBy4: + // Do 4 bytes at a time + MOVL BX, CX + SHRL $2, CX + ANDL $3, BX + REP; MOVSL + JMP tail + +/* + * check overlap + */ +back: + MOVL SI, CX + ADDL BX, CX + CMPL CX, DI + JLS forward +/* + * whole thing backwards has + * adjusted addresses + */ + + ADDL BX, DI + ADDL BX, SI + STD + +/* + * copy + */ + MOVL BX, CX + SHRL $2, CX + ANDL $3, BX + + SUBL $4, DI + SUBL $4, SI + REP; MOVSL + + CLD + ADDL $4, DI + ADDL $4, SI + SUBL BX, DI + SUBL BX, SI + JMP tail + +move_1or2: + MOVB (SI), AX + MOVB -1(SI)(BX*1), CX + MOVB AX, (DI) + MOVB CX, -1(DI)(BX*1) + RET +move_0: + RET +move_3: + MOVW (SI), AX + MOVB 2(SI), CX + MOVW AX, (DI) + MOVB CX, 2(DI) + RET +move_4: + // We need a separate case for 4 to make sure we write pointers atomically. 
+ MOVL (SI), AX + MOVL AX, (DI) + RET +move_5through8: + MOVL (SI), AX + MOVL -4(SI)(BX*1), CX + MOVL AX, (DI) + MOVL CX, -4(DI)(BX*1) + RET +move_9through16: + MOVL (SI), AX + MOVL 4(SI), CX + MOVL -8(SI)(BX*1), DX + MOVL -4(SI)(BX*1), BP + MOVL AX, (DI) + MOVL CX, 4(DI) + MOVL DX, -8(DI)(BX*1) + MOVL BP, -4(DI)(BX*1) + RET +move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(BX*1), X1 + MOVOU X0, (DI) + MOVOU X1, -16(DI)(BX*1) + RET +move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(BX*1), X2 + MOVOU -16(SI)(BX*1), X3 + MOVOU X0, (DI) + MOVOU X1, 16(DI) + MOVOU X2, -32(DI)(BX*1) + MOVOU X3, -16(DI)(BX*1) + RET +move_65through128: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU 32(SI), X2 + MOVOU 48(SI), X3 + MOVOU -64(SI)(BX*1), X4 + MOVOU -48(SI)(BX*1), X5 + MOVOU -32(SI)(BX*1), X6 + MOVOU -16(SI)(BX*1), X7 + MOVOU X0, (DI) + MOVOU X1, 16(DI) + MOVOU X2, 32(DI) + MOVOU X3, 48(DI) + MOVOU X4, -64(DI)(BX*1) + MOVOU X5, -48(DI)(BX*1) + MOVOU X6, -32(DI)(BX*1) + MOVOU X7, -16(DI)(BX*1) + RET
diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_amd64.s b/platform/dbops/binaries/go/go/src/runtime/memmove_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..018bb0b19d5701c83107a860ece309619b4ae9e6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_amd64.s @@ -0,0 +1,532 @@ +// Derived from Inferno's libkern/memmove-386.s (adapted for amd64) +// https://bitbucket.org/inferno-os/inferno-os/src/master/libkern/memmove-386.s +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. +// Portions Copyright 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build !plan9 + +#include "go_asm.h" +#include "textflag.h" + +// See memmove Go doc for important implementation constraints. + +// func memmove(to, from unsafe.Pointer, n uintptr) +// ABIInternal for performance. +TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT, $0-24 + // AX = to + // BX = from + // CX = n + MOVQ AX, DI + MOVQ BX, SI + MOVQ CX, BX + + // REP instructions have a high startup cost, so we handle small sizes + // with some straightline code. The REP MOVSQ instruction is really fast + // for large sizes. The cutover is approximately 2K.
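The ~2K cutover quoted above is empirical and machine-dependent. It can be re-measured with the testing package, since copy on byte slices lowers to runtime.memmove; a hedged harness sketch, not part of the patched files:

package main

import (
	"fmt"
	"testing"
)

func main() {
	for _, n := range []int{256, 1024, 2048, 4096, 16384} {
		src, dst := make([]byte, n), make([]byte, n)
		r := testing.Benchmark(func(b *testing.B) {
			b.SetBytes(int64(n)) // report throughput per size
			for i := 0; i < b.N; i++ {
				copy(dst, src)
			}
		})
		fmt.Printf("n=%-6d %v\n", n, r)
	}
}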
+tail: + // move_129through256 or smaller work whether or not the source and the + // destination memory regions overlap because they load all data into + // registers before writing it back. move_256through2048 on the other + // hand can be used only when the memory regions don't overlap or the copy + // direction is forward. + // + // BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing. + TESTQ BX, BX + JEQ move_0 + CMPQ BX, $2 + JBE move_1or2 + CMPQ BX, $4 + JB move_3 + JBE move_4 + CMPQ BX, $8 + JB move_5through7 + JE move_8 + CMPQ BX, $16 + JBE move_9through16 + CMPQ BX, $32 + JBE move_17through32 + CMPQ BX, $64 + JBE move_33through64 + CMPQ BX, $128 + JBE move_65through128 + CMPQ BX, $256 + JBE move_129through256 + + TESTB $1, runtime·useAVXmemmove(SB) + JNZ avxUnaligned + +/* + * check and set for backwards + */ + CMPQ SI, DI + JLS back + +/* + * forward copy loop + */ +forward: + CMPQ BX, $2048 + JLS move_256through2048 + + // If REP MOVSB isn't fast, don't use it + CMPB internal∕cpu·X86+const_offsetX86HasERMS(SB), $1 // enhanced REP MOVSB/STOSB + JNE fwdBy8 + + // Check alignment + MOVL SI, AX + ORL DI, AX + TESTL $7, AX + JEQ fwdBy8 + + // Do 1 byte at a time + MOVQ BX, CX + REP; MOVSB + RET + +fwdBy8: + // Do 8 bytes at a time + MOVQ BX, CX + SHRQ $3, CX + ANDQ $7, BX + REP; MOVSQ + JMP tail + +back: +/* + * check overlap + */ + MOVQ SI, CX + ADDQ BX, CX + CMPQ CX, DI + JLS forward +/* + * whole thing backwards has + * adjusted addresses + */ + ADDQ BX, DI + ADDQ BX, SI + STD + +/* + * copy + */ + MOVQ BX, CX + SHRQ $3, CX + ANDQ $7, BX + + SUBQ $8, DI + SUBQ $8, SI + REP; MOVSQ + + CLD + ADDQ $8, DI + ADDQ $8, SI + SUBQ BX, DI + SUBQ BX, SI + JMP tail + +move_1or2: + MOVB (SI), AX + MOVB -1(SI)(BX*1), CX + MOVB AX, (DI) + MOVB CX, -1(DI)(BX*1) + RET +move_0: + RET +move_4: + MOVL (SI), AX + MOVL AX, (DI) + RET +move_3: + MOVW (SI), AX + MOVB 2(SI), CX + MOVW AX, (DI) + MOVB CX, 2(DI) + RET +move_5through7: + MOVL (SI), AX + MOVL -4(SI)(BX*1), CX + MOVL AX, (DI) + MOVL CX, -4(DI)(BX*1) + RET +move_8: + // We need a separate case for 8 to make sure we write pointers atomically. 
+ MOVQ (SI), AX + MOVQ AX, (DI) + RET +move_9through16: + MOVQ (SI), AX + MOVQ -8(SI)(BX*1), CX + MOVQ AX, (DI) + MOVQ CX, -8(DI)(BX*1) + RET +move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(BX*1), X1 + MOVOU X0, (DI) + MOVOU X1, -16(DI)(BX*1) + RET +move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(BX*1), X2 + MOVOU -16(SI)(BX*1), X3 + MOVOU X0, (DI) + MOVOU X1, 16(DI) + MOVOU X2, -32(DI)(BX*1) + MOVOU X3, -16(DI)(BX*1) + RET +move_65through128: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU 32(SI), X2 + MOVOU 48(SI), X3 + MOVOU -64(SI)(BX*1), X4 + MOVOU -48(SI)(BX*1), X5 + MOVOU -32(SI)(BX*1), X6 + MOVOU -16(SI)(BX*1), X7 + MOVOU X0, (DI) + MOVOU X1, 16(DI) + MOVOU X2, 32(DI) + MOVOU X3, 48(DI) + MOVOU X4, -64(DI)(BX*1) + MOVOU X5, -48(DI)(BX*1) + MOVOU X6, -32(DI)(BX*1) + MOVOU X7, -16(DI)(BX*1) + RET +move_129through256: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU 32(SI), X2 + MOVOU 48(SI), X3 + MOVOU 64(SI), X4 + MOVOU 80(SI), X5 + MOVOU 96(SI), X6 + MOVOU 112(SI), X7 + MOVOU -128(SI)(BX*1), X8 + MOVOU -112(SI)(BX*1), X9 + MOVOU -96(SI)(BX*1), X10 + MOVOU -80(SI)(BX*1), X11 + MOVOU -64(SI)(BX*1), X12 + MOVOU -48(SI)(BX*1), X13 + MOVOU -32(SI)(BX*1), X14 + MOVOU -16(SI)(BX*1), X15 + MOVOU X0, (DI) + MOVOU X1, 16(DI) + MOVOU X2, 32(DI) + MOVOU X3, 48(DI) + MOVOU X4, 64(DI) + MOVOU X5, 80(DI) + MOVOU X6, 96(DI) + MOVOU X7, 112(DI) + MOVOU X8, -128(DI)(BX*1) + MOVOU X9, -112(DI)(BX*1) + MOVOU X10, -96(DI)(BX*1) + MOVOU X11, -80(DI)(BX*1) + MOVOU X12, -64(DI)(BX*1) + MOVOU X13, -48(DI)(BX*1) + MOVOU X14, -32(DI)(BX*1) + MOVOU X15, -16(DI)(BX*1) + // X15 must be zero on return + PXOR X15, X15 + RET +move_256through2048: + SUBQ $256, BX + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU 32(SI), X2 + MOVOU 48(SI), X3 + MOVOU 64(SI), X4 + MOVOU 80(SI), X5 + MOVOU 96(SI), X6 + MOVOU 112(SI), X7 + MOVOU 128(SI), X8 + MOVOU 144(SI), X9 + MOVOU 160(SI), X10 + MOVOU 176(SI), X11 + MOVOU 192(SI), X12 + MOVOU 208(SI), X13 + MOVOU 224(SI), X14 + MOVOU 240(SI), X15 + MOVOU X0, (DI) + MOVOU X1, 16(DI) + MOVOU X2, 32(DI) + MOVOU X3, 48(DI) + MOVOU X4, 64(DI) + MOVOU X5, 80(DI) + MOVOU X6, 96(DI) + MOVOU X7, 112(DI) + MOVOU X8, 128(DI) + MOVOU X9, 144(DI) + MOVOU X10, 160(DI) + MOVOU X11, 176(DI) + MOVOU X12, 192(DI) + MOVOU X13, 208(DI) + MOVOU X14, 224(DI) + MOVOU X15, 240(DI) + CMPQ BX, $256 + LEAQ 256(SI), SI + LEAQ 256(DI), DI + JGE move_256through2048 + // X15 must be zero on return + PXOR X15, X15 + JMP tail + +avxUnaligned: + // There are two implementations of move algorithm. + // The first one for non-overlapped memory regions. It uses forward copying. + // The second one for overlapped regions. It uses backward copying + MOVQ DI, CX + SUBQ SI, CX + // Now CX contains distance between SRC and DEST + CMPQ CX, BX + // If the distance lesser than region length it means that regions are overlapped + JC copy_backward + + // Non-temporal copy would be better for big sizes. + CMPQ BX, $0x100000 + JAE gobble_big_data_fwd + + // Memory layout on the source side + // SI CX + // |<---------BX before correction--------->| + // | |<--BX corrected-->| | + // | | |<--- AX --->| + // |<-R11->| |<-128 bytes->| + // +----------------------------------------+ + // | Head | Body | Tail | + // +-------+------------------+-------------+ + // ^ ^ ^ + // | | | + // Save head into Y4 Save tail into X5..X12 + // | + // SI+R11, where R11 = ((DI & -32) + 32) - DI + // Algorithm: + // 1. Unaligned save of the tail's 128 bytes + // 2. Unaligned save of the head's 32 bytes + // 3. 
Destination-aligned copying of body (128 bytes per iteration) + // 4. Put head on the new place + // 5. Put the tail on the new place + // It can be important to satisfy processor's pipeline requirements for + // small sizes as the cost of unaligned memory region copying is + // comparable with the cost of main loop. So code is slightly messed there. + // There is more clean implementation of that algorithm for bigger sizes + // where the cost of unaligned part copying is negligible. + // You can see it after gobble_big_data_fwd label. + LEAQ (SI)(BX*1), CX + MOVQ DI, R10 + // CX points to the end of buffer so we need go back slightly. We will use negative offsets there. + MOVOU -0x80(CX), X5 + MOVOU -0x70(CX), X6 + MOVQ $0x80, AX + // Align destination address + ANDQ $-32, DI + ADDQ $32, DI + // Continue tail saving. + MOVOU -0x60(CX), X7 + MOVOU -0x50(CX), X8 + // Make R11 delta between aligned and unaligned destination addresses. + MOVQ DI, R11 + SUBQ R10, R11 + // Continue tail saving. + MOVOU -0x40(CX), X9 + MOVOU -0x30(CX), X10 + // Let's make bytes-to-copy value adjusted as we've prepared unaligned part for copying. + SUBQ R11, BX + // Continue tail saving. + MOVOU -0x20(CX), X11 + MOVOU -0x10(CX), X12 + // The tail will be put on its place after main body copying. + // It's time for the unaligned heading part. + VMOVDQU (SI), Y4 + // Adjust source address to point past head. + ADDQ R11, SI + SUBQ AX, BX + // Aligned memory copying there +gobble_128_loop: + VMOVDQU (SI), Y0 + VMOVDQU 0x20(SI), Y1 + VMOVDQU 0x40(SI), Y2 + VMOVDQU 0x60(SI), Y3 + ADDQ AX, SI + VMOVDQA Y0, (DI) + VMOVDQA Y1, 0x20(DI) + VMOVDQA Y2, 0x40(DI) + VMOVDQA Y3, 0x60(DI) + ADDQ AX, DI + SUBQ AX, BX + JA gobble_128_loop + // Now we can store unaligned parts. + ADDQ AX, BX + ADDQ DI, BX + VMOVDQU Y4, (R10) + VZEROUPPER + MOVOU X5, -0x80(BX) + MOVOU X6, -0x70(BX) + MOVOU X7, -0x60(BX) + MOVOU X8, -0x50(BX) + MOVOU X9, -0x40(BX) + MOVOU X10, -0x30(BX) + MOVOU X11, -0x20(BX) + MOVOU X12, -0x10(BX) + RET + +gobble_big_data_fwd: + // There is forward copying for big regions. + // It uses non-temporal mov instructions. + // Details of this algorithm are commented previously for small sizes. + LEAQ (SI)(BX*1), CX + MOVOU -0x80(SI)(BX*1), X5 + MOVOU -0x70(CX), X6 + MOVOU -0x60(CX), X7 + MOVOU -0x50(CX), X8 + MOVOU -0x40(CX), X9 + MOVOU -0x30(CX), X10 + MOVOU -0x20(CX), X11 + MOVOU -0x10(CX), X12 + VMOVDQU (SI), Y4 + MOVQ DI, R8 + ANDQ $-32, DI + ADDQ $32, DI + MOVQ DI, R10 + SUBQ R8, R10 + SUBQ R10, BX + ADDQ R10, SI + LEAQ (DI)(BX*1), CX + SUBQ $0x80, BX +gobble_mem_fwd_loop: + PREFETCHNTA 0x1C0(SI) + PREFETCHNTA 0x280(SI) + // Prefetch values were chosen empirically. + // Approach for prefetch usage as in 9.5.6 of [1] + // [1] 64-ia-32-architectures-optimization-manual.pdf + // https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf + VMOVDQU (SI), Y0 + VMOVDQU 0x20(SI), Y1 + VMOVDQU 0x40(SI), Y2 + VMOVDQU 0x60(SI), Y3 + ADDQ $0x80, SI + VMOVNTDQ Y0, (DI) + VMOVNTDQ Y1, 0x20(DI) + VMOVNTDQ Y2, 0x40(DI) + VMOVNTDQ Y3, 0x60(DI) + ADDQ $0x80, DI + SUBQ $0x80, BX + JA gobble_mem_fwd_loop + // NT instructions don't follow the normal cache-coherency rules. + // We need SFENCE there to make copied data available timely. 
+ SFENCE + VMOVDQU Y4, (R8) + VZEROUPPER + MOVOU X5, -0x80(CX) + MOVOU X6, -0x70(CX) + MOVOU X7, -0x60(CX) + MOVOU X8, -0x50(CX) + MOVOU X9, -0x40(CX) + MOVOU X10, -0x30(CX) + MOVOU X11, -0x20(CX) + MOVOU X12, -0x10(CX) + RET + +copy_backward: + MOVQ DI, AX + // Backward copying is about the same as the forward one. + // Firstly we load unaligned tail in the beginning of region. + MOVOU (SI), X5 + MOVOU 0x10(SI), X6 + ADDQ BX, DI + MOVOU 0x20(SI), X7 + MOVOU 0x30(SI), X8 + LEAQ -0x20(DI), R10 + MOVQ DI, R11 + MOVOU 0x40(SI), X9 + MOVOU 0x50(SI), X10 + ANDQ $0x1F, R11 + MOVOU 0x60(SI), X11 + MOVOU 0x70(SI), X12 + XORQ R11, DI + // Let's point SI to the end of region + ADDQ BX, SI + // and load unaligned head into X4. + VMOVDQU -0x20(SI), Y4 + SUBQ R11, SI + SUBQ R11, BX + // If there is enough data for non-temporal moves go to special loop + CMPQ BX, $0x100000 + JA gobble_big_data_bwd + SUBQ $0x80, BX +gobble_mem_bwd_loop: + VMOVDQU -0x20(SI), Y0 + VMOVDQU -0x40(SI), Y1 + VMOVDQU -0x60(SI), Y2 + VMOVDQU -0x80(SI), Y3 + SUBQ $0x80, SI + VMOVDQA Y0, -0x20(DI) + VMOVDQA Y1, -0x40(DI) + VMOVDQA Y2, -0x60(DI) + VMOVDQA Y3, -0x80(DI) + SUBQ $0x80, DI + SUBQ $0x80, BX + JA gobble_mem_bwd_loop + // Let's store unaligned data + VMOVDQU Y4, (R10) + VZEROUPPER + MOVOU X5, (AX) + MOVOU X6, 0x10(AX) + MOVOU X7, 0x20(AX) + MOVOU X8, 0x30(AX) + MOVOU X9, 0x40(AX) + MOVOU X10, 0x50(AX) + MOVOU X11, 0x60(AX) + MOVOU X12, 0x70(AX) + RET + +gobble_big_data_bwd: + SUBQ $0x80, BX +gobble_big_mem_bwd_loop: + PREFETCHNTA -0x1C0(SI) + PREFETCHNTA -0x280(SI) + VMOVDQU -0x20(SI), Y0 + VMOVDQU -0x40(SI), Y1 + VMOVDQU -0x60(SI), Y2 + VMOVDQU -0x80(SI), Y3 + SUBQ $0x80, SI + VMOVNTDQ Y0, -0x20(DI) + VMOVNTDQ Y1, -0x40(DI) + VMOVNTDQ Y2, -0x60(DI) + VMOVNTDQ Y3, -0x80(DI) + SUBQ $0x80, DI + SUBQ $0x80, BX + JA gobble_big_mem_bwd_loop + SFENCE + VMOVDQU Y4, (R10) + VZEROUPPER + MOVOU X5, (AX) + MOVOU X6, 0x10(AX) + MOVOU X7, 0x20(AX) + MOVOU X8, 0x30(AX) + MOVOU X9, 0x40(AX) + MOVOU X10, 0x50(AX) + MOVOU X11, 0x60(AX) + MOVOU X12, 0x70(AX) + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_arm.s b/platform/dbops/binaries/go/go/src/runtime/memmove_arm.s new file mode 100644 index 0000000000000000000000000000000000000000..43d53fa8f2a9d3a8f1e0bb661d4f47e3be9b5d90 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_arm.s @@ -0,0 +1,264 @@ +// Inferno's libkern/memmove-arm.s +// https://bitbucket.org/inferno-os/inferno-os/src/master/libkern/memmove-arm.s +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. +// Portions Copyright 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#include "textflag.h" + +// TE or TS are spilled to the stack during bulk register moves. +#define TS R0 +#define TE R8 + +// Warning: the linker will use R11 to synthesize certain instructions. Please +// take care and double check with objdump. +#define FROM R11 +#define N R12 +#define TMP R12 /* N and TMP don't overlap */ +#define TMP1 R5 + +#define RSHIFT R5 +#define LSHIFT R6 +#define OFFSET R7 + +#define BR0 R0 /* shared with TS */ +#define BW0 R1 +#define BR1 R1 +#define BW1 R2 +#define BR2 R2 +#define BW2 R3 +#define BR3 R3 +#define BW3 R4 + +#define FW0 R1 +#define FR0 R2 +#define FW1 R2 +#define FR1 R3 +#define FW2 R3 +#define FR2 R4 +#define FW3 R4 +#define FR3 R8 /* shared with TE */ + +// See memmove Go doc for important implementation constraints. + +// func memmove(to, from unsafe.Pointer, n uintptr) +TEXT runtime·memmove(SB), NOSPLIT, $4-12 +_memmove: + MOVW to+0(FP), TS + MOVW from+4(FP), FROM + MOVW n+8(FP), N + + ADD N, TS, TE /* to end pointer */ + + CMP FROM, TS + BLS _forward + +_back: + ADD N, FROM /* from end pointer */ + CMP $4, N /* need at least 4 bytes to copy */ + BLT _b1tail + +_b4align: /* align destination on 4 */ + AND.S $3, TE, TMP + BEQ _b4aligned + + MOVBU.W -1(FROM), TMP /* pre-indexed */ + MOVBU.W TMP, -1(TE) /* pre-indexed */ + B _b4align + +_b4aligned: /* is source now aligned? */ + AND.S $3, FROM, TMP + BNE _bunaligned + + ADD $31, TS, TMP /* do 32-byte chunks if possible */ + MOVW TS, savedts-4(SP) +_b32loop: + CMP TMP, TE + BLS _b4tail + + MOVM.DB.W (FROM), [R0-R7] + MOVM.DB.W [R0-R7], (TE) + B _b32loop + +_b4tail: /* do remaining words if possible */ + MOVW savedts-4(SP), TS + ADD $3, TS, TMP +_b4loop: + CMP TMP, TE + BLS _b1tail + + MOVW.W -4(FROM), TMP1 /* pre-indexed */ + MOVW.W TMP1, -4(TE) /* pre-indexed */ + B _b4loop + +_b1tail: /* remaining bytes */ + CMP TE, TS + BEQ _return + + MOVBU.W -1(FROM), TMP /* pre-indexed */ + MOVBU.W TMP, -1(TE) /* pre-indexed */ + B _b1tail + +_forward: + CMP $4, N /* need at least 4 bytes to copy */ + BLT _f1tail + +_f4align: /* align destination on 4 */ + AND.S $3, TS, TMP + BEQ _f4aligned + + MOVBU.P 1(FROM), TMP /* implicit write back */ + MOVBU.P TMP, 1(TS) /* implicit write back */ + B _f4align + +_f4aligned: /* is source now aligned? */ + AND.S $3, FROM, TMP + BNE _funaligned + + SUB $31, TE, TMP /* do 32-byte chunks if possible */ + MOVW TE, savedte-4(SP) +_f32loop: + CMP TMP, TS + BHS _f4tail + + MOVM.IA.W (FROM), [R1-R8] + MOVM.IA.W [R1-R8], (TS) + B _f32loop + +_f4tail: + MOVW savedte-4(SP), TE + SUB $3, TE, TMP /* do remaining words if possible */ +_f4loop: + CMP TMP, TS + BHS _f1tail + + MOVW.P 4(FROM), TMP1 /* implicit write back */ + MOVW.P TMP1, 4(TS) /* implicit write back */ + B _f4loop + +_f1tail: + CMP TS, TE + BEQ _return + + MOVBU.P 1(FROM), TMP /* implicit write back */ + MOVBU.P TMP, 1(TS) /* implicit write back */ + B _f1tail + +_return: + MOVW to+0(FP), R0 + RET + +_bunaligned: + CMP $2, TMP /* is TMP < 2 ? 
*/ + + MOVW.LT $8, RSHIFT /* (R(n)<<24)|(R(n-1)>>8) */ + MOVW.LT $24, LSHIFT + MOVW.LT $1, OFFSET + + MOVW.EQ $16, RSHIFT /* (R(n)<<16)|(R(n-1)>>16) */ + MOVW.EQ $16, LSHIFT + MOVW.EQ $2, OFFSET + + MOVW.GT $24, RSHIFT /* (R(n)<<8)|(R(n-1)>>24) */ + MOVW.GT $8, LSHIFT + MOVW.GT $3, OFFSET + + ADD $16, TS, TMP /* do 16-byte chunks if possible */ + CMP TMP, TE + BLS _b1tail + + BIC $3, FROM /* align source */ + MOVW TS, savedts-4(SP) + MOVW (FROM), BR0 /* prime first block register */ + +_bu16loop: + CMP TMP, TE + BLS _bu1tail + + MOVW BR0<<LSHIFT, BW3 + MOVM.DB.W (FROM), [BR0-BR3] + ORR BR3>>RSHIFT, BW3 + + MOVW BR3<<LSHIFT, BW2 + ORR BR2>>RSHIFT, BW2 + + MOVW BR2<<LSHIFT, BW1 + ORR BR1>>RSHIFT, BW1 + + MOVW BR1<<LSHIFT, BW0 + ORR BR0>>RSHIFT, BW0 + + MOVM.DB.W [BW0-BW3], (TE) + B _bu16loop + +_bu1tail: + MOVW savedts-4(SP), TS + ADD OFFSET, FROM + B _b1tail + +_funaligned: + CMP $2, TMP + + MOVW.LT $8, RSHIFT /* (R(n+1)<<24)|(R(n)>>8) */ + MOVW.LT $24, LSHIFT + MOVW.LT $3, OFFSET + + MOVW.EQ $16, RSHIFT /* (R(n+1)<<16)|(R(n)>>16) */ + MOVW.EQ $16, LSHIFT + MOVW.EQ $2, OFFSET + + MOVW.GT $24, RSHIFT /* (R(n+1)<<8)|(R(n)>>24) */ + MOVW.GT $8, LSHIFT + MOVW.GT $1, OFFSET + + SUB $16, TE, TMP /* do 16-byte chunks if possible */ + CMP TMP, TS + BHS _f1tail + + BIC $3, FROM /* align source */ + MOVW TE, savedte-4(SP) + MOVW.P 4(FROM), FR3 /* prime last block register, implicit write back */ + +_fu16loop: + CMP TMP, TS + BHS _fu1tail + + MOVW FR3>>RSHIFT, FW0 + MOVM.IA.W (FROM), [FR0,FR1,FR2,FR3] + ORR FR0<<LSHIFT, FW0 + + MOVW FR0>>RSHIFT, FW1 + ORR FR1<<LSHIFT, FW1 + + MOVW FR1>>RSHIFT, FW2 + ORR FR2<<LSHIFT, FW2 + + MOVW FR2>>RSHIFT, FW3 + ORR FR3<<LSHIFT, FW3 + + MOVM.IA.W [FW0-FW3], (TS) + B _fu16loop + +_fu1tail: + MOVW savedte-4(SP), TE + SUB OFFSET, FROM + B _f1tail
diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_arm64.s b/platform/dbops/binaries/go/go/src/runtime/memmove_arm64.s new file mode 100644 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_arm64.s +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// See memmove Go doc for important implementation constraints. + +// func memmove(to, from unsafe.Pointer, n uintptr) +TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-24 + CBZ R2, copy0 + + // Small copies: 1..16 bytes + CMP $16, R2 + BLE copy16 + + // Large copies + CMP $128, R2 + BHI copy_long + CMP $32, R2 + BHI copy32_128 + + // Small copies: 17..32 bytes. + LDP (R1), (R6, R7) + ADD R1, R2, R4 // R4 points just past the last source byte + LDP -16(R4), (R12, R13) + STP (R6, R7), (R0) + ADD R0, R2, R5 // R5 points just past the last destination byte + STP (R12, R13), -16(R5) + RET + +// Small copies: 1..16 bytes. +copy16: + ADD R1, R2, R4 // R4 points just past the last source byte + ADD R0, R2, R5 // R5 points just past the last destination byte + CMP $8, R2 + BLT copy7 + MOVD (R1), R6 + MOVD -8(R4), R7 + MOVD R6, (R0) + MOVD R7, -8(R5) + RET + +copy7: + TBZ $2, R2, copy3 + MOVWU (R1), R6 + MOVWU -4(R4), R7 + MOVW R6, (R0) + MOVW R7, -4(R5) + RET + +copy3: + TBZ $1, R2, copy1 + MOVHU (R1), R6 + MOVHU -2(R4), R7 + MOVH R6, (R0) + MOVH R7, -2(R5) + RET + +copy1: + MOVBU (R1), R6 + MOVB R6, (R0) + +copy0: + RET + + // Medium copies: 33..128 bytes. +copy32_128: + ADD R1, R2, R4 // R4 points just past the last source byte + ADD R0, R2, R5 // R5 points just past the last destination byte + LDP (R1), (R6, R7) + LDP 16(R1), (R8, R9) + LDP -32(R4), (R10, R11) + LDP -16(R4), (R12, R13) + CMP $64, R2 + BHI copy128 + STP (R6, R7), (R0) + STP (R8, R9), 16(R0) + STP (R10, R11), -32(R5) + STP (R12, R13), -16(R5) + RET + + // Copy 65..128 bytes. +copy128: + LDP 32(R1), (R14, R15) + LDP 48(R1), (R16, R17) + CMP $96, R2 + BLS copy96 + LDP -64(R4), (R2, R3) + LDP -48(R4), (R1, R4) + STP (R2, R3), -64(R5) + STP (R1, R4), -48(R5) + +copy96: + STP (R6, R7), (R0) + STP (R8, R9), 16(R0) + STP (R14, R15), 32(R0) + STP (R16, R17), 48(R0) + STP (R10, R11), -32(R5) + STP (R12, R13), -16(R5) + RET + + // Copy more than 128 bytes.
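In copy_long below, backward_check picks the copy direction with a single unsigned compare: dst-src (as uintptr) is smaller than n exactly when dst lands inside the source range, so a forward copy would overwrite bytes it has not yet read. The same test in Go; needBackward is an illustrative name, not part of the patched files:

package main

import (
	"fmt"
	"unsafe"
)

// needBackward reports whether a forward byte-by-byte copy of n bytes
// from src to dst would trample unread source bytes. Unsigned
// wraparound makes the one compare cover dst < src as well.
func needBackward(dst, src unsafe.Pointer, n uintptr) bool {
	return uintptr(dst)-uintptr(src) < n
}

func main() {
	buf := make([]byte, 16)
	lo := unsafe.Pointer(&buf[0])
	hi := unsafe.Pointer(&buf[4])
	fmt.Println(needBackward(hi, lo, 8)) // true: dst 4 bytes ahead of src
	fmt.Println(needBackward(lo, hi, 8)) // false: forward copy is safe
}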
+copy_long: + ADD R1, R2, R4 // R4 points just past the last source byte + ADD R0, R2, R5 // R5 points just past the last destination byte + MOVD ZR, R7 + MOVD ZR, R8 + + CMP $1024, R2 + BLT backward_check + // feature detect to decide how to align + MOVBU runtime·arm64UseAlignedLoads(SB), R6 + CBNZ R6, use_aligned_loads + MOVD R0, R7 + MOVD R5, R8 + B backward_check +use_aligned_loads: + MOVD R1, R7 + MOVD R4, R8 + // R7 and R8 are used here for the realignment calculation. In + // the use_aligned_loads case, R7 is the src pointer and R8 is + // srcend pointer, which is used in the backward copy case. + // When doing aligned stores, R7 is the dst pointer and R8 is + // the dstend pointer. + +backward_check: + // Use backward copy if there is an overlap. + SUB R1, R0, R14 + CBZ R14, copy0 + CMP R2, R14 + BCC copy_long_backward + + // Copy 16 bytes and then align src (R1) or dst (R0) to 16-byte alignment. + LDP (R1), (R12, R13) // Load A + AND $15, R7, R14 // Calculate the realignment offset + SUB R14, R1, R1 + SUB R14, R0, R3 // move dst back same amount as src + ADD R14, R2, R2 + LDP 16(R1), (R6, R7) // Load B + STP (R12, R13), (R0) // Store A + LDP 32(R1), (R8, R9) // Load C + LDP 48(R1), (R10, R11) // Load D + LDP.W 64(R1), (R12, R13) // Load E + // 80 bytes have been loaded; if less than 80+64 bytes remain, copy from the end + SUBS $144, R2, R2 + BLS copy64_from_end + +loop64: + STP (R6, R7), 16(R3) // Store B + LDP 16(R1), (R6, R7) // Load B (next iteration) + STP (R8, R9), 32(R3) // Store C + LDP 32(R1), (R8, R9) // Load C + STP (R10, R11), 48(R3) // Store D + LDP 48(R1), (R10, R11) // Load D + STP.W (R12, R13), 64(R3) // Store E + LDP.W 64(R1), (R12, R13) // Load E + SUBS $64, R2, R2 + BHI loop64 + + // Write the last iteration and copy 64 bytes from the end. +copy64_from_end: + LDP -64(R4), (R14, R15) // Load F + STP (R6, R7), 16(R3) // Store B + LDP -48(R4), (R6, R7) // Load G + STP (R8, R9), 32(R3) // Store C + LDP -32(R4), (R8, R9) // Load H + STP (R10, R11), 48(R3) // Store D + LDP -16(R4), (R10, R11) // Load I + STP (R12, R13), 64(R3) // Store E + STP (R14, R15), -64(R5) // Store F + STP (R6, R7), -48(R5) // Store G + STP (R8, R9), -32(R5) // Store H + STP (R10, R11), -16(R5) // Store I + RET + + // Large backward copy for overlapping copies. + // Copy 16 bytes and then align srcend (R4) or dstend (R5) to 16-byte alignment. +copy_long_backward: + LDP -16(R4), (R12, R13) + AND $15, R8, R14 + SUB R14, R4, R4 + SUB R14, R2, R2 + LDP -16(R4), (R6, R7) + STP (R12, R13), -16(R5) + LDP -32(R4), (R8, R9) + LDP -48(R4), (R10, R11) + LDP.W -64(R4), (R12, R13) + SUB R14, R5, R5 + SUBS $128, R2, R2 + BLS copy64_from_start + +loop64_backward: + STP (R6, R7), -16(R5) + LDP -16(R4), (R6, R7) + STP (R8, R9), -32(R5) + LDP -32(R4), (R8, R9) + STP (R10, R11), -48(R5) + LDP -48(R4), (R10, R11) + STP.W (R12, R13), -64(R5) + LDP.W -64(R4), (R12, R13) + SUBS $64, R2, R2 + BHI loop64_backward + + // Write the last iteration and copy 64 bytes from the start. 
+copy64_from_start: + LDP 48(R1), (R2, R3) + STP (R6, R7), -16(R5) + LDP 32(R1), (R6, R7) + STP (R8, R9), -32(R5) + LDP 16(R1), (R8, R9) + STP (R10, R11), -48(R5) + LDP (R1), (R10, R11) + STP (R12, R13), -64(R5) + STP (R2, R3), 48(R0) + STP (R6, R7), 32(R0) + STP (R8, R9), 16(R0) + STP (R10, R11), (R0) + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_linux_amd64_test.go b/platform/dbops/binaries/go/go/src/runtime/memmove_linux_amd64_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5f900623bee83d12432fc995c9d0a3046f9c040c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_linux_amd64_test.go @@ -0,0 +1,56 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "os" + "syscall" + "testing" + "unsafe" +) + +// TestMemmoveOverflow maps 3GB of memory and calls memmove on +// the corresponding slice. +func TestMemmoveOverflow(t *testing.T) { + t.Parallel() + // Create a temporary file. + tmp, err := os.CreateTemp("", "go-memmovetest") + if err != nil { + t.Fatal(err) + } + _, err = tmp.Write(make([]byte, 65536)) + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmp.Name()) + defer tmp.Close() + + // Set up mappings. + base, _, errno := syscall.Syscall6(syscall.SYS_MMAP, + 0xa0<<32, 3<<30, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_PRIVATE|syscall.MAP_ANONYMOUS, ^uintptr(0), 0) + if errno != 0 { + t.Skipf("could not create memory mapping: %s", errno) + } + syscall.Syscall(syscall.SYS_MUNMAP, base, 3<<30, 0) + + for off := uintptr(0); off < 3<<30; off += 65536 { + _, _, errno := syscall.Syscall6(syscall.SYS_MMAP, + base+off, 65536, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED|syscall.MAP_FIXED, tmp.Fd(), 0) + if errno != 0 { + t.Skipf("could not map a page at requested 0x%x: %s", base+off, errno) + } + defer syscall.Syscall(syscall.SYS_MUNMAP, base+off, 65536, 0) + } + + s := unsafe.Slice((*byte)(unsafe.Pointer(base)), 3<<30) + n := copy(s[1:], s) + if n != 3<<30-1 { + t.Fatalf("copied %d bytes, expected %d", n, 3<<30-1) + } + n = copy(s, s[1:]) + if n != 3<<30-1 { + t.Fatalf("copied %d bytes, expected %d", n, 3<<30-1) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_loong64.s b/platform/dbops/binaries/go/go/src/runtime/memmove_loong64.s new file mode 100644 index 0000000000000000000000000000000000000000..5b7aeba698c372fc1975ca3ab9c7ce6e6987db53 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_loong64.s @@ -0,0 +1,109 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// See memmove Go doc for important implementation constraints. 
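Both loong64 loops below (and the mips64x ones after them) attempt word copies only when src and dst can become 8-aligned at the same time, that is, when their distance is a multiple of 8. A sketch of the check; canWordCopy is an assumed helper name:

package main

import "fmt"

// canWordCopy mirrors the (from-to)&7 test used below: if the distance
// between the pointers is not a multiple of 8, the routine falls back
// to copying byte by byte.
func canWordCopy(dst, src uintptr) bool {
	return (dst-src)&7 == 0
}

func main() {
	fmt.Println(canWordCopy(0x2010, 0x1008)) // true: both pointers 8-align together
	fmt.Println(canWordCopy(0x2011, 0x1008)) // false: byte loop
}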
+ +// func memmove(to, from unsafe.Pointer, n uintptr) +TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24 +#ifndef GOEXPERIMENT_regabiargs + MOVV to+0(FP), R4 + MOVV from+8(FP), R5 + MOVV n+16(FP), R6 +#endif + BNE R6, check + RET + +check: + SGTU R4, R5, R7 + BNE R7, backward + + ADDV R4, R6, R9 // end pointer + + // if the two pointers are not of same alignments, do byte copying + SUBVU R5, R4, R7 + AND $7, R7 + BNE R7, out + + // if less than 8 bytes, do byte copying + SGTU $8, R6, R7 + BNE R7, out + + // do one byte at a time until 8-aligned + AND $7, R4, R8 + BEQ R8, words + MOVB (R5), R7 + ADDV $1, R5 + MOVB R7, (R4) + ADDV $1, R4 + JMP -6(PC) + +words: + // do 8 bytes at a time if there is room + ADDV $-7, R9, R6 // R6 is end pointer-7 + + PCALIGN $16 + SGTU R6, R4, R8 + BEQ R8, out + MOVV (R5), R7 + ADDV $8, R5 + MOVV R7, (R4) + ADDV $8, R4 + JMP -6(PC) + +out: + BEQ R4, R9, done + MOVB (R5), R7 + ADDV $1, R5 + MOVB R7, (R4) + ADDV $1, R4 + JMP -5(PC) +done: + RET + +backward: + ADDV R6, R5 // from-end pointer + ADDV R4, R6, R9 // to-end pointer + + // if the two pointers are not of same alignments, do byte copying + SUBVU R9, R5, R7 + AND $7, R7 + BNE R7, out1 + + // if less than 8 bytes, do byte copying + SGTU $8, R6, R7 + BNE R7, out1 + + // do one byte at a time until 8-aligned + AND $7, R9, R8 + BEQ R8, words1 + ADDV $-1, R5 + MOVB (R5), R7 + ADDV $-1, R9 + MOVB R7, (R9) + JMP -6(PC) + +words1: + // do 8 bytes at a time if there is room + ADDV $7, R4, R6 // R6 is start pointer+7 + + PCALIGN $16 + SGTU R9, R6, R8 + BEQ R8, out1 + ADDV $-8, R5 + MOVV (R5), R7 + ADDV $-8, R9 + MOVV R7, (R9) + JMP -6(PC) + +out1: + BEQ R4, R9, done1 + ADDV $-1, R5 + MOVB (R5), R7 + ADDV $-1, R9 + MOVB R7, (R9) + JMP -5(PC) +done1: + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_mips64x.s b/platform/dbops/binaries/go/go/src/runtime/memmove_mips64x.s new file mode 100644 index 0000000000000000000000000000000000000000..b69178ccd38bd4785299cae57922b2d86a16d823 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_mips64x.s @@ -0,0 +1,107 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le + +#include "textflag.h" + +// See memmove Go doc for important implementation constraints. 
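+// The overall shape, as a rough Go sketch (illustrative only;
+// forwardCopy/backwardCopy are placeholder names, not real helpers):
+//
+//	func memmove(to, from unsafe.Pointer, n uintptr) {
+//		if n == 0 {
+//			return
+//		}
+//		if uintptr(to) > uintptr(from) {
+//			backwardCopy(to, from, n) // dest above source: copy high-to-low
+//		} else {
+//			forwardCopy(to, from, n) // safe to copy low-to-high
+//		}
+//	}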
+ +// func memmove(to, from unsafe.Pointer, n uintptr) +TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24 + MOVV to+0(FP), R1 + MOVV from+8(FP), R2 + MOVV n+16(FP), R3 + BNE R3, check + RET + +check: + SGTU R1, R2, R4 + BNE R4, backward + + ADDV R1, R3, R6 // end pointer + + // if the two pointers are not of same alignments, do byte copying + SUBVU R2, R1, R4 + AND $7, R4 + BNE R4, out + + // if less than 8 bytes, do byte copying + SGTU $8, R3, R4 + BNE R4, out + + // do one byte at a time until 8-aligned + AND $7, R1, R5 + BEQ R5, words + MOVB (R2), R4 + ADDV $1, R2 + MOVB R4, (R1) + ADDV $1, R1 + JMP -6(PC) + +words: + // do 8 bytes at a time if there is room + ADDV $-7, R6, R3 // R3 is end pointer-7 + + SGTU R3, R1, R5 + BEQ R5, out + MOVV (R2), R4 + ADDV $8, R2 + MOVV R4, (R1) + ADDV $8, R1 + JMP -6(PC) + +out: + BEQ R1, R6, done + MOVB (R2), R4 + ADDV $1, R2 + MOVB R4, (R1) + ADDV $1, R1 + JMP -5(PC) +done: + RET + +backward: + ADDV R3, R2 // from-end pointer + ADDV R1, R3, R6 // to-end pointer + + // if the two pointers are not of same alignments, do byte copying + SUBVU R6, R2, R4 + AND $7, R4 + BNE R4, out1 + + // if less than 8 bytes, do byte copying + SGTU $8, R3, R4 + BNE R4, out1 + + // do one byte at a time until 8-aligned + AND $7, R6, R5 + BEQ R5, words1 + ADDV $-1, R2 + MOVB (R2), R4 + ADDV $-1, R6 + MOVB R4, (R6) + JMP -6(PC) + +words1: + // do 8 bytes at a time if there is room + ADDV $7, R1, R3 // R3 is start pointer+7 + + SGTU R6, R3, R5 + BEQ R5, out1 + ADDV $-8, R2 + MOVV (R2), R4 + ADDV $-8, R6 + MOVV R4, (R6) + JMP -6(PC) + +out1: + BEQ R1, R6, done1 + ADDV $-1, R2 + MOVB (R2), R4 + ADDV $-1, R6 + MOVB R4, (R6) + JMP -5(PC) +done1: + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_mipsx.s b/platform/dbops/binaries/go/go/src/runtime/memmove_mipsx.s new file mode 100644 index 0000000000000000000000000000000000000000..494288cf338cc58a6f7301960c2e24f743f9412e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_mipsx.s @@ -0,0 +1,260 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips || mipsle + +#include "textflag.h" + +#ifdef GOARCH_mips +#define MOVWHI MOVWL +#define MOVWLO MOVWR +#else +#define MOVWHI MOVWR +#define MOVWLO MOVWL +#endif + +// See memmove Go doc for important implementation constraints. + +// func memmove(to, from unsafe.Pointer, n uintptr) +TEXT runtime·memmove(SB),NOSPLIT,$-0-12 + MOVW n+8(FP), R3 + MOVW from+4(FP), R2 + MOVW to+0(FP), R1 + + ADDU R3, R2, R4 // end pointer for source + ADDU R3, R1, R5 // end pointer for destination + + // if destination is ahead of source, start at the end of the buffer and go backward. 
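+	// (SGTU sets R6 to 1 iff R1 > R2 unsigned, i.e. iff the destination
+	// starts above the source, which selects the backward path.)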
+ SGTU R1, R2, R6 + BNE R6, backward + + // if less than 4 bytes, use byte by byte copying + SGTU $4, R3, R6 + BNE R6, f_small_copy + + // align destination to 4 bytes + AND $3, R1, R6 + BEQ R6, f_dest_aligned + SUBU R1, R0, R6 + AND $3, R6 + MOVWHI 0(R2), R7 + SUBU R6, R3 + MOVWLO 3(R2), R7 + ADDU R6, R2 + MOVWHI R7, 0(R1) + ADDU R6, R1 + +f_dest_aligned: + AND $31, R3, R7 + AND $3, R3, R6 + SUBU R7, R5, R7 // end pointer for 32-byte chunks + SUBU R6, R5, R6 // end pointer for 4-byte chunks + + // if source is not aligned, use unaligned reads + AND $3, R2, R8 + BNE R8, f_large_ua + +f_large: + BEQ R1, R7, f_words + ADDU $32, R1 + MOVW 0(R2), R8 + MOVW 4(R2), R9 + MOVW 8(R2), R10 + MOVW 12(R2), R11 + MOVW 16(R2), R12 + MOVW 20(R2), R13 + MOVW 24(R2), R14 + MOVW 28(R2), R15 + ADDU $32, R2 + MOVW R8, -32(R1) + MOVW R9, -28(R1) + MOVW R10, -24(R1) + MOVW R11, -20(R1) + MOVW R12, -16(R1) + MOVW R13, -12(R1) + MOVW R14, -8(R1) + MOVW R15, -4(R1) + JMP f_large + +f_words: + BEQ R1, R6, f_tail + ADDU $4, R1 + MOVW 0(R2), R8 + ADDU $4, R2 + MOVW R8, -4(R1) + JMP f_words + +f_tail: + BEQ R1, R5, ret + MOVWLO -1(R4), R8 + MOVWLO R8, -1(R5) + +ret: + RET + +f_large_ua: + BEQ R1, R7, f_words_ua + ADDU $32, R1 + MOVWHI 0(R2), R8 + MOVWHI 4(R2), R9 + MOVWHI 8(R2), R10 + MOVWHI 12(R2), R11 + MOVWHI 16(R2), R12 + MOVWHI 20(R2), R13 + MOVWHI 24(R2), R14 + MOVWHI 28(R2), R15 + MOVWLO 3(R2), R8 + MOVWLO 7(R2), R9 + MOVWLO 11(R2), R10 + MOVWLO 15(R2), R11 + MOVWLO 19(R2), R12 + MOVWLO 23(R2), R13 + MOVWLO 27(R2), R14 + MOVWLO 31(R2), R15 + ADDU $32, R2 + MOVW R8, -32(R1) + MOVW R9, -28(R1) + MOVW R10, -24(R1) + MOVW R11, -20(R1) + MOVW R12, -16(R1) + MOVW R13, -12(R1) + MOVW R14, -8(R1) + MOVW R15, -4(R1) + JMP f_large_ua + +f_words_ua: + BEQ R1, R6, f_tail_ua + MOVWHI 0(R2), R8 + ADDU $4, R1 + MOVWLO 3(R2), R8 + ADDU $4, R2 + MOVW R8, -4(R1) + JMP f_words_ua + +f_tail_ua: + BEQ R1, R5, ret + MOVWHI -4(R4), R8 + MOVWLO -1(R4), R8 + MOVWLO R8, -1(R5) + JMP ret + +f_small_copy: + BEQ R1, R5, ret + ADDU $1, R1 + MOVB 0(R2), R6 + ADDU $1, R2 + MOVB R6, -1(R1) + JMP f_small_copy + +backward: + SGTU $4, R3, R6 + BNE R6, b_small_copy + + AND $3, R5, R6 + BEQ R6, b_dest_aligned + MOVWHI -4(R4), R7 + SUBU R6, R3 + MOVWLO -1(R4), R7 + SUBU R6, R4 + MOVWLO R7, -1(R5) + SUBU R6, R5 + +b_dest_aligned: + AND $31, R3, R7 + AND $3, R3, R6 + ADDU R7, R1, R7 + ADDU R6, R1, R6 + + AND $3, R4, R8 + BNE R8, b_large_ua + +b_large: + BEQ R5, R7, b_words + ADDU $-32, R5 + MOVW -4(R4), R8 + MOVW -8(R4), R9 + MOVW -12(R4), R10 + MOVW -16(R4), R11 + MOVW -20(R4), R12 + MOVW -24(R4), R13 + MOVW -28(R4), R14 + MOVW -32(R4), R15 + ADDU $-32, R4 + MOVW R8, 28(R5) + MOVW R9, 24(R5) + MOVW R10, 20(R5) + MOVW R11, 16(R5) + MOVW R12, 12(R5) + MOVW R13, 8(R5) + MOVW R14, 4(R5) + MOVW R15, 0(R5) + JMP b_large + +b_words: + BEQ R5, R6, b_tail + ADDU $-4, R5 + MOVW -4(R4), R8 + ADDU $-4, R4 + MOVW R8, 0(R5) + JMP b_words + +b_tail: + BEQ R5, R1, ret + MOVWHI 0(R2), R8 // R2 and R1 have the same alignment so we don't need to load a whole word + MOVWHI R8, 0(R1) + JMP ret + +b_large_ua: + BEQ R5, R7, b_words_ua + ADDU $-32, R5 + MOVWHI -4(R4), R8 + MOVWHI -8(R4), R9 + MOVWHI -12(R4), R10 + MOVWHI -16(R4), R11 + MOVWHI -20(R4), R12 + MOVWHI -24(R4), R13 + MOVWHI -28(R4), R14 + MOVWHI -32(R4), R15 + MOVWLO -1(R4), R8 + MOVWLO -5(R4), R9 + MOVWLO -9(R4), R10 + MOVWLO -13(R4), R11 + MOVWLO -17(R4), R12 + MOVWLO -21(R4), R13 + MOVWLO -25(R4), R14 + MOVWLO -29(R4), R15 + ADDU $-32, R4 + MOVW R8, 28(R5) + MOVW R9, 24(R5) + MOVW R10, 20(R5) + MOVW R11, 
16(R5) + MOVW R12, 12(R5) + MOVW R13, 8(R5) + MOVW R14, 4(R5) + MOVW R15, 0(R5) + JMP b_large_ua + +b_words_ua: + BEQ R5, R6, b_tail_ua + MOVWHI -4(R4), R8 + ADDU $-4, R5 + MOVWLO -1(R4), R8 + ADDU $-4, R4 + MOVW R8, 0(R5) + JMP b_words_ua + +b_tail_ua: + BEQ R5, R1, ret + MOVWHI (R2), R8 + MOVWLO 3(R2), R8 + MOVWHI R8, 0(R1) + JMP ret + +b_small_copy: + BEQ R5, R1, ret + ADDU $-1, R5 + MOVB -1(R4), R6 + ADDU $-1, R4 + MOVB R6, 0(R5) + JMP b_small_copy diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_plan9_386.s b/platform/dbops/binaries/go/go/src/runtime/memmove_plan9_386.s new file mode 100644 index 0000000000000000000000000000000000000000..cfce0e966e1a78fa84c24112d11b2b82062c343d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_plan9_386.s @@ -0,0 +1,137 @@ +// Inferno's libkern/memmove-386.s +// https://bitbucket.org/inferno-os/inferno-os/src/master/libkern/memmove-386.s +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. +// Portions Copyright 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#include "textflag.h" + +// See memmove Go doc for important implementation constraints. + +// func memmove(to, from unsafe.Pointer, n uintptr) +TEXT runtime·memmove(SB), NOSPLIT, $0-12 + MOVL to+0(FP), DI + MOVL from+4(FP), SI + MOVL n+8(FP), BX + + // REP instructions have a high startup cost, so we handle small sizes + // with some straightline code. The REP MOVSL instruction is really fast + // for large sizes. The cutover is approximately 1K. 
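+	// The tail code dispatches on the remaining length in BX: 0, 1-2,
+	// 3, 4, 5-8 and 9-16 bytes get straightline moves below, and
+	// anything larger falls through to the REP MOVSL paths.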
+tail: + TESTL BX, BX + JEQ move_0 + CMPL BX, $2 + JBE move_1or2 + CMPL BX, $4 + JB move_3 + JE move_4 + CMPL BX, $8 + JBE move_5through8 + CMPL BX, $16 + JBE move_9through16 + +/* + * check and set for backwards + */ + CMPL SI, DI + JLS back + +/* + * forward copy loop + */ +forward: + MOVL BX, CX + SHRL $2, CX + ANDL $3, BX + + REP; MOVSL + JMP tail +/* + * check overlap + */ +back: + MOVL SI, CX + ADDL BX, CX + CMPL CX, DI + JLS forward +/* + * whole thing backwards has + * adjusted addresses + */ + + ADDL BX, DI + ADDL BX, SI + STD + +/* + * copy + */ + MOVL BX, CX + SHRL $2, CX + ANDL $3, BX + + SUBL $4, DI + SUBL $4, SI + REP; MOVSL + + CLD + ADDL $4, DI + ADDL $4, SI + SUBL BX, DI + SUBL BX, SI + JMP tail + +move_1or2: + MOVB (SI), AX + MOVB -1(SI)(BX*1), CX + MOVB AX, (DI) + MOVB CX, -1(DI)(BX*1) + RET +move_0: + RET +move_3: + MOVW (SI), AX + MOVB 2(SI), CX + MOVW AX, (DI) + MOVB CX, 2(DI) + RET +move_4: + // We need a separate case for 4 to make sure we write pointers atomically. + MOVL (SI), AX + MOVL AX, (DI) + RET +move_5through8: + MOVL (SI), AX + MOVL -4(SI)(BX*1), CX + MOVL AX, (DI) + MOVL CX, -4(DI)(BX*1) + RET +move_9through16: + MOVL (SI), AX + MOVL 4(SI), CX + MOVL -8(SI)(BX*1), DX + MOVL -4(SI)(BX*1), BP + MOVL AX, (DI) + MOVL CX, 4(DI) + MOVL DX, -8(DI)(BX*1) + MOVL BP, -4(DI)(BX*1) + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_plan9_amd64.s b/platform/dbops/binaries/go/go/src/runtime/memmove_plan9_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..217aa604683476b85674d9d18503c1fa166a5fff --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_plan9_amd64.s @@ -0,0 +1,135 @@ +// Derived from Inferno's libkern/memmove-386.s (adapted for amd64) +// https://bitbucket.org/inferno-os/inferno-os/src/master/libkern/memmove-386.s +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. +// Portions Copyright 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#include "textflag.h" + +// See memmove Go doc for important implementation constraints. + +// func memmove(to, from unsafe.Pointer, n uintptr) +TEXT runtime·memmove(SB), NOSPLIT, $0-24 + + MOVQ to+0(FP), DI + MOVQ from+8(FP), SI + MOVQ n+16(FP), BX + + // REP instructions have a high startup cost, so we handle small sizes + // with some straightline code. 
The REP MOVSQ instruction is really fast + // for large sizes. The cutover is approximately 1K. +tail: + TESTQ BX, BX + JEQ move_0 + CMPQ BX, $2 + JBE move_1or2 + CMPQ BX, $4 + JBE move_3or4 + CMPQ BX, $8 + JB move_5through7 + JE move_8 + CMPQ BX, $16 + JBE move_9through16 + +/* + * check and set for backwards + */ + CMPQ SI, DI + JLS back + +/* + * forward copy loop + */ +forward: + MOVQ BX, CX + SHRQ $3, CX + ANDQ $7, BX + + REP; MOVSQ + JMP tail + +back: +/* + * check overlap + */ + MOVQ SI, CX + ADDQ BX, CX + CMPQ CX, DI + JLS forward + +/* + * whole thing backwards has + * adjusted addresses + */ + ADDQ BX, DI + ADDQ BX, SI + STD + +/* + * copy + */ + MOVQ BX, CX + SHRQ $3, CX + ANDQ $7, BX + + SUBQ $8, DI + SUBQ $8, SI + REP; MOVSQ + + CLD + ADDQ $8, DI + ADDQ $8, SI + SUBQ BX, DI + SUBQ BX, SI + JMP tail + +move_1or2: + MOVB (SI), AX + MOVB -1(SI)(BX*1), CX + MOVB AX, (DI) + MOVB CX, -1(DI)(BX*1) + RET +move_0: + RET +move_3or4: + MOVW (SI), AX + MOVW -2(SI)(BX*1), CX + MOVW AX, (DI) + MOVW CX, -2(DI)(BX*1) + RET +move_5through7: + MOVL (SI), AX + MOVL -4(SI)(BX*1), CX + MOVL AX, (DI) + MOVL CX, -4(DI)(BX*1) + RET +move_8: + // We need a separate case for 8 to make sure we write pointers atomically. + MOVQ (SI), AX + MOVQ AX, (DI) + RET +move_9through16: + MOVQ (SI), AX + MOVQ -8(SI)(BX*1), CX + MOVQ AX, (DI) + MOVQ CX, -8(DI)(BX*1) + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_ppc64x.s b/platform/dbops/binaries/go/go/src/runtime/memmove_ppc64x.s new file mode 100644 index 0000000000000000000000000000000000000000..18b9c850f240f3ca3021378ed826abd188e06001 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_ppc64x.s @@ -0,0 +1,220 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +#include "textflag.h" + +// See memmove Go doc for important implementation constraints. + +// func memmove(to, from unsafe.Pointer, n uintptr) + +// target address +#define TGT R3 +// source address +#define SRC R4 +// length to move +#define LEN R5 +// number of doublewords +#define DWORDS R6 +// number of bytes < 8 +#define BYTES R7 +// const 16 used as index +#define IDX16 R8 +// temp used for copies, etc. +#define TMP R9 +// number of 64 byte chunks +#define QWORDS R10 +// index values +#define IDX32 R14 +#define IDX48 R15 +#define OCTWORDS R16 + +TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24 + // R3 = TGT = to + // R4 = SRC = from + // R5 = LEN = n + + // Determine if there are doublewords to + // copy so a more efficient move can be done +check: +#ifdef GOPPC64_power10 + CMP LEN, $16 + BGT mcopy + SLD $56, LEN, TMP + LXVL SRC, TMP, V0 + STXVL V0, TGT, TMP + RET +#endif +mcopy: + ANDCC $7, LEN, BYTES // R7: bytes to copy + SRD $3, LEN, DWORDS // R6: double words to copy + MOVFL CR0, CR3 // save CR from ANDCC + CMP DWORDS, $0, CR1 // CR1[EQ] set if no double words to copy + + // Determine overlap by subtracting dest - src and comparing against the + // length. This catches the cases where src and dest are in different types + // of storage such as stack and static to avoid doing backward move when not + // necessary. + + SUB SRC, TGT, TMP // dest - src + CMPU TMP, LEN, CR2 // < len? + BC 12, 8, backward // BLT CR2 backward + + // Copying forward if no overlap. + + BC 12, 6, checkbytes // BEQ CR1, checkbytes + SRDCC $3, DWORDS, OCTWORDS // 64 byte chunks? 
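+	// (SRDCC divides DWORDS by 8, the number of doublewords per 64-byte
+	// chunk, and sets CR0, so the BEQ below skips the 64-byte loop when
+	// no full chunk remains.)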
+ MOVD $16, IDX16 + BEQ lt64gt8 // < 64 bytes + + // Prepare for moves of 64 bytes at a time. + +forward64setup: + DCBTST (TGT) // prepare data cache + DCBT (SRC) + MOVD OCTWORDS, CTR // Number of 64 byte chunks + MOVD $32, IDX32 + MOVD $48, IDX48 + PCALIGN $16 + +forward64: + LXVD2X (R0)(SRC), VS32 // load 64 bytes + LXVD2X (IDX16)(SRC), VS33 + LXVD2X (IDX32)(SRC), VS34 + LXVD2X (IDX48)(SRC), VS35 + ADD $64, SRC + STXVD2X VS32, (R0)(TGT) // store 64 bytes + STXVD2X VS33, (IDX16)(TGT) + STXVD2X VS34, (IDX32)(TGT) + STXVD2X VS35, (IDX48)(TGT) + ADD $64,TGT // bump up for next set + BC 16, 0, forward64 // continue + ANDCC $7, DWORDS // remaining doublewords + BEQ checkbytes // only bytes remain + +lt64gt8: + CMP DWORDS, $4 + BLT lt32gt8 + LXVD2X (R0)(SRC), VS32 + LXVD2X (IDX16)(SRC), VS33 + ADD $-4, DWORDS + STXVD2X VS32, (R0)(TGT) + STXVD2X VS33, (IDX16)(TGT) + ADD $32, SRC + ADD $32, TGT + +lt32gt8: + // At this point >= 8 and < 32 + // Move 16 bytes if possible + CMP DWORDS, $2 + BLT lt16 + LXVD2X (R0)(SRC), VS32 + ADD $-2, DWORDS + STXVD2X VS32, (R0)(TGT) + ADD $16, SRC + ADD $16, TGT + +lt16: // Move 8 bytes if possible + CMP DWORDS, $1 + BLT checkbytes +#ifdef GOPPC64_power10 + ADD $8, BYTES + SLD $56, BYTES, TMP + LXVL SRC, TMP, V0 + STXVL V0, TGT, TMP + RET +#endif + + MOVD 0(SRC), TMP + ADD $8, SRC + MOVD TMP, 0(TGT) + ADD $8, TGT +checkbytes: + BC 12, 14, LR // BEQ lr +#ifdef GOPPC64_power10 + SLD $56, BYTES, TMP + LXVL SRC, TMP, V0 + STXVL V0, TGT, TMP + RET +#endif +lt8: // Move word if possible + CMP BYTES, $4 + BLT lt4 + MOVWZ 0(SRC), TMP + ADD $-4, BYTES + MOVW TMP, 0(TGT) + ADD $4, SRC + ADD $4, TGT +lt4: // Move halfword if possible + CMP BYTES, $2 + BLT lt2 + MOVHZ 0(SRC), TMP + ADD $-2, BYTES + MOVH TMP, 0(TGT) + ADD $2, SRC + ADD $2, TGT +lt2: // Move last byte if 1 left + CMP BYTES, $1 + BC 12, 0, LR // ble lr + MOVBZ 0(SRC), TMP + MOVBZ TMP, 0(TGT) + RET + +backward: + // Copying backwards proceeds by copying R7 bytes then copying R6 double words. + // R3 and R4 are advanced to the end of the destination/source buffers + // respectively and moved back as we copy. + + ADD LEN, SRC, SRC // end of source + ADD TGT, LEN, TGT // end of dest + + BEQ nobackwardtail // earlier condition + + MOVD BYTES, CTR // bytes to move + +backwardtailloop: + MOVBZ -1(SRC), TMP // point to last byte + SUB $1,SRC + MOVBZ TMP, -1(TGT) + SUB $1,TGT + BDNZ backwardtailloop + +nobackwardtail: + BC 4, 5, LR // blelr cr1, return if DWORDS == 0 + SRDCC $2,DWORDS,QWORDS // Compute number of 32B blocks and compare to 0 + BNE backward32setup // If QWORDS != 0, start the 32B copy loop. + +backward24: + // DWORDS is a value between 1-3. 
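+	// A single compare against 2 drives a three-way exit below:
+	// BC 12, 0 (blt) returns after one doubleword, BC 12, 2 (beq)
+	// after two, and the fallthrough copies the third.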
+ CMP DWORDS, $2 + + MOVD -8(SRC), TMP + MOVD TMP, -8(TGT) + BC 12, 0, LR // bltlr, return if DWORDS == 1 + + MOVD -16(SRC), TMP + MOVD TMP, -16(TGT) + BC 12, 2, LR // beqlr, return if DWORDS == 2 + + MOVD -24(SRC), TMP + MOVD TMP, -24(TGT) + RET + +backward32setup: + ANDCC $3,DWORDS // Compute remaining DWORDS and compare to 0 + MOVD QWORDS, CTR // set up loop ctr + MOVD $16, IDX16 // 32 bytes at a time + PCALIGN $16 + +backward32loop: + SUB $32, TGT + SUB $32, SRC + LXVD2X (R0)(SRC), VS32 // load 16x2 bytes + LXVD2X (IDX16)(SRC), VS33 + STXVD2X VS32, (R0)(TGT) // store 16x2 bytes + STXVD2X VS33, (IDX16)(TGT) + BDNZ backward32loop + BC 12, 2, LR // beqlr, return if DWORDS == 0 + BR backward24 diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_riscv64.s b/platform/dbops/binaries/go/go/src/runtime/memmove_riscv64.s new file mode 100644 index 0000000000000000000000000000000000000000..e099a64100273883c8cd8728ba7778d9f8d646ec --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_riscv64.s @@ -0,0 +1,319 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// See memmove Go doc for important implementation constraints. + +// void runtime·memmove(void*, void*, uintptr) +TEXT runtime·memmove(SB),NOSPLIT,$-0-24 + // X10 = to + // X11 = from + // X12 = n + BEQ X10, X11, done + BEQZ X12, done + + // If the destination is ahead of the source, start at the end of the + // buffer and go backward. + BGTU X10, X11, backward + + // If less than 8 bytes, do single byte copies. + MOV $8, X9 + BLT X12, X9, f_loop4_check + + // Check alignment - if alignment differs we have to do one byte at a time. + AND $7, X10, X5 + AND $7, X11, X6 + BNE X5, X6, f_loop8_unaligned_check + BEQZ X5, f_loop_check + + // Move one byte at a time until we reach 8 byte alignment. 
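+	// X5 holds to&7 here, so 8-X5 is the number of leading bytes to
+	// copy before the destination (and, by the check above, the source)
+	// is 8-byte aligned; that count is peeled off the length first.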
+ SUB X5, X9, X5 + SUB X5, X12, X12 +f_align: + SUB $1, X5 + MOVB 0(X11), X14 + MOVB X14, 0(X10) + ADD $1, X10 + ADD $1, X11 + BNEZ X5, f_align + +f_loop_check: + MOV $16, X9 + BLT X12, X9, f_loop8_check + MOV $32, X9 + BLT X12, X9, f_loop16_check + MOV $64, X9 + BLT X12, X9, f_loop32_check +f_loop64: + MOV 0(X11), X14 + MOV 8(X11), X15 + MOV 16(X11), X16 + MOV 24(X11), X17 + MOV 32(X11), X18 + MOV 40(X11), X19 + MOV 48(X11), X20 + MOV 56(X11), X21 + MOV X14, 0(X10) + MOV X15, 8(X10) + MOV X16, 16(X10) + MOV X17, 24(X10) + MOV X18, 32(X10) + MOV X19, 40(X10) + MOV X20, 48(X10) + MOV X21, 56(X10) + ADD $64, X10 + ADD $64, X11 + SUB $64, X12 + BGE X12, X9, f_loop64 + BEQZ X12, done + +f_loop32_check: + MOV $32, X9 + BLT X12, X9, f_loop16_check +f_loop32: + MOV 0(X11), X14 + MOV 8(X11), X15 + MOV 16(X11), X16 + MOV 24(X11), X17 + MOV X14, 0(X10) + MOV X15, 8(X10) + MOV X16, 16(X10) + MOV X17, 24(X10) + ADD $32, X10 + ADD $32, X11 + SUB $32, X12 + BGE X12, X9, f_loop32 + BEQZ X12, done + +f_loop16_check: + MOV $16, X9 + BLT X12, X9, f_loop8_check +f_loop16: + MOV 0(X11), X14 + MOV 8(X11), X15 + MOV X14, 0(X10) + MOV X15, 8(X10) + ADD $16, X10 + ADD $16, X11 + SUB $16, X12 + BGE X12, X9, f_loop16 + BEQZ X12, done + +f_loop8_check: + MOV $8, X9 + BLT X12, X9, f_loop4_check +f_loop8: + MOV 0(X11), X14 + MOV X14, 0(X10) + ADD $8, X10 + ADD $8, X11 + SUB $8, X12 + BGE X12, X9, f_loop8 + BEQZ X12, done + JMP f_loop4_check + +f_loop8_unaligned_check: + MOV $8, X9 + BLT X12, X9, f_loop4_check +f_loop8_unaligned: + MOVB 0(X11), X14 + MOVB 1(X11), X15 + MOVB 2(X11), X16 + MOVB 3(X11), X17 + MOVB 4(X11), X18 + MOVB 5(X11), X19 + MOVB 6(X11), X20 + MOVB 7(X11), X21 + MOVB X14, 0(X10) + MOVB X15, 1(X10) + MOVB X16, 2(X10) + MOVB X17, 3(X10) + MOVB X18, 4(X10) + MOVB X19, 5(X10) + MOVB X20, 6(X10) + MOVB X21, 7(X10) + ADD $8, X10 + ADD $8, X11 + SUB $8, X12 + BGE X12, X9, f_loop8_unaligned + +f_loop4_check: + MOV $4, X9 + BLT X12, X9, f_loop1 +f_loop4: + MOVB 0(X11), X14 + MOVB 1(X11), X15 + MOVB 2(X11), X16 + MOVB 3(X11), X17 + MOVB X14, 0(X10) + MOVB X15, 1(X10) + MOVB X16, 2(X10) + MOVB X17, 3(X10) + ADD $4, X10 + ADD $4, X11 + SUB $4, X12 + BGE X12, X9, f_loop4 + +f_loop1: + BEQZ X12, done + MOVB 0(X11), X14 + MOVB X14, 0(X10) + ADD $1, X10 + ADD $1, X11 + SUB $1, X12 + JMP f_loop1 + +backward: + ADD X10, X12, X10 + ADD X11, X12, X11 + + // If less than 8 bytes, do single byte copies. + MOV $8, X9 + BLT X12, X9, b_loop4_check + + // Check alignment - if alignment differs we have to do one byte at a time. + AND $7, X10, X5 + AND $7, X11, X6 + BNE X5, X6, b_loop8_unaligned_check + BEQZ X5, b_loop_check + + // Move one byte at a time until we reach 8 byte alignment. 
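+	// In the backward path X5 holds the end pointer's low three bits,
+	// which is exactly the number of trailing bytes to copy before both
+	// end pointers become 8-byte aligned.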
+ SUB X5, X12, X12 +b_align: + SUB $1, X5 + SUB $1, X10 + SUB $1, X11 + MOVB 0(X11), X14 + MOVB X14, 0(X10) + BNEZ X5, b_align + +b_loop_check: + MOV $16, X9 + BLT X12, X9, b_loop8_check + MOV $32, X9 + BLT X12, X9, b_loop16_check + MOV $64, X9 + BLT X12, X9, b_loop32_check +b_loop64: + SUB $64, X10 + SUB $64, X11 + MOV 0(X11), X14 + MOV 8(X11), X15 + MOV 16(X11), X16 + MOV 24(X11), X17 + MOV 32(X11), X18 + MOV 40(X11), X19 + MOV 48(X11), X20 + MOV 56(X11), X21 + MOV X14, 0(X10) + MOV X15, 8(X10) + MOV X16, 16(X10) + MOV X17, 24(X10) + MOV X18, 32(X10) + MOV X19, 40(X10) + MOV X20, 48(X10) + MOV X21, 56(X10) + SUB $64, X12 + BGE X12, X9, b_loop64 + BEQZ X12, done + +b_loop32_check: + MOV $32, X9 + BLT X12, X9, b_loop16_check +b_loop32: + SUB $32, X10 + SUB $32, X11 + MOV 0(X11), X14 + MOV 8(X11), X15 + MOV 16(X11), X16 + MOV 24(X11), X17 + MOV X14, 0(X10) + MOV X15, 8(X10) + MOV X16, 16(X10) + MOV X17, 24(X10) + SUB $32, X12 + BGE X12, X9, b_loop32 + BEQZ X12, done + +b_loop16_check: + MOV $16, X9 + BLT X12, X9, b_loop8_check +b_loop16: + SUB $16, X10 + SUB $16, X11 + MOV 0(X11), X14 + MOV 8(X11), X15 + MOV X14, 0(X10) + MOV X15, 8(X10) + SUB $16, X12 + BGE X12, X9, b_loop16 + BEQZ X12, done + +b_loop8_check: + MOV $8, X9 + BLT X12, X9, b_loop4_check +b_loop8: + SUB $8, X10 + SUB $8, X11 + MOV 0(X11), X14 + MOV X14, 0(X10) + SUB $8, X12 + BGE X12, X9, b_loop8 + BEQZ X12, done + JMP b_loop4_check + +b_loop8_unaligned_check: + MOV $8, X9 + BLT X12, X9, b_loop4_check +b_loop8_unaligned: + SUB $8, X10 + SUB $8, X11 + MOVB 0(X11), X14 + MOVB 1(X11), X15 + MOVB 2(X11), X16 + MOVB 3(X11), X17 + MOVB 4(X11), X18 + MOVB 5(X11), X19 + MOVB 6(X11), X20 + MOVB 7(X11), X21 + MOVB X14, 0(X10) + MOVB X15, 1(X10) + MOVB X16, 2(X10) + MOVB X17, 3(X10) + MOVB X18, 4(X10) + MOVB X19, 5(X10) + MOVB X20, 6(X10) + MOVB X21, 7(X10) + SUB $8, X12 + BGE X12, X9, b_loop8_unaligned + +b_loop4_check: + MOV $4, X9 + BLT X12, X9, b_loop1 +b_loop4: + SUB $4, X10 + SUB $4, X11 + MOVB 0(X11), X14 + MOVB 1(X11), X15 + MOVB 2(X11), X16 + MOVB 3(X11), X17 + MOVB X14, 0(X10) + MOVB X15, 1(X10) + MOVB X16, 2(X10) + MOVB X17, 3(X10) + SUB $4, X12 + BGE X12, X9, b_loop4 + +b_loop1: + BEQZ X12, done + SUB $1, X10 + SUB $1, X11 + MOVB 0(X11), X14 + MOVB X14, 0(X10) + SUB $1, X12 + JMP b_loop1 + +done: + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_s390x.s b/platform/dbops/binaries/go/go/src/runtime/memmove_s390x.s new file mode 100644 index 0000000000000000000000000000000000000000..f4c2b87d9297d48083370e2b9ed770ad89511158 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_s390x.s @@ -0,0 +1,191 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// See memmove Go doc for important implementation constraints. 
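+// This port leans on the hardware block-move instruction: large
+// forward copies issue MVC in 256-byte slices, and the variable-length
+// tail executes a single templated MVC via EXRL (see the
+// memmove_exrl_mvc stub at the bottom of the file).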
+ +// func memmove(to, from unsafe.Pointer, n uintptr) +TEXT runtime·memmove(SB),NOSPLIT|NOFRAME,$0-24 + MOVD to+0(FP), R6 + MOVD from+8(FP), R4 + MOVD n+16(FP), R5 + + CMPBEQ R6, R4, done + +start: + CMPBLE R5, $3, move0to3 + CMPBLE R5, $7, move4to7 + CMPBLE R5, $11, move8to11 + CMPBLE R5, $15, move12to15 + CMPBNE R5, $16, movemt16 + MOVD 0(R4), R7 + MOVD 8(R4), R8 + MOVD R7, 0(R6) + MOVD R8, 8(R6) + RET + +movemt16: + CMPBGT R4, R6, forwards + ADD R5, R4, R7 + CMPBLE R7, R6, forwards + ADD R5, R6, R8 +backwards: + MOVD -8(R7), R3 + MOVD R3, -8(R8) + MOVD -16(R7), R3 + MOVD R3, -16(R8) + ADD $-16, R5 + ADD $-16, R7 + ADD $-16, R8 + CMP R5, $16 + BGE backwards + BR start + +forwards: + CMPBGT R5, $64, forwards_fast + MOVD 0(R4), R3 + MOVD R3, 0(R6) + MOVD 8(R4), R3 + MOVD R3, 8(R6) + ADD $16, R4 + ADD $16, R6 + ADD $-16, R5 + CMP R5, $16 + BGE forwards + BR start + +forwards_fast: + CMP R5, $256 + BLE forwards_small + MVC $256, 0(R4), 0(R6) + ADD $256, R4 + ADD $256, R6 + ADD $-256, R5 + BR forwards_fast + +forwards_small: + CMPBEQ R5, $0, done + ADD $-1, R5 + EXRL $memmove_exrl_mvc<>(SB), R5 + RET + +move0to3: + CMPBEQ R5, $0, done +move1: + CMPBNE R5, $1, move2 + MOVB 0(R4), R3 + MOVB R3, 0(R6) + RET +move2: + CMPBNE R5, $2, move3 + MOVH 0(R4), R3 + MOVH R3, 0(R6) + RET +move3: + MOVH 0(R4), R3 + MOVB 2(R4), R7 + MOVH R3, 0(R6) + MOVB R7, 2(R6) + RET + +move4to7: + CMPBNE R5, $4, move5 + MOVW 0(R4), R3 + MOVW R3, 0(R6) + RET +move5: + CMPBNE R5, $5, move6 + MOVW 0(R4), R3 + MOVB 4(R4), R7 + MOVW R3, 0(R6) + MOVB R7, 4(R6) + RET +move6: + CMPBNE R5, $6, move7 + MOVW 0(R4), R3 + MOVH 4(R4), R7 + MOVW R3, 0(R6) + MOVH R7, 4(R6) + RET +move7: + MOVW 0(R4), R3 + MOVH 4(R4), R7 + MOVB 6(R4), R8 + MOVW R3, 0(R6) + MOVH R7, 4(R6) + MOVB R8, 6(R6) + RET + +move8to11: + CMPBNE R5, $8, move9 + MOVD 0(R4), R3 + MOVD R3, 0(R6) + RET +move9: + CMPBNE R5, $9, move10 + MOVD 0(R4), R3 + MOVB 8(R4), R7 + MOVD R3, 0(R6) + MOVB R7, 8(R6) + RET +move10: + CMPBNE R5, $10, move11 + MOVD 0(R4), R3 + MOVH 8(R4), R7 + MOVD R3, 0(R6) + MOVH R7, 8(R6) + RET +move11: + MOVD 0(R4), R3 + MOVH 8(R4), R7 + MOVB 10(R4), R8 + MOVD R3, 0(R6) + MOVH R7, 8(R6) + MOVB R8, 10(R6) + RET + +move12to15: + CMPBNE R5, $12, move13 + MOVD 0(R4), R3 + MOVW 8(R4), R7 + MOVD R3, 0(R6) + MOVW R7, 8(R6) + RET +move13: + CMPBNE R5, $13, move14 + MOVD 0(R4), R3 + MOVW 8(R4), R7 + MOVB 12(R4), R8 + MOVD R3, 0(R6) + MOVW R7, 8(R6) + MOVB R8, 12(R6) + RET +move14: + CMPBNE R5, $14, move15 + MOVD 0(R4), R3 + MOVW 8(R4), R7 + MOVH 12(R4), R8 + MOVD R3, 0(R6) + MOVW R7, 8(R6) + MOVH R8, 12(R6) + RET +move15: + MOVD 0(R4), R3 + MOVW 8(R4), R7 + MOVH 12(R4), R8 + MOVB 14(R4), R10 + MOVD R3, 0(R6) + MOVW R7, 8(R6) + MOVH R8, 12(R6) + MOVB R10, 14(R6) +done: + RET + +// DO NOT CALL - target for exrl (execute relative long) instruction. +TEXT memmove_exrl_mvc<>(SB),NOSPLIT|NOFRAME,$0-0 + MVC $1, 0(R4), 0(R6) + MOVD R0, 0(R0) + RET + diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_test.go b/platform/dbops/binaries/go/go/src/runtime/memmove_test.go new file mode 100644 index 0000000000000000000000000000000000000000..587e03d38515a42d45f51357ad9380e1297e0821 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_test.go @@ -0,0 +1,1122 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "crypto/rand" + "encoding/binary" + "fmt" + "internal/race" + "internal/testenv" + . 
"runtime" + "sync/atomic" + "testing" + "unsafe" +) + +func TestMemmove(t *testing.T) { + if *flagQuick { + t.Skip("-quick") + } + t.Parallel() + size := 256 + if testing.Short() { + size = 128 + 16 + } + src := make([]byte, size) + dst := make([]byte, size) + for i := 0; i < size; i++ { + src[i] = byte(128 + (i & 127)) + } + for i := 0; i < size; i++ { + dst[i] = byte(i & 127) + } + for n := 0; n <= size; n++ { + for x := 0; x <= size-n; x++ { // offset in src + for y := 0; y <= size-n; y++ { // offset in dst + copy(dst[y:y+n], src[x:x+n]) + for i := 0; i < y; i++ { + if dst[i] != byte(i&127) { + t.Fatalf("prefix dst[%d] = %d", i, dst[i]) + } + } + for i := y; i < y+n; i++ { + if dst[i] != byte(128+((i-y+x)&127)) { + t.Fatalf("copied dst[%d] = %d", i, dst[i]) + } + dst[i] = byte(i & 127) // reset dst + } + for i := y + n; i < size; i++ { + if dst[i] != byte(i&127) { + t.Fatalf("suffix dst[%d] = %d", i, dst[i]) + } + } + } + } + } +} + +func TestMemmoveAlias(t *testing.T) { + if *flagQuick { + t.Skip("-quick") + } + t.Parallel() + size := 256 + if testing.Short() { + size = 128 + 16 + } + buf := make([]byte, size) + for i := 0; i < size; i++ { + buf[i] = byte(i) + } + for n := 0; n <= size; n++ { + for x := 0; x <= size-n; x++ { // src offset + for y := 0; y <= size-n; y++ { // dst offset + copy(buf[y:y+n], buf[x:x+n]) + for i := 0; i < y; i++ { + if buf[i] != byte(i) { + t.Fatalf("prefix buf[%d] = %d", i, buf[i]) + } + } + for i := y; i < y+n; i++ { + if buf[i] != byte(i-y+x) { + t.Fatalf("copied buf[%d] = %d", i, buf[i]) + } + buf[i] = byte(i) // reset buf + } + for i := y + n; i < size; i++ { + if buf[i] != byte(i) { + t.Fatalf("suffix buf[%d] = %d", i, buf[i]) + } + } + } + } + } +} + +func TestMemmoveLarge0x180000(t *testing.T) { + if testing.Short() && testenv.Builder() == "" { + t.Skip("-short") + } + + t.Parallel() + if race.Enabled { + t.Skip("skipping large memmove test under race detector") + } + testSize(t, 0x180000) +} + +func TestMemmoveOverlapLarge0x120000(t *testing.T) { + if testing.Short() && testenv.Builder() == "" { + t.Skip("-short") + } + + t.Parallel() + if race.Enabled { + t.Skip("skipping large memmove test under race detector") + } + testOverlap(t, 0x120000) +} + +func testSize(t *testing.T, size int) { + src := make([]byte, size) + dst := make([]byte, size) + _, _ = rand.Read(src) + _, _ = rand.Read(dst) + + ref := make([]byte, size) + copyref(ref, dst) + + for n := size - 50; n > 1; n >>= 1 { + for x := 0; x <= size-n; x = x*7 + 1 { // offset in src + for y := 0; y <= size-n; y = y*9 + 1 { // offset in dst + copy(dst[y:y+n], src[x:x+n]) + copyref(ref[y:y+n], src[x:x+n]) + p := cmpb(dst, ref) + if p >= 0 { + t.Fatalf("Copy failed, copying from src[%d:%d] to dst[%d:%d].\nOffset %d is different, %v != %v", x, x+n, y, y+n, p, dst[p], ref[p]) + } + } + } + } +} + +func testOverlap(t *testing.T, size int) { + src := make([]byte, size) + test := make([]byte, size) + ref := make([]byte, size) + _, _ = rand.Read(src) + + for n := size - 50; n > 1; n >>= 1 { + for x := 0; x <= size-n; x = x*7 + 1 { // offset in src + for y := 0; y <= size-n; y = y*9 + 1 { // offset in dst + // Reset input + copyref(test, src) + copyref(ref, src) + copy(test[y:y+n], test[x:x+n]) + if y <= x { + copyref(ref[y:y+n], ref[x:x+n]) + } else { + copybw(ref[y:y+n], ref[x:x+n]) + } + p := cmpb(test, ref) + if p >= 0 { + t.Fatalf("Copy failed, copying from src[%d:%d] to dst[%d:%d].\nOffset %d is different, %v != %v", x, x+n, y, y+n, p, test[p], ref[p]) + } + } + } + } + +} + +// Forward copy. 
+func copyref(dst, src []byte) { + for i, v := range src { + dst[i] = v + } +} + +// Backwards copy +func copybw(dst, src []byte) { + if len(src) == 0 { + return + } + for i := len(src) - 1; i >= 0; i-- { + dst[i] = src[i] + } +} + +// Returns offset of difference +func matchLen(a, b []byte, max int) int { + a = a[:max] + b = b[:max] + for i, av := range a { + if b[i] != av { + return i + } + } + return max +} + +func cmpb(a, b []byte) int { + l := matchLen(a, b, len(a)) + if l == len(a) { + return -1 + } + return l +} + +// Ensure that memmove writes pointers atomically, so the GC won't +// observe a partially updated pointer. +func TestMemmoveAtomicity(t *testing.T) { + if race.Enabled { + t.Skip("skip under the race detector -- this test is intentionally racy") + } + + var x int + + for _, backward := range []bool{true, false} { + for _, n := range []int{3, 4, 5, 6, 7, 8, 9, 10, 15, 25, 49} { + n := n + + // test copying [N]*int. + sz := uintptr(n * PtrSize) + name := fmt.Sprint(sz) + if backward { + name += "-backward" + } else { + name += "-forward" + } + t.Run(name, func(t *testing.T) { + // Use overlapping src and dst to force forward/backward copy. + var s [100]*int + src := s[n-1 : 2*n-1] + dst := s[:n] + if backward { + src, dst = dst, src + } + for i := range src { + src[i] = &x + } + for i := range dst { + dst[i] = nil + } + + var ready atomic.Uint32 + go func() { + sp := unsafe.Pointer(&src[0]) + dp := unsafe.Pointer(&dst[0]) + ready.Store(1) + for i := 0; i < 10000; i++ { + Memmove(dp, sp, sz) + MemclrNoHeapPointers(dp, sz) + } + ready.Store(2) + }() + + for ready.Load() == 0 { + Gosched() + } + + for ready.Load() != 2 { + for i := range dst { + p := dst[i] + if p != nil && p != &x { + t.Fatalf("got partially updated pointer %p at dst[%d], want either nil or %p", p, i, &x) + } + } + } + }) + } + } +} + +func benchmarkSizes(b *testing.B, sizes []int, fn func(b *testing.B, n int)) { + for _, n := range sizes { + b.Run(fmt.Sprint(n), func(b *testing.B) { + b.SetBytes(int64(n)) + fn(b, n) + }) + } +} + +var bufSizes = []int{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 32, 64, 128, 256, 512, 1024, 2048, 4096, +} +var bufSizesOverlap = []int{ + 32, 64, 128, 256, 512, 1024, 2048, 4096, +} + +func BenchmarkMemmove(b *testing.B) { + benchmarkSizes(b, bufSizes, func(b *testing.B, n int) { + x := make([]byte, n) + y := make([]byte, n) + for i := 0; i < b.N; i++ { + copy(x, y) + } + }) +} + +func BenchmarkMemmoveOverlap(b *testing.B) { + benchmarkSizes(b, bufSizesOverlap, func(b *testing.B, n int) { + x := make([]byte, n+16) + for i := 0; i < b.N; i++ { + copy(x[16:n+16], x[:n]) + } + }) +} + +func BenchmarkMemmoveUnalignedDst(b *testing.B) { + benchmarkSizes(b, bufSizes, func(b *testing.B, n int) { + x := make([]byte, n+1) + y := make([]byte, n) + for i := 0; i < b.N; i++ { + copy(x[1:], y) + } + }) +} + +func BenchmarkMemmoveUnalignedDstOverlap(b *testing.B) { + benchmarkSizes(b, bufSizesOverlap, func(b *testing.B, n int) { + x := make([]byte, n+16) + for i := 0; i < b.N; i++ { + copy(x[16:n+16], x[1:n+1]) + } + }) +} + +func BenchmarkMemmoveUnalignedSrc(b *testing.B) { + benchmarkSizes(b, bufSizes, func(b *testing.B, n int) { + x := make([]byte, n) + y := make([]byte, n+1) + for i := 0; i < b.N; i++ { + copy(x, y[1:]) + } + }) +} + +func BenchmarkMemmoveUnalignedSrcDst(b *testing.B) { + for _, n := range []int{16, 64, 256, 4096, 65536} { + buf := make([]byte, (n+8)*2) + x := buf[:len(buf)/2] + y := buf[len(buf)/2:] + for _, off := range []int{0, 1, 4, 7} { + 
b.Run(fmt.Sprint("f_", n, off), func(b *testing.B) { + b.SetBytes(int64(n)) + for i := 0; i < b.N; i++ { + copy(x[off:n+off], y[off:n+off]) + } + }) + + b.Run(fmt.Sprint("b_", n, off), func(b *testing.B) { + b.SetBytes(int64(n)) + for i := 0; i < b.N; i++ { + copy(y[off:n+off], x[off:n+off]) + } + }) + } + } +} + +func BenchmarkMemmoveUnalignedSrcOverlap(b *testing.B) { + benchmarkSizes(b, bufSizesOverlap, func(b *testing.B, n int) { + x := make([]byte, n+1) + for i := 0; i < b.N; i++ { + copy(x[1:n+1], x[:n]) + } + }) +} + +func TestMemclr(t *testing.T) { + size := 512 + if testing.Short() { + size = 128 + 16 + } + mem := make([]byte, size) + for i := 0; i < size; i++ { + mem[i] = 0xee + } + for n := 0; n < size; n++ { + for x := 0; x <= size-n; x++ { // offset in mem + MemclrBytes(mem[x : x+n]) + for i := 0; i < x; i++ { + if mem[i] != 0xee { + t.Fatalf("overwrite prefix mem[%d] = %d", i, mem[i]) + } + } + for i := x; i < x+n; i++ { + if mem[i] != 0 { + t.Fatalf("failed clear mem[%d] = %d", i, mem[i]) + } + mem[i] = 0xee + } + for i := x + n; i < size; i++ { + if mem[i] != 0xee { + t.Fatalf("overwrite suffix mem[%d] = %d", i, mem[i]) + } + } + } + } +} + +func BenchmarkMemclr(b *testing.B) { + for _, n := range []int{5, 16, 64, 256, 4096, 65536} { + x := make([]byte, n) + b.Run(fmt.Sprint(n), func(b *testing.B) { + b.SetBytes(int64(n)) + for i := 0; i < b.N; i++ { + MemclrBytes(x) + } + }) + } + for _, m := range []int{1, 4, 8, 16, 64} { + x := make([]byte, m<<20) + b.Run(fmt.Sprint(m, "M"), func(b *testing.B) { + b.SetBytes(int64(m << 20)) + for i := 0; i < b.N; i++ { + MemclrBytes(x) + } + }) + } +} + +func BenchmarkMemclrUnaligned(b *testing.B) { + for _, off := range []int{0, 1, 4, 7} { + for _, n := range []int{5, 16, 64, 256, 4096, 65536} { + x := make([]byte, n+off) + b.Run(fmt.Sprint(off, n), func(b *testing.B) { + b.SetBytes(int64(n)) + for i := 0; i < b.N; i++ { + MemclrBytes(x[off:]) + } + }) + } + } + + for _, off := range []int{0, 1, 4, 7} { + for _, m := range []int{1, 4, 8, 16, 64} { + x := make([]byte, (m<<20)+off) + b.Run(fmt.Sprint(off, m, "M"), func(b *testing.B) { + b.SetBytes(int64(m << 20)) + for i := 0; i < b.N; i++ { + MemclrBytes(x[off:]) + } + }) + } + } +} + +func BenchmarkGoMemclr(b *testing.B) { + benchmarkSizes(b, []int{5, 16, 64, 256}, func(b *testing.B, n int) { + x := make([]byte, n) + for i := 0; i < b.N; i++ { + for j := range x { + x[j] = 0 + } + } + }) +} + +func BenchmarkMemclrRange(b *testing.B) { + type RunData struct { + data []int + } + + benchSizes := []RunData{ + {[]int{1043, 1078, 1894, 1582, 1044, 1165, 1467, 1100, 1919, 1562, 1932, 1645, + 1412, 1038, 1576, 1200, 1029, 1336, 1095, 1494, 1350, 1025, 1502, 1548, 1316, 1296, + 1868, 1639, 1546, 1626, 1642, 1308, 1726, 1665, 1678, 1187, 1515, 1598, 1353, 1237, + 1977, 1452, 2012, 1914, 1514, 1136, 1975, 1618, 1536, 1695, 1600, 1733, 1392, 1099, + 1358, 1996, 1224, 1783, 1197, 1838, 1460, 1556, 1554, 2020}}, // 1kb-2kb + {[]int{3964, 5139, 6573, 7775, 6553, 2413, 3466, 5394, 2469, 7336, 7091, 6745, + 4028, 5643, 6164, 3475, 4138, 6908, 7559, 3335, 5660, 4122, 3945, 2082, 7564, 6584, + 5111, 2288, 6789, 2797, 4928, 7986, 5163, 5447, 2999, 4968, 3174, 3202, 7908, 8137, + 4735, 6161, 4646, 7592, 3083, 5329, 3687, 2754, 3599, 7231, 6455, 2549, 8063, 2189, + 7121, 5048, 4277, 6626, 6306, 2815, 7473, 3963, 7549, 7255}}, // 2kb-8kb + {[]int{16304, 15936, 15760, 4736, 9136, 11184, 10160, 5952, 14560, 15744, + 6624, 5872, 13088, 14656, 14192, 10304, 4112, 10384, 9344, 4496, 11392, 7024, + 5200, 10064, 
14784, 5808, 13504, 10480, 8512, 4896, 13264, 5600}}, // 4kb-16kb + {[]int{164576, 233136, 220224, 183280, 214112, 217248, 228560, 201728}}, // 128kb-256kb + } + + for _, t := range benchSizes { + total := 0 + minLen := 0 + maxLen := 0 + + for _, clrLen := range t.data { + maxLen = max(maxLen, clrLen) + if clrLen < minLen || minLen == 0 { + minLen = clrLen + } + total += clrLen + } + buffer := make([]byte, maxLen) + + text := "" + if minLen >= (1 << 20) { + text = fmt.Sprint(minLen>>20, "M ", (maxLen+(1<<20-1))>>20, "M") + } else if minLen >= (1 << 10) { + text = fmt.Sprint(minLen>>10, "K ", (maxLen+(1<<10-1))>>10, "K") + } else { + text = fmt.Sprint(minLen, " ", maxLen) + } + b.Run(text, func(b *testing.B) { + b.SetBytes(int64(total)) + for i := 0; i < b.N; i++ { + for _, clrLen := range t.data { + MemclrBytes(buffer[:clrLen]) + } + } + }) + } +} + +func BenchmarkClearFat7(b *testing.B) { + p := new([7]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [7]byte{} + } +} + +func BenchmarkClearFat8(b *testing.B) { + p := new([8 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [8 / 4]uint32{} + } +} + +func BenchmarkClearFat11(b *testing.B) { + p := new([11]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [11]byte{} + } +} + +func BenchmarkClearFat12(b *testing.B) { + p := new([12 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [12 / 4]uint32{} + } +} + +func BenchmarkClearFat13(b *testing.B) { + p := new([13]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [13]byte{} + } +} + +func BenchmarkClearFat14(b *testing.B) { + p := new([14]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [14]byte{} + } +} + +func BenchmarkClearFat15(b *testing.B) { + p := new([15]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [15]byte{} + } +} + +func BenchmarkClearFat16(b *testing.B) { + p := new([16 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [16 / 4]uint32{} + } +} + +func BenchmarkClearFat24(b *testing.B) { + p := new([24 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [24 / 4]uint32{} + } +} + +func BenchmarkClearFat32(b *testing.B) { + p := new([32 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [32 / 4]uint32{} + } +} + +func BenchmarkClearFat40(b *testing.B) { + p := new([40 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [40 / 4]uint32{} + } +} + +func BenchmarkClearFat48(b *testing.B) { + p := new([48 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [48 / 4]uint32{} + } +} + +func BenchmarkClearFat56(b *testing.B) { + p := new([56 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [56 / 4]uint32{} + } +} + +func BenchmarkClearFat64(b *testing.B) { + p := new([64 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [64 / 4]uint32{} + } +} + +func BenchmarkClearFat72(b *testing.B) { + p := new([72 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [72 / 4]uint32{} + } +} + +func BenchmarkClearFat128(b *testing.B) { + p := new([128 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [128 / 4]uint32{} + } +} + +func BenchmarkClearFat256(b *testing.B) { + p := new([256 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [256 / 4]uint32{} + } +} + +func BenchmarkClearFat512(b 
*testing.B) { + p := new([512 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [512 / 4]uint32{} + } +} + +func BenchmarkClearFat1024(b *testing.B) { + p := new([1024 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [1024 / 4]uint32{} + } +} + +func BenchmarkClearFat1032(b *testing.B) { + p := new([1032 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [1032 / 4]uint32{} + } +} + +func BenchmarkClearFat1040(b *testing.B) { + p := new([1040 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [1040 / 4]uint32{} + } +} + +func BenchmarkCopyFat7(b *testing.B) { + var x [7]byte + p := new([7]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat8(b *testing.B) { + var x [8 / 4]uint32 + p := new([8 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat11(b *testing.B) { + var x [11]byte + p := new([11]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat12(b *testing.B) { + var x [12 / 4]uint32 + p := new([12 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat13(b *testing.B) { + var x [13]byte + p := new([13]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat14(b *testing.B) { + var x [14]byte + p := new([14]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat15(b *testing.B) { + var x [15]byte + p := new([15]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat16(b *testing.B) { + var x [16 / 4]uint32 + p := new([16 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat24(b *testing.B) { + var x [24 / 4]uint32 + p := new([24 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat32(b *testing.B) { + var x [32 / 4]uint32 + p := new([32 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat64(b *testing.B) { + var x [64 / 4]uint32 + p := new([64 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat72(b *testing.B) { + var x [72 / 4]uint32 + p := new([72 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat128(b *testing.B) { + var x [128 / 4]uint32 + p := new([128 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat256(b *testing.B) { + var x [256 / 4]uint32 + p := new([256 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat512(b *testing.B) { + var x [512 / 4]uint32 + p := new([512 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat520(b *testing.B) { + var x [520 / 4]uint32 + p := new([520 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat1024(b *testing.B) { + var x [1024 / 4]uint32 + p := new([1024 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat1032(b *testing.B) { + var x [1032 / 4]uint32 + p := new([1032 / 4]uint32) + Escape(p) + b.ResetTimer() + for 
i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat1040(b *testing.B) { + var x [1040 / 4]uint32 + p := new([1040 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +// BenchmarkIssue18740 ensures that memmove uses 4 and 8 byte load/store to move 4 and 8 bytes. +// It used to do 2 2-byte load/stores, which leads to a pipeline stall +// when we try to read the result with one 4-byte load. +func BenchmarkIssue18740(b *testing.B) { + benchmarks := []struct { + name string + nbyte int + f func([]byte) uint64 + }{ + {"2byte", 2, func(buf []byte) uint64 { return uint64(binary.LittleEndian.Uint16(buf)) }}, + {"4byte", 4, func(buf []byte) uint64 { return uint64(binary.LittleEndian.Uint32(buf)) }}, + {"8byte", 8, func(buf []byte) uint64 { return binary.LittleEndian.Uint64(buf) }}, + } + + var g [4096]byte + for _, bm := range benchmarks { + buf := make([]byte, bm.nbyte) + b.Run(bm.name, func(b *testing.B) { + for j := 0; j < b.N; j++ { + for i := 0; i < 4096; i += bm.nbyte { + copy(buf[:], g[i:]) + sink += bm.f(buf[:]) + } + } + }) + } +} + +var memclrSink []int8 + +func BenchmarkMemclrKnownSize1(b *testing.B) { + var x [1]int8 + + b.SetBytes(1) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} +func BenchmarkMemclrKnownSize2(b *testing.B) { + var x [2]int8 + + b.SetBytes(2) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} +func BenchmarkMemclrKnownSize4(b *testing.B) { + var x [4]int8 + + b.SetBytes(4) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} +func BenchmarkMemclrKnownSize8(b *testing.B) { + var x [8]int8 + + b.SetBytes(8) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} +func BenchmarkMemclrKnownSize16(b *testing.B) { + var x [16]int8 + + b.SetBytes(16) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} +func BenchmarkMemclrKnownSize32(b *testing.B) { + var x [32]int8 + + b.SetBytes(32) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} +func BenchmarkMemclrKnownSize64(b *testing.B) { + var x [64]int8 + + b.SetBytes(64) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} +func BenchmarkMemclrKnownSize112(b *testing.B) { + var x [112]int8 + + b.SetBytes(112) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} + +func BenchmarkMemclrKnownSize128(b *testing.B) { + var x [128]int8 + + b.SetBytes(128) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} + +func BenchmarkMemclrKnownSize192(b *testing.B) { + var x [192]int8 + + b.SetBytes(192) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} + +func BenchmarkMemclrKnownSize248(b *testing.B) { + var x [248]int8 + + b.SetBytes(248) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} + +func BenchmarkMemclrKnownSize256(b *testing.B) { + var x [256]int8 + + b.SetBytes(256) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} +func BenchmarkMemclrKnownSize512(b *testing.B) { + var x [512]int8 + + b.SetBytes(512) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} +func BenchmarkMemclrKnownSize1024(b *testing.B) { + var x [1024]int8 + + b.SetBytes(1024) 
+ for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} +func BenchmarkMemclrKnownSize4096(b *testing.B) { + var x [4096]int8 + + b.SetBytes(4096) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} +func BenchmarkMemclrKnownSize512KiB(b *testing.B) { + var x [524288]int8 + + b.SetBytes(524288) + for i := 0; i < b.N; i++ { + for a := range x { + x[a] = 0 + } + } + + memclrSink = x[:] +} diff --git a/platform/dbops/binaries/go/go/src/runtime/memmove_wasm.s b/platform/dbops/binaries/go/go/src/runtime/memmove_wasm.s new file mode 100644 index 0000000000000000000000000000000000000000..1be8487a9906312e623090bdcb528a537b65d1f9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/memmove_wasm.s @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// See memmove Go doc for important implementation constraints. + +// func memmove(to, from unsafe.Pointer, n uintptr) +TEXT runtime·memmove(SB), NOSPLIT, $0-24 + MOVD to+0(FP), R0 + MOVD from+8(FP), R1 + MOVD n+16(FP), R2 + + Get R0 + I32WrapI64 + Get R1 + I32WrapI64 + Get R2 + I32WrapI64 + MemoryCopy + RET diff --git a/platform/dbops/binaries/go/go/src/runtime/metrics.go b/platform/dbops/binaries/go/go/src/runtime/metrics.go new file mode 100644 index 0000000000000000000000000000000000000000..f97a3804ab32201c895ea5d45a180067b6a7b305 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/metrics.go @@ -0,0 +1,871 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// Metrics implementation exported to runtime/metrics. + +import ( + "internal/godebugs" + "unsafe" +) + +var ( + // metrics is a map of runtime/metrics keys to data used by the runtime + // to sample each metric's value. metricsInit indicates it has been + // initialized. + // + // These fields are protected by metricsSema which should be + // locked/unlocked with metricsLock() / metricsUnlock(). + metricsSema uint32 = 1 + metricsInit bool + metrics map[string]metricData + + sizeClassBuckets []float64 + timeHistBuckets []float64 +) + +type metricData struct { + // deps is the set of runtime statistics that this metric + // depends on. Before compute is called, the statAggregate + // which will be passed must ensure() these dependencies. + deps statDepSet + + // compute is a function that populates a metricValue + // given a populated statAggregate structure. + compute func(in *statAggregate, out *metricValue) +} + +func metricsLock() { + // Acquire the metricsSema but with handoff. Operations are typically + // expensive enough that queueing up goroutines and handing off between + // them will be noticeably better-behaved. + semacquire1(&metricsSema, true, 0, 0, waitReasonSemacquire) + if raceenabled { + raceacquire(unsafe.Pointer(&metricsSema)) + } +} + +func metricsUnlock() { + if raceenabled { + racerelease(unsafe.Pointer(&metricsSema)) + } + semrelease(&metricsSema) +} + +// initMetrics initializes the metrics map if it hasn't been yet. +// +// metricsSema must be held. 
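+// (Callers acquire it through metricsLock, which takes metricsSema
+// with handoff; see above.)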
+func initMetrics() { + if metricsInit { + return + } + + sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1) + // Skip size class 0 which is a stand-in for large objects, but large + // objects are tracked separately (and they actually get placed in + // the last bucket, not the first). + sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size. + for i := 1; i < _NumSizeClasses; i++ { + // Size classes have an inclusive upper-bound + // and exclusive lower bound (e.g. 48-byte size class is + // (32, 48]) whereas we want and inclusive lower-bound + // and exclusive upper-bound (e.g. 48-byte size class is + // [33, 49)). We can achieve this by shifting all bucket + // boundaries up by 1. + // + // Also, a float64 can precisely represent integers with + // value up to 2^53 and size classes are relatively small + // (nowhere near 2^48 even) so this will give us exact + // boundaries. + sizeClassBuckets[i] = float64(class_to_size[i] + 1) + } + sizeClassBuckets = append(sizeClassBuckets, float64Inf()) + + timeHistBuckets = timeHistogramMetricsBuckets() + metrics = map[string]metricData{ + "/cgo/go-to-c-calls:calls": { + compute: func(_ *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = uint64(NumCgoCall()) + }, + }, + "/cpu/classes/gc/mark/assist:cpu-seconds": { + deps: makeStatDepSet(cpuStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindFloat64 + out.scalar = float64bits(nsToSec(in.cpuStats.gcAssistTime)) + }, + }, + "/cpu/classes/gc/mark/dedicated:cpu-seconds": { + deps: makeStatDepSet(cpuStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindFloat64 + out.scalar = float64bits(nsToSec(in.cpuStats.gcDedicatedTime)) + }, + }, + "/cpu/classes/gc/mark/idle:cpu-seconds": { + deps: makeStatDepSet(cpuStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindFloat64 + out.scalar = float64bits(nsToSec(in.cpuStats.gcIdleTime)) + }, + }, + "/cpu/classes/gc/pause:cpu-seconds": { + deps: makeStatDepSet(cpuStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindFloat64 + out.scalar = float64bits(nsToSec(in.cpuStats.gcPauseTime)) + }, + }, + "/cpu/classes/gc/total:cpu-seconds": { + deps: makeStatDepSet(cpuStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindFloat64 + out.scalar = float64bits(nsToSec(in.cpuStats.gcTotalTime)) + }, + }, + "/cpu/classes/idle:cpu-seconds": { + deps: makeStatDepSet(cpuStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindFloat64 + out.scalar = float64bits(nsToSec(in.cpuStats.idleTime)) + }, + }, + "/cpu/classes/scavenge/assist:cpu-seconds": { + deps: makeStatDepSet(cpuStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindFloat64 + out.scalar = float64bits(nsToSec(in.cpuStats.scavengeAssistTime)) + }, + }, + "/cpu/classes/scavenge/background:cpu-seconds": { + deps: makeStatDepSet(cpuStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindFloat64 + out.scalar = float64bits(nsToSec(in.cpuStats.scavengeBgTime)) + }, + }, + "/cpu/classes/scavenge/total:cpu-seconds": { + deps: makeStatDepSet(cpuStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindFloat64 + out.scalar = float64bits(nsToSec(in.cpuStats.scavengeTotalTime)) + }, + }, + "/cpu/classes/total:cpu-seconds": { + deps: makeStatDepSet(cpuStatsDep), + compute: 
func(in *statAggregate, out *metricValue) { + out.kind = metricKindFloat64 + out.scalar = float64bits(nsToSec(in.cpuStats.totalTime)) + }, + }, + "/cpu/classes/user:cpu-seconds": { + deps: makeStatDepSet(cpuStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindFloat64 + out.scalar = float64bits(nsToSec(in.cpuStats.userTime)) + }, + }, + "/gc/cycles/automatic:gc-cycles": { + deps: makeStatDepSet(sysStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.sysStats.gcCyclesDone - in.sysStats.gcCyclesForced + }, + }, + "/gc/cycles/forced:gc-cycles": { + deps: makeStatDepSet(sysStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.sysStats.gcCyclesForced + }, + }, + "/gc/cycles/total:gc-cycles": { + deps: makeStatDepSet(sysStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.sysStats.gcCyclesDone + }, + }, + "/gc/scan/globals:bytes": { + deps: makeStatDepSet(gcStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.gcStats.globalsScan + }, + }, + "/gc/scan/heap:bytes": { + deps: makeStatDepSet(gcStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.gcStats.heapScan + }, + }, + "/gc/scan/stack:bytes": { + deps: makeStatDepSet(gcStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.gcStats.stackScan + }, + }, + "/gc/scan/total:bytes": { + deps: makeStatDepSet(gcStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.gcStats.totalScan + }, + }, + "/gc/heap/allocs-by-size:bytes": { + deps: makeStatDepSet(heapStatsDep), + compute: func(in *statAggregate, out *metricValue) { + hist := out.float64HistOrInit(sizeClassBuckets) + hist.counts[len(hist.counts)-1] = in.heapStats.largeAllocCount + // Cut off the first index which is ostensibly for size class 0, + // but large objects are tracked separately so it's actually unused. + for i, count := range in.heapStats.smallAllocCount[1:] { + hist.counts[i] = count + } + }, + }, + "/gc/heap/allocs:bytes": { + deps: makeStatDepSet(heapStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.heapStats.totalAllocated + }, + }, + "/gc/heap/allocs:objects": { + deps: makeStatDepSet(heapStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.heapStats.totalAllocs + }, + }, + "/gc/heap/frees-by-size:bytes": { + deps: makeStatDepSet(heapStatsDep), + compute: func(in *statAggregate, out *metricValue) { + hist := out.float64HistOrInit(sizeClassBuckets) + hist.counts[len(hist.counts)-1] = in.heapStats.largeFreeCount + // Cut off the first index which is ostensibly for size class 0, + // but large objects are tracked separately so it's actually unused. 
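+				// (counts[0] below therefore corresponds to size class 1; the
+				// final bucket was already filled in with largeFreeCount above.)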
+ for i, count := range in.heapStats.smallFreeCount[1:] { + hist.counts[i] = count + } + }, + }, + "/gc/heap/frees:bytes": { + deps: makeStatDepSet(heapStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.heapStats.totalFreed + }, + }, + "/gc/heap/frees:objects": { + deps: makeStatDepSet(heapStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.heapStats.totalFrees + }, + }, + "/gc/heap/goal:bytes": { + deps: makeStatDepSet(sysStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.sysStats.heapGoal + }, + }, + "/gc/gomemlimit:bytes": { + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = uint64(gcController.memoryLimit.Load()) + }, + }, + "/gc/gogc:percent": { + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = uint64(gcController.gcPercent.Load()) + }, + }, + "/gc/heap/live:bytes": { + deps: makeStatDepSet(heapStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = gcController.heapMarked + }, + }, + "/gc/heap/objects:objects": { + deps: makeStatDepSet(heapStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.heapStats.numObjects + }, + }, + "/gc/heap/tiny/allocs:objects": { + deps: makeStatDepSet(heapStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.heapStats.tinyAllocCount + }, + }, + "/gc/limiter/last-enabled:gc-cycle": { + compute: func(_ *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load()) + }, + }, + "/gc/pauses:seconds": { + compute: func(_ *statAggregate, out *metricValue) { + // N.B. this is identical to /sched/pauses/total/gc:seconds. 
+ sched.stwTotalTimeGC.write(out) + }, + }, + "/gc/stack/starting-size:bytes": { + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = uint64(startingStackSize) + }, + }, + "/memory/classes/heap/free:bytes": { + deps: makeStatDepSet(heapStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = uint64(in.heapStats.committed - in.heapStats.inHeap - + in.heapStats.inStacks - in.heapStats.inWorkBufs - + in.heapStats.inPtrScalarBits) + }, + }, + "/memory/classes/heap/objects:bytes": { + deps: makeStatDepSet(heapStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.heapStats.inObjects + }, + }, + "/memory/classes/heap/released:bytes": { + deps: makeStatDepSet(heapStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = uint64(in.heapStats.released) + }, + }, + "/memory/classes/heap/stacks:bytes": { + deps: makeStatDepSet(heapStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = uint64(in.heapStats.inStacks) + }, + }, + "/memory/classes/heap/unused:bytes": { + deps: makeStatDepSet(heapStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = uint64(in.heapStats.inHeap) - in.heapStats.inObjects + }, + }, + "/memory/classes/metadata/mcache/free:bytes": { + deps: makeStatDepSet(sysStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.sysStats.mCacheSys - in.sysStats.mCacheInUse + }, + }, + "/memory/classes/metadata/mcache/inuse:bytes": { + deps: makeStatDepSet(sysStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.sysStats.mCacheInUse + }, + }, + "/memory/classes/metadata/mspan/free:bytes": { + deps: makeStatDepSet(sysStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.sysStats.mSpanSys - in.sysStats.mSpanInUse + }, + }, + "/memory/classes/metadata/mspan/inuse:bytes": { + deps: makeStatDepSet(sysStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.sysStats.mSpanInUse + }, + }, + "/memory/classes/metadata/other:bytes": { + deps: makeStatDepSet(heapStatsDep, sysStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = uint64(in.heapStats.inWorkBufs+in.heapStats.inPtrScalarBits) + in.sysStats.gcMiscSys + }, + }, + "/memory/classes/os-stacks:bytes": { + deps: makeStatDepSet(sysStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.sysStats.stacksSys + }, + }, + "/memory/classes/other:bytes": { + deps: makeStatDepSet(sysStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.sysStats.otherSys + }, + }, + "/memory/classes/profiling/buckets:bytes": { + deps: makeStatDepSet(sysStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.sysStats.buckHashSys + }, + }, + "/memory/classes/total:bytes": { + deps: makeStatDepSet(heapStatsDep, sysStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = uint64(in.heapStats.committed+in.heapStats.released) + + in.sysStats.stacksSys + 
in.sysStats.mSpanSys + + in.sysStats.mCacheSys + in.sysStats.buckHashSys + + in.sysStats.gcMiscSys + in.sysStats.otherSys + }, + }, + "/sched/gomaxprocs:threads": { + compute: func(_ *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = uint64(gomaxprocs) + }, + }, + "/sched/goroutines:goroutines": { + compute: func(_ *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = uint64(gcount()) + }, + }, + "/sched/latencies:seconds": { + compute: func(_ *statAggregate, out *metricValue) { + sched.timeToRun.write(out) + }, + }, + "/sched/pauses/stopping/gc:seconds": { + compute: func(_ *statAggregate, out *metricValue) { + sched.stwStoppingTimeGC.write(out) + }, + }, + "/sched/pauses/stopping/other:seconds": { + compute: func(_ *statAggregate, out *metricValue) { + sched.stwStoppingTimeOther.write(out) + }, + }, + "/sched/pauses/total/gc:seconds": { + compute: func(_ *statAggregate, out *metricValue) { + sched.stwTotalTimeGC.write(out) + }, + }, + "/sched/pauses/total/other:seconds": { + compute: func(_ *statAggregate, out *metricValue) { + sched.stwTotalTimeOther.write(out) + }, + }, + "/sync/mutex/wait/total:seconds": { + compute: func(_ *statAggregate, out *metricValue) { + out.kind = metricKindFloat64 + out.scalar = float64bits(nsToSec(totalMutexWaitTimeNanos())) + }, + }, + } + + for _, info := range godebugs.All { + if !info.Opaque { + metrics["/godebug/non-default-behavior/"+info.Name+":events"] = metricData{compute: compute0} + } + } + + metricsInit = true +} + +func compute0(_ *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = 0 +} + +type metricReader func() uint64 + +func (f metricReader) compute(_ *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = f() +} + +//go:linkname godebug_registerMetric internal/godebug.registerMetric +func godebug_registerMetric(name string, read func() uint64) { + metricsLock() + initMetrics() + d, ok := metrics[name] + if !ok { + throw("runtime: unexpected metric registration for " + name) + } + d.compute = metricReader(read).compute + metrics[name] = d + metricsUnlock() +} + +// statDep is a dependency on a group of statistics +// that a metric might have. +type statDep uint + +const ( + heapStatsDep statDep = iota // corresponds to heapStatsAggregate + sysStatsDep // corresponds to sysStatsAggregate + cpuStatsDep // corresponds to cpuStatsAggregate + gcStatsDep // corresponds to gcStatsAggregate + numStatsDeps +) + +// statDepSet represents a set of statDeps. +// +// Under the hood, it's a bitmap. +type statDepSet [1]uint64 + +// makeStatDepSet creates a new statDepSet from a list of statDeps. +func makeStatDepSet(deps ...statDep) statDepSet { + var s statDepSet + for _, d := range deps { + s[d/64] |= 1 << (d % 64) + } + return s +} + +// difference returns set difference of s from b as a new set. +func (s statDepSet) difference(b statDepSet) statDepSet { + var c statDepSet + for i := range s { + c[i] = s[i] &^ b[i] + } + return c +} + +// union returns the union of the two sets as a new set. +func (s statDepSet) union(b statDepSet) statDepSet { + var c statDepSet + for i := range s { + c[i] = s[i] | b[i] + } + return c +} + +// empty returns true if there are no dependencies in the set. +func (s *statDepSet) empty() bool { + for _, c := range s { + if c != 0 { + return false + } + } + return true +} + +// has returns true if the set contains a given statDep. 
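+//
+// For example (a sketch of the bitmap layout set up by makeStatDepSet
+// above): with d = cpuStatsDep (value 2), s[2/64] & (1 << (2 % 64))
+// tests bit 2 of the single uint64 word backing the set.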
+func (s *statDepSet) has(d statDep) bool {
+	return s[d/64]&(1<<(d%64)) != 0
+}
+
+// heapStatsAggregate represents memory stats obtained from the
+// runtime. This set of stats is grouped together because they
+// depend on each other in some way to make sense of the runtime's
+// current heap memory use. They're also sharded across Ps, so it
+// makes sense to grab them all at once.
+type heapStatsAggregate struct {
+	heapStatsDelta
+
+	// Derived from values in heapStatsDelta.
+
+	// inObjects is the bytes of memory occupied by objects.
+	inObjects uint64
+
+	// numObjects is the number of live objects in the heap.
+	numObjects uint64
+
+	// totalAllocated is the total bytes of heap objects allocated
+	// over the lifetime of the program.
+	totalAllocated uint64
+
+	// totalFreed is the total bytes of heap objects freed
+	// over the lifetime of the program.
+	totalFreed uint64
+
+	// totalAllocs is the number of heap objects allocated over
+	// the lifetime of the program.
+	totalAllocs uint64
+
+	// totalFrees is the number of heap objects freed over
+	// the lifetime of the program.
+	totalFrees uint64
+}
+
+// compute populates the heapStatsAggregate with values from the runtime.
+func (a *heapStatsAggregate) compute() {
+	memstats.heapStats.read(&a.heapStatsDelta)
+
+	// Calculate derived stats.
+	a.totalAllocs = a.largeAllocCount
+	a.totalFrees = a.largeFreeCount
+	a.totalAllocated = a.largeAlloc
+	a.totalFreed = a.largeFree
+	for i := range a.smallAllocCount {
+		na := a.smallAllocCount[i]
+		nf := a.smallFreeCount[i]
+		a.totalAllocs += na
+		a.totalFrees += nf
+		a.totalAllocated += na * uint64(class_to_size[i])
+		a.totalFreed += nf * uint64(class_to_size[i])
+	}
+	a.inObjects = a.totalAllocated - a.totalFreed
+	a.numObjects = a.totalAllocs - a.totalFrees
+}
+
+// sysStatsAggregate represents system memory stats obtained
+// from the runtime. This set of stats is grouped together because
+// they're all relatively cheap to acquire and generally independent
+// of one another and other runtime memory stats. The fact that they
+// may be acquired at different times, especially with respect to
+// heapStatsAggregate, means there could be some skew, but because
+// these stats are independent, there's no real consistency issue here.
+type sysStatsAggregate struct {
+	stacksSys      uint64
+	mSpanSys       uint64
+	mSpanInUse     uint64
+	mCacheSys      uint64
+	mCacheInUse    uint64
+	buckHashSys    uint64
+	gcMiscSys      uint64
+	otherSys       uint64
+	heapGoal       uint64
+	gcCyclesDone   uint64
+	gcCyclesForced uint64
+}
+
+// compute populates the sysStatsAggregate with values from the runtime.
+func (a *sysStatsAggregate) compute() {
+	a.stacksSys = memstats.stacks_sys.load()
+	a.buckHashSys = memstats.buckhash_sys.load()
+	a.gcMiscSys = memstats.gcMiscSys.load()
+	a.otherSys = memstats.other_sys.load()
+	a.heapGoal = gcController.heapGoal()
+	a.gcCyclesDone = uint64(memstats.numgc)
+	a.gcCyclesForced = uint64(memstats.numforcedgc)
+
+	systemstack(func() {
+		lock(&mheap_.lock)
+		a.mSpanSys = memstats.mspan_sys.load()
+		a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
+		a.mCacheSys = memstats.mcache_sys.load()
+		a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
+		unlock(&mheap_.lock)
+	})
+}
+
+// cpuStatsAggregate represents CPU stats obtained from the runtime
+// acquired together to avoid skew and inconsistencies.
+type cpuStatsAggregate struct {
+	cpuStats
+}
+
+// compute populates the cpuStatsAggregate with values from the runtime.
+func (a *cpuStatsAggregate) compute() {
+	a.cpuStats = work.cpuStats
+	// TODO(mknyszek): Update the CPU stats again so that we're not
+	// just relying on the STW snapshot. The issue here is that currently
+	// this will cause non-monotonicity in the "user" CPU time metric.
+	//
+	// a.cpuStats.accumulate(nanotime(), gcphase == _GCmark)
+}
+
+// gcStatsAggregate represents various GC stats obtained from the runtime
+// acquired together to avoid skew and inconsistencies.
+type gcStatsAggregate struct {
+	heapScan    uint64
+	stackScan   uint64
+	globalsScan uint64
+	totalScan   uint64
+}
+
+// compute populates the gcStatsAggregate with values from the runtime.
+func (a *gcStatsAggregate) compute() {
+	a.heapScan = gcController.heapScan.Load()
+	a.stackScan = gcController.lastStackScan.Load()
+	a.globalsScan = gcController.globalsScan.Load()
+	a.totalScan = a.heapScan + a.stackScan + a.globalsScan
+}
+
+// nsToSec takes a duration in nanoseconds and converts it to seconds as
+// a float64.
+func nsToSec(ns int64) float64 {
+	return float64(ns) / 1e9
+}
+
+// statAggregate is the main driver of the metrics implementation.
+//
+// It contains multiple aggregates of runtime statistics, as well
+// as a set of these aggregates that it has populated. The aggregates
+// are populated lazily by its ensure method.
+type statAggregate struct {
+	ensured   statDepSet
+	heapStats heapStatsAggregate
+	sysStats  sysStatsAggregate
+	cpuStats  cpuStatsAggregate
+	gcStats   gcStatsAggregate
+}
+
+// ensure populates statistics aggregates determined by deps if they
+// haven't yet been populated.
+func (a *statAggregate) ensure(deps *statDepSet) {
+	missing := deps.difference(a.ensured)
+	if missing.empty() {
+		return
+	}
+	for i := statDep(0); i < numStatsDeps; i++ {
+		if !missing.has(i) {
+			continue
+		}
+		switch i {
+		case heapStatsDep:
+			a.heapStats.compute()
+		case sysStatsDep:
+			a.sysStats.compute()
+		case cpuStatsDep:
+			a.cpuStats.compute()
+		case gcStatsDep:
+			a.gcStats.compute()
+		}
+	}
+	a.ensured = a.ensured.union(missing)
+}
+
+// metricKind is a runtime copy of runtime/metrics.ValueKind and
+// must be kept structurally identical to that type.
+type metricKind int
+
+const (
+	// These values must be kept identical to their corresponding Kind* values
+	// in the runtime/metrics package.
+	metricKindBad metricKind = iota
+	metricKindUint64
+	metricKindFloat64
+	metricKindFloat64Histogram
+)
+
+// metricSample is a runtime copy of runtime/metrics.Sample and
+// must be kept structurally identical to that type.
+type metricSample struct {
+	name  string
+	value metricValue
+}
+
+// metricValue is a runtime copy of runtime/metrics.Value and
+// must be kept structurally identical to that type.
+type metricValue struct {
+	kind    metricKind
+	scalar  uint64         // contains scalar values for scalar Kinds.
+	pointer unsafe.Pointer // contains non-scalar values.
+}
+
+// float64HistOrInit tries to pull out an existing float64Histogram
+// from the value, but if none exists, then it allocates one with
+// the given buckets.
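+//
+// Reusing an existing histogram (when the kind already matches) lets
+// repeated reads, e.g. successive metrics.Read calls, refill the same
+// counts slice rather than allocating a new one each time; note that the
+// counts are not cleared here, so callers are expected to overwrite them.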
+func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram { + var hist *metricFloat64Histogram + if v.kind == metricKindFloat64Histogram && v.pointer != nil { + hist = (*metricFloat64Histogram)(v.pointer) + } else { + v.kind = metricKindFloat64Histogram + hist = new(metricFloat64Histogram) + v.pointer = unsafe.Pointer(hist) + } + hist.buckets = buckets + if len(hist.counts) != len(hist.buckets)-1 { + hist.counts = make([]uint64, len(buckets)-1) + } + return hist +} + +// metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram +// and must be kept structurally identical to that type. +type metricFloat64Histogram struct { + counts []uint64 + buckets []float64 +} + +// agg is used by readMetrics, and is protected by metricsSema. +// +// Managed as a global variable because its pointer will be +// an argument to a dynamically-defined function, and we'd +// like to avoid it escaping to the heap. +var agg statAggregate + +type metricName struct { + name string + kind metricKind +} + +// readMetricNames is the implementation of runtime/metrics.readMetricNames, +// used by the runtime/metrics test and otherwise unreferenced. +// +//go:linkname readMetricNames runtime/metrics_test.runtime_readMetricNames +func readMetricNames() []string { + metricsLock() + initMetrics() + n := len(metrics) + metricsUnlock() + + list := make([]string, 0, n) + + metricsLock() + for name := range metrics { + list = append(list, name) + } + metricsUnlock() + + return list +} + +// readMetrics is the implementation of runtime/metrics.Read. +// +//go:linkname readMetrics runtime/metrics.runtime_readMetrics +func readMetrics(samplesp unsafe.Pointer, len int, cap int) { + metricsLock() + + // Ensure the map is initialized. + initMetrics() + + // Read the metrics. + readMetricsLocked(samplesp, len, cap) + metricsUnlock() +} + +// readMetricsLocked is the internal, locked portion of readMetrics. +// +// Broken out for more robust testing. metricsLock must be held and +// initMetrics must have been called already. +func readMetricsLocked(samplesp unsafe.Pointer, len int, cap int) { + // Construct a slice from the args. + sl := slice{samplesp, len, cap} + samples := *(*[]metricSample)(unsafe.Pointer(&sl)) + + // Clear agg defensively. + agg = statAggregate{} + + // Sample. + for i := range samples { + sample := &samples[i] + data, ok := metrics[sample.name] + if !ok { + sample.value.kind = metricKindBad + continue + } + // Ensure we have all the stats we need. + // agg is populated lazily. + agg.ensure(&data.deps) + + // Compute the value based on the stats we have. + data.compute(&agg, &sample.value) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/metrics_test.go b/platform/dbops/binaries/go/go/src/runtime/metrics_test.go new file mode 100644 index 0000000000000000000000000000000000000000..586610727563c2bb75b1fecda194f59081e88fa4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/metrics_test.go @@ -0,0 +1,1327 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime_test + +import ( + "bytes" + "fmt" + "internal/goexperiment" + "internal/profile" + "internal/testenv" + "os" + "reflect" + "runtime" + "runtime/debug" + "runtime/metrics" + "runtime/pprof" + "runtime/trace" + "slices" + "sort" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + "unsafe" +) + +func prepareAllMetricsSamples() (map[string]metrics.Description, []metrics.Sample) { + all := metrics.All() + samples := make([]metrics.Sample, len(all)) + descs := make(map[string]metrics.Description) + for i := range all { + samples[i].Name = all[i].Name + descs[all[i].Name] = all[i] + } + return descs, samples +} + +func TestReadMetrics(t *testing.T) { + // Run a GC cycle to get some of the stats to be non-zero. + runtime.GC() + + // Set an arbitrary memory limit to check the metric for it + limit := int64(512 * 1024 * 1024) + oldLimit := debug.SetMemoryLimit(limit) + defer debug.SetMemoryLimit(oldLimit) + + // Set a GC percent to check the metric for it + gcPercent := 99 + oldGCPercent := debug.SetGCPercent(gcPercent) + defer debug.SetGCPercent(oldGCPercent) + + // Tests whether readMetrics produces values aligning + // with ReadMemStats while the world is stopped. + var mstats runtime.MemStats + _, samples := prepareAllMetricsSamples() + runtime.ReadMetricsSlow(&mstats, unsafe.Pointer(&samples[0]), len(samples), cap(samples)) + + checkUint64 := func(t *testing.T, m string, got, want uint64) { + t.Helper() + if got != want { + t.Errorf("metric %q: got %d, want %d", m, got, want) + } + } + + // Check to make sure the values we read line up with other values we read. + var allocsBySize, gcPauses, schedPausesTotalGC *metrics.Float64Histogram + var tinyAllocs uint64 + var mallocs, frees uint64 + for i := range samples { + switch name := samples[i].Name; name { + case "/cgo/go-to-c-calls:calls": + checkUint64(t, name, samples[i].Value.Uint64(), uint64(runtime.NumCgoCall())) + case "/memory/classes/heap/free:bytes": + checkUint64(t, name, samples[i].Value.Uint64(), mstats.HeapIdle-mstats.HeapReleased) + case "/memory/classes/heap/released:bytes": + checkUint64(t, name, samples[i].Value.Uint64(), mstats.HeapReleased) + case "/memory/classes/heap/objects:bytes": + checkUint64(t, name, samples[i].Value.Uint64(), mstats.HeapAlloc) + case "/memory/classes/heap/unused:bytes": + checkUint64(t, name, samples[i].Value.Uint64(), mstats.HeapInuse-mstats.HeapAlloc) + case "/memory/classes/heap/stacks:bytes": + checkUint64(t, name, samples[i].Value.Uint64(), mstats.StackInuse) + case "/memory/classes/metadata/mcache/free:bytes": + checkUint64(t, name, samples[i].Value.Uint64(), mstats.MCacheSys-mstats.MCacheInuse) + case "/memory/classes/metadata/mcache/inuse:bytes": + checkUint64(t, name, samples[i].Value.Uint64(), mstats.MCacheInuse) + case "/memory/classes/metadata/mspan/free:bytes": + checkUint64(t, name, samples[i].Value.Uint64(), mstats.MSpanSys-mstats.MSpanInuse) + case "/memory/classes/metadata/mspan/inuse:bytes": + checkUint64(t, name, samples[i].Value.Uint64(), mstats.MSpanInuse) + case "/memory/classes/metadata/other:bytes": + checkUint64(t, name, samples[i].Value.Uint64(), mstats.GCSys) + case "/memory/classes/os-stacks:bytes": + checkUint64(t, name, samples[i].Value.Uint64(), mstats.StackSys-mstats.StackInuse) + case "/memory/classes/other:bytes": + checkUint64(t, name, samples[i].Value.Uint64(), mstats.OtherSys) + case "/memory/classes/profiling/buckets:bytes": + checkUint64(t, name, samples[i].Value.Uint64(), mstats.BuckHashSys) + case "/memory/classes/total:bytes": + 
checkUint64(t, name, samples[i].Value.Uint64(), mstats.Sys)
+		case "/gc/heap/allocs-by-size:bytes":
+			hist := samples[i].Value.Float64Histogram()
+			// Skip size class 0 in BySize, because it's always empty and not represented
+			// in the histogram.
+			for i, sc := range mstats.BySize[1:] {
+				if b, s := hist.Buckets[i+1], float64(sc.Size+1); b != s {
+					t.Errorf("bucket does not match size class: got %f, want %f", b, s)
+					// The rest of the checks aren't expected to work anyway.
+					continue
+				}
+				if c, m := hist.Counts[i], sc.Mallocs; c != m {
+					t.Errorf("histogram counts do not match BySize for class %d: got %d, want %d", i, c, m)
+				}
+			}
+			allocsBySize = hist
+		case "/gc/heap/allocs:bytes":
+			checkUint64(t, name, samples[i].Value.Uint64(), mstats.TotalAlloc)
+		case "/gc/heap/frees-by-size:bytes":
+			hist := samples[i].Value.Float64Histogram()
+			// Skip size class 0 in BySize, because it's always empty and not represented
+			// in the histogram.
+			for i, sc := range mstats.BySize[1:] {
+				if b, s := hist.Buckets[i+1], float64(sc.Size+1); b != s {
+					t.Errorf("bucket does not match size class: got %f, want %f", b, s)
+					// The rest of the checks aren't expected to work anyway.
+					continue
+				}
+				if c, f := hist.Counts[i], sc.Frees; c != f {
+					t.Errorf("histogram counts do not match BySize for class %d: got %d, want %d", i, c, f)
+				}
+			}
+		case "/gc/heap/frees:bytes":
+			checkUint64(t, name, samples[i].Value.Uint64(), mstats.TotalAlloc-mstats.HeapAlloc)
+		case "/gc/heap/tiny/allocs:objects":
+			// Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
+			// The reason for this is that MemStats couldn't be extended at the time
+			// but there was a desire to have Mallocs at least be a little more representative,
+			// while having Mallocs - Frees still represent a live object count.
+			// Unfortunately, MemStats doesn't actually export a large allocation count,
+			// so it's impossible to pull this number out directly.
+			//
+			// Check tiny allocation count outside of this loop, by using the allocs-by-size
+			// histogram in order to figure out how many large objects there are.
+			tinyAllocs = samples[i].Value.Uint64()
+			// Because the next two metrics tests are checking against Mallocs and Frees,
+			// we can't check them directly for the same reason: we need to account for tiny
+			// allocations included in Mallocs and Frees.
+		case "/gc/heap/allocs:objects":
+			mallocs = samples[i].Value.Uint64()
+		case "/gc/heap/frees:objects":
+			frees = samples[i].Value.Uint64()
+		case "/gc/heap/live:bytes":
+			// Check for "obviously wrong" values. We can't check a stronger invariant,
+			// such as live <= HeapAlloc, because live is not 100% accurate. It's computed
+			// under racy conditions, and some objects may be double-counted (this is
+			// intentional and necessary for GC performance).
+			//
+			// Instead, check against a much more reasonable upper-bound: the amount of
+			// mapped heap memory. We can't possibly overcount to the point of exceeding
+			// total mapped heap memory, except if there's an accounting bug.
+			if live := samples[i].Value.Uint64(); live > mstats.HeapSys {
+				t.Errorf("live bytes: %d > heap sys: %d", live, mstats.HeapSys)
+			} else if live == 0 {
+				// Might happen if we don't call runtime.GC() above.
+				t.Error("live bytes is 0")
+			}
+		case "/gc/gomemlimit:bytes":
+			checkUint64(t, name, samples[i].Value.Uint64(), uint64(limit))
+		case "/gc/heap/objects:objects":
+			checkUint64(t, name, samples[i].Value.Uint64(), mstats.HeapObjects)
+		case "/gc/heap/goal:bytes":
+			checkUint64(t, name, samples[i].Value.Uint64(), mstats.NextGC)
+		case "/gc/gogc:percent":
+			checkUint64(t, name, samples[i].Value.Uint64(), uint64(gcPercent))
+		case "/gc/cycles/automatic:gc-cycles":
+			checkUint64(t, name, samples[i].Value.Uint64(), uint64(mstats.NumGC-mstats.NumForcedGC))
+		case "/gc/cycles/forced:gc-cycles":
+			checkUint64(t, name, samples[i].Value.Uint64(), uint64(mstats.NumForcedGC))
+		case "/gc/cycles/total:gc-cycles":
+			checkUint64(t, name, samples[i].Value.Uint64(), uint64(mstats.NumGC))
+		case "/gc/pauses:seconds":
+			gcPauses = samples[i].Value.Float64Histogram()
+		case "/sched/pauses/total/gc:seconds":
+			schedPausesTotalGC = samples[i].Value.Float64Histogram()
+		}
+	}
+
+	// Check tinyAllocs.
+	nonTinyAllocs := uint64(0)
+	for _, c := range allocsBySize.Counts {
+		nonTinyAllocs += c
+	}
+	checkUint64(t, "/gc/heap/tiny/allocs:objects", tinyAllocs, mstats.Mallocs-nonTinyAllocs)
+
+	// Check allocation and free counts.
+	checkUint64(t, "/gc/heap/allocs:objects", mallocs, mstats.Mallocs-tinyAllocs)
+	checkUint64(t, "/gc/heap/frees:objects", frees, mstats.Frees-tinyAllocs)
+
+	// Verify that /gc/pauses:seconds is a copy of /sched/pauses/total/gc:seconds
+	if !reflect.DeepEqual(gcPauses.Buckets, schedPausesTotalGC.Buckets) {
+		t.Errorf("/gc/pauses:seconds buckets %v do not match /sched/pauses/total/gc:seconds buckets %v", gcPauses.Buckets, schedPausesTotalGC.Buckets)
+	}
+	if !reflect.DeepEqual(gcPauses.Counts, schedPausesTotalGC.Counts) {
+		t.Errorf("/gc/pauses:seconds counts %v do not match /sched/pauses/total/gc:seconds counts %v", gcPauses.Counts, schedPausesTotalGC.Counts)
+	}
+}
+
+func TestReadMetricsConsistency(t *testing.T) {
+	// Tests whether readMetrics produces consistent, sensible values.
+	// The values are read concurrently with the runtime doing other
+	// things (e.g. allocating) so what we read can't reasonably be compared
+	// to other runtime values (e.g. MemStats).
+
+	// Run a few GC cycles to get some of the stats to be non-zero.
+	runtime.GC()
+	runtime.GC()
+	runtime.GC()
+
+	// Set GOMAXPROCS high then sleep briefly to ensure we generate
+	// some idle time.
+	oldmaxprocs := runtime.GOMAXPROCS(10)
+	time.Sleep(time.Millisecond)
+	runtime.GOMAXPROCS(oldmaxprocs)
+
+	// Read all the supported metrics through the metrics package.
+	descs, samples := prepareAllMetricsSamples()
+	metrics.Read(samples)
+
+	// Check to make sure the values we read make sense.
+ var totalVirtual struct { + got, want uint64 + } + var objects struct { + alloc, free *metrics.Float64Histogram + allocs, frees uint64 + allocdBytes, freedBytes uint64 + total, totalBytes uint64 + } + var gc struct { + numGC uint64 + pauses uint64 + } + var totalScan struct { + got, want uint64 + } + var cpu struct { + gcAssist float64 + gcDedicated float64 + gcIdle float64 + gcPause float64 + gcTotal float64 + + idle float64 + user float64 + + scavengeAssist float64 + scavengeBg float64 + scavengeTotal float64 + + total float64 + } + for i := range samples { + kind := samples[i].Value.Kind() + if want := descs[samples[i].Name].Kind; kind != want { + t.Errorf("supported metric %q has unexpected kind: got %d, want %d", samples[i].Name, kind, want) + continue + } + if samples[i].Name != "/memory/classes/total:bytes" && strings.HasPrefix(samples[i].Name, "/memory/classes") { + v := samples[i].Value.Uint64() + totalVirtual.want += v + + // None of these stats should ever get this big. + // If they do, there's probably overflow involved, + // usually due to bad accounting. + if int64(v) < 0 { + t.Errorf("%q has high/negative value: %d", samples[i].Name, v) + } + } + switch samples[i].Name { + case "/cpu/classes/gc/mark/assist:cpu-seconds": + cpu.gcAssist = samples[i].Value.Float64() + case "/cpu/classes/gc/mark/dedicated:cpu-seconds": + cpu.gcDedicated = samples[i].Value.Float64() + case "/cpu/classes/gc/mark/idle:cpu-seconds": + cpu.gcIdle = samples[i].Value.Float64() + case "/cpu/classes/gc/pause:cpu-seconds": + cpu.gcPause = samples[i].Value.Float64() + case "/cpu/classes/gc/total:cpu-seconds": + cpu.gcTotal = samples[i].Value.Float64() + case "/cpu/classes/idle:cpu-seconds": + cpu.idle = samples[i].Value.Float64() + case "/cpu/classes/scavenge/assist:cpu-seconds": + cpu.scavengeAssist = samples[i].Value.Float64() + case "/cpu/classes/scavenge/background:cpu-seconds": + cpu.scavengeBg = samples[i].Value.Float64() + case "/cpu/classes/scavenge/total:cpu-seconds": + cpu.scavengeTotal = samples[i].Value.Float64() + case "/cpu/classes/total:cpu-seconds": + cpu.total = samples[i].Value.Float64() + case "/cpu/classes/user:cpu-seconds": + cpu.user = samples[i].Value.Float64() + case "/memory/classes/total:bytes": + totalVirtual.got = samples[i].Value.Uint64() + case "/memory/classes/heap/objects:bytes": + objects.totalBytes = samples[i].Value.Uint64() + case "/gc/heap/objects:objects": + objects.total = samples[i].Value.Uint64() + case "/gc/heap/allocs:bytes": + objects.allocdBytes = samples[i].Value.Uint64() + case "/gc/heap/allocs:objects": + objects.allocs = samples[i].Value.Uint64() + case "/gc/heap/allocs-by-size:bytes": + objects.alloc = samples[i].Value.Float64Histogram() + case "/gc/heap/frees:bytes": + objects.freedBytes = samples[i].Value.Uint64() + case "/gc/heap/frees:objects": + objects.frees = samples[i].Value.Uint64() + case "/gc/heap/frees-by-size:bytes": + objects.free = samples[i].Value.Float64Histogram() + case "/gc/cycles:gc-cycles": + gc.numGC = samples[i].Value.Uint64() + case "/gc/pauses:seconds": + h := samples[i].Value.Float64Histogram() + gc.pauses = 0 + for i := range h.Counts { + gc.pauses += h.Counts[i] + } + case "/gc/scan/heap:bytes": + totalScan.want += samples[i].Value.Uint64() + case "/gc/scan/globals:bytes": + totalScan.want += samples[i].Value.Uint64() + case "/gc/scan/stack:bytes": + totalScan.want += samples[i].Value.Uint64() + case "/gc/scan/total:bytes": + totalScan.got = samples[i].Value.Uint64() + case "/sched/gomaxprocs:threads": + if got, want := 
samples[i].Value.Uint64(), uint64(runtime.GOMAXPROCS(-1)); got != want { + t.Errorf("gomaxprocs doesn't match runtime.GOMAXPROCS: got %d, want %d", got, want) + } + case "/sched/goroutines:goroutines": + if samples[i].Value.Uint64() < 1 { + t.Error("number of goroutines is less than one") + } + } + } + // Only check this on Linux where we can be reasonably sure we have a high-resolution timer. + if runtime.GOOS == "linux" { + if cpu.gcDedicated <= 0 && cpu.gcAssist <= 0 && cpu.gcIdle <= 0 { + t.Errorf("found no time spent on GC work: %#v", cpu) + } + if cpu.gcPause <= 0 { + t.Errorf("found no GC pauses: %f", cpu.gcPause) + } + if cpu.idle <= 0 { + t.Errorf("found no idle time: %f", cpu.idle) + } + if total := cpu.gcDedicated + cpu.gcAssist + cpu.gcIdle + cpu.gcPause; !withinEpsilon(cpu.gcTotal, total, 0.01) { + t.Errorf("calculated total GC CPU not within 1%% of sampled total: %f vs. %f", total, cpu.gcTotal) + } + if total := cpu.scavengeAssist + cpu.scavengeBg; !withinEpsilon(cpu.scavengeTotal, total, 0.01) { + t.Errorf("calculated total scavenge CPU not within 1%% of sampled total: %f vs. %f", total, cpu.scavengeTotal) + } + if cpu.total <= 0 { + t.Errorf("found no total CPU time passed") + } + if cpu.user <= 0 { + t.Errorf("found no user time passed") + } + if total := cpu.gcTotal + cpu.scavengeTotal + cpu.user + cpu.idle; !withinEpsilon(cpu.total, total, 0.02) { + t.Errorf("calculated total CPU not within 2%% of sampled total: %f vs. %f", total, cpu.total) + } + } + if totalVirtual.got != totalVirtual.want { + t.Errorf(`"/memory/classes/total:bytes" does not match sum of /memory/classes/**: got %d, want %d`, totalVirtual.got, totalVirtual.want) + } + if got, want := objects.allocs-objects.frees, objects.total; got != want { + t.Errorf("mismatch between object alloc/free tallies and total: got %d, want %d", got, want) + } + if got, want := objects.allocdBytes-objects.freedBytes, objects.totalBytes; got != want { + t.Errorf("mismatch between object alloc/free tallies and total: got %d, want %d", got, want) + } + if b, c := len(objects.alloc.Buckets), len(objects.alloc.Counts); b != c+1 { + t.Errorf("allocs-by-size has wrong bucket or counts length: %d buckets, %d counts", b, c) + } + if b, c := len(objects.free.Buckets), len(objects.free.Counts); b != c+1 { + t.Errorf("frees-by-size has wrong bucket or counts length: %d buckets, %d counts", b, c) + } + if len(objects.alloc.Buckets) != len(objects.free.Buckets) { + t.Error("allocs-by-size and frees-by-size buckets don't match in length") + } else if len(objects.alloc.Counts) != len(objects.free.Counts) { + t.Error("allocs-by-size and frees-by-size counts don't match in length") + } else { + for i := range objects.alloc.Buckets { + ba := objects.alloc.Buckets[i] + bf := objects.free.Buckets[i] + if ba != bf { + t.Errorf("bucket %d is different for alloc and free hists: %f != %f", i, ba, bf) + } + } + if !t.Failed() { + var gotAlloc, gotFree uint64 + want := objects.total + for i := range objects.alloc.Counts { + if objects.alloc.Counts[i] < objects.free.Counts[i] { + t.Errorf("found more allocs than frees in object dist bucket %d", i) + continue + } + gotAlloc += objects.alloc.Counts[i] + gotFree += objects.free.Counts[i] + } + if got := gotAlloc - gotFree; got != want { + t.Errorf("object distribution counts don't match count of live objects: got %d, want %d", got, want) + } + if gotAlloc != objects.allocs { + t.Errorf("object distribution counts don't match total allocs: got %d, want %d", gotAlloc, objects.allocs) + } + if gotFree != 
objects.frees {
+				t.Errorf("object distribution counts don't match total frees: got %d, want %d", gotFree, objects.frees)
+			}
+		}
+	}
+	// The current GC has at least 2 pauses per GC.
+	// Check to see if that value makes sense.
+	if gc.pauses < gc.numGC*2 {
+		t.Errorf("fewer pauses than expected: got %d, want at least %d", gc.pauses, gc.numGC*2)
+	}
+	if totalScan.got <= 0 {
+		t.Errorf("scannable GC space is empty: %d", totalScan.got)
+	}
+	if totalScan.got != totalScan.want {
+		t.Errorf("/gc/scan/total:bytes doesn't line up with sum of /gc/scan*: total %d vs. sum %d", totalScan.got, totalScan.want)
+	}
+}
+
+func BenchmarkReadMetricsLatency(b *testing.B) {
+	stop := applyGCLoad(b)
+
+	// Spend this much time measuring latencies.
+	latencies := make([]time.Duration, 0, 1024)
+	_, samples := prepareAllMetricsSamples()
+
+	// Hit metrics.Read continuously and measure.
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		start := time.Now()
+		metrics.Read(samples)
+		latencies = append(latencies, time.Since(start))
+	}
+	// Make sure to stop the timer before we wait! The load created above
+	// is very heavy-weight and not easy to stop, so we could end up
+	// confusing the benchmarking framework for small b.N.
+	b.StopTimer()
+	stop()
+
+	// Disable the default */op metrics.
+	// ns/op doesn't mean anything because it's an average, but we
+	// have a sleep in our b.N loop above which skews this significantly.
+	b.ReportMetric(0, "ns/op")
+	b.ReportMetric(0, "B/op")
+	b.ReportMetric(0, "allocs/op")
+
+	// Sort latencies then report percentiles.
+	sort.Slice(latencies, func(i, j int) bool {
+		return latencies[i] < latencies[j]
+	})
+	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
+	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
+	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
+}
+
+var readMetricsSink [1024]interface{}
+
+func TestReadMetricsCumulative(t *testing.T) {
+	// Set up the set of metrics marked cumulative.
+	descs := metrics.All()
+	var samples [2][]metrics.Sample
+	samples[0] = make([]metrics.Sample, len(descs))
+	samples[1] = make([]metrics.Sample, len(descs))
+	total := 0
+	for i := range samples[0] {
+		if !descs[i].Cumulative {
+			continue
+		}
+		samples[0][total].Name = descs[i].Name
+		total++
+	}
+	samples[0] = samples[0][:total]
+	samples[1] = samples[1][:total]
+	copy(samples[1], samples[0])
+
+	// Start some noise in the background.
+	var wg sync.WaitGroup
+	wg.Add(1)
+	done := make(chan struct{})
+	go func() {
+		defer wg.Done()
+		for {
+			// Add more things here that could influence metrics.
+			for i := 0; i < len(readMetricsSink); i++ {
+				readMetricsSink[i] = make([]byte, 1024)
+				select {
+				case <-done:
+					return
+				default:
+				}
+			}
+			runtime.GC()
+		}
+	}()
+
+	sum := func(us []uint64) uint64 {
+		total := uint64(0)
+		for _, u := range us {
+			total += u
+		}
+		return total
+	}
+
+	// Populate the first generation.
+	metrics.Read(samples[0])
+
+	// Check to make sure that these metrics only grow monotonically.
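+	// (samples[gen%2] holds the current generation and samples[1-(gen%2)]
+	// the previous one, so each pass compares a fresh read against the
+	// immediately preceding read.)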
+	for gen := 1; gen < 10; gen++ {
+		metrics.Read(samples[gen%2])
+		for i := range samples[gen%2] {
+			name := samples[gen%2][i].Name
+			vNew, vOld := samples[gen%2][i].Value, samples[1-(gen%2)][i].Value
+
+			switch vNew.Kind() {
+			case metrics.KindUint64:
+				new := vNew.Uint64()
+				old := vOld.Uint64()
+				if new < old {
+					t.Errorf("%s decreased: %d < %d", name, new, old)
+				}
+			case metrics.KindFloat64:
+				new := vNew.Float64()
+				old := vOld.Float64()
+				if new < old {
+					t.Errorf("%s decreased: %f < %f", name, new, old)
+				}
+			case metrics.KindFloat64Histogram:
+				new := sum(vNew.Float64Histogram().Counts)
+				old := sum(vOld.Float64Histogram().Counts)
+				if new < old {
+					t.Errorf("%s counts decreased: %d < %d", name, new, old)
+				}
+			}
+		}
+	}
+	close(done)
+
+	wg.Wait()
+}
+
+func withinEpsilon(v1, v2, e float64) bool {
+	return v2-v2*e <= v1 && v1 <= v2+v2*e
+}
+
+func TestMutexWaitTimeMetric(t *testing.T) {
+	var sample [1]metrics.Sample
+	sample[0].Name = "/sync/mutex/wait/total:seconds"
+
+	locks := []locker2{
+		new(mutex),
+		new(rwmutexWrite),
+		new(rwmutexReadWrite),
+		new(rwmutexWriteRead),
+	}
+	for _, lock := range locks {
+		t.Run(reflect.TypeOf(lock).Elem().Name(), func(t *testing.T) {
+			metrics.Read(sample[:])
+			before := time.Duration(sample[0].Value.Float64() * 1e9)
+
+			minMutexWaitTime := generateMutexWaitTime(lock)
+
+			metrics.Read(sample[:])
+			after := time.Duration(sample[0].Value.Float64() * 1e9)
+
+			if wt := after - before; wt < minMutexWaitTime {
+				t.Errorf("too little mutex wait time: got %s, want %s", wt, minMutexWaitTime)
+			}
+		})
+	}
+}
+
+// locker2 represents an API surface of two concurrent goroutines
+// locking the same resource, but through different APIs. It's intended
+// to abstract over the relationship of two Lock calls or an RLock
+// and a Lock call.
+type locker2 interface {
+	Lock1()
+	Unlock1()
+	Lock2()
+	Unlock2()
+}
+
+type mutex struct {
+	mu sync.Mutex
+}
+
+func (m *mutex) Lock1()   { m.mu.Lock() }
+func (m *mutex) Unlock1() { m.mu.Unlock() }
+func (m *mutex) Lock2()   { m.mu.Lock() }
+func (m *mutex) Unlock2() { m.mu.Unlock() }
+
+type rwmutexWrite struct {
+	mu sync.RWMutex
+}
+
+func (m *rwmutexWrite) Lock1()   { m.mu.Lock() }
+func (m *rwmutexWrite) Unlock1() { m.mu.Unlock() }
+func (m *rwmutexWrite) Lock2()   { m.mu.Lock() }
+func (m *rwmutexWrite) Unlock2() { m.mu.Unlock() }
+
+type rwmutexReadWrite struct {
+	mu sync.RWMutex
+}
+
+func (m *rwmutexReadWrite) Lock1()   { m.mu.RLock() }
+func (m *rwmutexReadWrite) Unlock1() { m.mu.RUnlock() }
+func (m *rwmutexReadWrite) Lock2()   { m.mu.Lock() }
+func (m *rwmutexReadWrite) Unlock2() { m.mu.Unlock() }
+
+type rwmutexWriteRead struct {
+	mu sync.RWMutex
+}
+
+func (m *rwmutexWriteRead) Lock1()   { m.mu.Lock() }
+func (m *rwmutexWriteRead) Unlock1() { m.mu.Unlock() }
+func (m *rwmutexWriteRead) Lock2()   { m.mu.RLock() }
+func (m *rwmutexWriteRead) Unlock2() { m.mu.RUnlock() }
+
+// generateMutexWaitTime causes a couple of goroutines
+// to block a whole bunch of times on a sync.Mutex, returning
+// the minimum amount of time that should be visible in the
+// /sync/mutex/wait/total:seconds metric.
+func generateMutexWaitTime(mu locker2) time.Duration {
+	// Set up the runtime to always track casgstatus transitions for metrics.
+	*runtime.CasGStatusAlwaysTrack = true
+
+	mu.Lock1()
+
+	// Start up a goroutine to wait on the lock.
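+	// (The channel below hands the child goroutine's *runtime.G back to
+	// this function so it can poll runtime.GIsWaitingOnMutex further down.)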
+	gc := make(chan *runtime.G)
+	done := make(chan bool)
+	go func() {
+		gc <- runtime.Getg()
+
+		for {
+			mu.Lock2()
+			mu.Unlock2()
+			if <-done {
+				return
+			}
+		}
+	}()
+	gp := <-gc
+
+	// Set the block time high enough so that it will always show up, even
+	// on systems with coarse timer granularity.
+	const blockTime = 100 * time.Millisecond
+
+	// Make sure the goroutine spawned above actually blocks on the lock.
+	for {
+		if runtime.GIsWaitingOnMutex(gp) {
+			break
+		}
+		runtime.Gosched()
+	}
+
+	// Let some amount of time pass.
+	time.Sleep(blockTime)
+
+	// Let the other goroutine acquire the lock.
+	mu.Unlock1()
+	done <- true
+
+	// Reset flag.
+	*runtime.CasGStatusAlwaysTrack = false
+	return blockTime
+}
+
+// See issue #60276.
+func TestCPUMetricsSleep(t *testing.T) {
+	if runtime.GOOS == "wasip1" {
+		// Since wasip1 busy-waits in the scheduler, there's no meaningful idle
+		// time. This is accurately reflected in the metrics, but it means this
+		// test is basically meaningless on this platform.
+		t.Skip("wasip1 currently busy-waits in idle time; test not applicable")
+	}
+
+	names := []string{
+		"/cpu/classes/idle:cpu-seconds",
+
+		"/cpu/classes/gc/mark/assist:cpu-seconds",
+		"/cpu/classes/gc/mark/dedicated:cpu-seconds",
+		"/cpu/classes/gc/mark/idle:cpu-seconds",
+		"/cpu/classes/gc/pause:cpu-seconds",
+		"/cpu/classes/gc/total:cpu-seconds",
+		"/cpu/classes/scavenge/assist:cpu-seconds",
+		"/cpu/classes/scavenge/background:cpu-seconds",
+		"/cpu/classes/scavenge/total:cpu-seconds",
+		"/cpu/classes/total:cpu-seconds",
+		"/cpu/classes/user:cpu-seconds",
+	}
+	prep := func() []metrics.Sample {
+		mm := make([]metrics.Sample, len(names))
+		for i := range names {
+			mm[i].Name = names[i]
+		}
+		return mm
+	}
+	m1, m2 := prep(), prep()
+
+	const (
+		// Expected time spent idle.
+		dur = 100 * time.Millisecond
+
+		// maxFailures is the number of consecutive failures required to cause the test to fail.
+		maxFailures = 10
+	)
+
+	failureIdleTimes := make([]float64, 0, maxFailures)
+
+	// If the bug we expect is happening, then the Sleep CPU time will be accounted for
+	// as user time rather than idle time. In an ideal world we'd expect the whole application
+	// to go instantly idle the moment this goroutine goes to sleep, and stay asleep for that
+	// duration. However, the Go runtime can easily eat into idle time while this goroutine is
+	// blocked in a sleep. For example, slow platforms might spend more time than expected in the
+	// scheduler. Another example is that a Go runtime background goroutine could run while
+	// everything else is idle. Lastly, if a running goroutine is descheduled by the OS, enough
+	// time may pass such that the goroutine is ready to wake, even though the runtime couldn't
+	// observe itself as idle with nanotime.
+	//
+	// To deal with all this, we give a half-proc's worth of leniency.
+	//
+	// We also retry multiple times to deal with the fact that the OS might deschedule us before
+	// we yield and go idle. That has a rare enough chance that retries should resolve it.
+	// If the issue we expect is happening, it should be persistent.
+	minIdleCPUSeconds := dur.Seconds() * (float64(runtime.GOMAXPROCS(-1)) - 0.5)
+
+	// Let's make sure there's no background scavenge work to do.
+	//
+	// The runtime.GC calls below ensure the background sweeper
+	// will not run during the idle period.
+	debug.FreeOSMemory()
+
+	for retries := 0; retries < maxFailures; retries++ {
+		// Read 1.
+		runtime.GC() // Update /cpu/classes metrics.
+		metrics.Read(m1)
+
+		// Sleep.
+		time.Sleep(dur)
+
+		// Read 2.
+		runtime.GC() // Update /cpu/classes metrics.
+		metrics.Read(m2)
+
+		dt := m2[0].Value.Float64() - m1[0].Value.Float64()
+		if dt >= minIdleCPUSeconds {
+			// All is well. Test passed.
+			return
+		}
+		failureIdleTimes = append(failureIdleTimes, dt)
+		// Try again.
+	}
+
+	// We couldn't observe the expected idle time even once.
+	for i, dt := range failureIdleTimes {
+		t.Logf("try %2d: idle time = %.5fs\n", i+1, dt)
+	}
+	t.Logf("try %d breakdown:\n", len(failureIdleTimes))
+	for i := range names {
+		if m1[i].Value.Kind() == metrics.KindBad {
+			continue
+		}
+		t.Logf("\t%s %0.3f\n", names[i], m2[i].Value.Float64()-m1[i].Value.Float64())
+	}
+	t.Errorf(`time.Sleep did not contribute enough to "idle" class: minimum idle time = %.5fs`, minIdleCPUSeconds)
+}
+
+// Call fn and verify that the correct STW metrics increment. If isGC is true,
+// fn triggers a GC STW. Otherwise, fn triggers a non-GC ("other") STW.
+func testSchedPauseMetrics(t *testing.T, fn func(t *testing.T), isGC bool) {
+	m := []metrics.Sample{
+		{Name: "/sched/pauses/stopping/gc:seconds"},
+		{Name: "/sched/pauses/stopping/other:seconds"},
+		{Name: "/sched/pauses/total/gc:seconds"},
+		{Name: "/sched/pauses/total/other:seconds"},
+	}
+
+	stoppingGC := &m[0]
+	stoppingOther := &m[1]
+	totalGC := &m[2]
+	totalOther := &m[3]
+
+	sampleCount := func(s *metrics.Sample) uint64 {
+		h := s.Value.Float64Histogram()
+
+		var n uint64
+		for _, c := range h.Counts {
+			n += c
+		}
+		return n
+	}
+
+	// Read baseline.
+	metrics.Read(m)
+
+	baselineStartGC := sampleCount(stoppingGC)
+	baselineStartOther := sampleCount(stoppingOther)
+	baselineTotalGC := sampleCount(totalGC)
+	baselineTotalOther := sampleCount(totalOther)
+
+	fn(t)
+
+	metrics.Read(m)
+
+	if isGC {
+		if got := sampleCount(stoppingGC); got <= baselineStartGC {
+			t.Errorf("/sched/pauses/stopping/gc:seconds sample count %d did not increase from baseline of %d", got, baselineStartGC)
+		}
+		if got := sampleCount(totalGC); got <= baselineTotalGC {
+			t.Errorf("/sched/pauses/total/gc:seconds sample count %d did not increase from baseline of %d", got, baselineTotalGC)
+		}
+
+		if got := sampleCount(stoppingOther); got != baselineStartOther {
+			t.Errorf("/sched/pauses/stopping/other:seconds sample count %d changed from baseline of %d", got, baselineStartOther)
+		}
+		if got := sampleCount(totalOther); got != baselineTotalOther {
+			t.Errorf("/sched/pauses/total/other:seconds sample count %d changed from baseline of %d", got, baselineTotalOther)
+		}
+	} else {
+		if got := sampleCount(stoppingGC); got != baselineStartGC {
+			t.Errorf("/sched/pauses/stopping/gc:seconds sample count %d changed from baseline of %d", got, baselineStartGC)
+		}
+		if got := sampleCount(totalGC); got != baselineTotalGC {
+			t.Errorf("/sched/pauses/total/gc:seconds sample count %d changed from baseline of %d", got, baselineTotalGC)
+		}
+
+		if got := sampleCount(stoppingOther); got <= baselineStartOther {
+			t.Errorf("/sched/pauses/stopping/other:seconds sample count %d did not increase from baseline of %d", got, baselineStartOther)
+		}
+		if got := sampleCount(totalOther); got <= baselineTotalOther {
+			t.Errorf("/sched/pauses/total/other:seconds sample count %d did not increase from baseline of %d", got, baselineTotalOther)
+		}
+	}
+}
+
+func TestSchedPauseMetrics(t *testing.T) {
+	tests := []struct {
+		name string
+		isGC bool
+		fn   func(t *testing.T)
+	}{
+		{
+			name: "runtime.GC",
+			isGC: true,
+			fn: func(t *testing.T) {
+				runtime.GC()
+			},
+		},
+		{
+			name: "runtime.GOMAXPROCS",
+			fn: func(t 
*testing.T) { + if runtime.GOARCH == "wasm" { + t.Skip("GOMAXPROCS >1 not supported on wasm") + } + + n := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(n) + + runtime.GOMAXPROCS(n + 1) + }, + }, + { + name: "runtime.GoroutineProfile", + fn: func(t *testing.T) { + var s [1]runtime.StackRecord + runtime.GoroutineProfile(s[:]) + }, + }, + { + name: "runtime.ReadMemStats", + fn: func(t *testing.T) { + var mstats runtime.MemStats + runtime.ReadMemStats(&mstats) + }, + }, + { + name: "runtime.Stack", + fn: func(t *testing.T) { + var b [64]byte + runtime.Stack(b[:], true) + }, + }, + { + name: "runtime/debug.WriteHeapDump", + fn: func(t *testing.T) { + if runtime.GOOS == "js" { + t.Skip("WriteHeapDump not supported on js") + } + + f, err := os.CreateTemp(t.TempDir(), "heapdumptest") + if err != nil { + t.Fatalf("os.CreateTemp failed: %v", err) + } + defer os.Remove(f.Name()) + defer f.Close() + debug.WriteHeapDump(f.Fd()) + }, + }, + { + name: "runtime/trace.Start", + fn: func(t *testing.T) { + if trace.IsEnabled() { + t.Skip("tracing already enabled") + } + + var buf bytes.Buffer + if err := trace.Start(&buf); err != nil { + t.Errorf("trace.Start err got %v want nil", err) + } + trace.Stop() + }, + }, + } + + // These tests count STW pauses, classified based on whether they're related + // to the GC or not. Disable automatic GC cycles during the test so we don't + // have an incidental GC pause when we're trying to observe only + // non-GC-related pauses. This is especially important for the + // runtime/trace.Start test, since (as of this writing) that will block + // until any active GC mark phase completes. + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + runtime.GC() + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + testSchedPauseMetrics(t, tc.fn, tc.isGC) + }) + } +} + +func TestRuntimeLockMetricsAndProfile(t *testing.T) { + testenv.SkipFlaky(t, 64253) + + old := runtime.SetMutexProfileFraction(0) // enabled during sub-tests + defer runtime.SetMutexProfileFraction(old) + if old != 0 { + t.Fatalf("need MutexProfileRate 0, got %d", old) + } + + { + before := os.Getenv("GODEBUG") + for _, s := range strings.Split(before, ",") { + if strings.HasPrefix(s, "runtimecontentionstacks=") { + t.Logf("GODEBUG includes explicit setting %q", s) + } + } + defer func() { os.Setenv("GODEBUG", before) }() + os.Setenv("GODEBUG", fmt.Sprintf("%s,runtimecontentionstacks=1", before)) + } + + t.Logf("NumCPU %d", runtime.NumCPU()) + t.Logf("GOMAXPROCS %d", runtime.GOMAXPROCS(0)) + if minCPU := 2; runtime.NumCPU() < minCPU { + t.Skipf("creating and observing contention on runtime-internal locks requires NumCPU >= %d", minCPU) + } + + loadProfile := func(t *testing.T) *profile.Profile { + var w bytes.Buffer + pprof.Lookup("mutex").WriteTo(&w, 0) + p, err := profile.Parse(&w) + if err != nil { + t.Fatalf("failed to parse profile: %v", err) + } + if err := p.CheckValid(); err != nil { + t.Fatalf("invalid profile: %v", err) + } + return p + } + + measureDelta := func(t *testing.T, fn func()) (metricGrowth, profileGrowth float64, p *profile.Profile) { + beforeProfile := loadProfile(t) + beforeMetrics := []metrics.Sample{{Name: "/sync/mutex/wait/total:seconds"}} + metrics.Read(beforeMetrics) + + fn() + + afterProfile := loadProfile(t) + afterMetrics := []metrics.Sample{{Name: "/sync/mutex/wait/total:seconds"}} + metrics.Read(afterMetrics) + + sumSamples := func(p *profile.Profile, i int) int64 { + var sum int64 + for _, s := range p.Sample { + sum += s.Value[i] + } + return sum + } + + 
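+		// (In the mutex profile, sample value index 0 is the contention
+		// count and index 1 is the total delay in nanoseconds, so the
+		// sum over index 1 below is converted to seconds for an
+		// apples-to-apples comparison with the metric.)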
metricGrowth = afterMetrics[0].Value.Float64() - beforeMetrics[0].Value.Float64() + profileGrowth = float64(sumSamples(afterProfile, 1)-sumSamples(beforeProfile, 1)) * time.Nanosecond.Seconds() + + // The internal/profile package does not support compaction; this delta + // profile will include separate positive and negative entries. + p = afterProfile.Copy() + if len(beforeProfile.Sample) > 0 { + err := p.Merge(beforeProfile, -1) + if err != nil { + t.Fatalf("Merge profiles: %v", err) + } + } + + return metricGrowth, profileGrowth, p + } + + testcase := func(strictTiming bool, acceptStacks [][]string, workers int, fn func() bool) func(t *testing.T) (metricGrowth, profileGrowth float64, n, value int64) { + return func(t *testing.T) (metricGrowth, profileGrowth float64, n, value int64) { + metricGrowth, profileGrowth, p := measureDelta(t, func() { + var started, stopped sync.WaitGroup + started.Add(workers) + stopped.Add(workers) + for i := 0; i < workers; i++ { + w := &contentionWorker{ + before: func() { + started.Done() + started.Wait() + }, + after: func() { + stopped.Done() + }, + fn: fn, + } + go w.run() + } + stopped.Wait() + }) + + if profileGrowth == 0 { + t.Errorf("no increase in mutex profile") + } + if metricGrowth == 0 && strictTiming { + // If the critical section is very short, systems with low timer + // resolution may be unable to measure it via nanotime. + t.Errorf("no increase in /sync/mutex/wait/total:seconds metric") + } + // This comparison is possible because the time measurements in support of + // runtime/pprof and runtime/metrics for runtime-internal locks are so close + // together. It doesn't work as well for user-space contention, where the + // involved goroutines are not _Grunnable the whole time and so need to pass + // through the scheduler. + t.Logf("lock contention growth in runtime/pprof's view (%fs)", profileGrowth) + t.Logf("lock contention growth in runtime/metrics' view (%fs)", metricGrowth) + + acceptStacks = append([][]string(nil), acceptStacks...) + for i, stk := range acceptStacks { + if goexperiment.StaticLockRanking { + if !slices.ContainsFunc(stk, func(s string) bool { + return s == "runtime.systemstack" || s == "runtime.mcall" || s == "runtime.mstart" + }) { + // stk is a call stack that is still on the user stack when + // it calls runtime.unlock. Add the extra function that + // we'll see, when the static lock ranking implementation of + // runtime.unlockWithRank switches to the system stack. + stk = append([]string{"runtime.unlockWithRank"}, stk...) 
+ } + } + acceptStacks[i] = stk + } + + var stks [][]string + values := make([][2]int64, len(acceptStacks)) + for _, s := range p.Sample { + var have []string + for _, loc := range s.Location { + for _, line := range loc.Line { + have = append(have, line.Function.Name) + } + } + stks = append(stks, have) + for i, stk := range acceptStacks { + if slices.Equal(have, stk) { + values[i][0] += s.Value[0] + values[i][1] += s.Value[1] + } + } + } + for i, stk := range acceptStacks { + n += values[i][0] + value += values[i][1] + t.Logf("stack %v has samples totaling n=%d value=%d", stk, values[i][0], values[i][1]) + } + if n == 0 && value == 0 { + t.Logf("profile:\n%s", p) + for _, have := range stks { + t.Logf("have stack %v", have) + } + for _, stk := range acceptStacks { + t.Errorf("want stack %v", stk) + } + } + + return metricGrowth, profileGrowth, n, value + } + } + + name := t.Name() + + t.Run("runtime.lock", func(t *testing.T) { + mus := make([]runtime.Mutex, 100) + var needContention atomic.Int64 + delay := 100 * time.Microsecond // large relative to system noise, for comparison between clocks + delayMicros := delay.Microseconds() + + // The goroutine that acquires the lock will only proceed when it + // detects that its partner is contended for the lock. That will lead to + // live-lock if anything (such as a STW) prevents the partner goroutine + // from running. Allowing the contention workers to pause and restart + // (to allow a STW to proceed) makes it harder to confirm that we're + // counting the correct number of contention events, since some locks + // will end up contended twice. Instead, disable the GC. + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + + const workers = 2 + if runtime.GOMAXPROCS(0) < workers { + t.Skipf("contention on runtime-internal locks requires GOMAXPROCS >= %d", workers) + } + + fn := func() bool { + n := int(needContention.Load()) + if n < 0 { + return false + } + mu := &mus[n] + + runtime.Lock(mu) + for int(needContention.Load()) == n { + if runtime.MutexContended(mu) { + // make them wait a little while + for start := runtime.Nanotime(); (runtime.Nanotime()-start)/1000 < delayMicros; { + runtime.Usleep(uint32(delayMicros)) + } + break + } + } + runtime.Unlock(mu) + needContention.Store(int64(n - 1)) + + return true + } + + stks := [][]string{{ + "runtime.unlock", + "runtime_test." + name + ".func5.1", + "runtime_test.(*contentionWorker).run", + }} + + t.Run("sample-1", func(t *testing.T) { + old := runtime.SetMutexProfileFraction(1) + defer runtime.SetMutexProfileFraction(old) + + needContention.Store(int64(len(mus) - 1)) + metricGrowth, profileGrowth, n, _ := testcase(true, stks, workers, fn)(t) + + if have, want := metricGrowth, delay.Seconds()*float64(len(mus)); have < want { + // The test imposes a delay with usleep, verified with calls to + // nanotime. Compare against the runtime/metrics package's view + // (based on nanotime) rather than runtime/pprof's view (based + // on cputicks). 
+ t.Errorf("runtime/metrics reported less than the known minimum contention duration (%fs < %fs)", have, want) + } + if have, want := n, int64(len(mus)); have != want { + t.Errorf("mutex profile reported contention count different from the known true count (%d != %d)", have, want) + } + + const slop = 1.5 // account for nanotime vs cputicks + if profileGrowth > slop*metricGrowth || metricGrowth > slop*profileGrowth { + t.Errorf("views differ by more than %fx", slop) + } + }) + + t.Run("sample-2", func(t *testing.T) { + old := runtime.SetMutexProfileFraction(2) + defer runtime.SetMutexProfileFraction(old) + + needContention.Store(int64(len(mus) - 1)) + metricGrowth, profileGrowth, n, _ := testcase(true, stks, workers, fn)(t) + + // With 100 trials and profile fraction of 2, we expect to capture + // 50 samples. Allow the test to pass if we get at least 20 samples; + // the CDF of the binomial distribution says there's less than a + // 1e-9 chance of that, which is an acceptably low flakiness rate. + const samplingSlop = 2.5 + + if have, want := metricGrowth, delay.Seconds()*float64(len(mus)); samplingSlop*have < want { + // The test imposes a delay with usleep, verified with calls to + // nanotime. Compare against the runtime/metrics package's view + // (based on nanotime) rather than runtime/pprof's view (based + // on cputicks). + t.Errorf("runtime/metrics reported less than the known minimum contention duration (%f * %fs < %fs)", samplingSlop, have, want) + } + if have, want := n, int64(len(mus)); float64(have) > float64(want)*samplingSlop || float64(want) > float64(have)*samplingSlop { + t.Errorf("mutex profile reported contention count too different from the expected count (%d far from %d)", have, want) + } + + const timerSlop = 1.5 * samplingSlop // account for nanotime vs cputicks, plus the two views' independent sampling + if profileGrowth > timerSlop*metricGrowth || metricGrowth > timerSlop*profileGrowth { + t.Errorf("views differ by more than %fx", timerSlop) + } + }) + }) + + t.Run("runtime.semrelease", func(t *testing.T) { + old := runtime.SetMutexProfileFraction(1) + defer runtime.SetMutexProfileFraction(old) + + const workers = 3 + if runtime.GOMAXPROCS(0) < workers { + t.Skipf("creating and observing contention on runtime-internal semaphores requires GOMAXPROCS >= %d", workers) + } + + var sem uint32 = 1 + var tries atomic.Int32 + tries.Store(10_000_000) // prefer controlled failure to timeout + var sawContention atomic.Int32 + var need int32 = 1 + fn := func() bool { + if sawContention.Load() >= need { + return false + } + if tries.Add(-1) < 0 { + return false + } + + runtime.Semacquire(&sem) + runtime.Semrelease1(&sem, false, 0) + if runtime.MutexContended(runtime.SemRootLock(&sem)) { + sawContention.Add(1) + } + return true + } + + stks := [][]string{ + { + "runtime.unlock", + "runtime.semrelease1", + "runtime_test.TestRuntimeLockMetricsAndProfile.func6.1", + "runtime_test.(*contentionWorker).run", + }, + { + "runtime.unlock", + "runtime.semacquire1", + "runtime.semacquire", + "runtime_test.TestRuntimeLockMetricsAndProfile.func6.1", + "runtime_test.(*contentionWorker).run", + }, + } + + // Verify that we get call stack we expect, with anything more than zero + // cycles / zero samples. The duration of each contention event is too + // small relative to the expected overhead for us to verify its value + // more directly. Leave that to the explicit lock/unlock test. 
+ + testcase(false, stks, workers, fn)(t) + + if remaining := tries.Load(); remaining >= 0 { + t.Logf("finished test early (%d tries remaining)", remaining) + } + }) +} + +// contentionWorker provides cleaner call stacks for lock contention profile tests +type contentionWorker struct { + before func() + fn func() bool + after func() +} + +func (w *contentionWorker) run() { + defer w.after() + w.before() + + for w.fn() { + } +} + +func TestMetricHeapUnusedLargeObjectOverflow(t *testing.T) { + // This test makes sure /memory/classes/heap/unused:bytes + // doesn't overflow when allocating and deallocating large + // objects. It is a regression test for #67019. + done := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + for i := 0; i < 10; i++ { + runtime.Escape(make([]byte, 1<<20)) + } + runtime.GC() + select { + case <-done: + return + default: + } + } + }() + s := []metrics.Sample{ + {Name: "/memory/classes/heap/unused:bytes"}, + } + for i := 0; i < 1000; i++ { + metrics.Read(s) + if s[0].Value.Uint64() > 1<<40 { + t.Errorf("overflow") + break + } + } + done <- struct{}{} + wg.Wait() +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mfinal.go b/platform/dbops/binaries/go/go/src/runtime/mfinal.go new file mode 100644 index 0000000000000000000000000000000000000000..7d9d547c0f99d597aa73d49d5889cd2486496bd4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mfinal.go @@ -0,0 +1,552 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Garbage collector: finalizers and block profiling. + +package runtime + +import ( + "internal/abi" + "internal/goarch" + "internal/goexperiment" + "runtime/internal/atomic" + "runtime/internal/sys" + "unsafe" +) + +// finblock is an array of finalizers to be executed. finblocks are +// arranged in a linked list for the finalizer queue. +// +// finblock is allocated from non-GC'd memory, so any heap pointers +// must be specially handled. GC currently assumes that the finalizer +// queue does not grow during marking (but it can shrink). +type finblock struct { + _ sys.NotInHeap + alllink *finblock + next *finblock + cnt uint32 + _ int32 + fin [(_FinBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer +} + +var fingStatus atomic.Uint32 + +// finalizer goroutine status. +const ( + fingUninitialized uint32 = iota + fingCreated uint32 = 1 << (iota - 1) + fingRunningFinalizer + fingWait + fingWake +) + +var finlock mutex // protects the following variables +var fing *g // goroutine that runs finalizers +var finq *finblock // list of finalizers that are to be executed +var finc *finblock // cache of free blocks +var finptrmask [_FinBlockSize / goarch.PtrSize / 8]byte + +var allfin *finblock // list of all blocks + +// NOTE: Layout known to queuefinalizer. +type finalizer struct { + fn *funcval // function to call (may be a heap pointer) + arg unsafe.Pointer // ptr to object (may be a heap pointer) + nret uintptr // bytes of return values from fn + fint *_type // type of first argument of fn + ot *ptrtype // type of ptr to object (may be a heap pointer) +} + +var finalizer1 = [...]byte{ + // Each Finalizer is 5 words, ptr ptr INT ptr ptr (INT = uintptr here) + // Each byte describes 8 words. 
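+ // Since 5 and 8 share no common factor, the mask pattern repeats only
+ // after lcm(5, 8) = 40 words, i.e. after 8 finalizers or 5 mask bytes.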
+ // Need 8 Finalizers described by 5 bytes before pattern repeats: + // ptr ptr INT ptr ptr + // ptr ptr INT ptr ptr + // ptr ptr INT ptr ptr + // ptr ptr INT ptr ptr + // ptr ptr INT ptr ptr + // ptr ptr INT ptr ptr + // ptr ptr INT ptr ptr + // ptr ptr INT ptr ptr + // aka + // + // ptr ptr INT ptr ptr ptr ptr INT + // ptr ptr ptr ptr INT ptr ptr ptr + // ptr INT ptr ptr ptr ptr INT ptr + // ptr ptr ptr INT ptr ptr ptr ptr + // INT ptr ptr ptr ptr INT ptr ptr + // + // Assumptions about Finalizer layout checked below. + 1<<0 | 1<<1 | 0<<2 | 1<<3 | 1<<4 | 1<<5 | 1<<6 | 0<<7, + 1<<0 | 1<<1 | 1<<2 | 1<<3 | 0<<4 | 1<<5 | 1<<6 | 1<<7, + 1<<0 | 0<<1 | 1<<2 | 1<<3 | 1<<4 | 1<<5 | 0<<6 | 1<<7, + 1<<0 | 1<<1 | 1<<2 | 0<<3 | 1<<4 | 1<<5 | 1<<6 | 1<<7, + 0<<0 | 1<<1 | 1<<2 | 1<<3 | 1<<4 | 0<<5 | 1<<6 | 1<<7, +} + +// lockRankMayQueueFinalizer records the lock ranking effects of a +// function that may call queuefinalizer. +func lockRankMayQueueFinalizer() { + lockWithRankMayAcquire(&finlock, getLockRank(&finlock)) +} + +func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) { + if gcphase != _GCoff { + // Currently we assume that the finalizer queue won't + // grow during marking so we don't have to rescan it + // during mark termination. If we ever need to lift + // this assumption, we can do it by adding the + // necessary barriers to queuefinalizer (which it may + // have automatically). + throw("queuefinalizer during GC") + } + + lock(&finlock) + if finq == nil || finq.cnt == uint32(len(finq.fin)) { + if finc == nil { + finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gcMiscSys)) + finc.alllink = allfin + allfin = finc + if finptrmask[0] == 0 { + // Build pointer mask for Finalizer array in block. + // Check assumptions made in finalizer1 array above. + if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize || + unsafe.Offsetof(finalizer{}.fn) != 0 || + unsafe.Offsetof(finalizer{}.arg) != goarch.PtrSize || + unsafe.Offsetof(finalizer{}.nret) != 2*goarch.PtrSize || + unsafe.Offsetof(finalizer{}.fint) != 3*goarch.PtrSize || + unsafe.Offsetof(finalizer{}.ot) != 4*goarch.PtrSize) { + throw("finalizer out of sync") + } + for i := range finptrmask { + finptrmask[i] = finalizer1[i%len(finalizer1)] + } + } + } + block := finc + finc = block.next + block.next = finq + finq = block + } + f := &finq.fin[finq.cnt] + atomic.Xadd(&finq.cnt, +1) // Sync with markroots + f.fn = fn + f.nret = nret + f.fint = fint + f.ot = ot + f.arg = p + unlock(&finlock) + fingStatus.Or(fingWake) +} + +//go:nowritebarrier +func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) { + for fb := allfin; fb != nil; fb = fb.alllink { + for i := uint32(0); i < fb.cnt; i++ { + f := &fb.fin[i] + callback(f.fn, f.arg, f.nret, f.fint, f.ot) + } + } +} + +func wakefing() *g { + if ok := fingStatus.CompareAndSwap(fingCreated|fingWait|fingWake, fingCreated); ok { + return fing + } + return nil +} + +func createfing() { + // start the finalizer goroutine exactly once + if fingStatus.Load() == fingUninitialized && fingStatus.CompareAndSwap(fingUninitialized, fingCreated) { + go runfinq() + } +} + +func finalizercommit(gp *g, lock unsafe.Pointer) bool { + unlock((*mutex)(lock)) + // fingStatus should be modified after fing is put into a waiting state + // to avoid waking fing in running state, even if it is about to be parked. + fingStatus.Or(fingWait) + return true +} + +// This is the goroutine that runs all of the finalizers. 
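+// It pulls whole finblocks off finq, reflectcalls each finalizer in the
+// block, returns emptied blocks to the finc free list, and parks on
+// finlock (via finalizercommit) when the queue is empty, until it is
+// woken again via wakefing.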
+func runfinq() { + var ( + frame unsafe.Pointer + framecap uintptr + argRegs int + ) + + gp := getg() + lock(&finlock) + fing = gp + unlock(&finlock) + + for { + lock(&finlock) + fb := finq + finq = nil + if fb == nil { + gopark(finalizercommit, unsafe.Pointer(&finlock), waitReasonFinalizerWait, traceBlockSystemGoroutine, 1) + continue + } + argRegs = intArgRegs + unlock(&finlock) + if raceenabled { + racefingo() + } + for fb != nil { + for i := fb.cnt; i > 0; i-- { + f := &fb.fin[i-1] + + var regs abi.RegArgs + // The args may be passed in registers or on stack. Even for + // the register case, we still need the spill slots. + // TODO: revisit if we remove spill slots. + // + // Unfortunately because we can have an arbitrary + // amount of returns and it would be complex to try and + // figure out how many of those can get passed in registers, + // just conservatively assume none of them do. + framesz := unsafe.Sizeof((any)(nil)) + f.nret + if framecap < framesz { + // The frame does not contain pointers interesting for GC, + // all not yet finalized objects are stored in finq. + // If we do not mark it as FlagNoScan, + // the last finalized object is not collected. + frame = mallocgc(framesz, nil, true) + framecap = framesz + } + + if f.fint == nil { + throw("missing type in runfinq") + } + r := frame + if argRegs > 0 { + r = unsafe.Pointer(®s.Ints) + } else { + // frame is effectively uninitialized + // memory. That means we have to clear + // it before writing to it to avoid + // confusing the write barrier. + *(*[2]uintptr)(frame) = [2]uintptr{} + } + switch f.fint.Kind_ & kindMask { + case kindPtr: + // direct use of pointer + *(*unsafe.Pointer)(r) = f.arg + case kindInterface: + ityp := (*interfacetype)(unsafe.Pointer(f.fint)) + // set up with empty interface + (*eface)(r)._type = &f.ot.Type + (*eface)(r).data = f.arg + if len(ityp.Methods) != 0 { + // convert to interface with methods + // this conversion is guaranteed to succeed - we checked in SetFinalizer + (*iface)(r).tab = assertE2I(ityp, (*eface)(r)._type) + } + default: + throw("bad kind in runfinq") + } + fingStatus.Or(fingRunningFinalizer) + reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz), uint32(framesz), ®s) + fingStatus.And(^fingRunningFinalizer) + + // Drop finalizer queue heap references + // before hiding them from markroot. + // This also ensures these will be + // clear if we reuse the finalizer. + f.fn = nil + f.arg = nil + f.ot = nil + atomic.Store(&fb.cnt, i-1) + } + next := fb.next + lock(&finlock) + fb.next = finc + finc = fb + unlock(&finlock) + fb = next + } + } +} + +func isGoPointerWithoutSpan(p unsafe.Pointer) bool { + // 0-length objects are okay. + if p == unsafe.Pointer(&zerobase) { + return true + } + + // Global initializers might be linker-allocated. + // var Foo = &Object{} + // func main() { + // runtime.SetFinalizer(Foo, nil) + // } + // The relevant segments are: noptrdata, data, bss, noptrbss. + // We cannot assume they are in any order or even contiguous, + // due to external linking. 
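+ // Instead, check each loaded module's four segments individually.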
+ for datap := &firstmoduledata; datap != nil; datap = datap.next { + if datap.noptrdata <= uintptr(p) && uintptr(p) < datap.enoptrdata || + datap.data <= uintptr(p) && uintptr(p) < datap.edata || + datap.bss <= uintptr(p) && uintptr(p) < datap.ebss || + datap.noptrbss <= uintptr(p) && uintptr(p) < datap.enoptrbss { + return true + } + } + return false +} + +// blockUntilEmptyFinalizerQueue blocks until either the finalizer +// queue is emptied (and the finalizers have executed) or the timeout +// is reached. Returns true if the finalizer queue was emptied. +// This is used by the runtime and sync tests. +func blockUntilEmptyFinalizerQueue(timeout int64) bool { + start := nanotime() + for nanotime()-start < timeout { + lock(&finlock) + // We know the queue has been drained when both finq is nil + // and the finalizer g has stopped executing. + empty := finq == nil + empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait + unlock(&finlock) + if empty { + return true + } + Gosched() + } + return false +} + +// SetFinalizer sets the finalizer associated with obj to the provided +// finalizer function. When the garbage collector finds an unreachable block +// with an associated finalizer, it clears the association and runs +// finalizer(obj) in a separate goroutine. This makes obj reachable again, +// but now without an associated finalizer. Assuming that SetFinalizer +// is not called again, the next time the garbage collector sees +// that obj is unreachable, it will free obj. +// +// SetFinalizer(obj, nil) clears any finalizer associated with obj. +// +// The argument obj must be a pointer to an object allocated by calling +// new, by taking the address of a composite literal, or by taking the +// address of a local variable. +// The argument finalizer must be a function that takes a single argument +// to which obj's type can be assigned, and can have arbitrary ignored return +// values. If either of these is not true, SetFinalizer may abort the +// program. +// +// Finalizers are run in dependency order: if A points at B, both have +// finalizers, and they are otherwise unreachable, only the finalizer +// for A runs; once A is freed, the finalizer for B can run. +// If a cyclic structure includes a block with a finalizer, that +// cycle is not guaranteed to be garbage collected and the finalizer +// is not guaranteed to run, because there is no ordering that +// respects the dependencies. +// +// The finalizer is scheduled to run at some arbitrary time after the +// program can no longer reach the object to which obj points. +// There is no guarantee that finalizers will run before a program exits, +// so typically they are useful only for releasing non-memory resources +// associated with an object during a long-running program. +// For example, an [os.File] object could use a finalizer to close the +// associated operating system file descriptor when a program discards +// an os.File without calling Close, but it would be a mistake +// to depend on a finalizer to flush an in-memory I/O buffer such as a +// [bufio.Writer], because the buffer would not be flushed at program exit. +// +// It is not guaranteed that a finalizer will run if the size of *obj is +// zero bytes, because it may share same address with other zero-size +// objects in memory. See https://go.dev/ref/spec#Size_and_alignment_guarantees. +// +// It is not guaranteed that a finalizer will run for objects allocated +// in initializers for package-level variables. 
Such objects may be +// linker-allocated, not heap-allocated. +// +// Note that because finalizers may execute arbitrarily far into the future +// after an object is no longer referenced, the runtime is allowed to perform +// a space-saving optimization that batches objects together in a single +// allocation slot. The finalizer for an unreferenced object in such an +// allocation may never run if it always exists in the same batch as a +// referenced object. Typically, this batching only happens for tiny +// (on the order of 16 bytes or less) and pointer-free objects. +// +// A finalizer may run as soon as an object becomes unreachable. +// In order to use finalizers correctly, the program must ensure that +// the object is reachable until it is no longer required. +// Objects stored in global variables, or that can be found by tracing +// pointers from a global variable, are reachable. For other objects, +// pass the object to a call of the [KeepAlive] function to mark the +// last point in the function where the object must be reachable. +// +// For example, if p points to a struct, such as os.File, that contains +// a file descriptor d, and p has a finalizer that closes that file +// descriptor, and if the last use of p in a function is a call to +// syscall.Write(p.d, buf, size), then p may be unreachable as soon as +// the program enters [syscall.Write]. The finalizer may run at that moment, +// closing p.d, causing syscall.Write to fail because it is writing to +// a closed file descriptor (or, worse, to an entirely different +// file descriptor opened by a different goroutine). To avoid this problem, +// call KeepAlive(p) after the call to syscall.Write. +// +// A single goroutine runs all finalizers for a program, sequentially. +// If a finalizer must run for a long time, it should do so by starting +// a new goroutine. +// +// In the terminology of the Go memory model, a call +// SetFinalizer(x, f) “synchronizes before” the finalization call f(x). +// However, there is no guarantee that KeepAlive(x) or any other use of x +// “synchronizes before” f(x), so in general a finalizer should use a mutex +// or other synchronization mechanism if it needs to access mutable state in x. +// For example, consider a finalizer that inspects a mutable field in x +// that is modified from time to time in the main program before x +// becomes unreachable and the finalizer is invoked. +// The modifications in the main program and the inspection in the finalizer +// need to use appropriate synchronization, such as mutexes or atomic updates, +// to avoid read-write races. +func SetFinalizer(obj any, finalizer any) { + if debug.sbrk != 0 { + // debug.sbrk never frees memory, so no finalizers run + // (and we don't have the data structures to record them). + return + } + e := efaceOf(&obj) + etyp := e._type + if etyp == nil { + throw("runtime.SetFinalizer: first argument is nil") + } + if etyp.Kind_&kindMask != kindPtr { + throw("runtime.SetFinalizer: first argument is " + toRType(etyp).string() + ", not pointer") + } + ot := (*ptrtype)(unsafe.Pointer(etyp)) + if ot.Elem == nil { + throw("nil elem type!") + } + + if inUserArenaChunk(uintptr(e.data)) { + // Arena-allocated objects are not eligible for finalizers. 
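+ // (Arena chunks are freed explicitly and in bulk, so the GC never
+ // determines the lifetime of an individual arena object and could not
+ // schedule its finalizer.)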
+ throw("runtime.SetFinalizer: first argument was allocated into an arena") + } + + // find the containing object + base, span, _ := findObject(uintptr(e.data), 0, 0) + + if base == 0 { + if isGoPointerWithoutSpan(e.data) { + return + } + throw("runtime.SetFinalizer: pointer not in allocated block") + } + + // Move base forward if we've got an allocation header. + if goexperiment.AllocHeaders && !span.spanclass.noscan() && !heapBitsInSpan(span.elemsize) && span.spanclass.sizeclass() != 0 { + base += mallocHeaderSize + } + + if uintptr(e.data) != base { + // As an implementation detail we allow to set finalizers for an inner byte + // of an object if it could come from tiny alloc (see mallocgc for details). + if ot.Elem == nil || ot.Elem.PtrBytes != 0 || ot.Elem.Size_ >= maxTinySize { + throw("runtime.SetFinalizer: pointer not at beginning of allocated block") + } + } + + f := efaceOf(&finalizer) + ftyp := f._type + if ftyp == nil { + // switch to system stack and remove finalizer + systemstack(func() { + removefinalizer(e.data) + }) + return + } + + if ftyp.Kind_&kindMask != kindFunc { + throw("runtime.SetFinalizer: second argument is " + toRType(ftyp).string() + ", not a function") + } + ft := (*functype)(unsafe.Pointer(ftyp)) + if ft.IsVariadic() { + throw("runtime.SetFinalizer: cannot pass " + toRType(etyp).string() + " to finalizer " + toRType(ftyp).string() + " because dotdotdot") + } + if ft.InCount != 1 { + throw("runtime.SetFinalizer: cannot pass " + toRType(etyp).string() + " to finalizer " + toRType(ftyp).string()) + } + fint := ft.InSlice()[0] + switch { + case fint == etyp: + // ok - same type + goto okarg + case fint.Kind_&kindMask == kindPtr: + if (fint.Uncommon() == nil || etyp.Uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).Elem == ot.Elem { + // ok - not same type, but both pointers, + // one or the other is unnamed, and same element type, so assignable. + goto okarg + } + case fint.Kind_&kindMask == kindInterface: + ityp := (*interfacetype)(unsafe.Pointer(fint)) + if len(ityp.Methods) == 0 { + // ok - satisfies empty interface + goto okarg + } + if itab := assertE2I2(ityp, efaceOf(&obj)._type); itab != nil { + goto okarg + } + } + throw("runtime.SetFinalizer: cannot pass " + toRType(etyp).string() + " to finalizer " + toRType(ftyp).string()) +okarg: + // compute size needed for return parameters + nret := uintptr(0) + for _, t := range ft.OutSlice() { + nret = alignUp(nret, uintptr(t.Align_)) + t.Size_ + } + nret = alignUp(nret, goarch.PtrSize) + + // make sure we have a finalizer goroutine + createfing() + + systemstack(func() { + if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) { + throw("runtime.SetFinalizer: finalizer already set") + } + }) +} + +// Mark KeepAlive as noinline so that it is easily detectable as an intrinsic. +// +//go:noinline + +// KeepAlive marks its argument as currently reachable. +// This ensures that the object is not freed, and its finalizer is not run, +// before the point in the program where KeepAlive is called. +// +// A very simplified example showing where KeepAlive is required: +// +// type File struct { d int } +// d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0) +// // ... do something if err != nil ... +// p := &File{d} +// runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) }) +// var buf [10]byte +// n, err := syscall.Read(p.d, buf[:]) +// // Ensure p is not finalized until Read returns. +// runtime.KeepAlive(p) +// // No more uses of p after this point. 
+// +// Without the KeepAlive call, the finalizer could run at the start of +// [syscall.Read], closing the file descriptor before syscall.Read makes +// the actual system call. +// +// Note: KeepAlive should only be used to prevent finalizers from +// running prematurely. In particular, when used with [unsafe.Pointer], +// the rules for valid uses of unsafe.Pointer still apply. +func KeepAlive(x any) { + // Introduce a use of x that the compiler can't eliminate. + // This makes sure x is alive on entry. We need x to be alive + // on entry for "defer runtime.KeepAlive(x)"; see issue 21402. + if cgoAlwaysFalse { + println(x) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mfinal_test.go b/platform/dbops/binaries/go/go/src/runtime/mfinal_test.go new file mode 100644 index 0000000000000000000000000000000000000000..87d31c472c1b01110348d990009b3b68f5584b3a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mfinal_test.go @@ -0,0 +1,250 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "runtime" + "testing" + "time" + "unsafe" +) + +type Tintptr *int // assignable to *int +type Tint int // *Tint implements Tinter, interface{} + +func (t *Tint) m() {} + +type Tinter interface { + m() +} + +func TestFinalizerType(t *testing.T) { + ch := make(chan bool, 10) + finalize := func(x *int) { + if *x != 97531 { + t.Errorf("finalizer %d, want %d", *x, 97531) + } + ch <- true + } + + var finalizerTests = []struct { + convert func(*int) any + finalizer any + }{ + {func(x *int) any { return x }, func(v *int) { finalize(v) }}, + {func(x *int) any { return Tintptr(x) }, func(v Tintptr) { finalize(v) }}, + {func(x *int) any { return Tintptr(x) }, func(v *int) { finalize(v) }}, + {func(x *int) any { return (*Tint)(x) }, func(v *Tint) { finalize((*int)(v)) }}, + {func(x *int) any { return (*Tint)(x) }, func(v Tinter) { finalize((*int)(v.(*Tint))) }}, + // Test case for argument spill slot. + // If the spill slot was not counted for the frame size, it will (incorrectly) choose + // call32 as the result has (exactly) 32 bytes. When the argument actually spills, + // it clobbers the caller's frame (likely the return PC). + {func(x *int) any { return x }, func(v any) [4]int64 { + print() // force spill + finalize(v.(*int)) + return [4]int64{} + }}, + } + + for _, tt := range finalizerTests { + done := make(chan bool, 1) + go func() { + // allocate struct with pointer to avoid hitting tinyalloc. + // Otherwise we can't be sure when the allocation will + // be freed. + type T struct { + v int + p unsafe.Pointer + } + v := &new(T).v + *v = 97531 + runtime.SetFinalizer(tt.convert(v), tt.finalizer) + v = nil + done <- true + }() + <-done + runtime.GC() + <-ch + } +} + +type bigValue struct { + fill uint64 + it bool + up string +} + +func TestFinalizerInterfaceBig(t *testing.T) { + ch := make(chan bool) + done := make(chan bool, 1) + go func() { + v := &bigValue{0xDEADBEEFDEADBEEF, true, "It matters not how strait the gate"} + old := *v + runtime.SetFinalizer(v, func(v any) { + i, ok := v.(*bigValue) + if !ok { + t.Errorf("finalizer called with type %T, want *bigValue", v) + } + if *i != old { + t.Errorf("finalizer called with %+v, want %+v", *i, old) + } + close(ch) + }) + v = nil + done <- true + }() + <-done + runtime.GC() + <-ch +} + +func fin(v *int) { +} + +// Verify we don't crash at least. 
golang.org/issue/6857 +func TestFinalizerZeroSizedStruct(t *testing.T) { + type Z struct{} + z := new(Z) + runtime.SetFinalizer(z, func(*Z) {}) +} + +func BenchmarkFinalizer(b *testing.B) { + const Batch = 1000 + b.RunParallel(func(pb *testing.PB) { + var data [Batch]*int + for i := 0; i < Batch; i++ { + data[i] = new(int) + } + for pb.Next() { + for i := 0; i < Batch; i++ { + runtime.SetFinalizer(data[i], fin) + } + for i := 0; i < Batch; i++ { + runtime.SetFinalizer(data[i], nil) + } + } + }) +} + +func BenchmarkFinalizerRun(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + v := new(int) + runtime.SetFinalizer(v, fin) + } + }) +} + +// One chunk must be exactly one sizeclass in size. +// It should be a sizeclass not used much by others, so we +// have a greater chance of finding adjacent ones. +// size class 19: 320 byte objects, 25 per page, 1 page alloc at a time +const objsize = 320 + +type objtype [objsize]byte + +func adjChunks() (*objtype, *objtype) { + var s []*objtype + + for { + c := new(objtype) + for _, d := range s { + if uintptr(unsafe.Pointer(c))+unsafe.Sizeof(*c) == uintptr(unsafe.Pointer(d)) { + return c, d + } + if uintptr(unsafe.Pointer(d))+unsafe.Sizeof(*c) == uintptr(unsafe.Pointer(c)) { + return d, c + } + } + s = append(s, c) + } +} + +// Make sure an empty slice on the stack doesn't pin the next object in memory. +func TestEmptySlice(t *testing.T) { + x, y := adjChunks() + + // the pointer inside xs points to y. + xs := x[objsize:] // change objsize to objsize-1 and the test passes + + fin := make(chan bool, 1) + runtime.SetFinalizer(y, func(z *objtype) { fin <- true }) + runtime.GC() + <-fin + xsglobal = xs // keep empty slice alive until here +} + +var xsglobal []byte + +func adjStringChunk() (string, *objtype) { + b := make([]byte, objsize) + for { + s := string(b) + t := new(objtype) + p := *(*uintptr)(unsafe.Pointer(&s)) + q := uintptr(unsafe.Pointer(t)) + if p+objsize == q { + return s, t + } + } +} + +// Make sure an empty string on the stack doesn't pin the next object in memory. +func TestEmptyString(t *testing.T) { + x, y := adjStringChunk() + + ss := x[objsize:] // change objsize to objsize-1 and the test passes + fin := make(chan bool, 1) + // set finalizer on string contents of y + runtime.SetFinalizer(y, func(z *objtype) { fin <- true }) + runtime.GC() + <-fin + ssglobal = ss // keep 0-length string live until here +} + +var ssglobal string + +// Test for issue 7656. +func TestFinalizerOnGlobal(t *testing.T) { + runtime.SetFinalizer(Foo1, func(p *Object1) {}) + runtime.SetFinalizer(Foo2, func(p *Object2) {}) + runtime.SetFinalizer(Foo1, nil) + runtime.SetFinalizer(Foo2, nil) +} + +type Object1 struct { + Something []byte +} + +type Object2 struct { + Something byte +} + +var ( + Foo2 = &Object2{} + Foo1 = &Object1{} +) + +func TestDeferKeepAlive(t *testing.T) { + if *flagQuick { + t.Skip("-quick") + } + + // See issue 21402. + t.Parallel() + type T *int // needs to be a pointer base type to avoid tinyalloc and its never-finalized behavior. 
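+ // The deferred KeepAlive below must keep x reachable for the entire
+ // function, so the finalizer must not run during the GC and the
+ // following sleep.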
+ x := new(T) + finRun := false + runtime.SetFinalizer(x, func(x *T) { + finRun = true + }) + defer runtime.KeepAlive(x) + runtime.GC() + time.Sleep(time.Second) + if finRun { + t.Errorf("finalizer ran prematurely") + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mfixalloc.go b/platform/dbops/binaries/go/go/src/runtime/mfixalloc.go new file mode 100644 index 0000000000000000000000000000000000000000..7760ada3977adfa4d33f1a64c55800dcce74092e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mfixalloc.go @@ -0,0 +1,109 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Fixed-size object allocator. Returned memory is not zeroed. +// +// See malloc.go for overview. + +package runtime + +import ( + "runtime/internal/sys" + "unsafe" +) + +// fixalloc is a simple free-list allocator for fixed size objects. +// Malloc uses a FixAlloc wrapped around sysAlloc to manage its +// mcache and mspan objects. +// +// Memory returned by fixalloc.alloc is zeroed by default, but the +// caller may take responsibility for zeroing allocations by setting +// the zero flag to false. This is only safe if the memory never +// contains heap pointers. +// +// The caller is responsible for locking around FixAlloc calls. +// Callers can keep state in the object but the first word is +// smashed by freeing and reallocating. +// +// Consider marking fixalloc'd types not in heap by embedding +// runtime/internal/sys.NotInHeap. +type fixalloc struct { + size uintptr + first func(arg, p unsafe.Pointer) // called first time p is returned + arg unsafe.Pointer + list *mlink + chunk uintptr // use uintptr instead of unsafe.Pointer to avoid write barriers + nchunk uint32 // bytes remaining in current chunk + nalloc uint32 // size of new chunks in bytes + inuse uintptr // in-use bytes now + stat *sysMemStat + zero bool // zero allocations +} + +// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).) +// Since assignments to mlink.next will result in a write barrier being performed +// this cannot be used by some of the internal GC structures. For example when +// the sweeper is placing an unmarked object on the free list it does not want the +// write barrier to be called since that could result in the object being reachable. +type mlink struct { + _ sys.NotInHeap + next *mlink +} + +// Initialize f to allocate objects of the given size, +// using the allocator to obtain chunks of memory. 
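+// For example, mheap sets up its mspan allocator along these lines:
+//
+//	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
+//	s := (*mspan)(h.spanalloc.alloc()) // with the appropriate lock held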
+func (f *fixalloc) init(size uintptr, first func(arg, p unsafe.Pointer), arg unsafe.Pointer, stat *sysMemStat) { + if size > _FixAllocChunk { + throw("runtime: fixalloc size too large") + } + size = max(size, unsafe.Sizeof(mlink{})) + + f.size = size + f.first = first + f.arg = arg + f.list = nil + f.chunk = 0 + f.nchunk = 0 + f.nalloc = uint32(_FixAllocChunk / size * size) // Round _FixAllocChunk down to an exact multiple of size to eliminate tail waste + f.inuse = 0 + f.stat = stat + f.zero = true +} + +func (f *fixalloc) alloc() unsafe.Pointer { + if f.size == 0 { + print("runtime: use of FixAlloc_Alloc before FixAlloc_Init\n") + throw("runtime: internal error") + } + + if f.list != nil { + v := unsafe.Pointer(f.list) + f.list = f.list.next + f.inuse += f.size + if f.zero { + memclrNoHeapPointers(v, f.size) + } + return v + } + if uintptr(f.nchunk) < f.size { + f.chunk = uintptr(persistentalloc(uintptr(f.nalloc), 0, f.stat)) + f.nchunk = f.nalloc + } + + v := unsafe.Pointer(f.chunk) + if f.first != nil { + f.first(f.arg, v) + } + f.chunk = f.chunk + f.size + f.nchunk -= uint32(f.size) + f.inuse += f.size + return v +} + +func (f *fixalloc) free(p unsafe.Pointer) { + f.inuse -= f.size + v := (*mlink)(p) + v.next = f.list + f.list = v +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mgc.go b/platform/dbops/binaries/go/go/src/runtime/mgc.go new file mode 100644 index 0000000000000000000000000000000000000000..6c51517522a1740453b0ac50037ad82c69bf5966 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mgc.go @@ -0,0 +1,1843 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Garbage collector (GC). +// +// The GC runs concurrently with mutator threads, is type accurate (aka precise), allows multiple +// GC thread to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is +// non-generational and non-compacting. Allocation is done using size segregated per P allocation +// areas to minimize fragmentation while eliminating locks in the common case. +// +// The algorithm decomposes into several steps. +// This is a high level description of the algorithm being used. For an overview of GC a good +// place to start is Richard Jones' gchandbook.org. +// +// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see +// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978. +// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978), +// 966-975. +// For journal quality proofs that these steps are complete, correct, and terminate see +// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world. +// Concurrency and Computation: Practice and Experience 15(3-5), 2003. +// +// 1. GC performs sweep termination. +// +// a. Stop the world. This causes all Ps to reach a GC safe-point. +// +// b. Sweep any unswept spans. There will only be unswept spans if +// this GC cycle was forced before the expected time. +// +// 2. GC performs the mark phase. +// +// a. Prepare for the mark phase by setting gcphase to _GCmark +// (from _GCoff), enabling the write barrier, enabling mutator +// assists, and enqueueing root mark jobs. No objects may be +// scanned until all Ps have enabled the write barrier, which is +// accomplished using STW. +// +// b. Start the world. 
From this point, GC work is done by mark +// workers started by the scheduler and by assists performed as +// part of allocation. The write barrier shades both the +// overwritten pointer and the new pointer value for any pointer +// writes (see mbarrier.go for details). Newly allocated objects +// are immediately marked black. +// +// c. GC performs root marking jobs. This includes scanning all +// stacks, shading all globals, and shading any heap pointers in +// off-heap runtime data structures. Scanning a stack stops a +// goroutine, shades any pointers found on its stack, and then +// resumes the goroutine. +// +// d. GC drains the work queue of grey objects, scanning each grey +// object to black and shading all pointers found in the object +// (which in turn may add those pointers to the work queue). +// +// e. Because GC work is spread across local caches, GC uses a +// distributed termination algorithm to detect when there are no +// more root marking jobs or grey objects (see gcMarkDone). At this +// point, GC transitions to mark termination. +// +// 3. GC performs mark termination. +// +// a. Stop the world. +// +// b. Set gcphase to _GCmarktermination, and disable workers and +// assists. +// +// c. Perform housekeeping like flushing mcaches. +// +// 4. GC performs the sweep phase. +// +// a. Prepare for the sweep phase by setting gcphase to _GCoff, +// setting up sweep state and disabling the write barrier. +// +// b. Start the world. From this point on, newly allocated objects +// are white, and allocating sweeps spans before use if necessary. +// +// c. GC does concurrent sweeping in the background and in response +// to allocation. See description below. +// +// 5. When sufficient allocation has taken place, replay the sequence +// starting with 1 above. See discussion of GC rate below. + +// Concurrent sweep. +// +// The sweep phase proceeds concurrently with normal program execution. +// The heap is swept span-by-span both lazily (when a goroutine needs another span) +// and concurrently in a background goroutine (this helps programs that are not CPU bound). +// At the end of STW mark termination all spans are marked as "needs sweeping". +// +// The background sweeper goroutine simply sweeps spans one-by-one. +// +// To avoid requesting more OS memory while there are unswept spans, when a +// goroutine needs another span, it first attempts to reclaim that much memory +// by sweeping. When a goroutine needs to allocate a new small-object span, it +// sweeps small-object spans for the same object size until it frees at least +// one object. When a goroutine needs to allocate large-object span from heap, +// it sweeps spans until it frees at least that many pages into heap. There is +// one case where this may not suffice: if a goroutine sweeps and frees two +// nonadjacent one-page spans to the heap, it will allocate a new two-page +// span, but there can still be other one-page unswept spans which could be +// combined into a two-page span. +// +// It's critical to ensure that no operations proceed on unswept spans (that would corrupt +// mark bits in GC bitmap). During GC all mcaches are flushed into the central cache, +// so they are empty. When a goroutine grabs a new span into mcache, it sweeps it. +// When a goroutine explicitly frees an object or sets a finalizer, it ensures that +// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish). +// The finalizer goroutine is kicked off only when all spans are swept. 
+// When the next GC starts, it sweeps all not-yet-swept spans (if any). + +// GC rate. +// Next GC is after we've allocated an extra amount of memory proportional to +// the amount already in use. The proportion is controlled by GOGC environment variable +// (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M +// (this mark is computed by the gcController.heapGoal method). This keeps the GC cost in +// linear proportion to the allocation cost. Adjusting GOGC just changes the linear constant +// (and also the amount of extra memory used). + +// Oblets +// +// In order to prevent long pauses while scanning large objects and to +// improve parallelism, the garbage collector breaks up scan jobs for +// objects larger than maxObletBytes into "oblets" of at most +// maxObletBytes. When scanning encounters the beginning of a large +// object, it scans only the first oblet and enqueues the remaining +// oblets as new scan jobs. + +package runtime + +import ( + "internal/cpu" + "runtime/internal/atomic" + "unsafe" +) + +const ( + _DebugGC = 0 + _FinBlockSize = 4 * 1024 + + // concurrentSweep is a debug flag. Disabling this flag + // ensures all spans are swept while the world is stopped. + concurrentSweep = true + + // debugScanConservative enables debug logging for stack + // frames that are scanned conservatively. + debugScanConservative = false + + // sweepMinHeapDistance is a lower bound on the heap distance + // (in bytes) reserved for concurrent sweeping between GC + // cycles. + sweepMinHeapDistance = 1024 * 1024 +) + +// heapObjectsCanMove always returns false in the current garbage collector. +// It exists for go4.org/unsafe/assume-no-moving-gc, which is an +// unfortunate idea that had an even more unfortunate implementation. +// Every time a new Go release happened, the package stopped building, +// and the authors had to add a new file with a new //go:build line, and +// then the entire ecosystem of packages with that as a dependency had to +// explicitly update to the new version. Many packages depend on +// assume-no-moving-gc transitively, through paths like +// inet.af/netaddr -> go4.org/intern -> assume-no-moving-gc. +// This was causing a significant amount of friction around each new +// release, so we added this bool for the package to //go:linkname +// instead. The bool is still unfortunate, but it's not as bad as +// breaking the ecosystem on every new release. +// +// If the Go garbage collector ever does move heap objects, we can set +// this to true to break all the programs using assume-no-moving-gc. +// +//go:linkname heapObjectsCanMove +func heapObjectsCanMove() bool { + return false +} + +func gcinit() { + if unsafe.Sizeof(workbuf{}) != _WorkbufSize { + throw("size of Workbuf is suboptimal") + } + // No sweep on the first cycle. + sweep.active.state.Store(sweepDrainedMask) + + // Initialize GC pacer state. + // Use the environment variable GOGC for the initial gcPercent value. + // Use the environment variable GOMEMLIMIT for the initial memoryLimit value. + gcController.init(readGOGC(), readGOMEMLIMIT()) + + work.startSema = 1 + work.markDoneSema = 1 + lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters) + lockInit(&work.assistQueue.lock, lockRankAssistQueue) + lockInit(&work.wbufSpans.lock, lockRankWbufSpans) +} + +// gcenable is called after the bulk of the runtime initialization, +// just before we're about to start letting user code run. 
+// It kicks off the background sweeper goroutine, the background +// scavenger goroutine, and enables GC. +func gcenable() { + // Kick off sweeping and scavenging. + c := make(chan int, 2) + go bgsweep(c) + go bgscavenge(c) + <-c + <-c + memstats.enablegc = true // now that runtime is initialized, GC is okay +} + +// Garbage collector phase. +// Indicates to write barrier and synchronization task to perform. +var gcphase uint32 + +// The compiler knows about this variable. +// If you change it, you must change builtin/runtime.go, too. +// If you change the first four bytes, you must also change the write +// barrier insertion code. +var writeBarrier struct { + enabled bool // compiler emits a check of this before calling write barrier + pad [3]byte // compiler uses 32-bit load for "enabled" field + alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load +} + +// gcBlackenEnabled is 1 if mutator assists and background mark +// workers are allowed to blacken objects. This must only be set when +// gcphase == _GCmark. +var gcBlackenEnabled uint32 + +const ( + _GCoff = iota // GC not running; sweeping in background, write barrier disabled + _GCmark // GC marking roots and workbufs: allocate black, write barrier ENABLED + _GCmarktermination // GC mark termination: allocate black, P's help GC, write barrier ENABLED +) + +//go:nosplit +func setGCPhase(x uint32) { + atomic.Store(&gcphase, x) + writeBarrier.enabled = gcphase == _GCmark || gcphase == _GCmarktermination +} + +// gcMarkWorkerMode represents the mode that a concurrent mark worker +// should operate in. +// +// Concurrent marking happens through four different mechanisms. One +// is mutator assists, which happen in response to allocations and are +// not scheduled. The other three are variations in the per-P mark +// workers and are distinguished by gcMarkWorkerMode. +type gcMarkWorkerMode int + +const ( + // gcMarkWorkerNotWorker indicates that the next scheduled G is not + // starting work and the mode should be ignored. + gcMarkWorkerNotWorker gcMarkWorkerMode = iota + + // gcMarkWorkerDedicatedMode indicates that the P of a mark + // worker is dedicated to running that mark worker. The mark + // worker should run without preemption. + gcMarkWorkerDedicatedMode + + // gcMarkWorkerFractionalMode indicates that a P is currently + // running the "fractional" mark worker. The fractional worker + // is necessary when GOMAXPROCS*gcBackgroundUtilization is not + // an integer and using only dedicated workers would result in + // utilization too far from the target of gcBackgroundUtilization. + // The fractional worker should run until it is preempted and + // will be scheduled to pick up the fractional part of + // GOMAXPROCS*gcBackgroundUtilization. + gcMarkWorkerFractionalMode + + // gcMarkWorkerIdleMode indicates that a P is running the mark + // worker because it has nothing else to do. The idle worker + // should run until it is preempted and account its time + // against gcController.idleMarkTime. + gcMarkWorkerIdleMode +) + +// gcMarkWorkerModeStrings are the strings labels of gcMarkWorkerModes +// to use in execution traces. +var gcMarkWorkerModeStrings = [...]string{ + "Not worker", + "GC (dedicated)", + "GC (fractional)", + "GC (idle)", +} + +// pollFractionalWorkerExit reports whether a fractional mark worker +// should self-preempt. It assumes it is called from the fractional +// worker. 
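+// The worker should exit once its own mark time, as a fraction of the
+// time since this cycle's mark phase began, exceeds the fractional
+// utilization goal; the check below adds 20% slack so the worker is not
+// behind again the instant it resumes.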
+func pollFractionalWorkerExit() bool { + // This should be kept in sync with the fractional worker + // scheduler logic in findRunnableGCWorker. + now := nanotime() + delta := now - gcController.markStartTime + if delta <= 0 { + return true + } + p := getg().m.p.ptr() + selfTime := p.gcFractionalMarkTime + (now - p.gcMarkWorkerStartTime) + // Add some slack to the utilization goal so that the + // fractional worker isn't behind again the instant it exits. + return float64(selfTime)/float64(delta) > 1.2*gcController.fractionalUtilizationGoal +} + +var work workType + +type workType struct { + full lfstack // lock-free list of full blocks workbuf + _ cpu.CacheLinePad // prevents false-sharing between full and empty + empty lfstack // lock-free list of empty blocks workbuf + _ cpu.CacheLinePad // prevents false-sharing between empty and nproc/nwait + + wbufSpans struct { + lock mutex + // free is a list of spans dedicated to workbufs, but + // that don't currently contain any workbufs. + free mSpanList + // busy is a list of all spans containing workbufs on + // one of the workbuf lists. + busy mSpanList + } + + // Restore 64-bit alignment on 32-bit. + _ uint32 + + // bytesMarked is the number of bytes marked this cycle. This + // includes bytes blackened in scanned objects, noscan objects + // that go straight to black, and permagrey objects scanned by + // markroot during the concurrent scan phase. This is updated + // atomically during the cycle. Updates may be batched + // arbitrarily, since the value is only read at the end of the + // cycle. + // + // Because of benign races during marking, this number may not + // be the exact number of marked bytes, but it should be very + // close. + // + // Put this field here because it needs 64-bit atomic access + // (and thus 8-byte alignment even on 32-bit architectures). + bytesMarked uint64 + + markrootNext uint32 // next markroot job + markrootJobs uint32 // number of markroot jobs + + nproc uint32 + tstart int64 + nwait uint32 + + // Number of roots of various root types. Set by gcMarkRootPrepare. + // + // nStackRoots == len(stackRoots), but we have nStackRoots for + // consistency. + nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int + + // Base indexes of each root type. Set by gcMarkRootPrepare. + baseData, baseBSS, baseSpans, baseStacks, baseEnd uint32 + + // stackRoots is a snapshot of all of the Gs that existed + // before the beginning of concurrent marking. The backing + // store of this must not be modified because it might be + // shared with allgs. + stackRoots []*g + + // Each type of GC state transition is protected by a lock. + // Since multiple threads can simultaneously detect the state + // transition condition, any thread that detects a transition + // condition must acquire the appropriate transition lock, + // re-check the transition condition and return if it no + // longer holds or perform the transition if it does. + // Likewise, any transition must invalidate the transition + // condition before releasing the lock. This ensures that each + // transition is performed by exactly one thread and threads + // that need the transition to happen block until it has + // happened. + // + // startSema protects the transition from "off" to mark or + // mark termination. + startSema uint32 + // markDoneSema protects transitions from mark to mark termination. 
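+ // It is held by gcMarkDone for the duration of its ragged barrier.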
+ markDoneSema uint32 + + bgMarkReady note // signal background mark worker has started + bgMarkDone uint32 // cas to 1 when at a background mark completion point + // Background mark completion signaling + + // mode is the concurrency mode of the current GC cycle. + mode gcMode + + // userForced indicates the current GC cycle was forced by an + // explicit user call. + userForced bool + + // initialHeapLive is the value of gcController.heapLive at the + // beginning of this GC cycle. + initialHeapLive uint64 + + // assistQueue is a queue of assists that are blocked because + // there was neither enough credit to steal or enough work to + // do. + assistQueue struct { + lock mutex + q gQueue + } + + // sweepWaiters is a list of blocked goroutines to wake when + // we transition from mark termination to sweep. + sweepWaiters struct { + lock mutex + list gList + } + + // cycles is the number of completed GC cycles, where a GC + // cycle is sweep termination, mark, mark termination, and + // sweep. This differs from memstats.numgc, which is + // incremented at mark termination. + cycles atomic.Uint32 + + // Timing/utilization stats for this cycle. + stwprocs, maxprocs int32 + tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start + + pauseNS int64 // total STW time this cycle + + // debug.gctrace heap sizes for this cycle. + heap0, heap1, heap2 uint64 + + // Cumulative estimated CPU usage. + cpuStats +} + +// GC runs a garbage collection and blocks the caller until the +// garbage collection is complete. It may also block the entire +// program. +func GC() { + // We consider a cycle to be: sweep termination, mark, mark + // termination, and sweep. This function shouldn't return + // until a full cycle has been completed, from beginning to + // end. Hence, we always want to finish up the current cycle + // and start a new one. That means: + // + // 1. In sweep termination, mark, or mark termination of cycle + // N, wait until mark termination N completes and transitions + // to sweep N. + // + // 2. In sweep N, help with sweep N. + // + // At this point we can begin a full cycle N+1. + // + // 3. Trigger cycle N+1 by starting sweep termination N+1. + // + // 4. Wait for mark termination N+1 to complete. + // + // 5. Help with sweep N+1 until it's done. + // + // This all has to be written to deal with the fact that the + // GC may move ahead on its own. For example, when we block + // until mark termination N, we may wake up in cycle N+2. + + // Wait until the current sweep termination, mark, and mark + // termination complete. + n := work.cycles.Load() + gcWaitOnMark(n) + + // We're now in sweep N or later. Trigger GC cycle N+1, which + // will first finish sweep N if necessary and then enter sweep + // termination N+1. + gcStart(gcTrigger{kind: gcTriggerCycle, n: n + 1}) + + // Wait for mark termination N+1 to complete. + gcWaitOnMark(n + 1) + + // Finish sweep N+1 before returning. We do this both to + // complete the cycle and because runtime.GC() is often used + // as part of tests and benchmarks to get the system into a + // relatively stable and isolated state. + for work.cycles.Load() == n+1 && sweepone() != ^uintptr(0) { + Gosched() + } + + // Callers may assume that the heap profile reflects the + // just-completed cycle when this returns (historically this + // happened because this was a STW GC), but right now the + // profile still reflects mark termination N, not N+1. 
+ // + // As soon as all of the sweep frees from cycle N+1 are done, + // we can go ahead and publish the heap profile. + // + // First, wait for sweeping to finish. (We know there are no + // more spans on the sweep queue, but we may be concurrently + // sweeping spans, so we have to wait.) + for work.cycles.Load() == n+1 && !isSweepDone() { + Gosched() + } + + // Now we're really done with sweeping, so we can publish the + // stable heap profile. Only do this if we haven't already hit + // another mark termination. + mp := acquirem() + cycle := work.cycles.Load() + if cycle == n+1 || (gcphase == _GCmark && cycle == n+2) { + mProf_PostSweep() + } + releasem(mp) +} + +// gcWaitOnMark blocks until GC finishes the Nth mark phase. If GC has +// already completed this mark phase, it returns immediately. +func gcWaitOnMark(n uint32) { + for { + // Disable phase transitions. + lock(&work.sweepWaiters.lock) + nMarks := work.cycles.Load() + if gcphase != _GCmark { + // We've already completed this cycle's mark. + nMarks++ + } + if nMarks > n { + // We're done. + unlock(&work.sweepWaiters.lock) + return + } + + // Wait until sweep termination, mark, and mark + // termination of cycle N complete. + work.sweepWaiters.list.push(getg()) + goparkunlock(&work.sweepWaiters.lock, waitReasonWaitForGCCycle, traceBlockUntilGCEnds, 1) + } +} + +// gcMode indicates how concurrent a GC cycle should be. +type gcMode int + +const ( + gcBackgroundMode gcMode = iota // concurrent GC and sweep + gcForceMode // stop-the-world GC now, concurrent sweep + gcForceBlockMode // stop-the-world GC now and STW sweep (forced by user) +) + +// A gcTrigger is a predicate for starting a GC cycle. Specifically, +// it is an exit condition for the _GCoff phase. +type gcTrigger struct { + kind gcTriggerKind + now int64 // gcTriggerTime: current time + n uint32 // gcTriggerCycle: cycle number to start +} + +type gcTriggerKind int + +const ( + // gcTriggerHeap indicates that a cycle should be started when + // the heap size reaches the trigger heap size computed by the + // controller. + gcTriggerHeap gcTriggerKind = iota + + // gcTriggerTime indicates that a cycle should be started when + // it's been more than forcegcperiod nanoseconds since the + // previous GC cycle. + gcTriggerTime + + // gcTriggerCycle indicates that a cycle should be started if + // we have not yet started cycle number gcTrigger.n (relative + // to work.cycles). + gcTriggerCycle +) + +// test reports whether the trigger condition is satisfied, meaning +// that the exit condition for the _GCoff phase has been met. The exit +// condition should be tested when allocating. +func (t gcTrigger) test() bool { + if !memstats.enablegc || panicking.Load() != 0 || gcphase != _GCoff { + return false + } + switch t.kind { + case gcTriggerHeap: + trigger, _ := gcController.trigger() + return gcController.heapLive.Load() >= trigger + case gcTriggerTime: + if gcController.gcPercent.Load() < 0 { + return false + } + lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime)) + return lastgc != 0 && t.now-lastgc > forcegcperiod + case gcTriggerCycle: + // t.n > work.cycles, but accounting for wraparound. + return int32(t.n-work.cycles.Load()) > 0 + } + return true +} + +// gcStart starts the GC. It transitions from _GCoff to _GCmark (if +// debug.gcstoptheworld == 0) or performs all of GC (if +// debug.gcstoptheworld != 0). +// +// This may return without performing this transition in some cases, +// such as when called on a system stack or with locks held. 
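+// On a successful transition it sweeps any leftover unswept spans,
+// re-checks the trigger under work.startSema, stops the world for sweep
+// termination, enters _GCmark to enable the write barrier and assists,
+// and then restarts the world for concurrent marking.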
+func gcStart(trigger gcTrigger) { + // Since this is called from malloc and malloc is called in + // the guts of a number of libraries that might be holding + // locks, don't attempt to start GC in non-preemptible or + // potentially unstable situations. + mp := acquirem() + if gp := getg(); gp == mp.g0 || mp.locks > 1 || mp.preemptoff != "" { + releasem(mp) + return + } + releasem(mp) + mp = nil + + // Pick up the remaining unswept/not being swept spans concurrently + // + // This shouldn't happen if we're being invoked in background + // mode since proportional sweep should have just finished + // sweeping everything, but rounding errors, etc, may leave a + // few spans unswept. In forced mode, this is necessary since + // GC can be forced at any point in the sweeping cycle. + // + // We check the transition condition continuously here in case + // this G gets delayed in to the next GC cycle. + for trigger.test() && sweepone() != ^uintptr(0) { + } + + // Perform GC initialization and the sweep termination + // transition. + semacquire(&work.startSema) + // Re-check transition condition under transition lock. + if !trigger.test() { + semrelease(&work.startSema) + return + } + + // In gcstoptheworld debug mode, upgrade the mode accordingly. + // We do this after re-checking the transition condition so + // that multiple goroutines that detect the heap trigger don't + // start multiple STW GCs. + mode := gcBackgroundMode + if debug.gcstoptheworld == 1 { + mode = gcForceMode + } else if debug.gcstoptheworld == 2 { + mode = gcForceBlockMode + } + + // Ok, we're doing it! Stop everybody else + semacquire(&gcsema) + semacquire(&worldsema) + + // For stats, check if this GC was forced by the user. + // Update it under gcsema to avoid gctrace getting wrong values. + work.userForced = trigger.kind == gcTriggerCycle + + trace := traceAcquire() + if trace.ok() { + trace.GCStart() + traceRelease(trace) + } + + // Check that all Ps have finished deferred mcache flushes. + for _, p := range allp { + if fg := p.mcache.flushGen.Load(); fg != mheap_.sweepgen { + println("runtime: p", p.id, "flushGen", fg, "!= sweepgen", mheap_.sweepgen) + throw("p mcache not flushed") + } + } + + gcBgMarkStartWorkers() + + systemstack(gcResetMarkState) + + work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs + if work.stwprocs > ncpu { + // This is used to compute CPU time of the STW phases, + // so it can't be more than ncpu, even if GOMAXPROCS is. + work.stwprocs = ncpu + } + work.heap0 = gcController.heapLive.Load() + work.pauseNS = 0 + work.mode = mode + + now := nanotime() + work.tSweepTerm = now + var stw worldStop + systemstack(func() { + stw = stopTheWorldWithSema(stwGCSweepTerm) + }) + // Finish sweep before we start concurrent scan. + systemstack(func() { + finishsweep_m() + }) + + // clearpools before we start the GC. If we wait the memory will not be + // reclaimed until the next GC cycle. + clearpools() + + work.cycles.Add(1) + + // Assists and workers can start the moment we start + // the world. + gcController.startCycle(now, int(gomaxprocs), trigger) + + // Notify the CPU limiter that assists may begin. + gcCPULimiter.startGCTransition(true, now) + + // In STW mode, disable scheduling of user Gs. This may also + // disable scheduling of this goroutine, so it may block as + // soon as we start the world again. + if mode != gcBackgroundMode { + schedEnableUser(false) + } + + // Enter concurrent mark phase and enable + // write barriers. 
+ // + // Because the world is stopped, all Ps will + // observe that write barriers are enabled by + // the time we start the world and begin + // scanning. + // + // Write barriers must be enabled before assists are + // enabled because they must be enabled before + // any non-leaf heap objects are marked. Since + // allocations are blocked until assists can + // happen, we want to enable assists as early as + // possible. + setGCPhase(_GCmark) + + gcBgMarkPrepare() // Must happen before assists are enabled. + gcMarkRootPrepare() + + // Mark all active tinyalloc blocks. Since we're + // allocating from these, they need to be black like + // other allocations. The alternative is to blacken + // the tiny block on every allocation from it, which + // would slow down the tiny allocator. + gcMarkTinyAllocs() + + // At this point all Ps have enabled the write + // barrier, thus maintaining the no white to + // black invariant. Enable mutator assists to + // put back-pressure on fast allocating + // mutators. + atomic.Store(&gcBlackenEnabled, 1) + + // In STW mode, we could block the instant systemstack + // returns, so make sure we're not preemptible. + mp = acquirem() + + // Concurrent mark. + systemstack(func() { + now = startTheWorldWithSema(0, stw) + work.pauseNS += now - stw.start + work.tMark = now + + sweepTermCpu := int64(work.stwprocs) * (work.tMark - work.tSweepTerm) + work.cpuStats.gcPauseTime += sweepTermCpu + work.cpuStats.gcTotalTime += sweepTermCpu + + // Release the CPU limiter. + gcCPULimiter.finishGCTransition(now) + }) + + // Release the world sema before Gosched() in STW mode + // because we will need to reacquire it later but before + // this goroutine becomes runnable again, and we could + // self-deadlock otherwise. + semrelease(&worldsema) + releasem(mp) + + // Make sure we block instead of returning to user code + // in STW mode. + if mode != gcBackgroundMode { + Gosched() + } + + semrelease(&work.startSema) +} + +// gcMarkDoneFlushed counts the number of P's with flushed work. +// +// Ideally this would be a captured local in gcMarkDone, but forEachP +// escapes its callback closure, so it can't capture anything. +// +// This is protected by markDoneSema. +var gcMarkDoneFlushed uint32 + +// gcMarkDone transitions the GC from mark to mark termination if all +// reachable objects have been marked (that is, there are no grey +// objects and can be no more in the future). Otherwise, it flushes +// all local work to the global queues where it can be discovered by +// other workers. +// +// This should be called when all local mark work has been drained and +// there are no remaining workers. Specifically, when +// +// work.nwait == work.nproc && !gcMarkWorkAvailable(p) +// +// The calling context must be preemptible. +// +// Flushing local work is important because idle Ps may have local +// work queued. This is the only way to make that work visible and +// drive GC to completion. +// +// It is explicitly okay to have write barriers in this function. If +// it does transition to mark termination, then all reachable objects +// have been marked, so the write barrier cannot shade any more +// objects. +func gcMarkDone() { + // Ensure only one thread is running the ragged barrier at a + // time. + semacquire(&work.markDoneSema) + +top: + // Re-check transition condition under transition lock. + // + // It's critical that this checks the global work queues are + // empty before performing the ragged barrier. 
Otherwise,
+ // there could be global work that a P could take after the P
+ // has passed the ragged barrier.
+ if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) {
+ semrelease(&work.markDoneSema)
+ return
+ }
+
+ // forEachP needs worldsema to execute, and we'll need it to
+ // stop the world later, so acquire worldsema now.
+ semacquire(&worldsema)
+
+ // Flush all local buffers and collect flushedWork flags.
+ gcMarkDoneFlushed = 0
+ forEachP(waitReasonGCMarkTermination, func(pp *p) {
+ // Flush the write barrier buffer, since this may add
+ // work to the gcWork.
+ wbBufFlush1(pp)
+
+ // Flush the gcWork, since this may create global work
+ // and set the flushedWork flag.
+ //
+ // TODO(austin): Break up these workbufs to
+ // better distribute work.
+ pp.gcw.dispose()
+ // Collect the flushedWork flag.
+ if pp.gcw.flushedWork {
+ atomic.Xadd(&gcMarkDoneFlushed, 1)
+ pp.gcw.flushedWork = false
+ }
+ })
+
+ if gcMarkDoneFlushed != 0 {
+ // More grey objects were discovered since the
+ // previous termination check, so there may be more
+ // work to do. Keep going. It's possible the
+ // transition condition became true again during the
+ // ragged barrier, so re-check it.
+ semrelease(&worldsema)
+ goto top
+ }
+
+ // There was no global work, no local work, and no Ps
+ // communicated work since we took markDoneSema. Therefore
+ // there are no grey objects and no more objects can be
+ // shaded. Transition to mark termination.
+ now := nanotime()
+ work.tMarkTerm = now
+ getg().m.preemptoff = "gcing"
+ var stw worldStop
+ systemstack(func() {
+ stw = stopTheWorldWithSema(stwGCMarkTerm)
+ })
+ // The gcphase is _GCmark, it will transition to _GCmarktermination
+ // below. The important thing is that the wb remains active until
+ // all marking is complete. This includes writes made by the GC.
+
+ // There is sometimes work left over when we enter mark termination due
+ // to write barriers performed after the completion barrier above.
+ // Detect this and resume concurrent mark. This is obviously
+ // unfortunate.
+ //
+ // See issue #27993 for details.
+ //
+ // Switch to the system stack to call wbBufFlush1, though in this case
+ // it doesn't matter because we're non-preemptible anyway.
+ restart := false
+ systemstack(func() {
+ for _, p := range allp {
+ wbBufFlush1(p)
+ if !p.gcw.empty() {
+ restart = true
+ break
+ }
+ }
+ })
+ if restart {
+ getg().m.preemptoff = ""
+ systemstack(func() {
+ now := startTheWorldWithSema(0, stw)
+ work.pauseNS += now - stw.start
+ })
+ semrelease(&worldsema)
+ goto top
+ }
+
+ gcComputeStartingStackSize()
+
+ // Disable assists and background workers. We must do
+ // this before waking blocked assists.
+ atomic.Store(&gcBlackenEnabled, 0)
+
+ // Notify the CPU limiter that GC assists will now cease.
+ gcCPULimiter.startGCTransition(false, now)
+
+ // Wake all blocked assists. These will run when we
+ // start the world again.
+ gcWakeAllAssists()
+
+ // Likewise, release the transition lock. Blocked
+ // workers and assists will run when we start the
+ // world again.
+ semrelease(&work.markDoneSema)
+
+ // In STW mode, re-enable user goroutines. These will be
+ // queued to run after we start the world.
+ schedEnableUser(true)
+
+ // endCycle depends on all gcWork cache stats being flushed.
+ // The termination algorithm above ensured that all of them
+ // were flushed, up to allocations made since the ragged barrier.
+ gcController.endCycle(now, int(gomaxprocs), work.userForced)
+
+ // Perform mark termination. This will restart the world.
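+ // In outline (a descriptive note, not new behavior), gcMarkTermination
+ // performs: setGCPhase(_GCmarktermination) -> gcMark -> setGCPhase(_GCoff)
+ // -> gcSweep -> startTheWorldWithSema.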
+ gcMarkTermination(stw) +} + +// World must be stopped and mark assists and background workers must be +// disabled. +func gcMarkTermination(stw worldStop) { + // Start marktermination (write barrier remains enabled for now). + setGCPhase(_GCmarktermination) + + work.heap1 = gcController.heapLive.Load() + startTime := nanotime() + + mp := acquirem() + mp.preemptoff = "gcing" + mp.traceback = 2 + curgp := mp.curg + // N.B. The execution tracer is not aware of this status + // transition and handles it specially based on the + // wait reason. + casGToWaiting(curgp, _Grunning, waitReasonGarbageCollection) + + // Run gc on the g0 stack. We do this so that the g stack + // we're currently running on will no longer change. Cuts + // the root set down a bit (g0 stacks are not scanned, and + // we don't need to scan gc's internal state). We also + // need to switch to g0 so we can shrink the stack. + systemstack(func() { + gcMark(startTime) + // Must return immediately. + // The outer function's stack may have moved + // during gcMark (it shrinks stacks, including the + // outer function's stack), so we must not refer + // to any of its variables. Return back to the + // non-system stack to pick up the new addresses + // before continuing. + }) + + var stwSwept bool + systemstack(func() { + work.heap2 = work.bytesMarked + if debug.gccheckmark > 0 { + // Run a full non-parallel, stop-the-world + // mark using checkmark bits, to check that we + // didn't forget to mark anything during the + // concurrent mark process. + startCheckmarks() + gcResetMarkState() + gcw := &getg().m.p.ptr().gcw + gcDrain(gcw, 0) + wbBufFlush1(getg().m.p.ptr()) + gcw.dispose() + endCheckmarks() + } + + // marking is complete so we can turn the write barrier off + setGCPhase(_GCoff) + stwSwept = gcSweep(work.mode) + }) + + mp.traceback = 0 + casgstatus(curgp, _Gwaiting, _Grunning) + + trace := traceAcquire() + if trace.ok() { + trace.GCDone() + traceRelease(trace) + } + + // all done + mp.preemptoff = "" + + if gcphase != _GCoff { + throw("gc done but gcphase != _GCoff") + } + + // Record heapInUse for scavenger. + memstats.lastHeapInUse = gcController.heapInUse.load() + + // Update GC trigger and pacing, as well as downstream consumers + // of this pacing information, for the next cycle. + systemstack(gcControllerCommit) + + // Update timing memstats + now := nanotime() + sec, nsec, _ := time_now() + unixNow := sec*1e9 + int64(nsec) + work.pauseNS += now - stw.start + work.tEnd = now + atomic.Store64(&memstats.last_gc_unix, uint64(unixNow)) // must be Unix time to make sense to user + atomic.Store64(&memstats.last_gc_nanotime, uint64(now)) // monotonic time for us + memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS) + memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow) + memstats.pause_total_ns += uint64(work.pauseNS) + + markTermCpu := int64(work.stwprocs) * (work.tEnd - work.tMarkTerm) + work.cpuStats.gcPauseTime += markTermCpu + work.cpuStats.gcTotalTime += markTermCpu + + // Accumulate CPU stats. + // + // Pass gcMarkPhase=true so we can get all the latest GC CPU stats in there too. + work.cpuStats.accumulate(now, true) + + // Compute overall GC CPU utilization. + // Omit idle marking time from the overall utilization here since it's "free". + memstats.gc_cpu_fraction = float64(work.cpuStats.gcTotalTime-work.cpuStats.gcIdleTime) / float64(work.cpuStats.totalTime) + + // Reset assist time and background time stats. 
+ //
+ // Do this now, instead of at the start of the next GC cycle, because
+ // these two may keep accumulating even if the GC is not active.
+ scavenge.assistTime.Store(0)
+ scavenge.backgroundTime.Store(0)
+
+ // Reset idle time stat.
+ sched.idleTime.Store(0)
+
+ if work.userForced {
+ memstats.numforcedgc++
+ }
+
+ // Bump GC cycle count and wake goroutines waiting on sweep.
+ lock(&work.sweepWaiters.lock)
+ memstats.numgc++
+ injectglist(&work.sweepWaiters.list)
+ unlock(&work.sweepWaiters.lock)
+
+ // Increment the scavenge generation now.
+ //
+ // This moment represents peak heap in use because we're
+ // about to start sweeping.
+ mheap_.pages.scav.index.nextGen()
+
+ // Release the CPU limiter.
+ gcCPULimiter.finishGCTransition(now)
+
+ // Finish the current heap profiling cycle and start a new
+ // heap profiling cycle. We do this before starting the world
+ // so events don't leak into the wrong cycle.
+ mProf_NextCycle()
+
+ // There may be stale spans in mcaches that need to be swept.
+ // Those aren't tracked in any sweep lists, so we need to
+ // count them against sweep completion until we ensure all
+ // those spans have been forced out.
+ //
+ // If gcSweep fully swept the heap (for example if the sweep
+ // is not concurrent due to a GODEBUG setting), then we expect
+ // the sweepLocker to be invalid, since sweeping is done.
+ //
+ // N.B. Below we might duplicate some work from gcSweep; this is
+ // fine as all that work is idempotent within a GC cycle, and
+ // we're still holding worldsema so a new cycle can't start.
+ sl := sweep.active.begin()
+ if !stwSwept && !sl.valid {
+ throw("failed to set sweep barrier")
+ } else if stwSwept && sl.valid {
+ throw("non-concurrent sweep failed to drain all sweep queues")
+ }
+
+ systemstack(func() {
+ // The memstats updated above must be updated with the world
+ // stopped to ensure consistency of some values, such as
+ // sched.idleTime and sched.totaltime. memstats also include
+ // the pause time (work.pauseNS), forcing computation of the
+ // total pause time before the pause actually ends.
+ //
+ // Here we reuse the same now used to start the world so that the
+ // time added to /sched/pauses/total/gc:seconds will be
+ // consistent with the value in memstats.
+ startTheWorldWithSema(now, stw)
+ })
+
+ // Flush the heap profile so we can start a new cycle next GC.
+ // This is relatively expensive, so we don't do it with the
+ // world stopped.
+ mProf_Flush()
+
+ // Prepare workbufs for freeing by the sweeper. We do this
+ // asynchronously because it can take non-trivial time.
+ prepareFreeWorkbufs()
+
+ // Free stack spans. This must be done between GC cycles.
+ systemstack(freeStackSpans)
+
+ // Ensure all mcaches are flushed. Each P will flush its own
+ // mcache before allocating, but idle Ps may not. Since this
+ // is necessary to sweep all spans, we need to ensure all
+ // mcaches are flushed before we start the next GC cycle.
+ //
+ // While we're here, flush the page cache for idle Ps to avoid
+ // having pages get stuck on them. These pages are hidden from
+ // the scavenger, so in small idle heaps a significant amount
+ // of additional memory might be held onto.
+ //
+ // Also, flush the pinner cache, to avoid leaking that memory
+ // indefinitely.
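+ //
+ // As a point of reference (sketch only; the real check lives in
+ // mcache.prepareForSweep), the flush is an idempotent generation test:
+ //
+ //	if c.flushGen.Load() == mheap_.sweepgen {
+ //		return // already flushed for this sweep generation
+ //	}
+ //	... release all cached spans and advance c.flushGen ...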
+ forEachP(waitReasonFlushProcCaches, func(pp *p) { + pp.mcache.prepareForSweep() + if pp.status == _Pidle { + systemstack(func() { + lock(&mheap_.lock) + pp.pcache.flush(&mheap_.pages) + unlock(&mheap_.lock) + }) + } + pp.pinnerCache = nil + }) + if sl.valid { + // Now that we've swept stale spans in mcaches, they don't + // count against unswept spans. + // + // Note: this sweepLocker may not be valid if sweeping had + // already completed during the STW. See the corresponding + // begin() call that produced sl. + sweep.active.end(sl) + } + + // Print gctrace before dropping worldsema. As soon as we drop + // worldsema another cycle could start and smash the stats + // we're trying to print. + if debug.gctrace > 0 { + util := int(memstats.gc_cpu_fraction * 100) + + var sbuf [24]byte + printlock() + print("gc ", memstats.numgc, + " @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ", + util, "%: ") + prev := work.tSweepTerm + for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} { + if i != 0 { + print("+") + } + print(string(fmtNSAsMS(sbuf[:], uint64(ns-prev)))) + prev = ns + } + print(" ms clock, ") + for i, ns := range []int64{ + int64(work.stwprocs) * (work.tMark - work.tSweepTerm), + gcController.assistTime.Load(), + gcController.dedicatedMarkTime.Load() + gcController.fractionalMarkTime.Load(), + gcController.idleMarkTime.Load(), + markTermCpu, + } { + if i == 2 || i == 3 { + // Separate mark time components with /. + print("/") + } else if i != 0 { + print("+") + } + print(string(fmtNSAsMS(sbuf[:], uint64(ns)))) + } + print(" ms cpu, ", + work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ", + gcController.lastHeapGoal>>20, " MB goal, ", + gcController.lastStackScan.Load()>>20, " MB stacks, ", + gcController.globalsScan.Load()>>20, " MB globals, ", + work.maxprocs, " P") + if work.userForced { + print(" (forced)") + } + print("\n") + printunlock() + } + + // Set any arena chunks that were deferred to fault. + lock(&userArenaState.lock) + faultList := userArenaState.fault + userArenaState.fault = nil + unlock(&userArenaState.lock) + for _, lc := range faultList { + lc.mspan.setUserArenaChunkToFault() + } + + // Enable huge pages on some metadata if we cross a heap threshold. + if gcController.heapGoal() > minHeapForMetadataHugePages { + systemstack(func() { + mheap_.enableMetadataHugePages() + }) + } + + semrelease(&worldsema) + semrelease(&gcsema) + // Careful: another GC cycle may start now. + + releasem(mp) + mp = nil + + // now that gc is done, kick off finalizer thread if needed + if !concurrentSweep { + // give the queued finalizers, if any, a chance to run + Gosched() + } +} + +// gcBgMarkStartWorkers prepares background mark worker goroutines. These +// goroutines will not run until the mark phase, but they must be started while +// the work is not stopped and from a regular G stack. The caller must hold +// worldsema. +func gcBgMarkStartWorkers() { + // Background marking is performed by per-P G's. Ensure that each P has + // a background GC G. + // + // Worker Gs don't exit if gomaxprocs is reduced. If it is raised + // again, we can reuse the old workers; no need to create new workers. + for gcBgMarkWorkerCount < gomaxprocs { + go gcBgMarkWorker() + + notetsleepg(&work.bgMarkReady, -1) + noteclear(&work.bgMarkReady) + // The worker is now guaranteed to be added to the pool before + // its P's next findRunnableGCWorker. + + gcBgMarkWorkerCount++ + } +} + +// gcBgMarkPrepare sets up state for background marking. 
+// Mutator assists must not yet be enabled. +func gcBgMarkPrepare() { + // Background marking will stop when the work queues are empty + // and there are no more workers (note that, since this is + // concurrent, this may be a transient state, but mark + // termination will clean it up). Between background workers + // and assists, we don't really know how many workers there + // will be, so we pretend to have an arbitrarily large number + // of workers, almost all of which are "waiting". While a + // worker is working it decrements nwait. If nproc == nwait, + // there are no workers. + work.nproc = ^uint32(0) + work.nwait = ^uint32(0) +} + +// gcBgMarkWorkerNode is an entry in the gcBgMarkWorkerPool. It points to a single +// gcBgMarkWorker goroutine. +type gcBgMarkWorkerNode struct { + // Unused workers are managed in a lock-free stack. This field must be first. + node lfnode + + // The g of this worker. + gp guintptr + + // Release this m on park. This is used to communicate with the unlock + // function, which cannot access the G's stack. It is unused outside of + // gcBgMarkWorker(). + m muintptr +} + +func gcBgMarkWorker() { + gp := getg() + + // We pass node to a gopark unlock function, so it can't be on + // the stack (see gopark). Prevent deadlock from recursively + // starting GC by disabling preemption. + gp.m.preemptoff = "GC worker init" + node := new(gcBgMarkWorkerNode) + gp.m.preemptoff = "" + + node.gp.set(gp) + + node.m.set(acquirem()) + notewakeup(&work.bgMarkReady) + // After this point, the background mark worker is generally scheduled + // cooperatively by gcController.findRunnableGCWorker. While performing + // work on the P, preemption is disabled because we are working on + // P-local work buffers. When the preempt flag is set, this puts itself + // into _Gwaiting to be woken up by gcController.findRunnableGCWorker + // at the appropriate time. + // + // When preemption is enabled (e.g., while in gcMarkDone), this worker + // may be preempted and schedule as a _Grunnable G from a runq. That is + // fine; it will eventually gopark again for further scheduling via + // findRunnableGCWorker. + // + // Since we disable preemption before notifying bgMarkReady, we + // guarantee that this G will be in the worker pool for the next + // findRunnableGCWorker. This isn't strictly necessary, but it reduces + // latency between _GCmark starting and the workers starting. + + for { + // Go to sleep until woken by + // gcController.findRunnableGCWorker. + gopark(func(g *g, nodep unsafe.Pointer) bool { + node := (*gcBgMarkWorkerNode)(nodep) + + if mp := node.m.ptr(); mp != nil { + // The worker G is no longer running; release + // the M. + // + // N.B. it is _safe_ to release the M as soon + // as we are no longer performing P-local mark + // work. + // + // However, since we cooperatively stop work + // when gp.preempt is set, if we releasem in + // the loop then the following call to gopark + // would immediately preempt the G. This is + // also safe, but inefficient: the G must + // schedule again only to enter gopark and park + // again. Thus, we defer the release until + // after parking the G. + releasem(mp) + } + + // Release this G to the pool. + gcBgMarkWorkerPool.push(&node.node) + // Note that at this point, the G may immediately be + // rescheduled and may be running. + return true + }, unsafe.Pointer(node), waitReasonGCWorkerIdle, traceBlockSystemGoroutine, 0) + + // Preemption must not occur here, or another G might see + // p.gcMarkWorkerMode. 
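+
+ // The accounting below follows the scheme from gcBgMarkPrepare: nwait
+ // is decremented while a worker drains and incremented when it stops,
+ // so nwait == nproc means no worker is running. In sketch form
+ // (mirroring the code that follows, not adding to it):
+ //
+ //	atomic.Xadd(&work.nwait, -1) // this worker is now working
+ //	... drain mark work ...
+ //	if atomic.Xadd(&work.nwait, +1) == work.nproc && !gcMarkWorkAvailable(nil) {
+ //		gcMarkDone() // last worker, no work left: try to terminate
+ //	}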
+ + // Disable preemption so we can use the gcw. If the + // scheduler wants to preempt us, we'll stop draining, + // dispose the gcw, and then preempt. + node.m.set(acquirem()) + pp := gp.m.p.ptr() // P can't change with preemption disabled. + + if gcBlackenEnabled == 0 { + println("worker mode", pp.gcMarkWorkerMode) + throw("gcBgMarkWorker: blackening not enabled") + } + + if pp.gcMarkWorkerMode == gcMarkWorkerNotWorker { + throw("gcBgMarkWorker: mode not set") + } + + startTime := nanotime() + pp.gcMarkWorkerStartTime = startTime + var trackLimiterEvent bool + if pp.gcMarkWorkerMode == gcMarkWorkerIdleMode { + trackLimiterEvent = pp.limiterEvent.start(limiterEventIdleMarkWork, startTime) + } + + decnwait := atomic.Xadd(&work.nwait, -1) + if decnwait == work.nproc { + println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc) + throw("work.nwait was > work.nproc") + } + + systemstack(func() { + // Mark our goroutine preemptible so its stack + // can be scanned. This lets two mark workers + // scan each other (otherwise, they would + // deadlock). We must not modify anything on + // the G stack. However, stack shrinking is + // disabled for mark workers, so it is safe to + // read from the G stack. + // + // N.B. The execution tracer is not aware of this status + // transition and handles it specially based on the + // wait reason. + casGToWaiting(gp, _Grunning, waitReasonGCWorkerActive) + switch pp.gcMarkWorkerMode { + default: + throw("gcBgMarkWorker: unexpected gcMarkWorkerMode") + case gcMarkWorkerDedicatedMode: + gcDrainMarkWorkerDedicated(&pp.gcw, true) + if gp.preempt { + // We were preempted. This is + // a useful signal to kick + // everything out of the run + // queue so it can run + // somewhere else. + if drainQ, n := runqdrain(pp); n > 0 { + lock(&sched.lock) + globrunqputbatch(&drainQ, int32(n)) + unlock(&sched.lock) + } + } + // Go back to draining, this time + // without preemption. + gcDrainMarkWorkerDedicated(&pp.gcw, false) + case gcMarkWorkerFractionalMode: + gcDrainMarkWorkerFractional(&pp.gcw) + case gcMarkWorkerIdleMode: + gcDrainMarkWorkerIdle(&pp.gcw) + } + casgstatus(gp, _Gwaiting, _Grunning) + }) + + // Account for time and mark us as stopped. + now := nanotime() + duration := now - startTime + gcController.markWorkerStop(pp.gcMarkWorkerMode, duration) + if trackLimiterEvent { + pp.limiterEvent.stop(limiterEventIdleMarkWork, now) + } + if pp.gcMarkWorkerMode == gcMarkWorkerFractionalMode { + atomic.Xaddint64(&pp.gcFractionalMarkTime, duration) + } + + // Was this the last worker and did we run out + // of work? + incnwait := atomic.Xadd(&work.nwait, +1) + if incnwait > work.nproc { + println("runtime: p.gcMarkWorkerMode=", pp.gcMarkWorkerMode, + "work.nwait=", incnwait, "work.nproc=", work.nproc) + throw("work.nwait > work.nproc") + } + + // We'll releasem after this point and thus this P may run + // something else. We must clear the worker mode to avoid + // attributing the mode to a different (non-worker) G in + // traceGoStart. + pp.gcMarkWorkerMode = gcMarkWorkerNotWorker + + // If this worker reached a background mark completion + // point, signal the main GC goroutine. + if incnwait == work.nproc && !gcMarkWorkAvailable(nil) { + // We don't need the P-local buffers here, allow + // preemption because we may schedule like a regular + // goroutine in gcMarkDone (block on locks, etc). 
+ releasem(node.m.ptr()) + node.m.set(nil) + + gcMarkDone() + } + } +} + +// gcMarkWorkAvailable reports whether executing a mark worker +// on p is potentially useful. p may be nil, in which case it only +// checks the global sources of work. +func gcMarkWorkAvailable(p *p) bool { + if p != nil && !p.gcw.empty() { + return true + } + if !work.full.empty() { + return true // global work available + } + if work.markrootNext < work.markrootJobs { + return true // root scan work available + } + return false +} + +// gcMark runs the mark (or, for concurrent GC, mark termination) +// All gcWork caches must be empty. +// STW is in effect at this point. +func gcMark(startTime int64) { + if debug.allocfreetrace > 0 { + tracegc() + } + + if gcphase != _GCmarktermination { + throw("in gcMark expecting to see gcphase as _GCmarktermination") + } + work.tstart = startTime + + // Check that there's no marking work remaining. + if work.full != 0 || work.markrootNext < work.markrootJobs { + print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n") + panic("non-empty mark queue after concurrent mark") + } + + if debug.gccheckmark > 0 { + // This is expensive when there's a large number of + // Gs, so only do it if checkmark is also enabled. + gcMarkRootCheck() + } + + // Drop allg snapshot. allgs may have grown, in which case + // this is the only reference to the old backing store and + // there's no need to keep it around. + work.stackRoots = nil + + // Clear out buffers and double-check that all gcWork caches + // are empty. This should be ensured by gcMarkDone before we + // enter mark termination. + // + // TODO: We could clear out buffers just before mark if this + // has a non-negligible impact on STW time. + for _, p := range allp { + // The write barrier may have buffered pointers since + // the gcMarkDone barrier. However, since the barrier + // ensured all reachable objects were marked, all of + // these must be pointers to black objects. Hence we + // can just discard the write barrier buffer. + if debug.gccheckmark > 0 { + // For debugging, flush the buffer and make + // sure it really was all marked. + wbBufFlush1(p) + } else { + p.wbBuf.reset() + } + + gcw := &p.gcw + if !gcw.empty() { + printlock() + print("runtime: P ", p.id, " flushedWork ", gcw.flushedWork) + if gcw.wbuf1 == nil { + print(" wbuf1=") + } else { + print(" wbuf1.n=", gcw.wbuf1.nobj) + } + if gcw.wbuf2 == nil { + print(" wbuf2=") + } else { + print(" wbuf2.n=", gcw.wbuf2.nobj) + } + print("\n") + throw("P has cached GC work at end of mark termination") + } + // There may still be cached empty buffers, which we + // need to flush since we're going to free them. Also, + // there may be non-zero stats because we allocated + // black after the gcMarkDone barrier. + gcw.dispose() + } + + // Flush scanAlloc from each mcache since we're about to modify + // heapScan directly. If we were to flush this later, then scanAlloc + // might have incorrect information. + // + // Note that it's not important to retain this information; we know + // exactly what heapScan is at this point via scanWork. + for _, p := range allp { + c := p.mcache + if c == nil { + continue + } + c.scanAlloc = 0 + } + + // Reset controller state. + gcController.resetLive(work.bytesMarked) +} + +// gcSweep must be called on the system stack because it acquires the heap +// lock. 
See mheap for details. +// +// Returns true if the heap was fully swept by this function. +// +// The world must be stopped. +// +//go:systemstack +func gcSweep(mode gcMode) bool { + assertWorldStopped() + + if gcphase != _GCoff { + throw("gcSweep being done but phase is not GCoff") + } + + lock(&mheap_.lock) + mheap_.sweepgen += 2 + sweep.active.reset() + mheap_.pagesSwept.Store(0) + mheap_.sweepArenas = mheap_.allArenas + mheap_.reclaimIndex.Store(0) + mheap_.reclaimCredit.Store(0) + unlock(&mheap_.lock) + + sweep.centralIndex.clear() + + if !concurrentSweep || mode == gcForceBlockMode { + // Special case synchronous sweep. + // Record that no proportional sweeping has to happen. + lock(&mheap_.lock) + mheap_.sweepPagesPerByte = 0 + unlock(&mheap_.lock) + // Flush all mcaches. + for _, pp := range allp { + pp.mcache.prepareForSweep() + } + // Sweep all spans eagerly. + for sweepone() != ^uintptr(0) { + } + // Free workbufs eagerly. + prepareFreeWorkbufs() + for freeSomeWbufs(false) { + } + // All "free" events for this mark/sweep cycle have + // now happened, so we can make this profile cycle + // available immediately. + mProf_NextCycle() + mProf_Flush() + return true + } + + // Background sweep. + lock(&sweep.lock) + if sweep.parked { + sweep.parked = false + ready(sweep.g, 0, true) + } + unlock(&sweep.lock) + return false +} + +// gcResetMarkState resets global state prior to marking (concurrent +// or STW) and resets the stack scan state of all Gs. +// +// This is safe to do without the world stopped because any Gs created +// during or after this will start out in the reset state. +// +// gcResetMarkState must be called on the system stack because it acquires +// the heap lock. See mheap for details. +// +//go:systemstack +func gcResetMarkState() { + // This may be called during a concurrent phase, so lock to make sure + // allgs doesn't change. + forEachG(func(gp *g) { + gp.gcscandone = false // set to true in gcphasework + gp.gcAssistBytes = 0 + }) + + // Clear page marks. This is just 1MB per 64GB of heap, so the + // time here is pretty trivial. + lock(&mheap_.lock) + arenas := mheap_.allArenas + unlock(&mheap_.lock) + for _, ai := range arenas { + ha := mheap_.arenas[ai.l1()][ai.l2()] + for i := range ha.pageMarks { + ha.pageMarks[i] = 0 + } + } + + work.bytesMarked = 0 + work.initialHeapLive = gcController.heapLive.Load() +} + +// Hooks for other packages + +var poolcleanup func() +var boringCaches []unsafe.Pointer // for crypto/internal/boring + +//go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup +func sync_runtime_registerPoolCleanup(f func()) { + poolcleanup = f +} + +//go:linkname boring_registerCache crypto/internal/boring/bcache.registerCache +func boring_registerCache(p unsafe.Pointer) { + boringCaches = append(boringCaches, p) +} + +func clearpools() { + // clear sync.Pools + if poolcleanup != nil { + poolcleanup() + } + + // clear boringcrypto caches + for _, p := range boringCaches { + atomicstorep(p, nil) + } + + // Clear central sudog cache. + // Leave per-P caches alone, they have strictly bounded size. + // Disconnect cached list before dropping it on the floor, + // so that a dangling ref to one entry does not pin all of them. + lock(&sched.sudoglock) + var sg, sgnext *sudog + for sg = sched.sudogcache; sg != nil; sg = sgnext { + sgnext = sg.next + sg.next = nil + } + sched.sudogcache = nil + unlock(&sched.sudoglock) + + // Clear central defer pool. + // Leave per-P pools alone, they have strictly bounded size. 
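+ // As with the sudog cache above, the chain is unlinked node by node so
+ // that one dangling reference cannot pin the entire list. The idiom, in
+ // sketch form:
+ //
+ //	for d := head; d != nil; d = next {
+ //		next = d.link
+ //		d.link = nil // break the chain
+ //	}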
+ lock(&sched.deferlock) + // disconnect cached list before dropping it on the floor, + // so that a dangling ref to one entry does not pin all of them. + var d, dlink *_defer + for d = sched.deferpool; d != nil; d = dlink { + dlink = d.link + d.link = nil + } + sched.deferpool = nil + unlock(&sched.deferlock) +} + +// Timing + +// itoaDiv formats val/(10**dec) into buf. +func itoaDiv(buf []byte, val uint64, dec int) []byte { + i := len(buf) - 1 + idec := i - dec + for val >= 10 || i >= idec { + buf[i] = byte(val%10 + '0') + i-- + if i == idec { + buf[i] = '.' + i-- + } + val /= 10 + } + buf[i] = byte(val + '0') + return buf[i:] +} + +// fmtNSAsMS nicely formats ns nanoseconds as milliseconds. +func fmtNSAsMS(buf []byte, ns uint64) []byte { + if ns >= 10e6 { + // Format as whole milliseconds. + return itoaDiv(buf, ns/1e6, 0) + } + // Format two digits of precision, with at most three decimal places. + x := ns / 1e3 + if x == 0 { + buf[0] = '0' + return buf[:1] + } + dec := 3 + for x >= 100 { + x /= 10 + dec-- + } + return itoaDiv(buf, x, dec) +} + +// Helpers for testing GC. + +// gcTestMoveStackOnNextCall causes the stack to be moved on a call +// immediately following the call to this. It may not work correctly +// if any other work appears after this call (such as returning). +// Typically the following call should be marked go:noinline so it +// performs a stack check. +// +// In rare cases this may not cause the stack to move, specifically if +// there's a preemption between this call and the next. +func gcTestMoveStackOnNextCall() { + gp := getg() + gp.stackguard0 = stackForceMove +} + +// gcTestIsReachable performs a GC and returns a bit set where bit i +// is set if ptrs[i] is reachable. +func gcTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) { + // This takes the pointers as unsafe.Pointers in order to keep + // them live long enough for us to attach specials. After + // that, we drop our references to them. + + if len(ptrs) > 64 { + panic("too many pointers for uint64 mask") + } + + // Block GC while we attach specials and drop our references + // to ptrs. Otherwise, if a GC is in progress, it could mark + // them reachable via this function before we have a chance to + // drop them. + semacquire(&gcsema) + + // Create reachability specials for ptrs. + specials := make([]*specialReachable, len(ptrs)) + for i, p := range ptrs { + lock(&mheap_.speciallock) + s := (*specialReachable)(mheap_.specialReachableAlloc.alloc()) + unlock(&mheap_.speciallock) + s.special.kind = _KindSpecialReachable + if !addspecial(p, &s.special) { + throw("already have a reachable special (duplicate pointer?)") + } + specials[i] = s + // Make sure we don't retain ptrs. + ptrs[i] = nil + } + + semrelease(&gcsema) + + // Force a full GC and sweep. + GC() + + // Process specials. + for i, s := range specials { + if !s.done { + printlock() + println("runtime: object", i, "was not swept") + throw("IsReachable failed") + } + if s.reachable { + mask |= 1 << i + } + lock(&mheap_.speciallock) + mheap_.specialReachableAlloc.free(unsafe.Pointer(s)) + unlock(&mheap_.speciallock) + } + + return mask +} + +// gcTestPointerClass returns the category of what p points to, one of: +// "heap", "stack", "data", "bss", "other". This is useful for checking +// that a test is doing what it's intended to do. +// +// This is nosplit simply to avoid extra pointer shuffling that may +// complicate a test. 
+//
+//go:nosplit
+func gcTestPointerClass(p unsafe.Pointer) string {
+ p2 := uintptr(noescape(p))
+ gp := getg()
+ if gp.stack.lo <= p2 && p2 < gp.stack.hi {
+ return "stack"
+ }
+ if base, _, _ := findObject(p2, 0, 0); base != 0 {
+ return "heap"
+ }
+ for _, datap := range activeModules() {
+ if datap.data <= p2 && p2 < datap.edata || datap.noptrdata <= p2 && p2 < datap.enoptrdata {
+ return "data"
+ }
+ if datap.bss <= p2 && p2 < datap.ebss || datap.noptrbss <= p2 && p2 <= datap.enoptrbss {
+ return "bss"
+ }
+ }
+ KeepAlive(p)
+ return "other"
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/mgclimit.go b/platform/dbops/binaries/go/go/src/runtime/mgclimit.go
new file mode 100644
index 0000000000000000000000000000000000000000..ef3cc081cec3d2fce215ac7f104994c56573f98e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/mgclimit.go
@@ -0,0 +1,484 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "runtime/internal/atomic"
+
+// gcCPULimiter is a mechanism to limit GC CPU utilization in situations
+// where it might become excessive and inhibit application progress (e.g.
+// a death spiral).
+//
+// The core of the limiter is a leaky bucket mechanism that fills with GC
+// CPU time and drains with mutator time. Because the bucket fills and
+// drains with CPU time directly (i.e. without any weighting), this
+// effectively sets a very conservative limit of 50%. This limit could be
+// enforced directly; however, the purpose of the bucket is to accommodate
+// spikes in GC CPU utilization without hurting throughput.
+//
+// Note that the bucket in the leaky bucket mechanism can never go negative,
+// so the GC never gets credit for a lot of CPU time spent without the GC
+// running. This is intentional, as an application that stays idle for, say,
+// an entire day, could build up enough credit to fail to prevent a death
+// spiral the following day. The bucket's capacity is the GC's only leeway.
+//
+// The capacity thus also sets the window the limiter considers. For example,
+// if the capacity of the bucket is 1 cpu-second, then the limiter will not
+// kick in until at least 1 full cpu-second in the last 2 cpu-second window
+// is spent on GC CPU time.
+var gcCPULimiter gcCPULimiterState
+
+type gcCPULimiterState struct {
+ lock atomic.Uint32
+
+ enabled atomic.Bool
+ bucket struct {
+ // Invariants:
+ // - fill >= 0
+ // - capacity >= 0
+ // - fill <= capacity
+ fill, capacity uint64
+ }
+ // overflow is the cumulative amount of GC CPU time that we tried to fill the
+ // bucket with but exceeded its capacity.
+ overflow uint64
+
+ // gcEnabled is an internal copy of gcBlackenEnabled that determines
+ // whether the limiter tracks total assist time.
+ //
+ // gcBlackenEnabled isn't used directly so as to keep this structure
+ // unit-testable.
+ gcEnabled bool
+
+ // transitioning is true when the GC is in a STW and transitioning between
+ // the mark and sweep phases.
+ transitioning bool
+
+ // assistTimePool is the accumulated assist time since the last update.
+ assistTimePool atomic.Int64
+
+ // idleMarkTimePool is the accumulated idle mark time since the last update.
+ idleMarkTimePool atomic.Int64
+
+ // idleTimePool is the accumulated time Ps spent on the idle list since the last update.
+ idleTimePool atomic.Int64
+
+ // lastUpdate is the nanotime timestamp of the last time update was called.
+ // + // Updated under lock, but may be read concurrently. + lastUpdate atomic.Int64 + + // lastEnabledCycle is the GC cycle that last had the limiter enabled. + lastEnabledCycle atomic.Uint32 + + // nprocs is an internal copy of gomaxprocs, used to determine total available + // CPU time. + // + // gomaxprocs isn't used directly so as to keep this structure unit-testable. + nprocs int32 + + // test indicates whether this instance of the struct was made for testing purposes. + test bool +} + +// limiting returns true if the CPU limiter is currently enabled, meaning the Go GC +// should take action to limit CPU utilization. +// +// It is safe to call concurrently with other operations. +func (l *gcCPULimiterState) limiting() bool { + return l.enabled.Load() +} + +// startGCTransition notifies the limiter of a GC transition. +// +// This call takes ownership of the limiter and disables all other means of +// updating the limiter. Release ownership by calling finishGCTransition. +// +// It is safe to call concurrently with other operations. +func (l *gcCPULimiterState) startGCTransition(enableGC bool, now int64) { + if !l.tryLock() { + // This must happen during a STW, so we can't fail to acquire the lock. + // If we did, something went wrong. Throw. + throw("failed to acquire lock to start a GC transition") + } + if l.gcEnabled == enableGC { + throw("transitioning GC to the same state as before?") + } + // Flush whatever was left between the last update and now. + l.updateLocked(now) + l.gcEnabled = enableGC + l.transitioning = true + // N.B. finishGCTransition releases the lock. + // + // We don't release here to increase the chance that if there's a failure + // to finish the transition, that we throw on failing to acquire the lock. +} + +// finishGCTransition notifies the limiter that the GC transition is complete +// and releases ownership of it. It also accumulates STW time in the bucket. +// now must be the timestamp from the end of the STW pause. +func (l *gcCPULimiterState) finishGCTransition(now int64) { + if !l.transitioning { + throw("finishGCTransition called without starting one?") + } + // Count the full nprocs set of CPU time because the world is stopped + // between startGCTransition and finishGCTransition. Even though the GC + // isn't running on all CPUs, it is preventing user code from doing so, + // so it might as well be. + if lastUpdate := l.lastUpdate.Load(); now >= lastUpdate { + l.accumulate(0, (now-lastUpdate)*int64(l.nprocs)) + } + l.lastUpdate.Store(now) + l.transitioning = false + l.unlock() +} + +// gcCPULimiterUpdatePeriod dictates the maximum amount of wall-clock time +// we can go before updating the limiter. +const gcCPULimiterUpdatePeriod = 10e6 // 10ms + +// needUpdate returns true if the limiter's maximum update period has been +// exceeded, and so would benefit from an update. +func (l *gcCPULimiterState) needUpdate(now int64) bool { + return now-l.lastUpdate.Load() > gcCPULimiterUpdatePeriod +} + +// addAssistTime notifies the limiter of additional assist time. It will be +// included in the next update. +func (l *gcCPULimiterState) addAssistTime(t int64) { + l.assistTimePool.Add(t) +} + +// addIdleTime notifies the limiter of additional time a P spent on the idle list. It will be +// subtracted from the total CPU time in the next update. +func (l *gcCPULimiterState) addIdleTime(t int64) { + l.idleTimePool.Add(t) +} + +// update updates the bucket given runtime-specific information. now is the +// current monotonic time in nanoseconds. 
+// +// This is safe to call concurrently with other operations, except *GCTransition. +func (l *gcCPULimiterState) update(now int64) { + if !l.tryLock() { + // We failed to acquire the lock, which means something else is currently + // updating. Just drop our update, the next one to update will include + // our total assist time. + return + } + if l.transitioning { + throw("update during transition") + } + l.updateLocked(now) + l.unlock() +} + +// updateLocked is the implementation of update. l.lock must be held. +func (l *gcCPULimiterState) updateLocked(now int64) { + lastUpdate := l.lastUpdate.Load() + if now < lastUpdate { + // Defensively avoid overflow. This isn't even the latest update anyway. + return + } + windowTotalTime := (now - lastUpdate) * int64(l.nprocs) + l.lastUpdate.Store(now) + + // Drain the pool of assist time. + assistTime := l.assistTimePool.Load() + if assistTime != 0 { + l.assistTimePool.Add(-assistTime) + } + + // Drain the pool of idle time. + idleTime := l.idleTimePool.Load() + if idleTime != 0 { + l.idleTimePool.Add(-idleTime) + } + + if !l.test { + // Consume time from in-flight events. Make sure we're not preemptible so allp can't change. + // + // The reason we do this instead of just waiting for those events to finish and push updates + // is to ensure that all the time we're accounting for happened sometime between lastUpdate + // and now. This dramatically simplifies reasoning about the limiter because we're not at + // risk of extra time being accounted for in this window than actually happened in this window, + // leading to all sorts of weird transient behavior. + mp := acquirem() + for _, pp := range allp { + typ, duration := pp.limiterEvent.consume(now) + switch typ { + case limiterEventIdleMarkWork: + fallthrough + case limiterEventIdle: + idleTime += duration + sched.idleTime.Add(duration) + case limiterEventMarkAssist: + fallthrough + case limiterEventScavengeAssist: + assistTime += duration + case limiterEventNone: + break + default: + throw("invalid limiter event type found") + } + } + releasem(mp) + } + + // Compute total GC time. + windowGCTime := assistTime + if l.gcEnabled { + windowGCTime += int64(float64(windowTotalTime) * gcBackgroundUtilization) + } + + // Subtract out all idle time from the total time. Do this after computing + // GC time, because the background utilization is dependent on the *real* + // total time, not the total time after idle time is subtracted. + // + // Idle time is counted as any time that a P is on the P idle list plus idle mark + // time. Idle mark workers soak up time that the application spends idle. + // + // On a heavily undersubscribed system, any additional idle time can skew GC CPU + // utilization, because the GC might be executing continuously and thrashing, + // yet the CPU utilization with respect to GOMAXPROCS will be quite low, so + // the limiter fails to turn on. By subtracting idle time, we're removing time that + // we know the application was idle giving a more accurate picture of whether + // the GC is thrashing. + // + // Note that this can cause the limiter to turn on even if it's not needed. For + // instance, on a system with 32 Ps but only 1 running goroutine, each GC will have + // 8 dedicated GC workers. Assuming the GC cycle is half mark phase and half sweep + // phase, then the GC CPU utilization over that cycle, with idle time removed, will + // be 8/(8+2) = 80%. 
Even though the limiter turns on, though, assist should be + // unnecessary, as the GC has way more CPU time to outpace the 1 goroutine that's + // running. + windowTotalTime -= idleTime + + l.accumulate(windowTotalTime-windowGCTime, windowGCTime) +} + +// accumulate adds time to the bucket and signals whether the limiter is enabled. +// +// This is an internal function that deals just with the bucket. Prefer update. +// l.lock must be held. +func (l *gcCPULimiterState) accumulate(mutatorTime, gcTime int64) { + headroom := l.bucket.capacity - l.bucket.fill + enabled := headroom == 0 + + // Let's be careful about three things here: + // 1. The addition and subtraction, for the invariants. + // 2. Overflow. + // 3. Excessive mutation of l.enabled, which is accessed + // by all assists, potentially more than once. + change := gcTime - mutatorTime + + // Handle limiting case. + if change > 0 && headroom <= uint64(change) { + l.overflow += uint64(change) - headroom + l.bucket.fill = l.bucket.capacity + if !enabled { + l.enabled.Store(true) + l.lastEnabledCycle.Store(memstats.numgc + 1) + } + return + } + + // Handle non-limiting cases. + if change < 0 && l.bucket.fill <= uint64(-change) { + // Bucket emptied. + l.bucket.fill = 0 + } else { + // All other cases. + l.bucket.fill -= uint64(-change) + } + if change != 0 && enabled { + l.enabled.Store(false) + } +} + +// tryLock attempts to lock l. Returns true on success. +func (l *gcCPULimiterState) tryLock() bool { + return l.lock.CompareAndSwap(0, 1) +} + +// unlock releases the lock on l. Must be called if tryLock returns true. +func (l *gcCPULimiterState) unlock() { + old := l.lock.Swap(0) + if old != 1 { + throw("double unlock") + } +} + +// capacityPerProc is the limiter's bucket capacity for each P in GOMAXPROCS. +const capacityPerProc = 1e9 // 1 second in nanoseconds + +// resetCapacity updates the capacity based on GOMAXPROCS. Must not be called +// while the GC is enabled. +// +// It is safe to call concurrently with other operations. +func (l *gcCPULimiterState) resetCapacity(now int64, nprocs int32) { + if !l.tryLock() { + // This must happen during a STW, so we can't fail to acquire the lock. + // If we did, something went wrong. Throw. + throw("failed to acquire lock to reset capacity") + } + // Flush the rest of the time for this period. + l.updateLocked(now) + l.nprocs = nprocs + + l.bucket.capacity = uint64(nprocs) * capacityPerProc + if l.bucket.fill > l.bucket.capacity { + l.bucket.fill = l.bucket.capacity + l.enabled.Store(true) + l.lastEnabledCycle.Store(memstats.numgc + 1) + } else if l.bucket.fill < l.bucket.capacity { + l.enabled.Store(false) + } + l.unlock() +} + +// limiterEventType indicates the type of an event occurring on some P. +// +// These events represent the full set of events that the GC CPU limiter tracks +// to execute its function. +// +// This type may use no more than limiterEventBits bits of information. +type limiterEventType uint8 + +const ( + limiterEventNone limiterEventType = iota // None of the following events. + limiterEventIdleMarkWork // Refers to an idle mark worker (see gcMarkWorkerMode). + limiterEventMarkAssist // Refers to mark assist (see gcAssistAlloc). + limiterEventScavengeAssist // Refers to a scavenge assist (see allocSpan). + limiterEventIdle // Refers to time a P spent on the idle list. + + limiterEventBits = 3 +) + +// limiterEventTypeMask is a mask for the bits in p.limiterEventStart that represent +// the event type. The rest of the bits of that field represent a timestamp. 
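+//
+// Concretely, with limiterEventBits = 3 (declared above), the event type
+// occupies the top 3 bits and the timestamp the low 61 bits, so (worked
+// example):
+//
+//	limiterEventTypeMask = 0x7 << 61 = 0xe000000000000000
+//	stamp = uint64(typ)<<61 | (uint64(now) &^ limiterEventTypeMask)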
+const (
+ limiterEventTypeMask = uint64((1<<limiterEventBits)-1) << (64 - limiterEventBits)
+ limiterEventStampNone = limiterEventStamp(0)
+)
+
+// limiterEventStamp is a nanotime timestamp packed with a limiterEventType.
+type limiterEventStamp uint64
+
+// makeLimiterEventStamp creates a new stamp from the event type and the current timestamp.
+func makeLimiterEventStamp(typ limiterEventType, now int64) limiterEventStamp {
+ return limiterEventStamp(uint64(typ)<<(64-limiterEventBits) | (uint64(now) &^ limiterEventTypeMask))
+}
+
+// duration computes the difference between now and the start time stored in the stamp.
+//
+// Returns 0 if the difference is negative, which may happen if now is stale or if the
+// tracking of limiter events ticked over the timestamp boundary.
+func (s limiterEventStamp) duration(now int64) int64 {
+ // The top limiterEventBits bits of the timestamp are derived from the current time
+ // when computing a duration.
+ start := int64((uint64(now) & limiterEventTypeMask) | (uint64(s) &^ limiterEventTypeMask))
+ if now < start {
+ return 0
+ }
+ return now - start
+}
+
+// typ extracts the event type from the stamp.
+func (s limiterEventStamp) typ() limiterEventType {
+ return limiterEventType(s >> (64 - limiterEventBits))
+}
+
+// limiterEvent represents tracking state for an event tracked by the GC CPU limiter.
+type limiterEvent struct {
+ stamp atomic.Uint64 // Stores a limiterEventStamp.
+}
+
+// start begins tracking a new limiter event of the current type. If an event
+// is already in flight, then a new event cannot begin because the current time is
+// already being attributed to that event. In this case, this function returns false.
+// Otherwise, it returns true.
+//
+// The caller must be non-preemptible until at least stop is called or this function
+// returns false. Because this is trying to measure "on-CPU" time of some event, getting
+// scheduled away during it can mean that whatever we're measuring isn't a reflection
+// of "on-CPU" time. The OS could deschedule us at any time, but we want to maintain as
+// close of an approximation as we can.
+func (e *limiterEvent) start(typ limiterEventType, now int64) bool {
+ if limiterEventStamp(e.stamp.Load()).typ() != limiterEventNone {
+ return false
+ }
+ e.stamp.Store(uint64(makeLimiterEventStamp(typ, now)))
+ return true
+}
+
+// consume acquires the partial event CPU time from any in-flight event.
+// It achieves this by storing the current time as the new event time.
+//
+// Returns the type of the in-flight event, as well as how long it's currently been
+// executing for. Returns limiterEventNone if no event is active.
+func (e *limiterEvent) consume(now int64) (typ limiterEventType, duration int64) {
+ // Read the limiter event timestamp and update it to now.
+ for {
+ old := limiterEventStamp(e.stamp.Load())
+ typ = old.typ()
+ if typ == limiterEventNone {
+ // There's no in-flight event, so just push that up.
+ return
+ }
+ duration = old.duration(now)
+ if duration == 0 {
+ // We might have a stale now value, or this crossed the
+ // 2^(64-limiterEventBits) boundary in the clock readings.
+ // Just ignore it.
+ return limiterEventNone, 0
+ }
+ new := makeLimiterEventStamp(typ, now)
+ if e.stamp.CompareAndSwap(uint64(old), uint64(new)) {
+ break
+ }
+ }
+ return
+}
+
+// stop stops the active limiter event. Throws if the type of the active
+// event does not match typ.
+//
+// The caller must be non-preemptible across the event. See start as to why.
+func (e *limiterEvent) stop(typ limiterEventType, now int64) {
+ var stamp limiterEventStamp
+ for {
+ stamp = limiterEventStamp(e.stamp.Load())
+ if stamp.typ() != typ {
+ print("runtime: want=", typ, " got=", stamp.typ(), "\n")
+ throw("limiterEvent.stop: found wrong event in p's limiter event slot")
+ }
+ if e.stamp.CompareAndSwap(uint64(stamp), uint64(limiterEventStampNone)) {
+ break
+ }
+ }
+ duration := stamp.duration(now)
+ if duration == 0 {
+ // It's possible that we're missing time because we crossed a
+ // 2^(64-limiterEventBits) boundary between the start and end.
+ // In this case, we're dropping that information. This is OK because
+ // at worst it'll cause a transient hiccup that will quickly resolve
+ // itself as all new timestamps begin on the other side of the boundary.
+ // Such a hiccup should be incredibly rare.
+ return
+ }
+ // Account for the event.
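+ // Illustrative summary (mirroring the switch below): idle and idle-mark
+ // durations feed the limiter's idle pool via addIdleTime, while mark- and
+ // scavenge-assist durations feed the assist pool via addAssistTime.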
+ switch typ { + case limiterEventIdleMarkWork: + gcCPULimiter.addIdleTime(duration) + case limiterEventIdle: + gcCPULimiter.addIdleTime(duration) + sched.idleTime.Add(duration) + case limiterEventMarkAssist: + fallthrough + case limiterEventScavengeAssist: + gcCPULimiter.addAssistTime(duration) + default: + throw("limiterEvent.stop: invalid limiter event type found") + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mgclimit_test.go b/platform/dbops/binaries/go/go/src/runtime/mgclimit_test.go new file mode 100644 index 0000000000000000000000000000000000000000..124da03ef1d9f34f7099a00172f6093704ee364e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mgclimit_test.go @@ -0,0 +1,255 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + . "runtime" + "testing" + "time" +) + +func TestGCCPULimiter(t *testing.T) { + const procs = 14 + + // Create mock time. + ticks := int64(0) + advance := func(d time.Duration) int64 { + t.Helper() + ticks += int64(d) + return ticks + } + + // assistTime computes the CPU time for assists using frac of GOMAXPROCS + // over the wall-clock duration d. + assistTime := func(d time.Duration, frac float64) int64 { + t.Helper() + return int64(frac * float64(d) * procs) + } + + l := NewGCCPULimiter(ticks, procs) + + // Do the whole test twice to make sure state doesn't leak across. + var baseOverflow uint64 // Track total overflow across iterations. + for i := 0; i < 2; i++ { + t.Logf("Iteration %d", i+1) + + if l.Capacity() != procs*CapacityPerProc { + t.Fatalf("unexpected capacity: %d", l.Capacity()) + } + if l.Fill() != 0 { + t.Fatalf("expected empty bucket to start") + } + + // Test filling the bucket with just mutator time. + + l.Update(advance(10 * time.Millisecond)) + l.Update(advance(1 * time.Second)) + l.Update(advance(1 * time.Hour)) + if l.Fill() != 0 { + t.Fatalf("expected empty bucket from only accumulating mutator time, got fill of %d cpu-ns", l.Fill()) + } + + // Test needUpdate. + + if l.NeedUpdate(advance(GCCPULimiterUpdatePeriod / 2)) { + t.Fatal("need update even though updated half a period ago") + } + if !l.NeedUpdate(advance(GCCPULimiterUpdatePeriod)) { + t.Fatal("doesn't need update even though updated 1.5 periods ago") + } + l.Update(advance(0)) + if l.NeedUpdate(advance(0)) { + t.Fatal("need update even though just updated") + } + + // Test transitioning the bucket to enable the GC. + + l.StartGCTransition(true, advance(109*time.Millisecond)) + l.FinishGCTransition(advance(2*time.Millisecond + 1*time.Microsecond)) + + if expect := uint64((2*time.Millisecond + 1*time.Microsecond) * procs); l.Fill() != expect { + t.Fatalf("expected fill of %d, got %d cpu-ns", expect, l.Fill()) + } + + // Test passing time without assists during a GC. Specifically, just enough to drain the bucket to + // exactly procs nanoseconds (easier to get to because of rounding). 
+ // + // The window we need to drain the bucket is 1/(1-2*gcBackgroundUtilization) times the current fill: + // + // fill + (window * procs * gcBackgroundUtilization - window * procs * (1-gcBackgroundUtilization)) = n + // fill = n - (window * procs * gcBackgroundUtilization - window * procs * (1-gcBackgroundUtilization)) + // fill = n + window * procs * ((1-gcBackgroundUtilization) - gcBackgroundUtilization) + // fill = n + window * procs * (1-2*gcBackgroundUtilization) + // window = (fill - n) / (procs * (1-2*gcBackgroundUtilization))) + // + // And here we want n=procs: + factor := (1 / (1 - 2*GCBackgroundUtilization)) + fill := (2*time.Millisecond + 1*time.Microsecond) * procs + l.Update(advance(time.Duration(factor * float64(fill-procs) / procs))) + if l.Fill() != procs { + t.Fatalf("expected fill %d cpu-ns from draining after a GC started, got fill of %d cpu-ns", procs, l.Fill()) + } + + // Drain to zero for the rest of the test. + l.Update(advance(2 * procs * CapacityPerProc)) + if l.Fill() != 0 { + t.Fatalf("expected empty bucket from draining, got fill of %d cpu-ns", l.Fill()) + } + + // Test filling up the bucket with 50% total GC work (so, not moving the bucket at all). + l.AddAssistTime(assistTime(10*time.Millisecond, 0.5-GCBackgroundUtilization)) + l.Update(advance(10 * time.Millisecond)) + if l.Fill() != 0 { + t.Fatalf("expected empty bucket from 50%% GC work, got fill of %d cpu-ns", l.Fill()) + } + + // Test adding to the bucket overall with 100% GC work. + l.AddAssistTime(assistTime(time.Millisecond, 1.0-GCBackgroundUtilization)) + l.Update(advance(time.Millisecond)) + if expect := uint64(procs * time.Millisecond); l.Fill() != expect { + t.Errorf("expected %d fill from 100%% GC CPU, got fill of %d cpu-ns", expect, l.Fill()) + } + if l.Limiting() { + t.Errorf("limiter is enabled after filling bucket but shouldn't be") + } + if t.Failed() { + t.FailNow() + } + + // Test filling the bucket exactly full. + l.AddAssistTime(assistTime(CapacityPerProc-time.Millisecond, 1.0-GCBackgroundUtilization)) + l.Update(advance(CapacityPerProc - time.Millisecond)) + if l.Fill() != l.Capacity() { + t.Errorf("expected bucket filled to capacity %d, got %d", l.Capacity(), l.Fill()) + } + if !l.Limiting() { + t.Errorf("limiter is not enabled after filling bucket but should be") + } + if l.Overflow() != 0+baseOverflow { + t.Errorf("bucket filled exactly should not have overflow, found %d", l.Overflow()) + } + if t.Failed() { + t.FailNow() + } + + // Test adding with a delta of exactly zero. That is, GC work is exactly 50% of all resources. + // Specifically, the limiter should still be on, and no overflow should accumulate. + l.AddAssistTime(assistTime(1*time.Second, 0.5-GCBackgroundUtilization)) + l.Update(advance(1 * time.Second)) + if l.Fill() != l.Capacity() { + t.Errorf("expected bucket filled to capacity %d, got %d", l.Capacity(), l.Fill()) + } + if !l.Limiting() { + t.Errorf("limiter is not enabled after filling bucket but should be") + } + if l.Overflow() != 0+baseOverflow { + t.Errorf("bucket filled exactly should not have overflow, found %d", l.Overflow()) + } + if t.Failed() { + t.FailNow() + } + + // Drain the bucket by half. 
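The drain-window algebra above is easy to validate numerically. A small self-contained check, with u standing in for gcBackgroundUtilization and an arbitrary fill:

```go
package main

import "fmt"

func main() {
	const (
		procs = 14
		u     = 0.25 // stand-in for gcBackgroundUtilization
	)
	fill := float64(2_001_000 * procs) // bucket fill in CPU-ns, arbitrary
	n := float64(procs)                // target fill after draining

	// window = (fill - n) / (procs * (1 - 2u)), per the derivation above.
	window := (fill - n) / (procs * (1 - 2*u))

	// Replay the bucket update over that window: GC contributes u of the
	// CPU while the mutator drains (1-u), so the net change is negative.
	got := fill + window*procs*u - window*procs*(1-u)
	fmt.Printf("fill after window: %.0f (want %.0f)\n", got, n)
}
```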
+ l.AddAssistTime(assistTime(CapacityPerProc, 0)) + l.Update(advance(CapacityPerProc)) + if expect := l.Capacity() / 2; l.Fill() != expect { + t.Errorf("failed to drain to %d, got fill %d", expect, l.Fill()) + } + if l.Limiting() { + t.Errorf("limiter is enabled after draining bucket but shouldn't be") + } + if t.Failed() { + t.FailNow() + } + + // Test overfilling the bucket. + l.AddAssistTime(assistTime(CapacityPerProc, 1.0-GCBackgroundUtilization)) + l.Update(advance(CapacityPerProc)) + if l.Fill() != l.Capacity() { + t.Errorf("failed to fill to capacity %d, got fill %d", l.Capacity(), l.Fill()) + } + if !l.Limiting() { + t.Errorf("limiter is not enabled after overfill but should be") + } + if expect := uint64(CapacityPerProc * procs / 2); l.Overflow() != expect+baseOverflow { + t.Errorf("bucket overfilled should have overflow %d, found %d", expect, l.Overflow()) + } + if t.Failed() { + t.FailNow() + } + + // Test ending the cycle with some assists left over. + l.AddAssistTime(assistTime(1*time.Millisecond, 1.0-GCBackgroundUtilization)) + l.StartGCTransition(false, advance(1*time.Millisecond)) + if l.Fill() != l.Capacity() { + t.Errorf("failed to maintain fill to capacity %d, got fill %d", l.Capacity(), l.Fill()) + } + if !l.Limiting() { + t.Errorf("limiter is not enabled after overfill but should be") + } + if expect := uint64((CapacityPerProc/2 + time.Millisecond) * procs); l.Overflow() != expect+baseOverflow { + t.Errorf("bucket overfilled should have overflow %d, found %d", expect, l.Overflow()) + } + if t.Failed() { + t.FailNow() + } + + // Make sure the STW adds to the bucket. + l.FinishGCTransition(advance(5 * time.Millisecond)) + if l.Fill() != l.Capacity() { + t.Errorf("failed to maintain fill to capacity %d, got fill %d", l.Capacity(), l.Fill()) + } + if !l.Limiting() { + t.Errorf("limiter is not enabled after overfill but should be") + } + if expect := uint64((CapacityPerProc/2 + 6*time.Millisecond) * procs); l.Overflow() != expect+baseOverflow { + t.Errorf("bucket overfilled should have overflow %d, found %d", expect, l.Overflow()) + } + if t.Failed() { + t.FailNow() + } + + // Resize procs up and make sure limiting stops. + expectFill := l.Capacity() + l.ResetCapacity(advance(0), procs+10) + if l.Fill() != expectFill { + t.Errorf("failed to maintain fill at old capacity %d, got fill %d", expectFill, l.Fill()) + } + if l.Limiting() { + t.Errorf("limiter is enabled after resetting capacity higher") + } + if expect := uint64((CapacityPerProc/2 + 6*time.Millisecond) * procs); l.Overflow() != expect+baseOverflow { + t.Errorf("bucket overflow %d should have remained constant, found %d", expect, l.Overflow()) + } + if t.Failed() { + t.FailNow() + } + + // Resize procs down and make sure limiting begins again. + // Also make sure resizing doesn't affect overflow. This isn't + // a case where we want to report overflow, because we're not + // actively doing work to achieve it. It's that we have fewer + // CPU resources now. + l.ResetCapacity(advance(0), procs-10) + if l.Fill() != l.Capacity() { + t.Errorf("failed lower fill to new capacity %d, got fill %d", l.Capacity(), l.Fill()) + } + if !l.Limiting() { + t.Errorf("limiter is disabled after resetting capacity lower") + } + if expect := uint64((CapacityPerProc/2 + 6*time.Millisecond) * procs); l.Overflow() != expect+baseOverflow { + t.Errorf("bucket overflow %d should have remained constant, found %d", expect, l.Overflow()) + } + if t.Failed() { + t.FailNow() + } + + // Get back to a zero state. 
The top of the loop will double check. + l.ResetCapacity(advance(CapacityPerProc*procs), procs) + + // Track total overflow for future iterations. + baseOverflow += uint64((CapacityPerProc/2 + 6*time.Millisecond) * procs) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mgcmark.go b/platform/dbops/binaries/go/go/src/runtime/mgcmark.go new file mode 100644 index 0000000000000000000000000000000000000000..b515568eb003e4985ebecf26147cce55b0879326 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mgcmark.go @@ -0,0 +1,1755 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Garbage collector: marking and scanning + +package runtime + +import ( + "internal/abi" + "internal/goarch" + "internal/goexperiment" + "runtime/internal/atomic" + "runtime/internal/sys" + "unsafe" +) + +const ( + fixedRootFinalizers = iota + fixedRootFreeGStacks + fixedRootCount + + // rootBlockBytes is the number of bytes to scan per data or + // BSS root. + rootBlockBytes = 256 << 10 + + // maxObletBytes is the maximum bytes of an object to scan at + // once. Larger objects will be split up into "oblets" of at + // most this size. Since we can scan 1–2 MB/ms, 128 KB bounds + // scan preemption at ~100 µs. + // + // This must be > _MaxSmallSize so that the object base is the + // span base. + maxObletBytes = 128 << 10 + + // drainCheckThreshold specifies how many units of work to do + // between self-preemption checks in gcDrain. Assuming a scan + // rate of 1 MB/ms, this is ~100 µs. Lower values have higher + // overhead in the scan loop (the scheduler check may perform + // a syscall, so its overhead is nontrivial). Higher values + // make the system less responsive to incoming work. + drainCheckThreshold = 100000 + + // pagesPerSpanRoot indicates how many pages to scan from a span root + // at a time. Used by special root marking. + // + // Higher values improve throughput by increasing locality, but + // increase the minimum latency of a marking operation. + // + // Must be a multiple of the pageInUse bitmap element size and + // must also evenly divide pagesPerArena. + pagesPerSpanRoot = 512 +) + +// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and +// some miscellany) and initializes scanning-related state. +// +// The world must be stopped. +func gcMarkRootPrepare() { + assertWorldStopped() + + // Compute how many data and BSS root blocks there are. + nBlocks := func(bytes uintptr) int { + return int(divRoundUp(bytes, rootBlockBytes)) + } + + work.nDataRoots = 0 + work.nBSSRoots = 0 + + // Scan globals. + for _, datap := range activeModules() { + nDataRoots := nBlocks(datap.edata - datap.data) + if nDataRoots > work.nDataRoots { + work.nDataRoots = nDataRoots + } + } + + for _, datap := range activeModules() { + nBSSRoots := nBlocks(datap.ebss - datap.bss) + if nBSSRoots > work.nBSSRoots { + work.nBSSRoots = nBSSRoots + } + } + + // Scan span roots for finalizer specials. + // + // We depend on addfinalizer to mark objects that get + // finalizers after root marking. + // + // We're going to scan the whole heap (that was available at the time the + // mark phase started, i.e. markArenas) for in-use spans which have specials. + // + // Break up the work into arenas, and further into chunks. + // + // Snapshot allArenas as markArenas. This snapshot is safe because allArenas + // is append-only. 
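gcMarkRootPrepare sizes the data and BSS root jobs by rounding each segment up to whole rootBlockBytes blocks. A minimal sketch of that computation; divRoundUp is written out inline here since the runtime helper isn't exported:

```go
package main

import "fmt"

const rootBlockBytes = 256 << 10

// divRoundUp mirrors the runtime helper: ceil(n / a) for a > 0.
func divRoundUp(n, a uintptr) uintptr {
	return (n + a - 1) / a
}

func main() {
	// A 600 KiB data segment needs three 256 KiB root blocks: the last
	// block is only partially full but still gets its own markroot job.
	segBytes := uintptr(600 << 10)
	fmt.Println(divRoundUp(segBytes, rootBlockBytes)) // 3
}
```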
+ mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)] + work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot) + + // Scan stacks. + // + // Gs may be created after this point, but it's okay that we + // ignore them because they begin life without any roots, so + // there's nothing to scan, and any roots they create during + // the concurrent phase will be caught by the write barrier. + work.stackRoots = allGsSnapshot() + work.nStackRoots = len(work.stackRoots) + + work.markrootNext = 0 + work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots) + + // Calculate base indexes of each root type + work.baseData = uint32(fixedRootCount) + work.baseBSS = work.baseData + uint32(work.nDataRoots) + work.baseSpans = work.baseBSS + uint32(work.nBSSRoots) + work.baseStacks = work.baseSpans + uint32(work.nSpanRoots) + work.baseEnd = work.baseStacks + uint32(work.nStackRoots) +} + +// gcMarkRootCheck checks that all roots have been scanned. It is +// purely for debugging. +func gcMarkRootCheck() { + if work.markrootNext < work.markrootJobs { + print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n") + throw("left over markroot jobs") + } + + // Check that stacks have been scanned. + // + // We only check the first nStackRoots Gs that we should have scanned. + // Since we don't care about newer Gs (see comment in + // gcMarkRootPrepare), no locking is required. + i := 0 + forEachGRace(func(gp *g) { + if i >= work.nStackRoots { + return + } + + if !gp.gcscandone { + println("gp", gp, "goid", gp.goid, + "status", readgstatus(gp), + "gcscandone", gp.gcscandone) + throw("scan missed a g") + } + + i++ + }) +} + +// ptrmask for an allocation containing a single pointer. +var oneptrmask = [...]uint8{1} + +// markroot scans the i'th root. +// +// Preemption must be disabled (because this uses a gcWork). +// +// Returns the amount of GC work credit produced by the operation. +// If flushBgCredit is true, then that credit is also flushed +// to the background credit pool. +// +// nowritebarrier is only advisory here. +// +//go:nowritebarrier +func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64 { + // Note: if you add a case here, please also update heapdump.go:dumproots. + var workDone int64 + var workCounter *atomic.Int64 + switch { + case work.baseData <= i && i < work.baseBSS: + workCounter = &gcController.globalsScanWork + for _, datap := range activeModules() { + workDone += markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData)) + } + + case work.baseBSS <= i && i < work.baseSpans: + workCounter = &gcController.globalsScanWork + for _, datap := range activeModules() { + workDone += markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS)) + } + + case i == fixedRootFinalizers: + for fb := allfin; fb != nil; fb = fb.alllink { + cnt := uintptr(atomic.Load(&fb.cnt)) + scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil) + } + + case i == fixedRootFreeGStacks: + // Switch to the system stack so we can call + // stackfree. 
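The base offsets computed in gcMarkRootPrepare let markroot decode a flat job index with simple range comparisons. A rough standalone sketch of that dispatch, with invented job counts:

```go
package main

import "fmt"

const fixedRootCount = 2 // finalizers + free G stacks

func describe(i, nData, nBSS, nSpans, nStacks uint32) string {
	baseData := uint32(fixedRootCount)
	baseBSS := baseData + nData
	baseSpans := baseBSS + nBSS
	baseStacks := baseSpans + nSpans
	baseEnd := baseStacks + nStacks
	switch {
	case i < baseData:
		return "fixed root"
	case i < baseBSS:
		return fmt.Sprintf("data block %d", i-baseData)
	case i < baseSpans:
		return fmt.Sprintf("bss block %d", i-baseBSS)
	case i < baseStacks:
		return fmt.Sprintf("span shard %d", i-baseSpans)
	case i < baseEnd:
		return fmt.Sprintf("stack root %d", i-baseStacks)
	default:
		return "out of range"
	}
}

func main() {
	// With 3 data blocks, 2 bss blocks, 4 span shards: job 6 is a bss block.
	fmt.Println(describe(6, 3, 2, 4, 10)) // "bss block 1"
}
```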
+ systemstack(markrootFreeGStacks) + + case work.baseSpans <= i && i < work.baseStacks: + // mark mspan.specials + markrootSpans(gcw, int(i-work.baseSpans)) + + default: + // the rest is scanning goroutine stacks + workCounter = &gcController.stackScanWork + if i < work.baseStacks || work.baseEnd <= i { + printlock() + print("runtime: markroot index ", i, " not in stack roots range [", work.baseStacks, ", ", work.baseEnd, ")\n") + throw("markroot: bad index") + } + gp := work.stackRoots[i-work.baseStacks] + + // remember when we've first observed the G blocked + // needed only to output in traceback + status := readgstatus(gp) // We are not in a scan state + if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 { + gp.waitsince = work.tstart + } + + // scanstack must be done on the system stack in case + // we're trying to scan our own stack. + systemstack(func() { + // If this is a self-scan, put the user G in + // _Gwaiting to prevent self-deadlock. It may + // already be in _Gwaiting if this is a mark + // worker or we're in mark termination. + userG := getg().m.curg + selfScan := gp == userG && readgstatus(userG) == _Grunning + if selfScan { + casGToWaiting(userG, _Grunning, waitReasonGarbageCollectionScan) + } + + // TODO: suspendG blocks (and spins) until gp + // stops, which may take a while for + // running goroutines. Consider doing this in + // two phases where the first is non-blocking: + // we scan the stacks we can and ask running + // goroutines to scan themselves; and the + // second blocks. + stopped := suspendG(gp) + if stopped.dead { + gp.gcscandone = true + return + } + if gp.gcscandone { + throw("g already scanned") + } + workDone += scanstack(gp, gcw) + gp.gcscandone = true + resumeG(stopped) + + if selfScan { + casgstatus(userG, _Gwaiting, _Grunning) + } + }) + } + if workCounter != nil && workDone != 0 { + workCounter.Add(workDone) + if flushBgCredit { + gcFlushBgCredit(workDone) + } + } + return workDone +} + +// markrootBlock scans the shard'th shard of the block of memory [b0, +// b0+n0), with the given pointer mask. +// +// Returns the amount of work done. +// +//go:nowritebarrier +func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) int64 { + if rootBlockBytes%(8*goarch.PtrSize) != 0 { + // This is necessary to pick byte offsets in ptrmask0. + throw("rootBlockBytes must be a multiple of 8*ptrSize") + } + + // Note that if b0 is toward the end of the address space, + // then b0 + rootBlockBytes might wrap around. + // These tests are written to avoid any possible overflow. + off := uintptr(shard) * rootBlockBytes + if off >= n0 { + return 0 + } + b := b0 + off + ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize)))) + n := uintptr(rootBlockBytes) + if off+n > n0 { + n = n0 - off + } + + // Scan this shard. + scanblock(b, n, ptrmask, gcw, nil) + return int64(n) +} + +// markrootFreeGStacks frees stacks of dead Gs. +// +// This does not free stacks of dead Gs cached on Ps, but having a few +// cached stacks around isn't a problem. +func markrootFreeGStacks() { + // Take list of dead Gs with stacks. + lock(&sched.gFree.lock) + list := sched.gFree.stack + sched.gFree.stack = gList{} + unlock(&sched.gFree.lock) + if list.empty() { + return + } + + // Free stacks. 
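markrootBlock's shard arithmetic clamps the final, partial block rather than scanning past the root. A small sketch of the same offset and length computation, reusing the 256 KiB block size from above:

```go
package main

import "fmt"

const rootBlockBytes = 256 << 10

// shardExtent returns the offset and length scanned by the given shard of an
// n0-byte root, mirroring the overflow-conscious checks in markrootBlock.
func shardExtent(n0, shard uintptr) (off, n uintptr, ok bool) {
	off = shard * rootBlockBytes
	if off >= n0 {
		return 0, 0, false // shard is past the end of this root
	}
	n = rootBlockBytes
	if off+n > n0 {
		n = n0 - off // final shard: clamp to the remaining bytes
	}
	return off, n, true
}

func main() {
	// A 600 KiB segment: shard 2 covers only the trailing 88 KiB.
	off, n, ok := shardExtent(600<<10, 2)
	fmt.Println(off>>10, n>>10, ok) // 512 88 true
}
```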
+ q := gQueue{list.head, list.head} + for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() { + stackfree(gp.stack) + gp.stack.lo = 0 + gp.stack.hi = 0 + // Manipulate the queue directly since the Gs are + // already all linked the right way. + q.tail.set(gp) + } + + // Put Gs back on the free list. + lock(&sched.gFree.lock) + sched.gFree.noStack.pushAll(q) + unlock(&sched.gFree.lock) +} + +// markrootSpans marks roots for one shard of markArenas. +// +//go:nowritebarrier +func markrootSpans(gcw *gcWork, shard int) { + // Objects with finalizers have two GC-related invariants: + // + // 1) Everything reachable from the object must be marked. + // This ensures that when we pass the object to its finalizer, + // everything the finalizer can reach will be retained. + // + // 2) Finalizer specials (which are not in the garbage + // collected heap) are roots. In practice, this means the fn + // field must be scanned. + sg := mheap_.sweepgen + + // Find the arena and page index into that arena for this shard. + ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)] + ha := mheap_.arenas[ai.l1()][ai.l2()] + arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena) + + // Construct slice of bitmap which we'll iterate over. + specialsbits := ha.pageSpecials[arenaPage/8:] + specialsbits = specialsbits[:pagesPerSpanRoot/8] + for i := range specialsbits { + // Find set bits, which correspond to spans with specials. + specials := atomic.Load8(&specialsbits[i]) + if specials == 0 { + continue + } + for j := uint(0); j < 8; j++ { + if specials&(1< 0 || mp.preemptoff != "" { + return + } + + // This extremely verbose boolean indicates whether we've + // entered mark assist from the perspective of the tracer. + // + // In the old tracer, this is just before we call gcAssistAlloc1 + // *and* tracing is enabled. Because the old tracer doesn't + // do any extra tracking, we need to be careful to not emit an + // "end" event if there was no corresponding "begin" for the + // mark assist. + // + // In the new tracer, this is just before we call gcAssistAlloc1 + // *regardless* of whether tracing is enabled. This is because + // the new tracer allows for tracing to begin (and advance + // generations) in the middle of a GC mark phase, so we need to + // record some state so that the tracer can pick it up to ensure + // a consistent trace result. + // + // TODO(mknyszek): Hide the details of inMarkAssist in tracer + // functions and simplify all the state tracking. This is a lot. + enteredMarkAssistForTracing := false +retry: + if gcCPULimiter.limiting() { + // If the CPU limiter is enabled, intentionally don't + // assist to reduce the amount of CPU time spent in the GC. + if enteredMarkAssistForTracing { + trace := traceAcquire() + if trace.ok() { + trace.GCMarkAssistDone() + // Set this *after* we trace the end to make sure + // that we emit an in-progress event if this is + // the first event for the goroutine in the trace + // or trace generation. Also, do this between + // acquire/release because this is part of the + // goroutine's trace state, and it must be atomic + // with respect to the tracer. + gp.inMarkAssist = false + traceRelease(trace) + } else { + // This state is tracked even if tracing isn't enabled. + // It's only used by the new tracer. + // See the comment on enteredMarkAssistForTracing. + gp.inMarkAssist = false + } + } + return + } + // Compute the amount of scan work we need to do to make the + // balance positive. 
When the required amount of work is low, + // we over-assist to build up credit for future allocations + // and amortize the cost of assisting. + assistWorkPerByte := gcController.assistWorkPerByte.Load() + assistBytesPerWork := gcController.assistBytesPerWork.Load() + debtBytes := -gp.gcAssistBytes + scanWork := int64(assistWorkPerByte * float64(debtBytes)) + if scanWork < gcOverAssistWork { + scanWork = gcOverAssistWork + debtBytes = int64(assistBytesPerWork * float64(scanWork)) + } + + // Steal as much credit as we can from the background GC's + // scan credit. This is racy and may drop the background + // credit below 0 if two mutators steal at the same time. This + // will just cause steals to fail until credit is accumulated + // again, so in the long run it doesn't really matter, but we + // do have to handle the negative credit case. + bgScanCredit := gcController.bgScanCredit.Load() + stolen := int64(0) + if bgScanCredit > 0 { + if bgScanCredit < scanWork { + stolen = bgScanCredit + gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(stolen)) + } else { + stolen = scanWork + gp.gcAssistBytes += debtBytes + } + gcController.bgScanCredit.Add(-stolen) + + scanWork -= stolen + + if scanWork == 0 { + // We were able to steal all of the credit we + // needed. + if enteredMarkAssistForTracing { + trace := traceAcquire() + if trace.ok() { + trace.GCMarkAssistDone() + // Set this *after* we trace the end to make sure + // that we emit an in-progress event if this is + // the first event for the goroutine in the trace + // or trace generation. Also, do this between + // acquire/release because this is part of the + // goroutine's trace state, and it must be atomic + // with respect to the tracer. + gp.inMarkAssist = false + traceRelease(trace) + } else { + // This state is tracked even if tracing isn't enabled. + // It's only used by the new tracer. + // See the comment on enteredMarkAssistForTracing. + gp.inMarkAssist = false + } + } + return + } + } + if !enteredMarkAssistForTracing { + trace := traceAcquire() + if trace.ok() { + if !goexperiment.ExecTracer2 { + // In the old tracer, enter mark assist tracing only + // if we actually traced an event. Otherwise a goroutine + // waking up from mark assist post-GC might end up + // writing a stray "end" event. + // + // This means inMarkAssist will not be meaningful + // in the old tracer; that's OK, it's unused. + // + // See the comment on enteredMarkAssistForTracing. + enteredMarkAssistForTracing = true + } + trace.GCMarkAssistStart() + // Set this *after* we trace the start, otherwise we may + // emit an in-progress event for an assist we're about to start. + gp.inMarkAssist = true + traceRelease(trace) + } else { + gp.inMarkAssist = true + } + if goexperiment.ExecTracer2 { + // In the new tracer, set enter mark assist tracing if we + // ever pass this point, because we must manage inMarkAssist + // correctly. + // + // See the comment on enteredMarkAssistForTracing. + enteredMarkAssistForTracing = true + } + } + + // Perform assist work + systemstack(func() { + gcAssistAlloc1(gp, scanWork) + // The user stack may have moved, so this can't touch + // anything on it until it returns from systemstack. + }) + + completed := gp.param != nil + gp.param = nil + if completed { + gcMarkDone() + } + + if gp.gcAssistBytes < 0 { + // We were unable steal enough credit or perform + // enough work to pay off the assist debt. We need to + // do one of these before letting the mutator allocate + // more to prevent over-allocation. 
+ // + // If this is because we were preempted, reschedule + // and try some more. + if gp.preempt { + Gosched() + goto retry + } + + // Add this G to an assist queue and park. When the GC + // has more background credit, it will satisfy queued + // assists before flushing to the global credit pool. + // + // Note that this does *not* get woken up when more + // work is added to the work list. The theory is that + // there wasn't enough work to do anyway, so we might + // as well let background marking take care of the + // work that is available. + if !gcParkAssist() { + goto retry + } + + // At this point either background GC has satisfied + // this G's assist debt, or the GC cycle is over. + } + if enteredMarkAssistForTracing { + trace := traceAcquire() + if trace.ok() { + trace.GCMarkAssistDone() + // Set this *after* we trace the end to make sure + // that we emit an in-progress event if this is + // the first event for the goroutine in the trace + // or trace generation. Also, do this between + // acquire/release because this is part of the + // goroutine's trace state, and it must be atomic + // with respect to the tracer. + gp.inMarkAssist = false + traceRelease(trace) + } else { + // This state is tracked even if tracing isn't enabled. + // It's only used by the new tracer. + // See the comment on enteredMarkAssistForTracing. + gp.inMarkAssist = false + } + } +} + +// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system +// stack. This is a separate function to make it easier to see that +// we're not capturing anything from the user stack, since the user +// stack may move while we're in this function. +// +// gcAssistAlloc1 indicates whether this assist completed the mark +// phase by setting gp.param to non-nil. This can't be communicated on +// the stack since it may move. +// +//go:systemstack +func gcAssistAlloc1(gp *g, scanWork int64) { + // Clear the flag indicating that this assist completed the + // mark phase. + gp.param = nil + + if atomic.Load(&gcBlackenEnabled) == 0 { + // The gcBlackenEnabled check in malloc races with the + // store that clears it but an atomic check in every malloc + // would be a performance hit. + // Instead we recheck it here on the non-preemptible system + // stack to determine if we should perform an assist. + + // GC is done, so ignore any remaining debt. + gp.gcAssistBytes = 0 + return + } + // Track time spent in this assist. Since we're on the + // system stack, this is non-preemptible, so we can + // just measure start and end time. + // + // Limiter event tracking might be disabled if we end up here + // while on a mark worker. + startTime := nanotime() + trackLimiterEvent := gp.m.p.ptr().limiterEvent.start(limiterEventMarkAssist, startTime) + + decnwait := atomic.Xadd(&work.nwait, -1) + if decnwait == work.nproc { + println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc) + throw("nwait > work.nprocs") + } + + // gcDrainN requires the caller to be preemptible. + casGToWaiting(gp, _Grunning, waitReasonGCAssistMarking) + + // drain own cached work first in the hopes that it + // will be more cache friendly. + gcw := &getg().m.p.ptr().gcw + workDone := gcDrainN(gcw, scanWork) + + casgstatus(gp, _Gwaiting, _Grunning) + + // Record that we did this much scan work. + // + // Back out the number of bytes of assist credit that + // this scan work counts for. The "1+" is a poor man's + // round-up, to ensure this adds credit even if + // assistBytesPerWork is very low. 
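The assist path converts allocation debt into scan work and back through the pacer's ratios; the over-assist floor and the "1+" round-up described above are the subtle parts. A toy sketch with invented ratios (workPerByte and overAssistWork stand in for the pacer's values):

```go
package main

import "fmt"

const overAssistWork = 64 << 10 // stand-in for gcOverAssistWork

func main() {
	// Invented pacing ratios; in the runtime these come from the pacer.
	workPerByte := 0.5
	bytesPerWork := 1 / workPerByte

	debtBytes := int64(1000) // gp.gcAssistBytes is -1000
	scanWork := int64(workPerByte * float64(debtBytes))
	if scanWork < overAssistWork {
		// Over-assist: do extra work now so small allocations don't
		// each pay the fixed cost of entering the assist path.
		scanWork = overAssistWork
		debtBytes = int64(bytesPerWork * float64(scanWork))
	}

	// After doing workDone units, credit is paid back with a "1+" so that
	// even a very small ratio still makes forward progress on the debt.
	workDone := scanWork
	credit := 1 + int64(bytesPerWork*float64(workDone))
	fmt.Println(scanWork, credit > debtBytes) // 65536 true
}
```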
+ assistBytesPerWork := gcController.assistBytesPerWork.Load() + gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(workDone)) + + // If this is the last worker and we ran out of work, + // signal a completion point. + incnwait := atomic.Xadd(&work.nwait, +1) + if incnwait > work.nproc { + println("runtime: work.nwait=", incnwait, + "work.nproc=", work.nproc) + throw("work.nwait > work.nproc") + } + + if incnwait == work.nproc && !gcMarkWorkAvailable(nil) { + // This has reached a background completion point. Set + // gp.param to a non-nil value to indicate this. It + // doesn't matter what we set it to (it just has to be + // a valid pointer). + gp.param = unsafe.Pointer(gp) + } + now := nanotime() + duration := now - startTime + pp := gp.m.p.ptr() + pp.gcAssistTime += duration + if trackLimiterEvent { + pp.limiterEvent.stop(limiterEventMarkAssist, now) + } + if pp.gcAssistTime > gcAssistTimeSlack { + gcController.assistTime.Add(pp.gcAssistTime) + gcCPULimiter.update(now) + pp.gcAssistTime = 0 + } +} + +// gcWakeAllAssists wakes all currently blocked assists. This is used +// at the end of a GC cycle. gcBlackenEnabled must be false to prevent +// new assists from going to sleep after this point. +func gcWakeAllAssists() { + lock(&work.assistQueue.lock) + list := work.assistQueue.q.popList() + injectglist(&list) + unlock(&work.assistQueue.lock) +} + +// gcParkAssist puts the current goroutine on the assist queue and parks. +// +// gcParkAssist reports whether the assist is now satisfied. If it +// returns false, the caller must retry the assist. +func gcParkAssist() bool { + lock(&work.assistQueue.lock) + // If the GC cycle finished while we were getting the lock, + // exit the assist. The cycle can't finish while we hold the + // lock. + if atomic.Load(&gcBlackenEnabled) == 0 { + unlock(&work.assistQueue.lock) + return true + } + + gp := getg() + oldList := work.assistQueue.q + work.assistQueue.q.pushBack(gp) + + // Recheck for background credit now that this G is in + // the queue, but can still back out. This avoids a + // race in case background marking has flushed more + // credit since we checked above. + if gcController.bgScanCredit.Load() > 0 { + work.assistQueue.q = oldList + if oldList.tail != 0 { + oldList.tail.ptr().schedlink.set(nil) + } + unlock(&work.assistQueue.lock) + return false + } + // Park. + goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceBlockGCMarkAssist, 2) + return true +} + +// gcFlushBgCredit flushes scanWork units of background scan work +// credit. This first satisfies blocked assists on the +// work.assistQueue and then flushes any remaining credit to +// gcController.bgScanCredit. +// +// Write barriers are disallowed because this is used by gcDrain after +// it has ensured that all work is drained and this must preserve that +// condition. +// +//go:nowritebarrierrec +func gcFlushBgCredit(scanWork int64) { + if work.assistQueue.q.empty() { + // Fast path; there are no blocked assists. There's a + // small window here where an assist may add itself to + // the blocked queue and park. If that happens, we'll + // just get it on the next flush. + gcController.bgScanCredit.Add(scanWork) + return + } + + assistBytesPerWork := gcController.assistBytesPerWork.Load() + scanBytes := int64(float64(scanWork) * assistBytesPerWork) + + lock(&work.assistQueue.lock) + for !work.assistQueue.q.empty() && scanBytes > 0 { + gp := work.assistQueue.q.pop() + // Note that gp.gcAssistBytes is negative because gp + // is in debt. 
Think carefully about the signs below. + if scanBytes+gp.gcAssistBytes >= 0 { + // Satisfy this entire assist debt. + scanBytes += gp.gcAssistBytes + gp.gcAssistBytes = 0 + // It's important that we *not* put gp in + // runnext. Otherwise, it's possible for user + // code to exploit the GC worker's high + // scheduler priority to get itself always run + // before other goroutines and always in the + // fresh quantum started by GC. + ready(gp, 0, false) + } else { + // Partially satisfy this assist. + gp.gcAssistBytes += scanBytes + scanBytes = 0 + // As a heuristic, we move this assist to the + // back of the queue so that large assists + // can't clog up the assist queue and + // substantially delay small assists. + work.assistQueue.q.pushBack(gp) + break + } + } + + if scanBytes > 0 { + // Convert from scan bytes back to work. + assistWorkPerByte := gcController.assistWorkPerByte.Load() + scanWork = int64(float64(scanBytes) * assistWorkPerByte) + gcController.bgScanCredit.Add(scanWork) + } + unlock(&work.assistQueue.lock) +} + +// scanstack scans gp's stack, greying all pointers found on the stack. +// +// Returns the amount of scan work performed, but doesn't update +// gcController.stackScanWork or flush any credit. Any background credit produced +// by this function should be flushed by its caller. scanstack itself can't +// safely flush because it may result in trying to wake up a goroutine that +// was just scanned, resulting in a self-deadlock. +// +// scanstack will also shrink the stack if it is safe to do so. If it +// is not, it schedules a stack shrink for the next synchronous safe +// point. +// +// scanstack is marked go:systemstack because it must not be preempted +// while using a workbuf. +// +//go:nowritebarrier +//go:systemstack +func scanstack(gp *g, gcw *gcWork) int64 { + if readgstatus(gp)&_Gscan == 0 { + print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n") + throw("scanstack - bad status") + } + + switch readgstatus(gp) &^ _Gscan { + default: + print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") + throw("mark - bad status") + case _Gdead: + return 0 + case _Grunning: + print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") + throw("scanstack: goroutine not stopped") + case _Grunnable, _Gsyscall, _Gwaiting: + // ok + } + + if gp == getg() { + throw("can't scan our own stack") + } + + // scannedSize is the amount of work we'll be reporting. + // + // It is less than the allocated size (which is hi-lo). + var sp uintptr + if gp.syscallsp != 0 { + sp = gp.syscallsp // If in a system call this is the stack pointer (gp.sched.sp can be 0 in this case on Windows). + } else { + sp = gp.sched.sp + } + scannedSize := gp.stack.hi - sp + + // Keep statistics for initial stack size calculation. + // Note that this accumulates the scanned size, not the allocated size. + p := getg().m.p.ptr() + p.scannedStackSize += uint64(scannedSize) + p.scannedStacks++ + + if isShrinkStackSafe(gp) { + // Shrink the stack if not much of it is being used. + shrinkstack(gp) + } else { + // Otherwise, shrink the stack at the next sync safe point. 
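The sign convention in gcFlushBgCredit above rewards a second look: assist debts are negative while background credit is positive, so "paying" a debt is an addition. A tiny self-contained sketch of the two branches:

```go
package main

import "fmt"

// distribute pays background credit into a queue of assist debts and returns
// the leftover credit. Debts are negative, mirroring gp.gcAssistBytes.
func distribute(scanBytes int64, debts []int64) int64 {
	for i := range debts {
		if scanBytes <= 0 {
			break
		}
		if scanBytes+debts[i] >= 0 {
			// Fully satisfy this debt; note the addition, since
			// debts[i] is negative.
			scanBytes += debts[i]
			debts[i] = 0
		} else {
			// Partially pay it down and stop.
			debts[i] += scanBytes
			scanBytes = 0
		}
	}
	return scanBytes
}

func main() {
	debts := []int64{-300, -500}
	left := distribute(600, debts)
	fmt.Println(debts, left) // [0 -200] 0
}
```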
+ gp.preemptShrink = true + } + + var state stackScanState + state.stack = gp.stack + + if stackTraceDebug { + println("stack trace goroutine", gp.goid) + } + + if debugScanConservative && gp.asyncSafePoint { + print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n") + } + + // Scan the saved context register. This is effectively a live + // register that gets moved back and forth between the + // register and sched.ctxt without a write barrier. + if gp.sched.ctxt != nil { + scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state) + } + + // Scan the stack. Accumulate a list of stack objects. + var u unwinder + for u.init(gp, 0); u.valid(); u.next() { + scanframeworker(&u.frame, &state, gcw) + } + + // Find additional pointers that point into the stack from the heap. + // Currently this includes defers and panics. See also function copystack. + + // Find and trace other pointers in defer records. + for d := gp._defer; d != nil; d = d.link { + if d.fn != nil { + // Scan the func value, which could be a stack allocated closure. + // See issue 30453. + scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state) + } + if d.link != nil { + // The link field of a stack-allocated defer record might point + // to a heap-allocated defer record. Keep that heap record live. + scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state) + } + // Retain defers records themselves. + // Defer records might not be reachable from the G through regular heap + // tracing because the defer linked list might weave between the stack and the heap. + if d.heap { + scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state) + } + } + if gp._panic != nil { + // Panics are always stack allocated. + state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false) + } + + // Find and scan all reachable stack objects. + // + // The state's pointer queue prioritizes precise pointers over + // conservative pointers so that we'll prefer scanning stack + // objects precisely. + state.buildIndex() + for { + p, conservative := state.getPtr() + if p == 0 { + break + } + obj := state.findObject(p) + if obj == nil { + continue + } + r := obj.r + if r == nil { + // We've already scanned this object. + continue + } + obj.setRecord(nil) // Don't scan it again. + if stackTraceDebug { + printlock() + print(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of size", obj.size) + if conservative { + print(" (conservative)") + } + println() + printunlock() + } + gcdata := r.gcdata() + var s *mspan + if r.useGCProg() { + // This path is pretty unlikely, an object large enough + // to have a GC program allocated on the stack. + // We need some space to unpack the program into a straight + // bitmask, which we allocate/free here. + // TODO: it would be nice if there were a way to run a GC + // program without having to store all its bits. We'd have + // to change from a Lempel-Ziv style program to something else. + // Or we can forbid putting objects on stacks if they require + // a gc program (see issue 27447). 
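The priority described above, precise pointers before conservative ones, can be sketched as a pair of stacks; the type below is purely an illustration, not the runtime's stackScanState:

```go
package main

import "fmt"

// ptrQueue prefers precise pointers so stack objects get scanned precisely
// whenever possible, mirroring the priority of the stack scan state's queue.
type ptrQueue struct {
	precise, conservative []uintptr
}

func (q *ptrQueue) put(p uintptr, conservative bool) {
	if conservative {
		q.conservative = append(q.conservative, p)
	} else {
		q.precise = append(q.precise, p)
	}
}

func (q *ptrQueue) get() (p uintptr, conservative, ok bool) {
	if n := len(q.precise); n > 0 {
		p, q.precise = q.precise[n-1], q.precise[:n-1]
		return p, false, true
	}
	if n := len(q.conservative); n > 0 {
		p, q.conservative = q.conservative[n-1], q.conservative[:n-1]
		return p, true, true
	}
	return 0, false, false
}

func main() {
	var q ptrQueue
	q.put(0x100, true)  // conservative pointer arrives first
	q.put(0x200, false) // precise pointer still comes out first
	p, cons, _ := q.get()
	fmt.Printf("%#x conservative=%v\n", p, cons) // 0x200 conservative=false
}
```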
+ s = materializeGCProg(r.ptrdata(), gcdata) + gcdata = (*byte)(unsafe.Pointer(s.startAddr)) + } + + b := state.stack.lo + uintptr(obj.off) + if conservative { + scanConservative(b, r.ptrdata(), gcdata, gcw, &state) + } else { + scanblock(b, r.ptrdata(), gcdata, gcw, &state) + } + + if s != nil { + dematerializeGCProg(s) + } + } + + // Deallocate object buffers. + // (Pointer buffers were all deallocated in the loop above.) + for state.head != nil { + x := state.head + state.head = x.next + if stackTraceDebug { + for i := 0; i < x.nobj; i++ { + obj := &x.obj[i] + if obj.r == nil { // reachable + continue + } + println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of size", obj.r.size) + // Note: not necessarily really dead - only reachable-from-ptr dead. + } + } + x.nobj = 0 + putempty((*workbuf)(unsafe.Pointer(x))) + } + if state.buf != nil || state.cbuf != nil || state.freeBuf != nil { + throw("remaining pointer buffers") + } + return int64(scannedSize) +} + +// Scan a stack frame: local variables and function arguments/results. +// +//go:nowritebarrier +func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) { + if _DebugGC > 1 && frame.continpc != 0 { + print("scanframe ", funcname(frame.fn), "\n") + } + + isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == abi.FuncID_asyncPreempt + isDebugCall := frame.fn.valid() && frame.fn.funcID == abi.FuncID_debugCallV2 + if state.conservative || isAsyncPreempt || isDebugCall { + if debugScanConservative { + println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc)) + } + + // Conservatively scan the frame. Unlike the precise + // case, this includes the outgoing argument space + // since we may have stopped while this function was + // setting up a call. + // + // TODO: We could narrow this down if the compiler + // produced a single map per function of stack slots + // and registers that ever contain a pointer. + if frame.varp != 0 { + size := frame.varp - frame.sp + if size > 0 { + scanConservative(frame.sp, size, nil, gcw, state) + } + } + + // Scan arguments to this frame. + if n := frame.argBytes(); n != 0 { + // TODO: We could pass the entry argument map + // to narrow this down further. + scanConservative(frame.argp, n, nil, gcw, state) + } + + if isAsyncPreempt || isDebugCall { + // This function's frame contained the + // registers for the asynchronously stopped + // parent frame. Scan the parent + // conservatively. + state.conservative = true + } else { + // We only wanted to scan those two frames + // conservatively. Clear the flag for future + // frames. + state.conservative = false + } + return + } + + locals, args, objs := frame.getStackMap(false) + + // Scan local variables if stack frame has been allocated. + if locals.n > 0 { + size := uintptr(locals.n) * goarch.PtrSize + scanblock(frame.varp-size, size, locals.bytedata, gcw, state) + } + + // Scan arguments. + if args.n > 0 { + scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state) + } + + // Add all stack objects to the stack object list. + if frame.varp != 0 { + // varp is 0 for defers, where there are no locals. + // In that case, there can't be a pointer to its args, either. + // (And all args would be scanned above anyway.) 
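Precise scanning of locals and arguments walks a one-bit-per-word stack map like locals.bytedata above. A minimal sketch of that walk, with a hand-written map:

```go
package main

import "fmt"

const ptrSize = 8

// scanWithMap reports the frame offsets that hold pointers according to a
// one-bit-per-word stack map, the way precise local/argument scanning does.
func scanWithMap(n int, bytedata []byte) []int {
	var offs []int
	for word := 0; word < n; word++ {
		bits := bytedata[word/8]
		if (bits>>(word%8))&1 != 0 {
			offs = append(offs, word*ptrSize)
		}
	}
	return offs
}

func main() {
	// 10 words; words 1, 3 and 8 hold pointers: bytes 0b0000_1010, 0b01.
	fmt.Println(scanWithMap(10, []byte{0x0a, 0x01})) // [8 24 64]
}
```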
+ for i := range objs { + obj := &objs[i] + off := obj.off + base := frame.varp // locals base pointer + if off >= 0 { + base = frame.argp // arguments and return values base pointer + } + ptr := base + uintptr(off) + if ptr < frame.sp { + // object hasn't been allocated in the frame yet. + continue + } + if stackTraceDebug { + println("stkobj at", hex(ptr), "of size", obj.size) + } + state.addObject(ptr, obj) + } + } +} + +type gcDrainFlags int + +const ( + gcDrainUntilPreempt gcDrainFlags = 1 << iota + gcDrainFlushBgCredit + gcDrainIdle + gcDrainFractional +) + +// gcDrainMarkWorkerIdle is a wrapper for gcDrain that exists to better account +// mark time in profiles. +func gcDrainMarkWorkerIdle(gcw *gcWork) { + gcDrain(gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit) +} + +// gcDrainMarkWorkerDedicated is a wrapper for gcDrain that exists to better account +// mark time in profiles. +func gcDrainMarkWorkerDedicated(gcw *gcWork, untilPreempt bool) { + flags := gcDrainFlushBgCredit + if untilPreempt { + flags |= gcDrainUntilPreempt + } + gcDrain(gcw, flags) +} + +// gcDrainMarkWorkerFractional is a wrapper for gcDrain that exists to better account +// mark time in profiles. +func gcDrainMarkWorkerFractional(gcw *gcWork) { + gcDrain(gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit) +} + +// gcDrain scans roots and objects in work buffers, blackening grey +// objects until it is unable to get more work. It may return before +// GC is done; it's the caller's responsibility to balance work from +// other Ps. +// +// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt +// is set. +// +// If flags&gcDrainIdle != 0, gcDrain returns when there is other work +// to do. +// +// If flags&gcDrainFractional != 0, gcDrain self-preempts when +// pollFractionalWorkerExit() returns true. This implies +// gcDrainNoBlock. +// +// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work +// credit to gcController.bgScanCredit every gcCreditSlack units of +// scan work. +// +// gcDrain will always return if there is a pending STW or forEachP. +// +// Disabling write barriers is necessary to ensure that after we've +// confirmed that we've drained gcw, that we don't accidentally end +// up flipping that condition by immediately adding work in the form +// of a write barrier buffer flush. +// +// Don't set nowritebarrierrec because it's safe for some callees to +// have write barriers enabled. +// +//go:nowritebarrier +func gcDrain(gcw *gcWork, flags gcDrainFlags) { + if !writeBarrier.enabled { + throw("gcDrain phase incorrect") + } + + // N.B. We must be running in a non-preemptible context, so it's + // safe to hold a reference to our P here. + gp := getg().m.curg + pp := gp.m.p.ptr() + preemptible := flags&gcDrainUntilPreempt != 0 + flushBgCredit := flags&gcDrainFlushBgCredit != 0 + idle := flags&gcDrainIdle != 0 + + initScanWork := gcw.heapScanWork + + // checkWork is the scan work before performing the next + // self-preempt check. + checkWork := int64(1<<63 - 1) + var check func() bool + if flags&(gcDrainIdle|gcDrainFractional) != 0 { + checkWork = initScanWork + drainCheckThreshold + if idle { + check = pollWork + } else if flags&gcDrainFractional != 0 { + check = pollFractionalWorkerExit + } + } + + // Drain root marking jobs. + if work.markrootNext < work.markrootJobs { + // Stop if we're preemptible, if someone wants to STW, or if + // someone is calling forEachP. 
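The loop below claims root jobs by atomically advancing a shared cursor, so each job runs exactly once no matter how many workers drain concurrently. A self-contained sketch of that pattern:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	// Workers claim jobs by atomically incrementing a shared cursor, the
	// same pattern as the markrootNext/markrootJobs loop: Add returns the
	// new value, so the claimed index is that value minus one.
	const jobs = 100
	var next atomic.Uint32
	var done [jobs]atomic.Bool

	var wg sync.WaitGroup
	for w := 0; w < 4; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				job := next.Add(1) - 1
				if job >= jobs {
					return // all jobs claimed
				}
				done[job].Store(true) // stand-in for markroot(gcw, job)
			}
		}()
	}
	wg.Wait()

	claimed := 0
	for i := range done {
		if done[i].Load() {
			claimed++
		}
	}
	fmt.Println(claimed) // 100: every job ran exactly once
}
```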
+ for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) { + job := atomic.Xadd(&work.markrootNext, +1) - 1 + if job >= work.markrootJobs { + break + } + markroot(gcw, job, flushBgCredit) + if check != nil && check() { + goto done + } + } + } + + // Drain heap marking jobs. + // + // Stop if we're preemptible, if someone wants to STW, or if + // someone is calling forEachP. + // + // TODO(mknyszek): Consider always checking gp.preempt instead + // of having the preempt flag, and making an exception for certain + // mark workers in retake. That might be simpler than trying to + // enumerate all the reasons why we might want to preempt, even + // if we're supposed to be mostly non-preemptible. + for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) { + // Try to keep work available on the global queue. We used to + // check if there were waiting workers, but it's better to + // just keep work available than to make workers wait. In the + // worst case, we'll do O(log(_WorkbufSize)) unnecessary + // balances. + if work.full == 0 { + gcw.balance() + } + + b := gcw.tryGetFast() + if b == 0 { + b = gcw.tryGet() + if b == 0 { + // Flush the write barrier + // buffer; this may create + // more work. + wbBufFlush() + b = gcw.tryGet() + } + } + if b == 0 { + // Unable to get work. + break + } + scanobject(b, gcw) + + // Flush background scan work credit to the global + // account if we've accumulated enough locally so + // mutator assists can draw on it. + if gcw.heapScanWork >= gcCreditSlack { + gcController.heapScanWork.Add(gcw.heapScanWork) + if flushBgCredit { + gcFlushBgCredit(gcw.heapScanWork - initScanWork) + initScanWork = 0 + } + checkWork -= gcw.heapScanWork + gcw.heapScanWork = 0 + + if checkWork <= 0 { + checkWork += drainCheckThreshold + if check != nil && check() { + break + } + } + } + } + +done: + // Flush remaining scan work credit. + if gcw.heapScanWork > 0 { + gcController.heapScanWork.Add(gcw.heapScanWork) + if flushBgCredit { + gcFlushBgCredit(gcw.heapScanWork - initScanWork) + } + gcw.heapScanWork = 0 + } +} + +// gcDrainN blackens grey objects until it has performed roughly +// scanWork units of scan work or the G is preempted. This is +// best-effort, so it may perform less work if it fails to get a work +// buffer. Otherwise, it will perform at least n units of work, but +// may perform more because scanning is always done in whole object +// increments. It returns the amount of scan work performed. +// +// The caller goroutine must be in a preemptible state (e.g., +// _Gwaiting) to prevent deadlocks during stack scanning. As a +// consequence, this must be called on the system stack. +// +//go:nowritebarrier +//go:systemstack +func gcDrainN(gcw *gcWork, scanWork int64) int64 { + if !writeBarrier.enabled { + throw("gcDrainN phase incorrect") + } + + // There may already be scan work on the gcw, which we don't + // want to claim was done by this call. + workFlushed := -gcw.heapScanWork + + // In addition to backing out because of a preemption, back out + // if the GC CPU limiter is enabled. + gp := getg().m.curg + for !gp.preempt && !gcCPULimiter.limiting() && workFlushed+gcw.heapScanWork < scanWork { + // See gcDrain comment. + if work.full == 0 { + gcw.balance() + } + + b := gcw.tryGetFast() + if b == 0 { + b = gcw.tryGet() + if b == 0 { + // Flush the write barrier buffer; + // this may create more work. + wbBufFlush() + b = gcw.tryGet() + } + } + + if b == 0 { + // Try to do a root job. 
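The gcCreditSlack batching above trades a little accounting latency for far fewer contended atomics. A compact sketch of the same accumulate-then-flush pattern, with creditSlack standing in for the runtime constant:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

const creditSlack = 2000 // stand-in for gcCreditSlack

var globalCredit atomic.Int64

// worker accumulates scan work locally and only publishes it once the local
// total crosses the slack threshold, as the gcDrain loop does.
func worker(units []int64) {
	var local int64
	for _, u := range units {
		local += u
		if local >= creditSlack {
			globalCredit.Add(local)
			local = 0
		}
	}
	globalCredit.Add(local) // final flush, like the done: path above
}

func main() {
	worker([]int64{700, 700, 700, 300}) // flushes 2100, then 300
	fmt.Println(globalCredit.Load())    // 2400
}
```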
+ if work.markrootNext < work.markrootJobs { + job := atomic.Xadd(&work.markrootNext, +1) - 1 + if job < work.markrootJobs { + workFlushed += markroot(gcw, job, false) + continue + } + } + // No heap or root jobs. + break + } + + scanobject(b, gcw) + + // Flush background scan work credit. + if gcw.heapScanWork >= gcCreditSlack { + gcController.heapScanWork.Add(gcw.heapScanWork) + workFlushed += gcw.heapScanWork + gcw.heapScanWork = 0 + } + } + + // Unlike gcDrain, there's no need to flush remaining work + // here because this never flushes to bgScanCredit and + // gcw.dispose will flush any remaining work to scanWork. + + return workFlushed + gcw.heapScanWork +} + +// scanblock scans b as scanobject would, but using an explicit +// pointer bitmap instead of the heap bitmap. +// +// This is used to scan non-heap roots, so it does not update +// gcw.bytesMarked or gcw.heapScanWork. +// +// If stk != nil, possible stack pointers are also reported to stk.putPtr. +// +//go:nowritebarrier +func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) { + // Use local copies of original parameters, so that a stack trace + // due to one of the throws below shows the original block + // base and extent. + b := b0 + n := n0 + + for i := uintptr(0); i < n; { + // Find bits for the next word. + bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8))) + if bits == 0 { + i += goarch.PtrSize * 8 + continue + } + for j := 0; j < 8 && i < n; j++ { + if bits&1 != 0 { + // Same work as in scanobject; see comments there. + p := *(*uintptr)(unsafe.Pointer(b + i)) + if p != 0 { + if obj, span, objIndex := findObject(p, b, i); obj != 0 { + greyobject(obj, b, i, span, gcw, objIndex) + } else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi { + stk.putPtr(p, false) + } + } + } + bits >>= 1 + i += goarch.PtrSize + } + } +} + +// scanobject scans the object starting at b, adding pointers to gcw. +// b must point to the beginning of a heap object or an oblet. +// scanobject consults the GC bitmap for the pointer mask and the +// spans for the size of the object. +// +//go:nowritebarrier +func scanobject(b uintptr, gcw *gcWork) { + // Prefetch object before we scan it. + // + // This will overlap fetching the beginning of the object with initial + // setup before we start scanning the object. + sys.Prefetch(b) + + // Find the bits for b and the size of the object at b. + // + // b is either the beginning of an object, in which case this + // is the size of the object to scan, or it points to an + // oblet, in which case we compute the size to scan below. + s := spanOfUnchecked(b) + n := s.elemsize + if n == 0 { + throw("scanobject n == 0") + } + if s.spanclass.noscan() { + // Correctness-wise this is ok, but it's inefficient + // if noscan objects reach here. + throw("scanobject of a noscan object") + } + + var tp typePointers + if n > maxObletBytes { + // Large object. Break into oblets for better + // parallelism and lower latency. + if b == s.base() { + // Enqueue the other oblets to scan later. + // Some oblets may be in b's scalar tail, but + // these will be marked as "no more pointers", + // so we'll drop out immediately when we go to + // scan those. + for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes { + if !gcw.putFast(oblet) { + gcw.put(oblet) + } + } + } + + // Compute the size of the oblet. Since this object + // must be a large object, s.base() is the beginning + // of the object. 
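The lines below finish the oblet-size computation; the splitting itself is straightforward to sketch on its own, reusing the 128 KiB maxObletBytes from this file:

```go
package main

import "fmt"

const maxObletBytes = 128 << 10

// oblets splits a large object [base, base+size) into scan units of at most
// maxObletBytes, the way scanobject enqueues follow-on oblets.
func oblets(base, size uintptr) (starts []uintptr) {
	for oblet := base; oblet < base+size; oblet += maxObletBytes {
		starts = append(starts, oblet)
	}
	return starts
}

func main() {
	// A 300 KiB object becomes three oblets: 128 KiB, 128 KiB, 44 KiB.
	for _, o := range oblets(0, 300<<10) {
		n := uintptr(300<<10) - o
		if n > maxObletBytes {
			n = maxObletBytes
		}
		fmt.Println(o>>10, n>>10)
	}
}
```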
+ n = s.base() + s.elemsize - b + n = min(n, maxObletBytes) + if goexperiment.AllocHeaders { + tp = s.typePointersOfUnchecked(s.base()) + tp = tp.fastForward(b-tp.addr, b+n) + } + } else { + if goexperiment.AllocHeaders { + tp = s.typePointersOfUnchecked(b) + } + } + + var hbits heapBits + if !goexperiment.AllocHeaders { + hbits = heapBitsForAddr(b, n) + } + var scanSize uintptr + for { + var addr uintptr + if goexperiment.AllocHeaders { + if tp, addr = tp.nextFast(); addr == 0 { + if tp, addr = tp.next(b + n); addr == 0 { + break + } + } + } else { + if hbits, addr = hbits.nextFast(); addr == 0 { + if hbits, addr = hbits.next(); addr == 0 { + break + } + } + } + + // Keep track of farthest pointer we found, so we can + // update heapScanWork. TODO: is there a better metric, + // now that we can skip scalar portions pretty efficiently? + scanSize = addr - b + goarch.PtrSize + + // Work here is duplicated in scanblock and above. + // If you make changes here, make changes there too. + obj := *(*uintptr)(unsafe.Pointer(addr)) + + // At this point we have extracted the next potential pointer. + // Quickly filter out nil and pointers back to the current object. + if obj != 0 && obj-b >= n { + // Test if obj points into the Go heap and, if so, + // mark the object. + // + // Note that it's possible for findObject to + // fail if obj points to a just-allocated heap + // object because of a race with growing the + // heap. In this case, we know the object was + // just allocated and hence will be marked by + // allocation itself. + if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 { + greyobject(obj, b, addr-b, span, gcw, objIndex) + } + } + } + gcw.bytesMarked += uint64(n) + gcw.heapScanWork += int64(scanSize) +} + +// scanConservative scans block [b, b+n) conservatively, treating any +// pointer-like value in the block as a pointer. +// +// If ptrmask != nil, only words that are marked in ptrmask are +// considered as potential pointers. +// +// If state != nil, it's assumed that [b, b+n) is a block in the stack +// and may contain pointers to stack objects. +func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) { + if debugScanConservative { + printlock() + print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n") + hexdumpWords(b, b+n, func(p uintptr) byte { + if ptrmask != nil { + word := (p - b) / goarch.PtrSize + bits := *addb(ptrmask, word/8) + if (bits>>(word%8))&1 == 0 { + return '$' + } + } + + val := *(*uintptr)(unsafe.Pointer(p)) + if state != nil && state.stack.lo <= val && val < state.stack.hi { + return '@' + } + + span := spanOfHeap(val) + if span == nil { + return ' ' + } + idx := span.objIndex(val) + if span.isFree(idx) { + return ' ' + } + return '*' + }) + printunlock() + } + + for i := uintptr(0); i < n; i += goarch.PtrSize { + if ptrmask != nil { + word := i / goarch.PtrSize + bits := *addb(ptrmask, word/8) + if bits == 0 { + // Skip 8 words (the loop increment will do the 8th) + // + // This must be the first time we've + // seen this word of ptrmask, so i + // must be 8-word-aligned, but check + // our reasoning just in case. + if i%(goarch.PtrSize*8) != 0 { + throw("misaligned mask") + } + i += goarch.PtrSize*8 - goarch.PtrSize + continue + } + if (bits>>(word%8))&1 == 0 { + continue + } + } + + val := *(*uintptr)(unsafe.Pointer(b + i)) + + // Check if val points into the stack. + if state != nil && state.stack.lo <= val && val < state.stack.hi { + // val may point to a stack object. 
This + // object may be dead from last cycle and + // hence may contain pointers to unallocated + // objects, but unlike heap objects we can't + // tell if it's already dead. Hence, if all + // pointers to this object are from + // conservative scanning, we have to scan it + // defensively, too. + state.putPtr(val, true) + continue + } + + // Check if val points to a heap span. + span := spanOfHeap(val) + if span == nil { + continue + } + + // Check if val points to an allocated object. + idx := span.objIndex(val) + if span.isFree(idx) { + continue + } + + // val points to an allocated object. Mark it. + obj := span.base() + idx*span.elemsize + greyobject(obj, b, i, span, gcw, idx) + } +} + +// Shade the object if it isn't already. +// The object is not nil and known to be in the heap. +// Preemption must be disabled. +// +//go:nowritebarrier +func shade(b uintptr) { + if obj, span, objIndex := findObject(b, 0, 0); obj != 0 { + gcw := &getg().m.p.ptr().gcw + greyobject(obj, 0, 0, span, gcw, objIndex) + } +} + +// obj is the start of an object with mark mbits. +// If it isn't already marked, mark it and enqueue into gcw. +// base and off are for debugging only and could be removed. +// +// See also wbBufFlush1, which partially duplicates this logic. +// +//go:nowritebarrierrec +func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) { + // obj should be start of allocation, and so must be at least pointer-aligned. + if obj&(goarch.PtrSize-1) != 0 { + throw("greyobject: obj not pointer-aligned") + } + mbits := span.markBitsForIndex(objIndex) + + if useCheckmark { + if setCheckmark(obj, base, off, mbits) { + // Already marked. + return + } + } else { + if debug.gccheckmark > 0 && span.isFree(objIndex) { + print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n") + gcDumpObject("base", base, off) + gcDumpObject("obj", obj, ^uintptr(0)) + getg().m.traceback = 2 + throw("marking free object") + } + + // If marked we have nothing to do. + if mbits.isMarked() { + return + } + mbits.setMarked() + + // Mark span. + arena, pageIdx, pageMask := pageIndexOf(span.base()) + if arena.pageMarks[pageIdx]&pageMask == 0 { + atomic.Or8(&arena.pageMarks[pageIdx], pageMask) + } + + // If this is a noscan object, fast-track it to black + // instead of greying it. + if span.spanclass.noscan() { + gcw.bytesMarked += uint64(span.elemsize) + return + } + } + + // We're adding obj to P's local workbuf, so it's likely + // this object will be processed soon by the same P. + // Even if the workbuf gets flushed, there will likely still be + // some benefit on platforms with inclusive shared caches. + sys.Prefetch(obj) + // Queue the obj for scanning. + if !gcw.putFast(obj) { + gcw.put(obj) + } +} + +// gcDumpObject dumps the contents of obj for debugging and marks the +// field at byte offset off in obj. +func gcDumpObject(label string, obj, off uintptr) { + s := spanOf(obj) + print(label, "=", hex(obj)) + if s == nil { + print(" s=nil\n") + return + } + print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=") + if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) { + print(mSpanStateNames[state], "\n") + } else { + print("unknown(", state, ")\n") + } + + skipped := false + size := s.elemsize + if s.state.get() == mSpanManual && size == 0 { + // We're printing something from a stack frame. 
We
+		// don't know how big it is, so just show up to and
+		// including off.
+		size = off + goarch.PtrSize
+	}
+	for i := uintptr(0); i < size; i += goarch.PtrSize {
+		// For big objects, just print the beginning (because
+		// that usually hints at the object's type) and the
+		// fields around off.
+		if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
+			skipped = true
+			continue
+		}
+		if skipped {
+			print(" ...\n")
+			skipped = false
+		}
+		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
+		if i == off {
+			print(" <==")
+		}
+		print("\n")
+	}
+	if skipped {
+		print(" ...\n")
+	}
+}
+
+// gcmarknewobject marks a newly allocated object black. obj must
+// not contain any non-nil pointers.
+//
+// This is nosplit so it can manipulate a gcWork without preemption.
+//
+//go:nowritebarrier
+//go:nosplit
+func gcmarknewobject(span *mspan, obj uintptr) {
+	if useCheckmark { // The world should be stopped so this should not happen.
+		throw("gcmarknewobject called while doing checkmark")
+	}
+
+	// Mark object.
+	objIndex := span.objIndex(obj)
+	span.markBitsForIndex(objIndex).setMarked()
+
+	// Mark span.
+	arena, pageIdx, pageMask := pageIndexOf(span.base())
+	if arena.pageMarks[pageIdx]&pageMask == 0 {
+		atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
+	}
+
+	gcw := &getg().m.p.ptr().gcw
+	gcw.bytesMarked += uint64(span.elemsize)
+}
+
+// gcMarkTinyAllocs greys all active tiny alloc blocks.
+//
+// The world must be stopped.
+func gcMarkTinyAllocs() {
+	assertWorldStopped()
+
+	for _, p := range allp {
+		c := p.mcache
+		if c == nil || c.tiny == 0 {
+			continue
+		}
+		_, span, objIndex := findObject(c.tiny, 0, 0)
+		gcw := &p.gcw
+		greyobject(c.tiny, 0, 0, span, gcw, objIndex)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/mgcpacer.go b/platform/dbops/binaries/go/go/src/runtime/mgcpacer.go
new file mode 100644
index 0000000000000000000000000000000000000000..e9af3d60cdc2586ddb558e1572b0865188ef7b3c
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/mgcpacer.go
@@ -0,0 +1,1446 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+	"internal/cpu"
+	"internal/goexperiment"
+	"runtime/internal/atomic"
+	_ "unsafe" // for go:linkname
+)
+
+const (
+	// gcGoalUtilization is the goal CPU utilization for
+	// marking as a fraction of GOMAXPROCS.
+	//
+	// Increasing the goal utilization will shorten GC cycles as the GC
+	// has more resources behind it, lessening costs from the write barrier,
+	// but comes at the cost of increasing mutator latency.
+	gcGoalUtilization = gcBackgroundUtilization
+
+	// gcBackgroundUtilization is the fixed CPU utilization for background
+	// marking. It must be <= gcGoalUtilization. The difference between
+	// gcGoalUtilization and gcBackgroundUtilization will be made up by
+	// mark assists. The scheduler will aim to use within 50% of this
+	// goal.
+	//
+	// As a general rule, there's little reason to set gcBackgroundUtilization
+	// < gcGoalUtilization. One reason might be in mostly idle applications,
+	// where goroutines are unlikely to assist at all, so the actual
+	// utilization will be lower than the goal. But this is a moot point
+	// because the idle mark workers already soak up idle CPU resources.
+	// These two values are still kept separate however because they are
+	// distinct conceptually, and in previous iterations of the pacer the
+	// distinction was more important.
+	gcBackgroundUtilization = 0.25
+
+	// gcCreditSlack is the amount of scan work credit that can
+	// accumulate locally before updating gcController.heapScanWork and,
+	// optionally, gcController.bgScanCredit. Lower values give a more
+	// accurate assist ratio and make it more likely that assists will
+	// successfully steal background credit. Higher values reduce memory
+	// contention.
+	gcCreditSlack = 2000
+
+	// gcAssistTimeSlack is the nanoseconds of mutator assist time that
+	// can accumulate on a P before updating gcController.assistTime.
+	gcAssistTimeSlack = 5000
+
+	// gcOverAssistWork determines how many extra units of scan work a GC
+	// assist does when an assist happens. This amortizes the cost of an
+	// assist by pre-paying for this many bytes of future allocations.
+	gcOverAssistWork = 64 << 10
+
+	// defaultHeapMinimum is the value of heapMinimum for GOGC==100.
+	defaultHeapMinimum = (goexperiment.HeapMinimum512KiBInt)*(512<<10) +
+		(1-goexperiment.HeapMinimum512KiBInt)*(4<<20)
+
+	// maxStackScanSlack is the bytes of stack space allocated or freed
+	// that can accumulate on a P before updating gcController.stackSize.
+	maxStackScanSlack = 8 << 10
+
+	// memoryLimitMinHeapGoalHeadroom is the minimum amount of headroom the
+	// pacer gives to the heap goal when operating in the memory-limited regime.
+	// That is, it'll reduce the heap goal by this many extra bytes off of the
+	// base calculation, at minimum.
+	memoryLimitMinHeapGoalHeadroom = 1 << 20
+
+	// memoryLimitHeapGoalHeadroomPercent is how much headroom the memory-limit-based
+	// heap goal should have as a percent of the maximum possible heap goal allowed
+	// to maintain the memory limit.
+	memoryLimitHeapGoalHeadroomPercent = 3
+)
+
+// gcController implements the GC pacing controller that determines
+// when to trigger concurrent garbage collection and how much marking
+// work to do in mutator assists and background marking.
+//
+// It calculates the ratio between the allocation rate (in terms of CPU
+// time) and the GC scan throughput to determine the heap size at which to
+// trigger a GC cycle such that no GC assists are required to finish on time.
+// This algorithm thus optimizes GC CPU utilization to the dedicated background
+// mark utilization of 25% of GOMAXPROCS by minimizing GC assists.
+// The high-level design of this algorithm is documented
+// at https://github.com/golang/proposal/blob/master/design/44167-gc-pacer-redesign.md.
+// See https://golang.org/s/go15gcpacing for additional historical context.
+var gcController gcControllerState
+
+type gcControllerState struct {
+	// Initialized from GOGC. GOGC=off means no GC.
+	gcPercent atomic.Int32
+
+	// memoryLimit is the soft memory limit in bytes.
+	//
+	// Initialized from GOMEMLIMIT. GOMEMLIMIT=off is equivalent to MaxInt64
+	// which means no soft memory limit in practice.
+	//
+	// This is an int64 instead of a uint64 to more easily maintain parity with
+	// the SetMemoryLimit API, which sets a maximum at MaxInt64. This value
+	// should never be negative.
+	memoryLimit atomic.Int64
+
+	// heapMinimum is the minimum heap size at which to trigger GC.
+	// For small heaps, this overrides the usual GOGC*live set rule.
+ // + // When there is a very small live set but a lot of allocation, simply + // collecting when the heap reaches GOGC*live results in many GC + // cycles and high total per-GC overhead. This minimum amortizes this + // per-GC overhead while keeping the heap reasonably small. + // + // During initialization this is set to 4MB*GOGC/100. In the case of + // GOGC==0, this will set heapMinimum to 0, resulting in constant + // collection even when the heap size is small, which is useful for + // debugging. + heapMinimum uint64 + + // runway is the amount of runway in heap bytes allocated by the + // application that we want to give the GC once it starts. + // + // This is computed from consMark during mark termination. + runway atomic.Uint64 + + // consMark is the estimated per-CPU consMark ratio for the application. + // + // It represents the ratio between the application's allocation + // rate, as bytes allocated per CPU-time, and the GC's scan rate, + // as bytes scanned per CPU-time. + // The units of this ratio are (B / cpu-ns) / (B / cpu-ns). + // + // At a high level, this value is computed as the bytes of memory + // allocated (cons) per unit of scan work completed (mark) in a GC + // cycle, divided by the CPU time spent on each activity. + // + // Updated at the end of each GC cycle, in endCycle. + consMark float64 + + // lastConsMark is the computed cons/mark value for the previous 4 GC + // cycles. Note that this is *not* the last value of consMark, but the + // measured cons/mark value in endCycle. + lastConsMark [4]float64 + + // gcPercentHeapGoal is the goal heapLive for when next GC ends derived + // from gcPercent. + // + // Set to ^uint64(0) if gcPercent is disabled. + gcPercentHeapGoal atomic.Uint64 + + // sweepDistMinTrigger is the minimum trigger to ensure a minimum + // sweep distance. + // + // This bound is also special because it applies to both the trigger + // *and* the goal (all other trigger bounds must be based *on* the goal). + // + // It is computed ahead of time, at commit time. The theory is that, + // absent a sudden change to a parameter like gcPercent, the trigger + // will be chosen to always give the sweeper enough headroom. However, + // such a change might dramatically and suddenly move up the trigger, + // in which case we need to ensure the sweeper still has enough headroom. + sweepDistMinTrigger atomic.Uint64 + + // triggered is the point at which the current GC cycle actually triggered. + // Only valid during the mark phase of a GC cycle, otherwise set to ^uint64(0). + // + // Updated while the world is stopped. + triggered uint64 + + // lastHeapGoal is the value of heapGoal at the moment the last GC + // ended. Note that this is distinct from the last value heapGoal had, + // because it could change if e.g. gcPercent changes. + // + // Read and written with the world stopped or with mheap_.lock held. + lastHeapGoal uint64 + + // heapLive is the number of bytes considered live by the GC. + // That is: retained by the most recent GC plus allocated + // since then. heapLive ≤ memstats.totalAlloc-memstats.totalFree, since + // heapAlloc includes unmarked objects that have not yet been swept (and + // hence goes up as we allocate and down as we sweep) while heapLive + // excludes these objects (and hence only goes up between GCs). 
+ // + // To reduce contention, this is updated only when obtaining a span + // from an mcentral and at this point it counts all of the unallocated + // slots in that span (which will be allocated before that mcache + // obtains another span from that mcentral). Hence, it slightly + // overestimates the "true" live heap size. It's better to overestimate + // than to underestimate because 1) this triggers the GC earlier than + // necessary rather than potentially too late and 2) this leads to a + // conservative GC rate rather than a GC rate that is potentially too + // low. + // + // Whenever this is updated, call traceHeapAlloc() and + // this gcControllerState's revise() method. + heapLive atomic.Uint64 + + // heapScan is the number of bytes of "scannable" heap. This is the + // live heap (as counted by heapLive), but omitting no-scan objects and + // no-scan tails of objects. + // + // This value is fixed at the start of a GC cycle. It represents the + // maximum scannable heap. + heapScan atomic.Uint64 + + // lastHeapScan is the number of bytes of heap that were scanned + // last GC cycle. It is the same as heapMarked, but only + // includes the "scannable" parts of objects. + // + // Updated when the world is stopped. + lastHeapScan uint64 + + // lastStackScan is the number of bytes of stack that were scanned + // last GC cycle. + lastStackScan atomic.Uint64 + + // maxStackScan is the amount of allocated goroutine stack space in + // use by goroutines. + // + // This number tracks allocated goroutine stack space rather than used + // goroutine stack space (i.e. what is actually scanned) because used + // goroutine stack space is much harder to measure cheaply. By using + // allocated space, we make an overestimate; this is OK, it's better + // to conservatively overcount than undercount. + maxStackScan atomic.Uint64 + + // globalsScan is the total amount of global variable space + // that is scannable. + globalsScan atomic.Uint64 + + // heapMarked is the number of bytes marked by the previous + // GC. After mark termination, heapLive == heapMarked, but + // unlike heapLive, heapMarked does not change until the + // next mark termination. + heapMarked uint64 + + // heapScanWork is the total heap scan work performed this cycle. + // stackScanWork is the total stack scan work performed this cycle. + // globalsScanWork is the total globals scan work performed this cycle. + // + // These are updated atomically during the cycle. Updates occur in + // bounded batches, since they are both written and read + // throughout the cycle. At the end of the cycle, heapScanWork is how + // much of the retained heap is scannable. + // + // Currently these are measured in bytes. For most uses, this is an + // opaque unit of work, but for estimation the definition is important. + // + // Note that stackScanWork includes only stack space scanned, not all + // of the allocated stack. + heapScanWork atomic.Int64 + stackScanWork atomic.Int64 + globalsScanWork atomic.Int64 + + // bgScanCredit is the scan work credit accumulated by the concurrent + // background scan. This credit is accumulated by the background scan + // and stolen by mutator assists. Updates occur in bounded batches, + // since it is both written and read throughout the cycle. + bgScanCredit atomic.Int64 + + // assistTime is the nanoseconds spent in mutator assists + // during this cycle. This is updated atomically, and must also + // be updated atomically even during a STW, because it is read + // by sysmon. 
Updates occur in bounded batches, since it is both
+	// written and read throughout the cycle.
+	assistTime atomic.Int64
+
+	// dedicatedMarkTime is the nanoseconds spent in dedicated mark workers
+	// during this cycle. This is updated at the end of the concurrent mark
+	// phase.
+	dedicatedMarkTime atomic.Int64
+
+	// fractionalMarkTime is the nanoseconds spent in the fractional mark
+	// worker during this cycle. This is updated throughout the cycle and
+	// will be up-to-date if the fractional mark worker is not currently
+	// running.
+	fractionalMarkTime atomic.Int64
+
+	// idleMarkTime is the nanoseconds spent in idle marking during this
+	// cycle. This is updated throughout the cycle.
+	idleMarkTime atomic.Int64
+
+	// markStartTime is the absolute start time in nanoseconds
+	// that assists and background mark workers started.
+	markStartTime int64
+
+	// dedicatedMarkWorkersNeeded is the number of dedicated mark workers
+	// that need to be started. This is computed at the beginning of each
+	// cycle and decremented as dedicated mark workers get started.
+	dedicatedMarkWorkersNeeded atomic.Int64
+
+	// idleMarkWorkers is two packed int32 values in a single uint64.
+	// These two values are always updated simultaneously.
+	//
+	// The bottom int32 is the current number of idle mark workers executing.
+	//
+	// The top int32 is the maximum number of idle mark workers allowed to
+	// execute concurrently. Normally, this number is just gomaxprocs. However,
+	// during periodic GC cycles it is set to 0 because the system is idle
+	// anyway; there's no need to go full blast on all of GOMAXPROCS.
+	//
+	// The maximum number of idle mark workers is used to prevent new workers
+	// from starting, but it is not a hard maximum. It is possible (but
+	// exceedingly rare) for the current number of idle mark workers to
+	// transiently exceed the maximum. This could happen if the maximum changes
+	// just after a GC ends while an M with no P is still in the process of
+	// becoming an idle mark worker.
+	//
+	// Note that if we have no dedicated mark workers, we set this value to
+	// 1; in that case we only have fractional GC workers, which aren't scheduled
+	// strictly enough to ensure GC progress. As a result, idle-priority mark
+	// workers are vital to GC progress in these situations.
+	//
+	// For example, consider a situation in which goroutines block on the GC
+	// (such as via runtime.GOMAXPROCS) and only fractional mark workers are
+	// scheduled (e.g. GOMAXPROCS=1). Without idle-priority mark workers, the
+	// last running M might skip scheduling a fractional mark worker if its
+	// utilization goal is met, such that once it goes to sleep (because there's
+	// nothing to do), there will be nothing else to spin up a new M for the
+	// fractional worker in the future, stalling GC progress and causing a
+	// deadlock. However, idle-priority workers will *always* run when there is
+	// nothing left to do, ensuring the GC makes progress.
+	//
+	// See github.com/golang/go/issues/44163 for more details.
+	idleMarkWorkers atomic.Uint64
+
+	// assistWorkPerByte is the ratio of scan work to allocated
+	// bytes that should be performed by mutator assists. This is
+	// computed at the beginning of each cycle and updated every
+	// time heapScan is updated.
+	assistWorkPerByte atomic.Float64
+
+	// assistBytesPerWork is 1/assistWorkPerByte.
+	//
+	// Note that because this is read and written independently
+	// from assistWorkPerByte, users may notice a skew between
+	// the two values; such a state should be safe.
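+	//
+	// For example (illustrative numbers): if assistWorkPerByte is 0.5,
+	// a mutator allocating 4096 bytes owes 2048 bytes of scan work, and
+	// assistBytesPerWork (2.0) converts any background scan credit it
+	// steals back into bytes of allocation credit.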
+ assistBytesPerWork atomic.Float64 + + // fractionalUtilizationGoal is the fraction of wall clock + // time that should be spent in the fractional mark worker on + // each P that isn't running a dedicated worker. + // + // For example, if the utilization goal is 25% and there are + // no dedicated workers, this will be 0.25. If the goal is + // 25%, there is one dedicated worker, and GOMAXPROCS is 5, + // this will be 0.05 to make up the missing 5%. + // + // If this is zero, no fractional workers are needed. + fractionalUtilizationGoal float64 + + // These memory stats are effectively duplicates of fields from + // memstats.heapStats but are updated atomically or with the world + // stopped and don't provide the same consistency guarantees. + // + // Because the runtime is responsible for managing a memory limit, it's + // useful to couple these stats more tightly to the gcController, which + // is intimately connected to how that memory limit is maintained. + heapInUse sysMemStat // bytes in mSpanInUse spans + heapReleased sysMemStat // bytes released to the OS + heapFree sysMemStat // bytes not in any span, but not released to the OS + totalAlloc atomic.Uint64 // total bytes allocated + totalFree atomic.Uint64 // total bytes freed + mappedReady atomic.Uint64 // total virtual memory in the Ready state (see mem.go). + + // test indicates that this is a test-only copy of gcControllerState. + test bool + + _ cpu.CacheLinePad +} + +func (c *gcControllerState) init(gcPercent int32, memoryLimit int64) { + c.heapMinimum = defaultHeapMinimum + c.triggered = ^uint64(0) + c.setGCPercent(gcPercent) + c.setMemoryLimit(memoryLimit) + c.commit(true) // No sweep phase in the first GC cycle. + // N.B. Don't bother calling traceHeapGoal. Tracing is never enabled at + // initialization time. + // N.B. No need to call revise; there's no GC enabled during + // initialization. +} + +// startCycle resets the GC controller's state and computes estimates +// for a new GC cycle. The caller must hold worldsema and the world +// must be stopped. +func (c *gcControllerState) startCycle(markStartTime int64, procs int, trigger gcTrigger) { + c.heapScanWork.Store(0) + c.stackScanWork.Store(0) + c.globalsScanWork.Store(0) + c.bgScanCredit.Store(0) + c.assistTime.Store(0) + c.dedicatedMarkTime.Store(0) + c.fractionalMarkTime.Store(0) + c.idleMarkTime.Store(0) + c.markStartTime = markStartTime + c.triggered = c.heapLive.Load() + + // Compute the background mark utilization goal. In general, + // this may not come out exactly. We round the number of + // dedicated workers so that the utilization is closest to + // 25%. For small GOMAXPROCS, this would introduce too much + // error, so we add fractional workers in that case. + totalUtilizationGoal := float64(procs) * gcBackgroundUtilization + dedicatedMarkWorkersNeeded := int64(totalUtilizationGoal + 0.5) + utilError := float64(dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1 + const maxUtilError = 0.3 + if utilError < -maxUtilError || utilError > maxUtilError { + // Rounding put us more than 30% off our goal. With + // gcBackgroundUtilization of 25%, this happens for + // GOMAXPROCS<=3 or GOMAXPROCS=6. Enable fractional + // workers to compensate. + if float64(dedicatedMarkWorkersNeeded) > totalUtilizationGoal { + // Too many dedicated workers. 
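+			// For example, with procs=2 (illustrative numbers) the
+			// utilization goal is 0.5 workers, which rounds to 1
+			// dedicated worker (a 100% error), so we drop back to 0
+			// dedicated workers below and make up the difference with
+			// a fractional goal of 0.25.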
+			dedicatedMarkWorkersNeeded--
+		}
+		c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(dedicatedMarkWorkersNeeded)) / float64(procs)
+	} else {
+		c.fractionalUtilizationGoal = 0
+	}
+
+	// In STW mode, we just want dedicated workers.
+	if debug.gcstoptheworld > 0 {
+		dedicatedMarkWorkersNeeded = int64(procs)
+		c.fractionalUtilizationGoal = 0
+	}
+
+	// Clear per-P state
+	for _, p := range allp {
+		p.gcAssistTime = 0
+		p.gcFractionalMarkTime = 0
+	}
+
+	if trigger.kind == gcTriggerTime {
+		// During a periodic GC cycle, reduce the number of idle mark workers
+		// required. However, we need at least one dedicated mark worker or
+		// idle GC worker to ensure GC progress in some scenarios (see comment
+		// on maxIdleMarkWorkers).
+		if dedicatedMarkWorkersNeeded > 0 {
+			c.setMaxIdleMarkWorkers(0)
+		} else {
+			// TODO(mknyszek): The fundamental reason why we need this is because
+			// we can't count on the fractional mark worker to get scheduled.
+			// Fix that by ensuring it gets scheduled according to its quota even
+			// if the rest of the application is idle.
+			c.setMaxIdleMarkWorkers(1)
+		}
+	} else {
+		// N.B. gomaxprocs and dedicatedMarkWorkersNeeded are guaranteed not to
+		// change during a GC cycle.
+		c.setMaxIdleMarkWorkers(int32(procs) - int32(dedicatedMarkWorkersNeeded))
+	}
+
+	// Compute initial values for controls that are updated
+	// throughout the cycle.
+	c.dedicatedMarkWorkersNeeded.Store(dedicatedMarkWorkersNeeded)
+	c.revise()
+
+	if debug.gcpacertrace > 0 {
+		heapGoal := c.heapGoal()
+		assistRatio := c.assistWorkPerByte.Load()
+		print("pacer: assist ratio=", assistRatio,
+			" (scan ", gcController.heapScan.Load()>>20, " MB in ",
+			work.initialHeapLive>>20, "->",
+			heapGoal>>20, " MB)",
+			" workers=", dedicatedMarkWorkersNeeded,
+			"+", c.fractionalUtilizationGoal, "\n")
+	}
+}
+
+// revise updates the assist ratio during the GC cycle to account for
+// improved estimates. This should be called whenever gcController.heapScan
+// or gcController.heapLive is updated, or if any inputs to
+// gcController.heapGoal change. It is safe to call concurrently, but it
+// may race with other calls to revise.
+//
+// The result of this race is that the two assist ratio values may not line
+// up or may be stale. In practice this is OK because the assist ratio
+// moves slowly throughout a GC cycle, and the assist ratio is a best-effort
+// heuristic anyway. Furthermore, no part of the heuristic depends on
+// the two assist ratio values being exact reciprocals of one another, since
+// the two values are used to convert values from different sources.
+//
+// The worst case result of this raciness is that we may miss a larger shift
+// in the ratio (say, if we decide to pace more aggressively against the
+// hard heap goal) but even this "hard goal" is best-effort (see #40460).
+// The dedicated GC should ensure we don't exceed the hard goal by too much
+// in the rare case we do exceed it.
+//
+// It should only be called when gcBlackenEnabled != 0 (because this
+// is when assists are enabled and the necessary statistics are
+// available).
+func (c *gcControllerState) revise() {
+	gcPercent := c.gcPercent.Load()
+	if gcPercent < 0 {
+		// If GC is disabled but we're running a forced GC,
+		// act like GOGC is huge for the below calculations.
+		gcPercent = 100000
+	}
+	live := c.heapLive.Load()
+	scan := c.heapScan.Load()
+	work := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
+
+	// Assume we're under the soft goal.
Pace GC to complete at + // heapGoal assuming the heap is in steady-state. + heapGoal := int64(c.heapGoal()) + + // The expected scan work is computed as the amount of bytes scanned last + // GC cycle (both heap and stack), plus our estimate of globals work for this cycle. + scanWorkExpected := int64(c.lastHeapScan + c.lastStackScan.Load() + c.globalsScan.Load()) + + // maxScanWork is a worst-case estimate of the amount of scan work that + // needs to be performed in this GC cycle. Specifically, it represents + // the case where *all* scannable memory turns out to be live, and + // *all* allocated stack space is scannable. + maxStackScan := c.maxStackScan.Load() + maxScanWork := int64(scan + maxStackScan + c.globalsScan.Load()) + if work > scanWorkExpected { + // We've already done more scan work than expected. Because our expectation + // is based on a steady-state scannable heap size, we assume this means our + // heap is growing. Compute a new heap goal that takes our existing runway + // computed for scanWorkExpected and extrapolates it to maxScanWork, the worst-case + // scan work. This keeps our assist ratio stable if the heap continues to grow. + // + // The effect of this mechanism is that assists stay flat in the face of heap + // growths. It's OK to use more memory this cycle to scan all the live heap, + // because the next GC cycle is inevitably going to use *at least* that much + // memory anyway. + extHeapGoal := int64(float64(heapGoal-int64(c.triggered))/float64(scanWorkExpected)*float64(maxScanWork)) + int64(c.triggered) + scanWorkExpected = maxScanWork + + // hardGoal is a hard limit on the amount that we're willing to push back the + // heap goal, and that's twice the heap goal (i.e. if GOGC=100 and the heap and/or + // stacks and/or globals grow to twice their size, this limits the current GC cycle's + // growth to 4x the original live heap's size). + // + // This maintains the invariant that we use no more memory than the next GC cycle + // will anyway. + hardGoal := int64((1.0 + float64(gcPercent)/100.0) * float64(heapGoal)) + if extHeapGoal > hardGoal { + extHeapGoal = hardGoal + } + heapGoal = extHeapGoal + } + if int64(live) > heapGoal { + // We're already past our heap goal, even the extrapolated one. + // Leave ourselves some extra runway, so in the worst case we + // finish by that point. + const maxOvershoot = 1.1 + heapGoal = int64(float64(heapGoal) * maxOvershoot) + + // Compute the upper bound on the scan work remaining. + scanWorkExpected = maxScanWork + } + + // Compute the remaining scan work estimate. + // + // Note that we currently count allocations during GC as both + // scannable heap (heapScan) and scan work completed + // (scanWork), so allocation will change this difference + // slowly in the soft regime and not at all in the hard + // regime. + scanWorkRemaining := scanWorkExpected - work + if scanWorkRemaining < 1000 { + // We set a somewhat arbitrary lower bound on + // remaining scan work since if we aim a little high, + // we can miss by a little. + // + // We *do* need to enforce that this is at least 1, + // since marking is racy and double-scanning objects + // may legitimately make the remaining scan work + // negative, even in the hard goal regime. + scanWorkRemaining = 1000 + } + + // Compute the heap distance remaining. + heapRemaining := heapGoal - int64(live) + if heapRemaining <= 0 { + // This shouldn't happen, but if it does, avoid + // dividing by zero or setting the assist negative. 
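+		// (heapRemaining is the denominator of assistWorkPerByte and
+		// the numerator of assistBytesPerWork below, so clamping it to
+		// 1 keeps both ratios finite and positive.)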
+ heapRemaining = 1 + } + + // Compute the mutator assist ratio so by the time the mutator + // allocates the remaining heap bytes up to heapGoal, it will + // have done (or stolen) the remaining amount of scan work. + // Note that the assist ratio values are updated atomically + // but not together. This means there may be some degree of + // skew between the two values. This is generally OK as the + // values shift relatively slowly over the course of a GC + // cycle. + assistWorkPerByte := float64(scanWorkRemaining) / float64(heapRemaining) + assistBytesPerWork := float64(heapRemaining) / float64(scanWorkRemaining) + c.assistWorkPerByte.Store(assistWorkPerByte) + c.assistBytesPerWork.Store(assistBytesPerWork) +} + +// endCycle computes the consMark estimate for the next cycle. +// userForced indicates whether the current GC cycle was forced +// by the application. +func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) { + // Record last heap goal for the scavenger. + // We'll be updating the heap goal soon. + gcController.lastHeapGoal = c.heapGoal() + + // Compute the duration of time for which assists were turned on. + assistDuration := now - c.markStartTime + + // Assume background mark hit its utilization goal. + utilization := gcBackgroundUtilization + // Add assist utilization; avoid divide by zero. + if assistDuration > 0 { + utilization += float64(c.assistTime.Load()) / float64(assistDuration*int64(procs)) + } + + if c.heapLive.Load() <= c.triggered { + // Shouldn't happen, but let's be very safe about this in case the + // GC is somehow extremely short. + // + // In this case though, the only reasonable value for c.heapLive-c.triggered + // would be 0, which isn't really all that useful, i.e. the GC was so short + // that it didn't matter. + // + // Ignore this case and don't update anything. + return + } + idleUtilization := 0.0 + if assistDuration > 0 { + idleUtilization = float64(c.idleMarkTime.Load()) / float64(assistDuration*int64(procs)) + } + // Determine the cons/mark ratio. + // + // The units we want for the numerator and denominator are both B / cpu-ns. + // We get this by taking the bytes allocated or scanned, and divide by the amount of + // CPU time it took for those operations. For allocations, that CPU time is + // + // assistDuration * procs * (1 - utilization) + // + // Where utilization includes just background GC workers and assists. It does *not* + // include idle GC work time, because in theory the mutator is free to take that at + // any point. + // + // For scanning, that CPU time is + // + // assistDuration * procs * (utilization + idleUtilization) + // + // In this case, we *include* idle utilization, because that is additional CPU time that + // the GC had available to it. + // + // In effect, idle GC time is sort of double-counted here, but it's very weird compared + // to other kinds of GC work, because of how fluid it is. Namely, because the mutator is + // *always* free to take it. + // + // So this calculation is really: + // (heapLive-trigger) / (assistDuration * procs * (1-utilization)) / + // (scanWork) / (assistDuration * procs * (utilization+idleUtilization)) + // + // Note that because we only care about the ratio, assistDuration and procs cancel out. + scanWork := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load() + currentConsMark := (float64(c.heapLive.Load()-c.triggered) * (utilization + idleUtilization)) / + (float64(scanWork) * (1 - utilization)) + + // Update our cons/mark estimate. 
This is the maximum of the value we just computed and the last + // 4 cons/mark values we measured. The reason we take the maximum here is to bias a noisy + // cons/mark measurement toward fewer assists at the expense of additional GC cycles (starting + // earlier). + oldConsMark := c.consMark + c.consMark = currentConsMark + for i := range c.lastConsMark { + if c.lastConsMark[i] > c.consMark { + c.consMark = c.lastConsMark[i] + } + } + copy(c.lastConsMark[:], c.lastConsMark[1:]) + c.lastConsMark[len(c.lastConsMark)-1] = currentConsMark + + if debug.gcpacertrace > 0 { + printlock() + goal := gcGoalUtilization * 100 + print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ") + print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load(), " B exp.) ") + live := c.heapLive.Load() + print("in ", c.triggered, " B -> ", live, " B (∆goal ", int64(live)-int64(c.lastHeapGoal), ", cons/mark ", oldConsMark, ")") + println() + printunlock() + } +} + +// enlistWorker encourages another dedicated mark worker to start on +// another P if there are spare worker slots. It is used by putfull +// when more work is made available. +// +//go:nowritebarrier +func (c *gcControllerState) enlistWorker() { + // If there are idle Ps, wake one so it will run an idle worker. + // NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112. + // + // if sched.npidle.Load() != 0 && sched.nmspinning.Load() == 0 { + // wakep() + // return + // } + + // There are no idle Ps. If we need more dedicated workers, + // try to preempt a running P so it will switch to a worker. + if c.dedicatedMarkWorkersNeeded.Load() <= 0 { + return + } + // Pick a random other P to preempt. + if gomaxprocs <= 1 { + return + } + gp := getg() + if gp == nil || gp.m == nil || gp.m.p == 0 { + return + } + myID := gp.m.p.ptr().id + for tries := 0; tries < 5; tries++ { + id := int32(cheaprandn(uint32(gomaxprocs - 1))) + if id >= myID { + id++ + } + p := allp[id] + if p.status != _Prunning { + continue + } + if preemptone(p) { + return + } + } +} + +// findRunnableGCWorker returns a background mark worker for pp if it +// should be run. This must only be called when gcBlackenEnabled != 0. +func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) { + if gcBlackenEnabled == 0 { + throw("gcControllerState.findRunnable: blackening not enabled") + } + + // Since we have the current time, check if the GC CPU limiter + // hasn't had an update in a while. This check is necessary in + // case the limiter is on but hasn't been checked in a while and + // so may have left sufficient headroom to turn off again. + if now == 0 { + now = nanotime() + } + if gcCPULimiter.needUpdate(now) { + gcCPULimiter.update(now) + } + + if !gcMarkWorkAvailable(pp) { + // No work to be done right now. This can happen at + // the end of the mark phase when there are still + // assists tapering off. Don't bother running a worker + // now because it'll just return immediately. + return nil, now + } + + // Grab a worker before we commit to running below. + node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop()) + if node == nil { + // There is at least one worker per P, so normally there are + // enough workers to run on all Ps, if necessary. However, once + // a worker enters gcMarkDone it may park without rejoining the + // pool, thus freeing a P with no corresponding worker. 
+ // gcMarkDone never depends on another worker doing work, so it + // is safe to simply do nothing here. + // + // If gcMarkDone bails out without completing the mark phase, + // it will always do so with queued global work. Thus, that P + // will be immediately eligible to re-run the worker G it was + // just using, ensuring work can complete. + return nil, now + } + + decIfPositive := func(val *atomic.Int64) bool { + for { + v := val.Load() + if v <= 0 { + return false + } + + if val.CompareAndSwap(v, v-1) { + return true + } + } + } + + if decIfPositive(&c.dedicatedMarkWorkersNeeded) { + // This P is now dedicated to marking until the end of + // the concurrent mark phase. + pp.gcMarkWorkerMode = gcMarkWorkerDedicatedMode + } else if c.fractionalUtilizationGoal == 0 { + // No need for fractional workers. + gcBgMarkWorkerPool.push(&node.node) + return nil, now + } else { + // Is this P behind on the fractional utilization + // goal? + // + // This should be kept in sync with pollFractionalWorkerExit. + delta := now - c.markStartTime + if delta > 0 && float64(pp.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal { + // Nope. No need to run a fractional worker. + gcBgMarkWorkerPool.push(&node.node) + return nil, now + } + // Run a fractional worker. + pp.gcMarkWorkerMode = gcMarkWorkerFractionalMode + } + + // Run the background mark worker. + gp := node.gp.ptr() + trace := traceAcquire() + casgstatus(gp, _Gwaiting, _Grunnable) + if trace.ok() { + trace.GoUnpark(gp, 0) + traceRelease(trace) + } + return gp, now +} + +// resetLive sets up the controller state for the next mark phase after the end +// of the previous one. Must be called after endCycle and before commit, before +// the world is started. +// +// The world must be stopped. +func (c *gcControllerState) resetLive(bytesMarked uint64) { + c.heapMarked = bytesMarked + c.heapLive.Store(bytesMarked) + c.heapScan.Store(uint64(c.heapScanWork.Load())) + c.lastHeapScan = uint64(c.heapScanWork.Load()) + c.lastStackScan.Store(uint64(c.stackScanWork.Load())) + c.triggered = ^uint64(0) // Reset triggered. + + // heapLive was updated, so emit a trace event. + trace := traceAcquire() + if trace.ok() { + trace.HeapAlloc(bytesMarked) + traceRelease(trace) + } +} + +// markWorkerStop must be called whenever a mark worker stops executing. +// +// It updates mark work accounting in the controller by a duration of +// work in nanoseconds and other bookkeeping. +// +// Safe to execute at any time. +func (c *gcControllerState) markWorkerStop(mode gcMarkWorkerMode, duration int64) { + switch mode { + case gcMarkWorkerDedicatedMode: + c.dedicatedMarkTime.Add(duration) + c.dedicatedMarkWorkersNeeded.Add(1) + case gcMarkWorkerFractionalMode: + c.fractionalMarkTime.Add(duration) + case gcMarkWorkerIdleMode: + c.idleMarkTime.Add(duration) + c.removeIdleMarkWorker() + default: + throw("markWorkerStop: unknown mark worker mode") + } +} + +func (c *gcControllerState) update(dHeapLive, dHeapScan int64) { + if dHeapLive != 0 { + trace := traceAcquire() + live := gcController.heapLive.Add(dHeapLive) + if trace.ok() { + // gcController.heapLive changed. + trace.HeapAlloc(live) + traceRelease(trace) + } + } + if gcBlackenEnabled == 0 { + // Update heapScan when we're not in a current GC. It is fixed + // at the beginning of a cycle. + if dHeapScan != 0 { + gcController.heapScan.Add(dHeapScan) + } + } else { + // gcController.heapLive changed. 
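+		// Recompute the assist ratio so in-flight assists observe
+		// the updated heapLive.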
+ c.revise() + } +} + +func (c *gcControllerState) addScannableStack(pp *p, amount int64) { + if pp == nil { + c.maxStackScan.Add(amount) + return + } + pp.maxStackScanDelta += amount + if pp.maxStackScanDelta >= maxStackScanSlack || pp.maxStackScanDelta <= -maxStackScanSlack { + c.maxStackScan.Add(pp.maxStackScanDelta) + pp.maxStackScanDelta = 0 + } +} + +func (c *gcControllerState) addGlobals(amount int64) { + c.globalsScan.Add(amount) +} + +// heapGoal returns the current heap goal. +func (c *gcControllerState) heapGoal() uint64 { + goal, _ := c.heapGoalInternal() + return goal +} + +// heapGoalInternal is the implementation of heapGoal which returns additional +// information that is necessary for computing the trigger. +// +// The returned minTrigger is always <= goal. +func (c *gcControllerState) heapGoalInternal() (goal, minTrigger uint64) { + // Start with the goal calculated for gcPercent. + goal = c.gcPercentHeapGoal.Load() + + // Check if the memory-limit-based goal is smaller, and if so, pick that. + if newGoal := c.memoryLimitHeapGoal(); newGoal < goal { + goal = newGoal + } else { + // We're not limited by the memory limit goal, so perform a series of + // adjustments that might move the goal forward in a variety of circumstances. + + sweepDistTrigger := c.sweepDistMinTrigger.Load() + if sweepDistTrigger > goal { + // Set the goal to maintain a minimum sweep distance since + // the last call to commit. Note that we never want to do this + // if we're in the memory limit regime, because it could push + // the goal up. + goal = sweepDistTrigger + } + // Since we ignore the sweep distance trigger in the memory + // limit regime, we need to ensure we don't propagate it to + // the trigger, because it could cause a violation of the + // invariant that the trigger < goal. + minTrigger = sweepDistTrigger + + // Ensure that the heap goal is at least a little larger than + // the point at which we triggered. This may not be the case if GC + // start is delayed or if the allocation that pushed gcController.heapLive + // over trigger is large or if the trigger is really close to + // GOGC. Assist is proportional to this distance, so enforce a + // minimum distance, even if it means going over the GOGC goal + // by a tiny bit. + // + // Ignore this if we're in the memory limit regime: we'd prefer to + // have the GC respond hard about how close we are to the goal than to + // push the goal back in such a manner that it could cause us to exceed + // the memory limit. + const minRunway = 64 << 10 + if c.triggered != ^uint64(0) && goal < c.triggered+minRunway { + goal = c.triggered + minRunway + } + } + return +} + +// memoryLimitHeapGoal returns a heap goal derived from memoryLimit. +func (c *gcControllerState) memoryLimitHeapGoal() uint64 { + // Start by pulling out some values we'll need. Be careful about overflow. + var heapFree, heapAlloc, mappedReady uint64 + for { + heapFree = c.heapFree.load() // Free and unscavenged memory. + heapAlloc = c.totalAlloc.Load() - c.totalFree.Load() // Heap object bytes in use. + mappedReady = c.mappedReady.Load() // Total unreleased mapped memory. + if heapFree+heapAlloc <= mappedReady { + break + } + // It is impossible for total unreleased mapped memory to exceed heap memory, but + // because these stats are updated independently, we may observe a partial update + // including only some values. Thus, we appear to break the invariant. However, + // this condition is necessarily transient, so just try again. 
In the case of a
+		// persistent accounting error, we'll deadlock here.
+	}
+
+	// Below we compute a goal from memoryLimit. There are a few things to be aware of.
+	// Firstly, the memoryLimit does not easily compare to the heap goal: the former
+	// is total mapped memory by the runtime that hasn't been released, while the latter is
+	// only heap object memory. Intuitively, the way we convert from one to the other is to
+	// subtract everything from memoryLimit that both contributes to the memory limit (so,
+	// ignore scavenged memory) and doesn't contain heap objects. This doesn't line up
+	// perfectly with reality, but it's a good starting point.
+	//
+	// In practice this computation looks like the following:
+	//
+	//    goal := memoryLimit - ((mappedReady - heapFree - heapAlloc) + max(mappedReady - memoryLimit, 0))
+	//                          ^1                                     ^2
+	//    goal -= goal / 100 * memoryLimitHeapGoalHeadroomPercent
+	//    ^3
+	//
+	// Let's break this down.
+	//
+	// The first term (marker 1) is everything that contributes to the memory limit and isn't
+	// or couldn't become heap objects. It represents, broadly speaking, non-heap overheads.
+	// One oddity you may have noticed is that we also subtract out heapFree, i.e. unscavenged
+	// memory that may contain heap objects in the future.
+	//
+	// Let's take a step back. In an ideal world, this term would look something like just
+	// the heap goal. That is, we "reserve" enough space for the heap to grow to the heap
+	// goal, and subtract out everything else. This is of course impossible; the definition
+	// is circular! However, this impossible definition contains a key insight: the amount
+	// we're *going* to use matters just as much as whatever we're currently using.
+	//
+	// Consider if the heap shrinks to 1/10th its size, leaving behind lots of free and
+	// unscavenged memory. mappedReady - heapAlloc will be quite large, because of that free
+	// and unscavenged memory, pushing the goal down significantly.
+	//
+	// heapFree is also safe to exclude from the memory limit because in the steady-state, it's
+	// just a pool of memory for future heap allocations, and making new allocations from heapFree
+	// memory doesn't increase overall memory use. In transient states, the scavenger and the
+	// allocator actively manage the pool of heapFree memory to maintain the memory limit.
+	//
+	// The second term (marker 2) is the amount of memory we've exceeded the limit by, and is
+	// intended to help recover from such a situation. By pushing the heap goal down, we also
+	// push the trigger down, triggering and finishing a GC sooner in order to make room for
+	// other memory sources. Note that since we're effectively reducing the heap goal by X bytes,
+	// we're actually giving more than X bytes of headroom back, because the heap goal is in
+	// terms of heap objects, but it takes more than X bytes (e.g. due to fragmentation) to store
+	// X bytes worth of objects.
+	//
+	// The final adjustment (marker 3) reduces the maximum possible memory limit heap goal by
+	// memoryLimitHeapGoalHeadroomPercent. As the name implies, this is to provide additional
+	// headroom in the face of pacing inaccuracies, and also to leave a buffer of unscavenged
+	// memory so the allocator isn't constantly scavenging. The reduction amount also has a fixed
+	// minimum (memoryLimitMinHeapGoalHeadroom, not pictured) because the aforementioned pacing
+	// inaccuracies disproportionately affect small heaps: as heaps get smaller, the pacer's inputs get fuzzier.
+	// Shorter GC cycles and less GC work mean noisy external factors like the OS scheduler have a
+	// greater impact.
+
+	memoryLimit := uint64(c.memoryLimit.Load())
+
+	// Compute term 1.
+	nonHeapMemory := mappedReady - heapFree - heapAlloc
+
+	// Compute term 2.
+	var overage uint64
+	if mappedReady > memoryLimit {
+		overage = mappedReady - memoryLimit
+	}
+
+	if nonHeapMemory+overage >= memoryLimit {
+		// We're at a point where non-heap memory exceeds the memory limit on its own.
+		// There's honestly not much we can do here but just trigger GCs continuously
+		// and let the CPU limiter rein that in. Something has to give at this point.
+		// Set it to heapMarked, the lowest possible goal.
+		return c.heapMarked
+	}
+
+	// Compute the goal.
+	goal := memoryLimit - (nonHeapMemory + overage)
+
+	// Apply some headroom to the goal to account for pacing inaccuracies and to reduce
+	// the impact of scavenging at allocation time in response to a high allocation rate
+	// when GOGC=off. See issue #57069. Also, be careful about small limits.
+	headroom := goal / 100 * memoryLimitHeapGoalHeadroomPercent
+	if headroom < memoryLimitMinHeapGoalHeadroom {
+		// Set a fixed minimum to deal with the particularly large effect pacing inaccuracies
+		// have for smaller heaps.
+		headroom = memoryLimitMinHeapGoalHeadroom
+	}
+	if goal < headroom || goal-headroom < headroom {
+		goal = headroom
+	} else {
+		goal = goal - headroom
+	}
+	// Don't let us go below the live heap. A heap goal below the live heap doesn't make sense.
+	if goal < c.heapMarked {
+		goal = c.heapMarked
+	}
+	return goal
+}
+
+const (
+	// These constants determine the bounds on the GC trigger as a fraction
+	// of heap bytes allocated between the start of a GC (heapLive == heapMarked)
+	// and the end of a GC (heapLive == heapGoal).
+	//
+	// The constants are obscured in this way for efficiency. The denominator
+	// of the fraction is always a power-of-two for a quick division, so that
+	// the numerator is a single constant integer multiplication.
+	triggerRatioDen = 64
+
+	// The minimum trigger constant was chosen empirically: given a sufficiently
+	// fast/scalable allocator with 48 Ps that could drive the trigger ratio
+	// to <0.05, this constant causes applications to retain the same peak
+	// RSS compared to not having this allocator.
+	minTriggerRatioNum = 45 // ~0.7
+
+	// The maximum trigger constant is chosen somewhat arbitrarily, but the
+	// current constant has served us well over the years.
+	maxTriggerRatioNum = 61 // ~0.95
+)
+
+// trigger returns the current point at which a GC should trigger along with
+// the heap goal.
+//
+// The returned value may be compared against heapLive to determine whether
+// the GC should trigger. Thus, the GC trigger condition should be (but may
+// not be, in the case of small movements for efficiency) checked whenever
+// the heap goal may change.
+func (c *gcControllerState) trigger() (uint64, uint64) {
+	goal, minTrigger := c.heapGoalInternal()
+
+	// Invariant: the trigger must always be less than the heap goal.
+	//
+	// Note that the memory limit sets a hard maximum on our heap goal,
+	// but the live heap may grow beyond it.
+
+	if c.heapMarked >= goal {
+		// The goal should never be smaller than heapMarked, but let's be
+		// defensive about it. The only reasonable trigger here is one that
+		// causes a continuous GC cycle at heapMarked, but respect the goal
+		// if it came out as smaller than that.
+		return goal, goal
+	}
+
+	// Below this point, c.heapMarked < goal.
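+	//
+	// For example (illustrative numbers), with heapMarked = 64MiB and
+	// goal = 128MiB, the bounds computed below come out to
+	// triggerLowerBound = 64MiB + (64MiB/64)*45 = 109MiB, and the
+	// ratio-based max trigger is 64MiB + (64MiB/64)*61 = 125MiB, so the
+	// chosen trigger always leaves some runway before the goal.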
+
+	// heapMarked is our absolute minimum, and it's possible the trigger
+	// bound we get from heapGoalInternal is less than that.
+	if minTrigger < c.heapMarked {
+		minTrigger = c.heapMarked
+	}
+
+	// If we let the trigger go too low, then if the application
+	// is allocating very rapidly we might end up in a situation
+	// where we're allocating black during a nearly always-on GC.
+	// The result of this is a growing heap and ultimately an
+	// increase in RSS. By capping us at a point >0, we're essentially
+	// saying that we're OK using more CPU during the GC to prevent
+	// this growth in RSS.
+	triggerLowerBound := ((goal-c.heapMarked)/triggerRatioDen)*minTriggerRatioNum + c.heapMarked
+	if minTrigger < triggerLowerBound {
+		minTrigger = triggerLowerBound
+	}
+
+	// For small heaps, set the max trigger point at maxTriggerRatio of the way
+	// from the live heap to the heap goal. This ensures we always have *some*
+	// headroom when the GC actually starts. For larger heaps, set the max trigger
+	// point at the goal, minus the minimum heap size.
+	//
+	// This choice follows from the fact that the minimum heap size is chosen
+	// to reflect the costs of a GC with no work to do. With a large heap but
+	// very little scan work to perform, this gives us exactly as much runway
+	// as we would need, in the worst case.
+	maxTrigger := ((goal-c.heapMarked)/triggerRatioDen)*maxTriggerRatioNum + c.heapMarked
+	if goal > defaultHeapMinimum && goal-defaultHeapMinimum > maxTrigger {
+		maxTrigger = goal - defaultHeapMinimum
+	}
+	maxTrigger = max(maxTrigger, minTrigger)
+
+	// Compute the trigger from our bounds and the runway stored by commit.
+	var trigger uint64
+	runway := c.runway.Load()
+	if runway > goal {
+		trigger = minTrigger
+	} else {
+		trigger = goal - runway
+	}
+	trigger = max(trigger, minTrigger)
+	trigger = min(trigger, maxTrigger)
+	if trigger > goal {
+		print("trigger=", trigger, " heapGoal=", goal, "\n")
+		print("minTrigger=", minTrigger, " maxTrigger=", maxTrigger, "\n")
+		throw("produced a trigger greater than the heap goal")
+	}
+	return trigger, goal
+}
+
+// commit recomputes all pacing parameters needed to derive the
+// trigger and the heap goal. Namely, the gcPercent-based heap goal,
+// and the amount of runway we want to give the GC this cycle.
+//
+// This can be called any time. If GC is in the middle of a
+// concurrent phase, it will adjust the pacing of that phase.
+//
+// isSweepDone should be the result of calling isSweepDone(),
+// unless we're testing or we know we're executing during a GC cycle.
+//
+// This depends on gcPercent, gcController.heapMarked, and
+// gcController.heapLive. These must be up to date.
+//
+// Callers must call gcControllerState.revise after calling this
+// function if the GC is enabled.
+//
+// mheap_.lock must be held or the world must be stopped.
+func (c *gcControllerState) commit(isSweepDone bool) {
+	if !c.test {
+		assertWorldStoppedOrLockHeld(&mheap_.lock)
+	}
+
+	if isSweepDone {
+		// The sweep is done, so there aren't any restrictions on the trigger
+		// we need to think about.
+		c.sweepDistMinTrigger.Store(0)
+	} else {
+		// Concurrent sweep happens in the heap growth
+		// from gcController.heapLive to trigger. Make sure we
+		// give the sweeper some runway if it doesn't have enough.
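+		// (Holding the trigger at least sweepMinHeapDistance above the
+		// current heapLive guarantees the sweeper that much allocation
+		// headroom before the next cycle can start.)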
+		c.sweepDistMinTrigger.Store(c.heapLive.Load() + sweepMinHeapDistance)
+	}
+
+	// Compute the next GC goal, which is when the allocated heap
+	// has grown by GOGC/100 over where it started the last cycle,
+	// plus additional runway for non-heap sources of GC work.
+	gcPercentHeapGoal := ^uint64(0)
+	if gcPercent := c.gcPercent.Load(); gcPercent >= 0 {
+		gcPercentHeapGoal = c.heapMarked + (c.heapMarked+c.lastStackScan.Load()+c.globalsScan.Load())*uint64(gcPercent)/100
+	}
+	// Apply the minimum heap size here. It's defined in terms of gcPercent
+	// and is only updated by functions that call commit.
+	if gcPercentHeapGoal < c.heapMinimum {
+		gcPercentHeapGoal = c.heapMinimum
+	}
+	c.gcPercentHeapGoal.Store(gcPercentHeapGoal)
+
+	// Compute the amount of runway we want the GC to have by using our
+	// estimate of the cons/mark ratio.
+	//
+	// The idea is to take our expected scan work, and multiply it by
+	// the cons/mark ratio to determine how long it'll take to complete
+	// that scan work in terms of bytes allocated. This gives us our GC's
+	// runway.
+	//
+	// However, the cons/mark ratio is a ratio of rates per CPU-second, but
+	// here we care about the relative rates for some division of CPU
+	// resources among the mutator and the GC.
+	//
+	// To summarize, we have B / cpu-ns, and we want B / ns. We get that
+	// by multiplying by our desired division of CPU resources. We choose
+	// to express CPU resources as GOMAXPROCS*fraction. Note that because
+	// we're working with a ratio here, we can omit the number of CPU cores,
+	// because they'll appear in the numerator and denominator and cancel out.
+	// As a result, this is basically just "weighing" the cons/mark ratio by
+	// our desired division of resources.
+	//
+	// Furthermore, by setting the runway so that CPU resources are divided
+	// this way, assuming that the cons/mark ratio is correct, we make that
+	// division a reality.
+	c.runway.Store(uint64((c.consMark * (1 - gcGoalUtilization) / (gcGoalUtilization)) * float64(c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load())))
+}
+
+// setGCPercent updates gcPercent. commit must be called after.
+// Returns the old value of gcPercent.
+//
+// The world must be stopped, or mheap_.lock must be held.
+func (c *gcControllerState) setGCPercent(in int32) int32 {
+	if !c.test {
+		assertWorldStoppedOrLockHeld(&mheap_.lock)
+	}
+
+	out := c.gcPercent.Load()
+	if in < 0 {
+		in = -1
+	}
+	c.heapMinimum = defaultHeapMinimum * uint64(in) / 100
+	c.gcPercent.Store(in)
+
+	return out
+}
+
+//go:linkname setGCPercent runtime/debug.setGCPercent
+func setGCPercent(in int32) (out int32) {
+	// Run on the system stack since we grab the heap lock.
+	systemstack(func() {
+		lock(&mheap_.lock)
+		out = gcController.setGCPercent(in)
+		gcControllerCommit()
+		unlock(&mheap_.lock)
+	})
+
+	// If we just disabled GC, wait for any concurrent GC mark to
+	// finish so we always return with no GC running.
+	if in < 0 {
+		gcWaitOnMark(work.cycles.Load())
+	}
+
+	return out
+}
+
+func readGOGC() int32 {
+	p := gogetenv("GOGC")
+	if p == "off" {
+		return -1
+	}
+	if n, ok := atoi32(p); ok {
+		return n
+	}
+	return 100
+}
+
+// setMemoryLimit updates memoryLimit. commit must be called after.
+// Returns the old value of memoryLimit.
+//
+// The world must be stopped, or mheap_.lock must be held.
+func (c *gcControllerState) setMemoryLimit(in int64) int64 {
+	if !c.test {
+		assertWorldStoppedOrLockHeld(&mheap_.lock)
+	}
+
+	out := c.memoryLimit.Load()
+	if in >= 0 {
+		c.memoryLimit.Store(in)
+	}
+
+	return out
+}
+
+//go:linkname setMemoryLimit runtime/debug.setMemoryLimit
+func setMemoryLimit(in int64) (out int64) {
+	// Run on the system stack since we grab the heap lock.
+	systemstack(func() {
+		lock(&mheap_.lock)
+		out = gcController.setMemoryLimit(in)
+		if in < 0 || out == in {
+			// If we're just checking the value or not changing
+			// it, there's no point in doing the rest.
+			unlock(&mheap_.lock)
+			return
+		}
+		gcControllerCommit()
+		unlock(&mheap_.lock)
+	})
+	return out
+}
+
+func readGOMEMLIMIT() int64 {
+	p := gogetenv("GOMEMLIMIT")
+	if p == "" || p == "off" {
+		return maxInt64
+	}
+	n, ok := parseByteCount(p)
+	if !ok {
+		print("GOMEMLIMIT=", p, "\n")
+		throw("malformed GOMEMLIMIT; see `go doc runtime/debug.SetMemoryLimit`")
+	}
+	return n
+}
+
+// addIdleMarkWorker attempts to add a new idle mark worker.
+//
+// If this returns true, the caller must become an idle mark worker unless
+// there are no background mark worker goroutines in the pool. This case is
+// harmless because there are already background mark workers running.
+// If this returns false, the caller must NOT become an idle mark worker.
+//
+// nosplit because it may be called without a P.
+//
+//go:nosplit
+func (c *gcControllerState) addIdleMarkWorker() bool {
+	for {
+		old := c.idleMarkWorkers.Load()
+		n, max := int32(old&uint64(^uint32(0))), int32(old>>32)
+		if n >= max {
+			// See the comment on idleMarkWorkers for why
+			// n > max is tolerated.
+			return false
+		}
+		if n < 0 {
+			print("n=", n, " max=", max, "\n")
+			throw("negative idle mark workers")
+		}
+		new := uint64(uint32(n+1)) | (uint64(max) << 32)
+		if c.idleMarkWorkers.CompareAndSwap(old, new) {
+			return true
+		}
+	}
+}
+
+// needIdleMarkWorker is a hint as to whether another idle mark worker is needed.
+//
+// The caller must still call addIdleMarkWorker to become one. This is mainly
+// useful for a quick check before an expensive operation.
+//
+// nosplit because it may be called without a P.
+//
+//go:nosplit
+func (c *gcControllerState) needIdleMarkWorker() bool {
+	p := c.idleMarkWorkers.Load()
+	n, max := int32(p&uint64(^uint32(0))), int32(p>>32)
+	return n < max
+}
+
+// removeIdleMarkWorker must be called when an idle mark worker stops executing.
+func (c *gcControllerState) removeIdleMarkWorker() {
+	for {
+		old := c.idleMarkWorkers.Load()
+		n, max := int32(old&uint64(^uint32(0))), int32(old>>32)
+		if n-1 < 0 {
+			print("n=", n, " max=", max, "\n")
+			throw("negative idle mark workers")
+		}
+		new := uint64(uint32(n-1)) | (uint64(max) << 32)
+		if c.idleMarkWorkers.CompareAndSwap(old, new) {
+			return
+		}
+	}
+}
+
+// setMaxIdleMarkWorkers sets the maximum number of idle mark workers allowed.
+//
+// This method is optimistic in that it does not wait for the number of
+// idle mark workers to reduce to max before returning; it assumes the workers
+// will deschedule themselves.
+func (c *gcControllerState) setMaxIdleMarkWorkers(max int32) {
+	for {
+		old := c.idleMarkWorkers.Load()
+		n := int32(old & uint64(^uint32(0)))
+		if n < 0 {
+			print("n=", n, " max=", max, "\n")
+			throw("negative idle mark workers")
+		}
+		new := uint64(uint32(n)) | (uint64(max) << 32)
+		if c.idleMarkWorkers.CompareAndSwap(old, new) {
+			return
+		}
+	}
+}
+
+// gcControllerCommit is gcController.commit, but passes arguments from live
+// (non-test) data.
It also updates any consumers of the GC pacing, such as +// sweep pacing and the background scavenger. +// +// Calls gcController.commit. +// +// The heap lock must be held, so this must be executed on the system stack. +// +//go:systemstack +func gcControllerCommit() { + assertWorldStoppedOrLockHeld(&mheap_.lock) + + gcController.commit(isSweepDone()) + + // Update mark pacing. + if gcphase != _GCoff { + gcController.revise() + } + + // TODO(mknyszek): This isn't really accurate any longer because the heap + // goal is computed dynamically. Still useful to snapshot, but not as useful. + trace := traceAcquire() + if trace.ok() { + trace.HeapGoal() + traceRelease(trace) + } + + trigger, heapGoal := gcController.trigger() + gcPaceSweeper(trigger) + gcPaceScavenger(gcController.memoryLimit.Load(), heapGoal, gcController.lastHeapGoal) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mgcpacer_test.go b/platform/dbops/binaries/go/go/src/runtime/mgcpacer_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ef1483d629ea13345422f5a91524b456b65446d1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mgcpacer_test.go @@ -0,0 +1,1097 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "fmt" + "math" + "math/rand" + . "runtime" + "testing" + "time" +) + +func TestGcPacer(t *testing.T) { + t.Parallel() + + const initialHeapBytes = 256 << 10 + for _, e := range []*gcExecTest{ + { + // The most basic test case: a steady-state heap. + // Growth to an O(MiB) heap, then constant heap size, alloc/scan rates. + name: "Steady", + gcPercent: 100, + memoryLimit: math.MaxInt64, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: constant(33.0), + scanRate: constant(1024.0), + growthRate: constant(2.0).sum(ramp(-1.0, 12)), + scannableFrac: constant(1.0), + stackBytes: constant(8192), + length: 50, + checker: func(t *testing.T, c []gcCycleResult) { + n := len(c) + if n >= 25 { + // At this alloc/scan rate, the pacer should be extremely close to the goal utilization. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, 0.005) + + // Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005) + assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05) + } + }, + }, + { + // Same as the steady-state case, but lots of stacks to scan relative to the heap size. + name: "SteadyBigStacks", + gcPercent: 100, + memoryLimit: math.MaxInt64, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: constant(132.0), + scanRate: constant(1024.0), + growthRate: constant(2.0).sum(ramp(-1.0, 12)), + scannableFrac: constant(1.0), + stackBytes: constant(2048).sum(ramp(128<<20, 8)), + length: 50, + checker: func(t *testing.T, c []gcCycleResult) { + // Check the same conditions as the steady-state case, except the old pacer can't + // really handle this well, so don't check the goal ratio for it. + n := len(c) + if n >= 25 { + // For the pacer redesign, assert something even stronger: at this alloc/scan rate, + // it should be extremely close to the goal utilization. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, 0.005) + assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05) + + // Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles. 
+ assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005) + } + }, + }, + { + // Same as the steady-state case, but lots of globals to scan relative to the heap size. + name: "SteadyBigGlobals", + gcPercent: 100, + memoryLimit: math.MaxInt64, + globalsBytes: 128 << 20, + nCores: 8, + allocRate: constant(132.0), + scanRate: constant(1024.0), + growthRate: constant(2.0).sum(ramp(-1.0, 12)), + scannableFrac: constant(1.0), + stackBytes: constant(8192), + length: 50, + checker: func(t *testing.T, c []gcCycleResult) { + // Check the same conditions as the steady-state case, except the old pacer can't + // really handle this well, so don't check the goal ratio for it. + n := len(c) + if n >= 25 { + // For the pacer redesign, assert something even stronger: at this alloc/scan rate, + // it should be extremely close to the goal utilization. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, 0.005) + assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05) + + // Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005) + } + }, + }, + { + // This tests the GC pacer's response to a small change in allocation rate. + name: "StepAlloc", + gcPercent: 100, + memoryLimit: math.MaxInt64, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: constant(33.0).sum(ramp(66.0, 1).delay(50)), + scanRate: constant(1024.0), + growthRate: constant(2.0).sum(ramp(-1.0, 12)), + scannableFrac: constant(1.0), + stackBytes: constant(8192), + length: 100, + checker: func(t *testing.T, c []gcCycleResult) { + n := len(c) + if (n >= 25 && n < 50) || n >= 75 { + // Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles + // and then is able to settle again after a significant jump in allocation rate. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005) + assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05) + } + }, + }, + { + // This tests the GC pacer's response to a large change in allocation rate. + name: "HeavyStepAlloc", + gcPercent: 100, + memoryLimit: math.MaxInt64, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: constant(33).sum(ramp(330, 1).delay(50)), + scanRate: constant(1024.0), + growthRate: constant(2.0).sum(ramp(-1.0, 12)), + scannableFrac: constant(1.0), + stackBytes: constant(8192), + length: 100, + checker: func(t *testing.T, c []gcCycleResult) { + n := len(c) + if (n >= 25 && n < 50) || n >= 75 { + // Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles + // and then is able to settle again after a significant jump in allocation rate. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005) + assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05) + } + }, + }, + { + // This tests the GC pacer's response to a change in the fraction of the scannable heap. 
+			name:          "StepScannableFrac",
+			gcPercent:     100,
+			memoryLimit:   math.MaxInt64,
+			globalsBytes:  32 << 10,
+			nCores:        8,
+			allocRate:     constant(128.0),
+			scanRate:      constant(1024.0),
+			growthRate:    constant(2.0).sum(ramp(-1.0, 12)),
+			scannableFrac: constant(0.2).sum(unit(0.5).delay(50)),
+			stackBytes:    constant(8192),
+			length:        100,
+			checker: func(t *testing.T, c []gcCycleResult) {
+				n := len(c)
+				if (n >= 25 && n < 50) || n >= 75 {
+					// Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles
+					// and then is able to settle again after a significant jump in allocation rate.
+					assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005)
+					assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05)
+				}
+			},
+		},
+		{
+			// Tests the pacer for a high GOGC value with a large heap growth happening
+			// in the middle. The purpose of the large heap growth is to check whether GC
+			// utilization ends up sensitive to large, sudden changes in the live heap size.
+			name:          "HighGOGC",
+			gcPercent:     1500,
+			memoryLimit:   math.MaxInt64,
+			globalsBytes:  32 << 10,
+			nCores:        8,
+			allocRate:     random(7, 0x53).offset(165),
+			scanRate:      constant(1024.0),
+			growthRate:    constant(2.0).sum(ramp(-1.0, 12), random(0.01, 0x1), unit(14).delay(25)),
+			scannableFrac: constant(1.0),
+			stackBytes:    constant(8192),
+			length:        50,
+			checker: func(t *testing.T, c []gcCycleResult) {
+				n := len(c)
+				if n > 12 {
+					if n == 26 {
+						// In the 26th cycle there's a heap growth. Overshoot is expected to maintain
+						// a stable utilization, but we should *never* overshoot more than GOGC of
+						// the next cycle.
+						assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.90, 15)
+					} else {
+						// Give a wider goal range here. With such a high GOGC value we're going to be
+						// forced to undershoot.
+						//
+						// TODO(mknyszek): Instead of placing a 0.95 limit on the trigger, make the limit
+						// based on absolute bytes, that's based somewhat on how the minimum heap size
+						// is determined.
+						assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.90, 1.05)
+					}
+
+					// Ensure utilization remains stable despite a growth in live heap size
+					// at GC #25. This test fails prior to the GC pacer redesign.
+					//
+					// Because GOGC is so large, we should also be really close to the goal utilization.
+					assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, GCGoalUtilization+0.03)
+					assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.03)
+				}
+			},
+		},
+		{
+			// This test makes sure that in the face of a varying (in this case, oscillating) allocation
+			// rate, the pacer does a reasonably good job of staying abreast of the changes.
+			name:          "OscAlloc",
+			gcPercent:     100,
+			memoryLimit:   math.MaxInt64,
+			globalsBytes:  32 << 10,
+			nCores:        8,
+			allocRate:     oscillate(13, 0, 8).offset(67),
+			scanRate:      constant(1024.0),
+			growthRate:    constant(2.0).sum(ramp(-1.0, 12)),
+			scannableFrac: constant(1.0),
+			stackBytes:    constant(8192),
+			length:        50,
+			checker: func(t *testing.T, c []gcCycleResult) {
+				n := len(c)
+				if n > 12 {
+					// After the 12th GC, the heap will stop growing. Now, just make sure that:
+					// 1. Utilization isn't varying _too_ much, and
+					// 2. The pacer is mostly keeping up with the goal.
+					assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05)
+					assertInRange(t, "GC utilization", c[n-1].gcUtilization, 0.25, 0.3)
+				}
+			},
+		},
+		{
+			// This test is the same as OscAlloc, but instead of oscillating, the allocation rate is jittery.
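+			// Here, random(13, 0xf).offset(132) produces allocation rates uniformly
+			// distributed in roughly [119, 145] KiB / cpu-ms.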
+ name: "JitterAlloc", + gcPercent: 100, + memoryLimit: math.MaxInt64, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: random(13, 0xf).offset(132), + scanRate: constant(1024.0), + growthRate: constant(2.0).sum(ramp(-1.0, 12), random(0.01, 0xe)), + scannableFrac: constant(1.0), + stackBytes: constant(8192), + length: 50, + checker: func(t *testing.T, c []gcCycleResult) { + n := len(c) + if n > 12 { + // After the 12th GC, the heap will stop growing. Now, just make sure that: + // 1. Utilization isn't varying _too_ much, and + // 2. The pacer is mostly keeping up with the goal. + assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.025) + assertInRange(t, "GC utilization", c[n-1].gcUtilization, 0.25, 0.275) + } + }, + }, + { + // This test is the same as JitterAlloc, but with a much higher allocation rate. + // The jitter is proportionally the same. + name: "HeavyJitterAlloc", + gcPercent: 100, + memoryLimit: math.MaxInt64, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: random(33.0, 0x0).offset(330), + scanRate: constant(1024.0), + growthRate: constant(2.0).sum(ramp(-1.0, 12), random(0.01, 0x152)), + scannableFrac: constant(1.0), + stackBytes: constant(8192), + length: 50, + checker: func(t *testing.T, c []gcCycleResult) { + n := len(c) + if n > 13 { + // After the 12th GC, the heap will stop growing. Now, just make sure that: + // 1. Utilization isn't varying _too_ much, and + // 2. The pacer is mostly keeping up with the goal. + // We start at the 13th here because we want to use the 12th as a reference. + assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05) + // Unlike the other tests, GC utilization here will vary more and tend higher. + // Just make sure it's not going too crazy. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.05) + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[11].gcUtilization, 0.05) + } + }, + }, + { + // This test sets a slow allocation rate and a small heap (close to the minimum heap size) + // to try to minimize the difference between the trigger and the goal. + name: "SmallHeapSlowAlloc", + gcPercent: 100, + memoryLimit: math.MaxInt64, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: constant(1.0), + scanRate: constant(2048.0), + growthRate: constant(2.0).sum(ramp(-1.0, 3)), + scannableFrac: constant(0.01), + stackBytes: constant(8192), + length: 50, + checker: func(t *testing.T, c []gcCycleResult) { + n := len(c) + if n > 4 { + // After the 4th GC, the heap will stop growing. + // First, let's make sure we're finishing near the goal, with some extra + // room because we're probably going to be triggering early. + assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.925, 1.025) + // Next, let's make sure there's some minimum distance between the goal + // and the trigger. It should be proportional to the runway (hence the + // trigger ratio check, instead of a check against the runway). + assertInRange(t, "trigger ratio", c[n-1].triggerRatio(), 0.925, 0.975) + } + if n > 25 { + // Double-check that GC utilization looks OK. + + // At this alloc/scan rate, the pacer should be extremely close to the goal utilization. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, 0.005) + // Make sure GC utilization has mostly levelled off. 
+ assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.05) + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[11].gcUtilization, 0.05) + } + }, + }, + { + // This test sets a slow allocation rate and a medium heap (around 10x the min heap size) + // to try to minimize the difference between the trigger and the goal. + name: "MediumHeapSlowAlloc", + gcPercent: 100, + memoryLimit: math.MaxInt64, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: constant(1.0), + scanRate: constant(2048.0), + growthRate: constant(2.0).sum(ramp(-1.0, 8)), + scannableFrac: constant(0.01), + stackBytes: constant(8192), + length: 50, + checker: func(t *testing.T, c []gcCycleResult) { + n := len(c) + if n > 9 { + // After the 4th GC, the heap will stop growing. + // First, let's make sure we're finishing near the goal, with some extra + // room because we're probably going to be triggering early. + assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.925, 1.025) + // Next, let's make sure there's some minimum distance between the goal + // and the trigger. It should be proportional to the runway (hence the + // trigger ratio check, instead of a check against the runway). + assertInRange(t, "trigger ratio", c[n-1].triggerRatio(), 0.925, 0.975) + } + if n > 25 { + // Double-check that GC utilization looks OK. + + // At this alloc/scan rate, the pacer should be extremely close to the goal utilization. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, 0.005) + // Make sure GC utilization has mostly levelled off. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.05) + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[11].gcUtilization, 0.05) + } + }, + }, + { + // This test sets a slow allocation rate and a large heap to try to minimize the + // difference between the trigger and the goal. + name: "LargeHeapSlowAlloc", + gcPercent: 100, + memoryLimit: math.MaxInt64, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: constant(1.0), + scanRate: constant(2048.0), + growthRate: constant(4.0).sum(ramp(-3.0, 12)), + scannableFrac: constant(0.01), + stackBytes: constant(8192), + length: 50, + checker: func(t *testing.T, c []gcCycleResult) { + n := len(c) + if n > 13 { + // After the 4th GC, the heap will stop growing. + // First, let's make sure we're finishing near the goal. + assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05) + // Next, let's make sure there's some minimum distance between the goal + // and the trigger. It should be around the default minimum heap size. + assertInRange(t, "runway", c[n-1].runway(), DefaultHeapMinimum-64<<10, DefaultHeapMinimum+64<<10) + } + if n > 25 { + // Double-check that GC utilization looks OK. + + // At this alloc/scan rate, the pacer should be extremely close to the goal utilization. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, 0.005) + // Make sure GC utilization has mostly levelled off. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.05) + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[11].gcUtilization, 0.05) + } + }, + }, + { + // The most basic test case with a memory limit: a steady-state heap. + // Growth to an O(MiB) heap, then constant heap size, alloc/scan rates. + // Provide a lot of room for the limit. Essentially, this should behave just like + // the "Steady" test. 
Note that we don't simulate non-heap overheads, so the + // memory limit and the heap limit are identical. + name: "SteadyMemoryLimit", + gcPercent: 100, + memoryLimit: 512 << 20, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: constant(33.0), + scanRate: constant(1024.0), + growthRate: constant(2.0).sum(ramp(-1.0, 12)), + scannableFrac: constant(1.0), + stackBytes: constant(8192), + length: 50, + checker: func(t *testing.T, c []gcCycleResult) { + n := len(c) + if peak := c[n-1].heapPeak; peak >= applyMemoryLimitHeapGoalHeadroom(512<<20) { + t.Errorf("peak heap size reaches heap limit: %d", peak) + } + if n >= 25 { + // At this alloc/scan rate, the pacer should be extremely close to the goal utilization. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, 0.005) + + // Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005) + assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05) + } + }, + }, + { + // This is the same as the previous test, but gcPercent = -1, so the heap *should* grow + // all the way to the peak. + name: "SteadyMemoryLimitNoGCPercent", + gcPercent: -1, + memoryLimit: 512 << 20, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: constant(33.0), + scanRate: constant(1024.0), + growthRate: constant(2.0).sum(ramp(-1.0, 12)), + scannableFrac: constant(1.0), + stackBytes: constant(8192), + length: 50, + checker: func(t *testing.T, c []gcCycleResult) { + n := len(c) + if goal := c[n-1].heapGoal; goal != applyMemoryLimitHeapGoalHeadroom(512<<20) { + t.Errorf("heap goal is not the heap limit: %d", goal) + } + if n >= 25 { + // At this alloc/scan rate, the pacer should be extremely close to the goal utilization. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, 0.005) + + // Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005) + assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05) + } + }, + }, + { + // This test ensures that the pacer doesn't fall over even when the live heap exceeds + // the memory limit. It also makes sure GC utilization actually rises to push back. + name: "ExceedMemoryLimit", + gcPercent: 100, + memoryLimit: 512 << 20, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: constant(33.0), + scanRate: constant(1024.0), + growthRate: constant(3.5).sum(ramp(-2.5, 12)), + scannableFrac: constant(1.0), + stackBytes: constant(8192), + length: 50, + checker: func(t *testing.T, c []gcCycleResult) { + n := len(c) + if n > 12 { + // We're way over the memory limit, so we want to make sure our goal is set + // as low as it possibly can be. + if goal, live := c[n-1].heapGoal, c[n-1].heapLive; goal != live { + t.Errorf("heap goal is not equal to live heap: %d != %d", goal, live) + } + } + if n >= 25 { + // Due to memory pressure, we should scale to 100% GC CPU utilization. + // Note that in practice this won't actually happen because of the CPU limiter, + // but it's not the pacer's job to limit CPU usage. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, 1.0, 0.005) + + // Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles. + // In this case, that just means it's not wavering around a whole bunch. 
+ assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005) + } + }, + }, + { + // Same as the previous test, but with gcPercent = -1. + name: "ExceedMemoryLimitNoGCPercent", + gcPercent: -1, + memoryLimit: 512 << 20, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: constant(33.0), + scanRate: constant(1024.0), + growthRate: constant(3.5).sum(ramp(-2.5, 12)), + scannableFrac: constant(1.0), + stackBytes: constant(8192), + length: 50, + checker: func(t *testing.T, c []gcCycleResult) { + n := len(c) + if n < 10 { + if goal := c[n-1].heapGoal; goal != applyMemoryLimitHeapGoalHeadroom(512<<20) { + t.Errorf("heap goal is not the heap limit: %d", goal) + } + } + if n > 12 { + // We're way over the memory limit, so we want to make sure our goal is set + // as low as it possibly can be. + if goal, live := c[n-1].heapGoal, c[n-1].heapLive; goal != live { + t.Errorf("heap goal is not equal to live heap: %d != %d", goal, live) + } + } + if n >= 25 { + // Due to memory pressure, we should scale to 100% GC CPU utilization. + // Note that in practice this won't actually happen because of the CPU limiter, + // but it's not the pacer's job to limit CPU usage. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, 1.0, 0.005) + + // Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles. + // In this case, that just means it's not wavering around a whole bunch. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005) + } + }, + }, + { + // This test ensures that the pacer maintains the memory limit as the heap grows. + name: "MaintainMemoryLimit", + gcPercent: 100, + memoryLimit: 512 << 20, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: constant(33.0), + scanRate: constant(1024.0), + growthRate: constant(3.0).sum(ramp(-2.0, 12)), + scannableFrac: constant(1.0), + stackBytes: constant(8192), + length: 50, + checker: func(t *testing.T, c []gcCycleResult) { + n := len(c) + if n > 12 { + // We're trying to saturate the memory limit. + if goal := c[n-1].heapGoal; goal != applyMemoryLimitHeapGoalHeadroom(512<<20) { + t.Errorf("heap goal is not the heap limit: %d", goal) + } + } + if n >= 25 { + // At this alloc/scan rate, the pacer should be extremely close to the goal utilization, + // even with the additional memory pressure. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, 0.005) + + // Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles and + // that it's meeting its goal. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005) + assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05) + } + }, + }, + { + // Same as the previous test, but with gcPercent = -1. + name: "MaintainMemoryLimitNoGCPercent", + gcPercent: -1, + memoryLimit: 512 << 20, + globalsBytes: 32 << 10, + nCores: 8, + allocRate: constant(33.0), + scanRate: constant(1024.0), + growthRate: constant(3.0).sum(ramp(-2.0, 12)), + scannableFrac: constant(1.0), + stackBytes: constant(8192), + length: 50, + checker: func(t *testing.T, c []gcCycleResult) { + n := len(c) + if goal := c[n-1].heapGoal; goal != applyMemoryLimitHeapGoalHeadroom(512<<20) { + t.Errorf("heap goal is not the heap limit: %d", goal) + } + if n >= 25 { + // At this alloc/scan rate, the pacer should be extremely close to the goal utilization, + // even with the additional memory pressure. 
+ assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, 0.005) + + // Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles and + // that it's meeting its goal. + assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005) + assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05) + } + }, + }, + // TODO(mknyszek): Write a test that exercises the pacer's hard goal. + // This is difficult in the idealized model this testing framework places + // the pacer in, because the calculated overshoot is directly proportional + // to the runway for the case of the expected work. + // However, it is still possible to trigger this case if something exceptional + // happens between calls to revise; the framework just doesn't support this yet. + } { + e := e + t.Run(e.name, func(t *testing.T) { + t.Parallel() + + c := NewGCController(e.gcPercent, e.memoryLimit) + var bytesAllocatedBlackLast int64 + results := make([]gcCycleResult, 0, e.length) + for i := 0; i < e.length; i++ { + cycle := e.next() + c.StartCycle(cycle.stackBytes, e.globalsBytes, cycle.scannableFrac, e.nCores) + + // Update pacer incrementally as we complete scan work. + const ( + revisePeriod = 500 * time.Microsecond + rateConv = 1024 * float64(revisePeriod) / float64(time.Millisecond) + ) + var nextHeapMarked int64 + if i == 0 { + nextHeapMarked = initialHeapBytes + } else { + nextHeapMarked = int64(float64(int64(c.HeapMarked())-bytesAllocatedBlackLast) * cycle.growthRate) + } + globalsScanWorkLeft := int64(e.globalsBytes) + stackScanWorkLeft := int64(cycle.stackBytes) + heapScanWorkLeft := int64(float64(nextHeapMarked) * cycle.scannableFrac) + doWork := func(work int64) (int64, int64, int64) { + var deltas [3]int64 + + // Do globals work first, then stacks, then heap. + for i, workLeft := range []*int64{&globalsScanWorkLeft, &stackScanWorkLeft, &heapScanWorkLeft} { + if *workLeft == 0 { + continue + } + if *workLeft > work { + deltas[i] += work + *workLeft -= work + work = 0 + break + } else { + deltas[i] += *workLeft + work -= *workLeft + *workLeft = 0 + } + } + return deltas[0], deltas[1], deltas[2] + } + var ( + gcDuration int64 + assistTime int64 + bytesAllocatedBlack int64 + ) + for heapScanWorkLeft+stackScanWorkLeft+globalsScanWorkLeft > 0 { + // Simulate GC assist pacing. + // + // Note that this is an idealized view of the GC assist pacing + // mechanism. + + // From the assist ratio and the alloc and scan rates, we can idealize what + // the GC CPU utilization looks like. + // + // We start with assistRatio = (bytes of scan work) / (bytes of runway) (by definition). + // + // Over revisePeriod, we can also calculate how many bytes are scanned and + // allocated, given some GC CPU utilization u: + // + // bytesScanned = scanRate * rateConv * nCores * u + // bytesAllocated = allocRate * rateConv * nCores * (1 - u) + // + // During revisePeriod, assistRatio is kept constant, and GC assists kick in to + // maintain it. Specifically, they act to prevent too many bytes being allocated + // compared to how many bytes are scanned. 
It directly defines the ratio of + // bytesScanned to bytesAllocated over this period, hence: + // + // assistRatio = bytesScanned / bytesAllocated + // + // From this, we can solve for utilization, because everything else has already + // been determined: + // + // assistRatio = (scanRate * rateConv * nCores * u) / (allocRate * rateConv * nCores * (1 - u)) + // assistRatio = (scanRate * u) / (allocRate * (1 - u)) + // assistRatio * allocRate * (1-u) = scanRate * u + // assistRatio * allocRate - assistRatio * allocRate * u = scanRate * u + // assistRatio * allocRate = assistRatio * allocRate * u + scanRate * u + // assistRatio * allocRate = (assistRatio * allocRate + scanRate) * u + // u = (assistRatio * allocRate) / (assistRatio * allocRate + scanRate) + // + // Note that this may give a utilization that is _less_ than GCBackgroundUtilization, + // which isn't possible in practice because of dedicated workers. Thus, this case + // must be interpreted as GC assists not kicking in at all, and just round up. All + // downstream values will then have this accounted for. + assistRatio := c.AssistWorkPerByte() + utilization := assistRatio * cycle.allocRate / (assistRatio*cycle.allocRate + cycle.scanRate) + if utilization < GCBackgroundUtilization { + utilization = GCBackgroundUtilization + } + + // Knowing the utilization, calculate bytesScanned and bytesAllocated. + bytesScanned := int64(cycle.scanRate * rateConv * float64(e.nCores) * utilization) + bytesAllocated := int64(cycle.allocRate * rateConv * float64(e.nCores) * (1 - utilization)) + + // Subtract work from our model. + globalsScanned, stackScanned, heapScanned := doWork(bytesScanned) + + // doWork may not use all of bytesScanned. + // In this case, the GC actually ends sometime in this period. + // Let's figure out when, exactly, and adjust bytesAllocated too. + actualElapsed := revisePeriod + actualAllocated := bytesAllocated + if actualScanned := globalsScanned + stackScanned + heapScanned; actualScanned < bytesScanned { + // actualScanned = scanRate * rateConv * (t / revisePeriod) * nCores * u + // => t = actualScanned * revisePeriod / (scanRate * rateConv * nCores * u) + actualElapsed = time.Duration(float64(actualScanned) * float64(revisePeriod) / (cycle.scanRate * rateConv * float64(e.nCores) * utilization)) + actualAllocated = int64(cycle.allocRate * rateConv * float64(actualElapsed) / float64(revisePeriod) * float64(e.nCores) * (1 - utilization)) + } + + // Ask the pacer to revise. + c.Revise(GCControllerReviseDelta{ + HeapLive: actualAllocated, + HeapScan: int64(float64(actualAllocated) * cycle.scannableFrac), + HeapScanWork: heapScanned, + StackScanWork: stackScanned, + GlobalsScanWork: globalsScanned, + }) + + // Accumulate variables. + assistTime += int64(float64(actualElapsed) * float64(e.nCores) * (utilization - GCBackgroundUtilization)) + gcDuration += int64(actualElapsed) + bytesAllocatedBlack += actualAllocated + } + + // Put together the results, log them, and concatenate them. + result := gcCycleResult{ + cycle: i + 1, + heapLive: c.HeapMarked(), + heapScannable: int64(float64(int64(c.HeapMarked())-bytesAllocatedBlackLast) * cycle.scannableFrac), + heapTrigger: c.Triggered(), + heapPeak: c.HeapLive(), + heapGoal: c.HeapGoal(), + gcUtilization: float64(assistTime)/(float64(gcDuration)*float64(e.nCores)) + GCBackgroundUtilization, + } + t.Log("GC", result.String()) + results = append(results, result) + + // Run the checker for this test. 
+ e.check(t, results) + + c.EndCycle(uint64(nextHeapMarked+bytesAllocatedBlack), assistTime, gcDuration, e.nCores) + + bytesAllocatedBlackLast = bytesAllocatedBlack + } + }) + } +} + +type gcExecTest struct { + name string + + gcPercent int + memoryLimit int64 + globalsBytes uint64 + nCores int + + allocRate float64Stream // > 0, KiB / cpu-ms + scanRate float64Stream // > 0, KiB / cpu-ms + growthRate float64Stream // > 0 + scannableFrac float64Stream // Clamped to [0, 1] + stackBytes float64Stream // Multiple of 2048. + length int + + checker func(*testing.T, []gcCycleResult) +} + +// minRate is an arbitrary minimum for allocRate, scanRate, and growthRate. +// These values just cannot be zero. +const minRate = 0.0001 + +func (e *gcExecTest) next() gcCycle { + return gcCycle{ + allocRate: e.allocRate.min(minRate)(), + scanRate: e.scanRate.min(minRate)(), + growthRate: e.growthRate.min(minRate)(), + scannableFrac: e.scannableFrac.limit(0, 1)(), + stackBytes: uint64(e.stackBytes.quantize(2048).min(0)()), + } +} + +func (e *gcExecTest) check(t *testing.T, results []gcCycleResult) { + t.Helper() + + // Do some basic general checks first. + n := len(results) + switch n { + case 0: + t.Fatal("no results passed to check") + return + case 1: + if results[0].cycle != 1 { + t.Error("first cycle has incorrect number") + } + default: + if results[n-1].cycle != results[n-2].cycle+1 { + t.Error("cycle numbers out of order") + } + } + if u := results[n-1].gcUtilization; u < 0 || u > 1 { + t.Fatal("GC utilization not within acceptable bounds") + } + if s := results[n-1].heapScannable; s < 0 { + t.Fatal("heapScannable is negative") + } + if e.checker == nil { + t.Fatal("test-specific checker is missing") + } + + // Run the test-specific checker. + e.checker(t, results) +} + +type gcCycle struct { + allocRate float64 + scanRate float64 + growthRate float64 + scannableFrac float64 + stackBytes uint64 +} + +type gcCycleResult struct { + cycle int + + // These come directly from the pacer, so uint64. + heapLive uint64 + heapTrigger uint64 + heapGoal uint64 + heapPeak uint64 + + // These are produced by the simulation, so int64 and + // float64 are more appropriate, so that we can check for + // bad states in the simulation. + heapScannable int64 + gcUtilization float64 +} + +func (r *gcCycleResult) goalRatio() float64 { + return float64(r.heapPeak) / float64(r.heapGoal) +} + +func (r *gcCycleResult) runway() float64 { + return float64(r.heapGoal - r.heapTrigger) +} + +func (r *gcCycleResult) triggerRatio() float64 { + return float64(r.heapTrigger-r.heapLive) / float64(r.heapGoal-r.heapLive) +} + +func (r *gcCycleResult) String() string { + return fmt.Sprintf("%d %2.1f%% %d->%d->%d (goal: %d)", r.cycle, r.gcUtilization*100, r.heapLive, r.heapTrigger, r.heapPeak, r.heapGoal) +} + +func assertInEpsilon(t *testing.T, name string, a, b, epsilon float64) { + t.Helper() + assertInRange(t, name, a, b-epsilon, b+epsilon) +} + +func assertInRange(t *testing.T, name string, a, min, max float64) { + t.Helper() + if a < min || a > max { + t.Errorf("%s not in range (%f, %f): %f", name, min, max, a) + } +} + +// float64Stream is a function that generates an infinite stream of +// float64 values when called repeatedly. +type float64Stream func() float64 + +// constant returns a stream that generates the value c. +func constant(c float64) float64Stream { + return func() float64 { + return c + } +} + +// unit returns a stream that generates a single peak with +// amplitude amp, followed by zeroes. 
+// +// In another manner of speaking, this is the Kronecker delta. +func unit(amp float64) float64Stream { + dropped := false + return func() float64 { + if dropped { + return 0 + } + dropped = true + return amp + } +} + +// oscillate returns a stream that oscillates sinusoidally +// with the given amplitude, phase, and period. +func oscillate(amp, phase float64, period int) float64Stream { + var cycle int + return func() float64 { + p := float64(cycle)/float64(period)*2*math.Pi + phase + cycle++ + if cycle == period { + cycle = 0 + } + return math.Sin(p) * amp + } +} + +// ramp returns a stream that moves from zero to height +// over the course of length steps. +func ramp(height float64, length int) float64Stream { + var cycle int + return func() float64 { + h := height * float64(cycle) / float64(length) + if cycle < length { + cycle++ + } + return h + } +} + +// random returns a stream that generates random numbers +// between -amp and amp. +func random(amp float64, seed int64) float64Stream { + r := rand.New(rand.NewSource(seed)) + return func() float64 { + return ((r.Float64() - 0.5) * 2) * amp + } +} + +// delay returns a new stream which is a buffered version +// of f: it returns zero for cycles steps, followed by f. +func (f float64Stream) delay(cycles int) float64Stream { + zeroes := 0 + return func() float64 { + if zeroes < cycles { + zeroes++ + return 0 + } + return f() + } +} + +// scale returns a new stream that is f, but attenuated by a +// constant factor. +func (f float64Stream) scale(amt float64) float64Stream { + return func() float64 { + return f() * amt + } +} + +// offset returns a new stream that is f but offset by amt +// at each step. +func (f float64Stream) offset(amt float64) float64Stream { + return func() float64 { + old := f() + return old + amt + } +} + +// sum returns a new stream that is the sum of all input streams +// at each step. +func (f float64Stream) sum(fs ...float64Stream) float64Stream { + return func() float64 { + sum := f() + for _, s := range fs { + sum += s() + } + return sum + } +} + +// quantize returns a new stream that rounds f to a multiple +// of mult at each step. +func (f float64Stream) quantize(mult float64) float64Stream { + return func() float64 { + r := f() / mult + if r < 0 { + return math.Ceil(r) * mult + } + return math.Floor(r) * mult + } +} + +// min returns a new stream that replaces all values produced +// by f lower than min with min. +func (f float64Stream) min(min float64) float64Stream { + return func() float64 { + return math.Max(min, f()) + } +} + +// max returns a new stream that replaces all values produced +// by f higher than max with max. +func (f float64Stream) max(max float64) float64Stream { + return func() float64 { + return math.Min(max, f()) + } +} + +// limit returns a new stream that replaces all values produced +// by f lower than min with min and higher than max with max. 
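+// Values already within [min, max] pass through unchanged.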
+func (f float64Stream) limit(min, max float64) float64Stream { + return func() float64 { + v := f() + if v < min { + v = min + } else if v > max { + v = max + } + return v + } +} + +func applyMemoryLimitHeapGoalHeadroom(goal uint64) uint64 { + headroom := goal / 100 * MemoryLimitHeapGoalHeadroomPercent + if headroom < MemoryLimitMinHeapGoalHeadroom { + headroom = MemoryLimitMinHeapGoalHeadroom + } + if goal < headroom || goal-headroom < headroom { + goal = headroom + } else { + goal -= headroom + } + return goal +} + +func TestIdleMarkWorkerCount(t *testing.T) { + const workers = 10 + c := NewGCController(100, math.MaxInt64) + c.SetMaxIdleMarkWorkers(workers) + for i := 0; i < workers; i++ { + if !c.NeedIdleMarkWorker() { + t.Fatalf("expected to need idle mark workers: i=%d", i) + } + if !c.AddIdleMarkWorker() { + t.Fatalf("expected to be able to add an idle mark worker: i=%d", i) + } + } + if c.NeedIdleMarkWorker() { + t.Fatalf("expected to not need idle mark workers") + } + if c.AddIdleMarkWorker() { + t.Fatalf("expected to not be able to add an idle mark worker") + } + for i := 0; i < workers; i++ { + c.RemoveIdleMarkWorker() + if !c.NeedIdleMarkWorker() { + t.Fatalf("expected to need idle mark workers after removal: i=%d", i) + } + } + for i := 0; i < workers-1; i++ { + if !c.AddIdleMarkWorker() { + t.Fatalf("expected to be able to add idle mark workers after adding again: i=%d", i) + } + } + for i := 0; i < 10; i++ { + if !c.AddIdleMarkWorker() { + t.Fatalf("expected to be able to add idle mark workers interleaved: i=%d", i) + } + if c.AddIdleMarkWorker() { + t.Fatalf("expected to not be able to add idle mark workers interleaved: i=%d", i) + } + c.RemoveIdleMarkWorker() + } + // Support the max being below the count. + c.SetMaxIdleMarkWorkers(0) + if c.NeedIdleMarkWorker() { + t.Fatalf("expected to not need idle mark workers after capacity set to 0") + } + if c.AddIdleMarkWorker() { + t.Fatalf("expected to not be able to add idle mark workers after capacity set to 0") + } + for i := 0; i < workers-1; i++ { + c.RemoveIdleMarkWorker() + } + if c.NeedIdleMarkWorker() { + t.Fatalf("expected to not need idle mark workers after capacity set to 0") + } + if c.AddIdleMarkWorker() { + t.Fatalf("expected to not be able to add idle mark workers after capacity set to 0") + } + c.SetMaxIdleMarkWorkers(1) + if !c.NeedIdleMarkWorker() { + t.Fatalf("expected to need idle mark workers after capacity set to 1") + } + if !c.AddIdleMarkWorker() { + t.Fatalf("expected to be able to add idle mark workers after capacity set to 1") + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mgcscavenge.go b/platform/dbops/binaries/go/go/src/runtime/mgcscavenge.go new file mode 100644 index 0000000000000000000000000000000000000000..86c2103f186887e67b2ed30bd13f582ca95354ab --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mgcscavenge.go @@ -0,0 +1,1416 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Scavenging free pages. +// +// This file implements scavenging (the release of physical pages backing mapped +// memory) of free and unused pages in the heap as a way to deal with page-level +// fragmentation and reduce the RSS of Go applications. +// +// Scavenging in Go happens on two fronts: there's the background +// (asynchronous) scavenger and the allocation-time (synchronous) scavenger. 
+//
+// The former happens on a goroutine much like the background sweeper which is
+// soft-capped at using scavengePercent of the mutator's time, based on
+// order-of-magnitude estimates of the costs of scavenging. The latter happens
+// when allocating pages from the heap.
+//
+// The scavenger's primary goal is to bring the estimated heap RSS of the
+// application down to a goal.
+//
+// Before we consider what this looks like, we need to split the world into two
+// halves. One in which a memory limit is not set, and one in which it is.
+//
+// For the former, the goal is defined as:
+//	(retainExtraPercent+100) / 100 * (heapGoal / lastHeapGoal) * lastHeapInUse
+//
+// Essentially, we wish to have the application's RSS track the heap goal, but
+// the heap goal is defined in terms of bytes of objects, rather than pages like
+// RSS. As a result, we need to account for fragmentation internal to spans.
+// heapGoal / lastHeapGoal defines the ratio between the current heap goal and
+// the last heap goal, which tells us by how much the heap is growing or
+// shrinking. We estimate what the heap will grow to in terms of pages by taking
+// this ratio and multiplying it by heapInUse at the end of the last GC, which
+// allows us to account for this additional fragmentation. Note that this
+// procedure makes the assumption that the degree of fragmentation won't change
+// dramatically over the next GC cycle. Overestimating the amount of
+// fragmentation simply results in higher memory use, which will be accounted
+// for by the next pacing update. Underestimating the fragmentation, however,
+// could lead to performance degradation. Handling this case is not within the
+// scope of the scavenger. Situations where the amount of fragmentation balloons
+// over the course of a single GC cycle should be considered pathologies,
+// flagged as bugs, and fixed appropriately.
+//
+// An additional factor of retainExtraPercent is added as a buffer to help ensure
+// that there's more unscavenged memory to allocate out of, since each allocation
+// out of scavenged memory incurs a potentially expensive page fault.
+//
+// If a memory limit is set, then we wish to pick a scavenge goal that maintains
+// that memory limit. For that, we look at total memory that has been committed
+// (memstats.mappedReady) and try to bring that down below the limit. In this case,
+// we want to give buffer space in the *opposite* direction. When the application
+// is close to the limit, we want to make sure we push harder to keep it under, so
+// if we target below the memory limit, we ensure that the background scavenger is
+// giving the situation the urgency it deserves.
+//
+// In this case, the goal is defined as:
+//	(100-reduceExtraPercent) / 100 * memoryLimit
+//
+// We compute both of these goals, and check whether either of them have been met.
+// The background scavenger continues operating as long as either one of the goals
+// has not been met.
+//
+// The goals are updated after each GC.
+//
+// Synchronous scavenging happens for one of two reasons: if an allocation would
+// exceed the memory limit or whenever the heap grows in size, for some
+// definition of heap-growth. The intuition behind this second reason is that the
+// application had to grow the heap because existing fragments were not sufficiently
+// large to satisfy a page-level memory allocation, so we scavenge those fragments
+// eagerly to offset the growth in RSS that results.
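+//
+// As a rough worked example of the gcPercent goal above (illustrative numbers
+// only): with lastHeapInUse = 100 MiB, heapGoal/lastHeapGoal = 1.2, and
+// retainExtraPercent = 10, the scavenger would aim to retain about
+// (10+100)/100 * 1.2 * 100 MiB = 132 MiB of heap memory.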
+//
+// Lastly, not all pages are available for scavenging at all times and in all cases.
+// The background scavenger and heap-growth scavenger only release memory in chunks
+// that have not been densely-allocated for at least 1 full GC cycle. The reason
+// behind this is the likelihood of reuse: the Go heap is allocated in a first-fit order
+// and by the end of the GC mark phase, the heap tends to be densely packed. Releasing
+// memory in these densely packed chunks while they're being packed is counter-productive,
+// and worse, it breaks up huge pages on systems that support them. The scavenger (invoked
+// during memory allocation) further ensures that chunks it identifies as "dense" are
+// immediately eligible for being backed by huge pages. Note that for the most part these
+// density heuristics are best-effort heuristics. It's totally possible (but unlikely)
+// that a chunk that just became dense is scavenged in the case of a race between memory
+// allocation and scavenging.
+//
+// When synchronously scavenging for the memory limit or for debug.FreeOSMemory, these
+// "dense" packing heuristics are ignored (in other words, scavenging is "forced") because
+// in these scenarios returning memory to the OS is more important than keeping CPU
+// overheads low.
+
+package runtime
+
+import (
+	"internal/goos"
+	"runtime/internal/atomic"
+	"runtime/internal/sys"
+	"unsafe"
+)
+
+const (
+	// The background scavenger is paced according to these parameters.
+	//
+	// scavengePercent represents the portion of mutator time we're willing
+	// to spend on scavenging in percent.
+	scavengePercent = 1 // 1%
+
+	// retainExtraPercent represents the amount of memory over the heap goal
+	// that the scavenger should keep as a buffer space for the allocator.
+	// This constant is used when we do not have a memory limit set.
+	//
+	// The purpose of maintaining this overhead is to have a greater pool of
+	// unscavenged memory available for allocation (since using scavenged memory
+	// incurs an additional cost), to account for heap fragmentation and
+	// the ever-changing layout of the heap.
+	retainExtraPercent = 10
+
+	// reduceExtraPercent represents the amount of memory under the limit
+	// that the scavenger should target. For example, 5 means we target 95%
+	// of the limit.
+	//
+	// The purpose of shooting lower than the limit is to ensure that, once
+	// close to the limit, the scavenger is working hard to maintain it. If
+	// we have a memory limit set but are far away from it, there's no harm
+	// in leaving up to 100-retainExtraPercent live, and it's more efficient
+	// anyway, for the same reasons that retainExtraPercent exists.
+	reduceExtraPercent = 5
+
+	// maxPagesPerPhysPage is the maximum number of supported runtime pages per
+	// physical page, based on maxPhysPageSize.
+	maxPagesPerPhysPage = maxPhysPageSize / pageSize
+
+	// scavengeCostRatio is the approximate ratio between the costs of using previously
+	// scavenged memory and scavenging memory.
+	//
+	// For most systems the cost of scavenging greatly outweighs the costs
+	// associated with using scavenged memory, making this constant 0. On other systems
+	// (especially ones where "sysUsed" is not just a no-op) this cost is non-trivial.
+	//
+	// This ratio is used as part of a multiplicative factor to help the scavenger account
+	// for the additional costs of using scavenged memory in its pacing.
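+	//
+	// (goos.IsDarwin and goos.IsIos are each 0 or 1, so this works out to 0.7
+	// on Darwin and iOS and to 0 everywhere else.)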
+	scavengeCostRatio = 0.7 * (goos.IsDarwin + goos.IsIos)
+
+	// scavChunkHiOccFrac indicates the fraction of pages that need to be allocated
+	// in the chunk in a single GC cycle for it to be considered high density.
+	scavChunkHiOccFrac  = 0.96875
+	scavChunkHiOccPages = uint16(scavChunkHiOccFrac * pallocChunkPages)
+)
+
+// heapRetained returns an estimate of the current heap RSS.
+func heapRetained() uint64 {
+	return gcController.heapInUse.load() + gcController.heapFree.load()
+}
+
+// gcPaceScavenger updates the scavenger's pacing, particularly
+// its rate and RSS goal. For this, it requires the current heapGoal,
+// and the heapGoal for the previous GC cycle.
+//
+// The RSS goal is based on the current heap goal with a small overhead
+// to accommodate non-determinism in the allocator.
+//
+// The pacing is based on scavengePageRate, which applies to both regular and
+// huge pages. See that constant for more information.
+//
+// Must be called whenever GC pacing is updated.
+//
+// mheap_.lock must be held or the world must be stopped.
+func gcPaceScavenger(memoryLimit int64, heapGoal, lastHeapGoal uint64) {
+	assertWorldStoppedOrLockHeld(&mheap_.lock)
+
+	// As described at the top of this file, there are two scavenge goals here: one
+	// for gcPercent and one for memoryLimit. Let's handle the latter first because
+	// it's simpler.
+
+	// We want to target retaining (100-reduceExtraPercent)% of the heap.
+	memoryLimitGoal := uint64(float64(memoryLimit) * (1 - reduceExtraPercent/100.0))
+
+	// mappedReady is comparable to memoryLimit, and represents how much total memory
+	// the Go runtime has committed now (estimated).
+	mappedReady := gcController.mappedReady.Load()
+
+	// If we're already below the goal, indicate that we don't need the background
+	// scavenger for the memory limit. This may seem worrisome at first, but note
+	// that the allocator will assist the background scavenger in the face of a memory
+	// limit, so we'll be safe even if we stop the scavenger when we shouldn't have.
+	if mappedReady <= memoryLimitGoal {
+		scavenge.memoryLimitGoal.Store(^uint64(0))
+	} else {
+		scavenge.memoryLimitGoal.Store(memoryLimitGoal)
+	}
+
+	// Now handle the gcPercent goal.
+
+	// If we're called before the first GC completed, disable scavenging.
+	// We never scavenge before the 2nd GC cycle anyway (we don't have enough
+	// information about the heap yet) so this is fine, and avoids a fault
+	// or garbage data later.
+	if lastHeapGoal == 0 {
+		scavenge.gcPercentGoal.Store(^uint64(0))
+		return
+	}
+	// Compute our scavenging goal.
+	goalRatio := float64(heapGoal) / float64(lastHeapGoal)
+	gcPercentGoal := uint64(float64(memstats.lastHeapInUse) * goalRatio)
+	// Add retainExtraPercent overhead to gcPercentGoal. This calculation
+	// looks strange but the purpose is to arrive at an integer division
+	// (e.g. if retainExtraPercent = 12.5, then we get a divisor of 8)
+	// that also avoids the overflow from a multiplication.
+	gcPercentGoal += gcPercentGoal / (1.0 / (retainExtraPercent / 100.0))
+	// Align it to a physical page boundary to make the following calculations
+	// a bit more exact.
+	gcPercentGoal = (gcPercentGoal + uint64(physPageSize) - 1) &^ (uint64(physPageSize) - 1)
+
+	// Represents where we are now in the heap's contribution to RSS in bytes.
+ // + // Guaranteed to always be a multiple of physPageSize on systems where + // physPageSize <= pageSize since we map new heap memory at a size larger than + // any physPageSize and released memory in multiples of the physPageSize. + // + // However, certain functions recategorize heap memory as other stats (e.g. + // stacks) and this happens in multiples of pageSize, so on systems + // where physPageSize > pageSize the calculations below will not be exact. + // Generally this is OK since we'll be off by at most one regular + // physical page. + heapRetainedNow := heapRetained() + + // If we're already below our goal, or within one page of our goal, then indicate + // that we don't need the background scavenger for maintaining a memory overhead + // proportional to the heap goal. + if heapRetainedNow <= gcPercentGoal || heapRetainedNow-gcPercentGoal < uint64(physPageSize) { + scavenge.gcPercentGoal.Store(^uint64(0)) + } else { + scavenge.gcPercentGoal.Store(gcPercentGoal) + } +} + +var scavenge struct { + // gcPercentGoal is the amount of retained heap memory (measured by + // heapRetained) that the runtime will try to maintain by returning + // memory to the OS. This goal is derived from gcController.gcPercent + // by choosing to retain enough memory to allocate heap memory up to + // the heap goal. + gcPercentGoal atomic.Uint64 + + // memoryLimitGoal is the amount of memory retained by the runtime ( + // measured by gcController.mappedReady) that the runtime will try to + // maintain by returning memory to the OS. This goal is derived from + // gcController.memoryLimit by choosing to target the memory limit or + // some lower target to keep the scavenger working. + memoryLimitGoal atomic.Uint64 + + // assistTime is the time spent by the allocator scavenging in the last GC cycle. + // + // This is reset once a GC cycle ends. + assistTime atomic.Int64 + + // backgroundTime is the time spent by the background scavenger in the last GC cycle. + // + // This is reset once a GC cycle ends. + backgroundTime atomic.Int64 +} + +const ( + // It doesn't really matter what value we start at, but we can't be zero, because + // that'll cause divide-by-zero issues. Pick something conservative which we'll + // also use as a fallback. + startingScavSleepRatio = 0.001 + + // Spend at least 1 ms scavenging, otherwise the corresponding + // sleep time to maintain our desired utilization is too low to + // be reliable. + minScavWorkTime = 1e6 +) + +// Sleep/wait state of the background scavenger. +var scavenger scavengerState + +type scavengerState struct { + // lock protects all fields below. + lock mutex + + // g is the goroutine the scavenger is bound to. + g *g + + // parked is whether or not the scavenger is parked. + parked bool + + // timer is the timer used for the scavenger to sleep. + timer *timer + + // sysmonWake signals to sysmon that it should wake the scavenger. + sysmonWake atomic.Uint32 + + // targetCPUFraction is the target CPU overhead for the scavenger. + targetCPUFraction float64 + + // sleepRatio is the ratio of time spent doing scavenging work to + // time spent sleeping. This is used to decide how long the scavenger + // should sleep for in between batches of work. It is set by + // critSleepController in order to maintain a CPU overhead of + // targetCPUFraction. + // + // Lower means more sleep, higher means more aggressive scavenging. + sleepRatio float64 + + // sleepController controls sleepRatio. + // + // See sleepRatio for more details. 
+	sleepController piController
+
+	// controllerCooldown is the time left in nanoseconds during which we avoid
+	// using the controller and we hold sleepRatio at a conservative
+	// value. Used if the controller's assumptions fail to hold.
+	controllerCooldown int64
+
+	// printControllerReset instructs printScavTrace to signal that
+	// the controller was reset.
+	printControllerReset bool
+
+	// sleepStub is a stub used for testing to avoid actually having
+	// the scavenger sleep.
+	//
+	// Unlike the other stubs, this is not populated if left nil.
+	// Instead, it is called when non-nil because any valid implementation
+	// of this function basically requires closing over this scavenger
+	// state, and allocating a closure is not allowed in the runtime as
+	// a matter of policy.
+	sleepStub func(n int64) int64
+
+	// scavenge is a function that scavenges n bytes of memory.
+	// Returns how many bytes of memory it actually scavenged, as
+	// well as the time it took in nanoseconds. Usually mheap.pages.scavenge
+	// with nanotime called around it, but stubbed out for testing.
+	// Like mheap.pages.scavenge, if it scavenges less than n bytes of
+	// memory, the caller may assume the heap is exhausted of scavengable
+	// memory for now.
+	//
+	// If this is nil, it is populated with the real thing in init.
+	scavenge func(n uintptr) (uintptr, int64)
+
+	// shouldStop is a callback called in the work loop and provides a
+	// point that can force the scavenger to stop early, for example because
+	// the scavenge policy dictates too much has been scavenged already.
+	//
+	// If this is nil, it is populated with the real thing in init.
+	shouldStop func() bool
+
+	// gomaxprocs returns the current value of gomaxprocs. Stub for testing.
+	//
+	// If this is nil, it is populated with the real thing in init.
+	gomaxprocs func() int32
+}
+
+// init initializes a scavenger state and wires it to the current G.
+//
+// Must be called from a regular goroutine that can allocate.
+func (s *scavengerState) init() {
+	if s.g != nil {
+		throw("scavenger state is already wired")
+	}
+	lockInit(&s.lock, lockRankScavenge)
+	s.g = getg()
+
+	s.timer = new(timer)
+	s.timer.arg = s
+	s.timer.f = func(s any, _ uintptr) {
+		s.(*scavengerState).wake()
+	}
+
+	// input: fraction of CPU time actually used.
+	// setpoint: ideal CPU fraction.
+	// output: ratio of time worked to time slept (determines sleep time).
+	//
+	// The output of this controller is somewhat indirect to what we actually
+	// want to achieve: how much time to sleep for. The reason for this definition
+	// is to ensure that the controller's outputs have a direct relationship with
+	// its inputs (as opposed to an inverse relationship), making it somewhat
+	// easier to reason about for tuning purposes.
+	s.sleepController = piController{
+		// Tuned loosely via Ziegler-Nichols process.
+		kp: 0.3375,
+		ti: 3.2e6,
+		tt: 1e9, // 1 second reset time.
+
+		// These ranges seem wide, but we want to give the controller plenty of
+		// room to hunt for the optimal value.
+		min: 0.001,  // 1:1000
+		max: 1000.0, // 1000:1
+	}
+	s.sleepRatio = startingScavSleepRatio
+
+	// Install real functions if stubs aren't present.
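+	// Only the hooks still nil get the real implementations, so tests can
+	// inject their own stubs beforehand.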
+ if s.scavenge == nil {
+ s.scavenge = func(n uintptr) (uintptr, int64) {
+ start := nanotime()
+ r := mheap_.pages.scavenge(n, nil, false)
+ end := nanotime()
+ if start >= end {
+ return r, 0
+ }
+ scavenge.backgroundTime.Add(end - start)
+ return r, end - start
+ }
+ }
+ if s.shouldStop == nil {
+ s.shouldStop = func() bool {
+ // If background scavenging is disabled or if there's no work to do, just stop.
+ return heapRetained() <= scavenge.gcPercentGoal.Load() &&
+ gcController.mappedReady.Load() <= scavenge.memoryLimitGoal.Load()
+ }
+ }
+ if s.gomaxprocs == nil {
+ s.gomaxprocs = func() int32 {
+ return gomaxprocs
+ }
+ }
+}
+
+// park parks the scavenger goroutine.
+func (s *scavengerState) park() {
+ lock(&s.lock)
+ if getg() != s.g {
+ throw("tried to park scavenger from another goroutine")
+ }
+ s.parked = true
+ goparkunlock(&s.lock, waitReasonGCScavengeWait, traceBlockSystemGoroutine, 2)
+}
+
+// ready signals to sysmon that the scavenger should be awoken.
+func (s *scavengerState) ready() {
+ s.sysmonWake.Store(1)
+}
+
+// wake immediately unparks the scavenger if necessary.
+//
+// Safe to run without a P.
+func (s *scavengerState) wake() {
+ lock(&s.lock)
+ if s.parked {
+ // Unset sysmonWake, since the scavenger is now being awoken.
+ s.sysmonWake.Store(0)
+
+ // s.parked is unset to prevent a double wake-up.
+ s.parked = false
+
+ // Ready the goroutine by injecting it. We use injectglist instead
+ // of ready or goready in order to allow us to run this function
+ // without a P. injectglist also avoids placing the goroutine in
+ // the current P's runnext slot, which is desirable to prevent
+ // the scavenger from interfering with user goroutine scheduling
+ // too much.
+ var list gList
+ list.push(s.g)
+ injectglist(&list)
+ }
+ unlock(&s.lock)
+}
+
+// sleep puts the scavenger to sleep based on the amount of time that it worked
+// in nanoseconds.
+//
+// Note that this function should only be called by the scavenger.
+//
+// The scavenger may be woken up earlier by a pacing change, and it may not go
+// to sleep at all if there's a pending pacing change.
+func (s *scavengerState) sleep(worked float64) {
+ lock(&s.lock)
+ if getg() != s.g {
+ throw("tried to sleep scavenger from another goroutine")
+ }
+
+ if worked < minScavWorkTime {
+ // This means there wasn't enough work to actually fill up minScavWorkTime.
+ // That's fine; we shouldn't try to do anything with this information
+ // because it's going to result in a short enough sleep request that things
+ // will get messy. Just assume we did at least this much work.
+ // All this means is that we'll sleep longer than we otherwise would have.
+ worked = minScavWorkTime
+ }
+
+ // Multiply the critical time by 1 + the ratio of the costs of using
+ // scavenged memory vs. scavenging memory. This forces us to pay down
+ // the cost of reusing this memory eagerly by sleeping for a longer period
+ // of time and scavenging less frequently. More concretely, we avoid situations
+ // where we end up scavenging so often that we hurt allocation performance
+ // because of the additional overheads of using scavenged memory.
+ worked *= 1 + scavengeCostRatio
+
+ // sleepTime is the amount of time we're going to sleep, based on the amount
+ // of time we worked, and the sleepRatio.
+ sleepTime := int64(worked / s.sleepRatio)
+
+ var slept int64
+ if s.sleepStub == nil {
+ // Set the timer.
+ //
+ // This must happen here instead of inside gopark
+ // because we can't close over any variables without
+ // failing escape analysis.
+ start := nanotime()
+ resetTimer(s.timer, start+sleepTime)
+
+ // Mark ourselves as asleep and go to sleep.
+ s.parked = true
+ goparkunlock(&s.lock, waitReasonSleep, traceBlockSleep, 2)
+
+ // How long we actually slept for.
+ slept = nanotime() - start
+
+ lock(&s.lock)
+ // Stop the timer here because s.wake is unable to do it for us.
+ // We don't really care if we succeed in stopping the timer. One
+ // reason we might fail is that we've already woken up, but the timer
+ // might be in the process of firing on some other P; essentially we're
+ // racing with it. That's totally OK. Double wake-ups are perfectly safe.
+ stopTimer(s.timer)
+ unlock(&s.lock)
+ } else {
+ unlock(&s.lock)
+ slept = s.sleepStub(sleepTime)
+ }
+
+ // Stop here if we're cooling down from the controller.
+ if s.controllerCooldown > 0 {
+ // worked and slept aren't exact measures of time, but it's OK to be a bit
+ // sloppy here. We're just hoping we're avoiding some transient bad behavior.
+ t := slept + int64(worked)
+ if t > s.controllerCooldown {
+ s.controllerCooldown = 0
+ } else {
+ s.controllerCooldown -= t
+ }
+ return
+ }
+
+ // idealFraction is the ideal % of overall application CPU time that we
+ // spend scavenging.
+ idealFraction := float64(scavengePercent) / 100.0
+
+ // Calculate the CPU time spent.
+ //
+ // This may be slightly inaccurate with respect to GOMAXPROCS, but we're
+ // recomputing this often enough relative to GOMAXPROCS changes in general
+ // (it only changes when the world is stopped, and not during a GC) that
+ // that small inaccuracy is in the noise.
+ cpuFraction := worked / ((float64(slept) + worked) * float64(s.gomaxprocs()))
+
+ // Update sleepRatio, adjusting until we reach our ideal fraction.
+ var ok bool
+ s.sleepRatio, ok = s.sleepController.next(cpuFraction, idealFraction, float64(slept)+worked)
+ if !ok {
+ // The core assumption of the controller, that we can get a proportional
+ // response, broke down. This may be transient, so temporarily switch to
+ // sleeping a fixed, conservative amount.
+ s.sleepRatio = startingScavSleepRatio
+ s.controllerCooldown = 5e9 // 5 seconds.
+
+ // Signal the scav trace printer to output this.
+ s.controllerFailed()
+ }
+}
+
+// controllerFailed indicates that the scavenger's scheduling
+// controller failed.
+func (s *scavengerState) controllerFailed() {
+ lock(&s.lock)
+ s.printControllerReset = true
+ unlock(&s.lock)
+}
+
+// run is the body of the main scavenging loop.
+//
+// Returns the number of bytes released and the estimated time spent
+// releasing those bytes.
+//
+// Must be run on the scavenger goroutine.
+func (s *scavengerState) run() (released uintptr, worked float64) {
+ lock(&s.lock)
+ if getg() != s.g {
+ throw("tried to run scavenger from another goroutine")
+ }
+ unlock(&s.lock)
+
+ for worked < minScavWorkTime {
+ // If something from outside tells us to stop early, stop.
+ if s.shouldStop() {
+ break
+ }
+
+ // scavengeQuantum is the amount of memory we try to scavenge
+ // in one go. A smaller value means the scavenger is more responsive
+ // to the scheduler in case of e.g. preemption. A larger value means
+ // that the overheads of scavenging are better amortized, so better
+ // scavenging throughput.
+ //
+ // The current value is chosen assuming a cost of ~10µs/physical page
+ // (this is somewhat pessimistic), which implies a worst-case latency of
+ // about 160µs for 4 KiB physical pages. The current value is biased
+ // toward latency over throughput.
+ const scavengeQuantum = 64 << 10
+
+ // Accumulate the amount of time spent scavenging.
+ r, duration := s.scavenge(scavengeQuantum)
+
+ // On some platforms we may see duration == 0 if the time it takes to scavenge
+ // memory is less than the minimum granularity of its clock (e.g. Windows) or
+ // due to clock bugs.
+ //
+ // In this case, just assume scavenging takes 10 µs per regular physical page
+ // (determined empirically), and conservatively ignore the impact of huge pages
+ // on timing.
+ const approxWorkedNSPerPhysicalPage = 10e3
+ if duration == 0 {
+ worked += approxWorkedNSPerPhysicalPage * float64(r/physPageSize)
+ } else {
+ // TODO(mknyszek): If duration is small compared to worked, it could be
+ // rounded down to zero. Probably not a problem in practice because the
+ // values are all within a few orders of magnitude of each other but maybe
+ // worth worrying about.
+ worked += float64(duration)
+ }
+ released += r
+
+ // scavenge does not return until it either finds the requisite amount of
+ // memory to scavenge, or exhausts the heap. If we haven't found enough
+ // to scavenge, then the heap must be exhausted.
+ if r < scavengeQuantum {
+ break
+ }
+ // When using fake time just do one loop.
+ if faketime != 0 {
+ break
+ }
+ }
+ if released > 0 && released < physPageSize {
+ // If this happens, it means that we may have attempted to release part
+ // of a physical page, but the likely effect of that is that it released
+ // the whole physical page, some of which may have still been in-use.
+ // This could lead to memory corruption. Throw.
+ throw("released less than one physical page of memory")
+ }
+ return
+}
+
+// Background scavenger.
+//
+// The background scavenger maintains the RSS of the application below
+// the line described by the proportional scavenging statistics in
+// the mheap struct.
+func bgscavenge(c chan int) {
+ scavenger.init()
+
+ c <- 1
+ scavenger.park()
+
+ for {
+ released, workTime := scavenger.run()
+ if released == 0 {
+ scavenger.park()
+ continue
+ }
+ mheap_.pages.scav.releasedBg.Add(released)
+ scavenger.sleep(workTime)
+ }
+}
+
+// scavenge scavenges nbytes worth of free pages, starting with the
+// highest address first. Successive calls continue from where it left
+// off until the heap is exhausted. force makes all memory available to
+// scavenge, ignoring huge page heuristics.
+//
+// Returns the amount of memory scavenged in bytes.
+//
+// scavenge always tries to scavenge nbytes worth of memory, and will
+// only fail to do so if the heap is exhausted for now.
+func (p *pageAlloc) scavenge(nbytes uintptr, shouldStop func() bool, force bool) uintptr {
+ released := uintptr(0)
+ for released < nbytes {
+ ci, pageIdx := p.scav.index.find(force)
+ if ci == 0 {
+ break
+ }
+ systemstack(func() {
+ released += p.scavengeOne(ci, pageIdx, nbytes-released)
+ })
+ if shouldStop != nil && shouldStop() {
+ break
+ }
+ }
+ return released
+}
+
+// printScavTrace prints a scavenge trace line to standard error.
+//
+// releasedBg and releasedEager should be the amounts of memory released by the
+// background and eager scavengers since the last time this was called, and
+// forced indicates whether the scavenge was forced by the application.
+//
+// scavenger.lock must be held.
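+//
+// A printed line has the following form, where "(forced)" appears only for
+// forced scavenges and the bracketed suffix only after a controller reset
+// (values here are illustrative):
+//
+//	scav 128 KiB work (bg), 32 KiB work (eager), 8192 KiB now, 87% util [controller reset]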
+func printScavTrace(releasedBg, releasedEager uintptr, forced bool) {
+ assertLockHeld(&scavenger.lock)
+
+ printlock()
+ print("scav ",
+ releasedBg>>10, " KiB work (bg), ",
+ releasedEager>>10, " KiB work (eager), ",
+ gcController.heapReleased.load()>>10, " KiB now, ",
+ (gcController.heapInUse.load()*100)/heapRetained(), "% util",
+ )
+ if forced {
+ print(" (forced)")
+ } else if scavenger.printControllerReset {
+ print(" [controller reset]")
+ scavenger.printControllerReset = false
+ }
+ println()
+ printunlock()
+}
+
+// scavengeOne walks over the chunk at chunk index ci and searches for
+// a contiguous run of pages to scavenge. It will try to scavenge
+// at most max bytes at once, but may scavenge more to avoid
+// breaking huge pages. Once it scavenges some memory it returns
+// how much it scavenged in bytes.
+//
+// searchIdx is the page index to start searching from in ci.
+//
+// Returns the number of bytes scavenged.
+//
+// Must run on the systemstack because it acquires p.mheapLock.
+//
+//go:systemstack
+func (p *pageAlloc) scavengeOne(ci chunkIdx, searchIdx uint, max uintptr) uintptr {
+ // Calculate the maximum number of pages to scavenge.
+ //
+ // This should be alignUp(max, pageSize) / pageSize but max can and will
+ // be ^uintptr(0), so we need to be very careful not to overflow here.
+ // Rather than use alignUp, calculate the number of pages rounded down
+ // first, then add back one if necessary.
+ maxPages := max / pageSize
+ if max%pageSize != 0 {
+ maxPages++
+ }
+
+ // Calculate the minimum number of pages we can scavenge.
+ //
+ // Because we can only scavenge whole physical pages, we must
+ // ensure that we scavenge at least minPages each time, aligned
+ // to minPages*pageSize.
+ minPages := physPageSize / pageSize
+ if minPages < 1 {
+ minPages = 1
+ }
+
+ lock(p.mheapLock)
+ if p.summary[len(p.summary)-1][ci].max() >= uint(minPages) {
+ // We only bother looking for a candidate if there are at least
+ // minPages free pages at all.
+ base, npages := p.chunkOf(ci).findScavengeCandidate(searchIdx, minPages, maxPages)
+
+ // If we found something, scavenge it and return!
+ if npages != 0 {
+ // Compute the full address for the start of the range.
+ addr := chunkBase(ci) + uintptr(base)*pageSize
+
+ // Mark the range we're about to scavenge as allocated, because
+ // we don't want any allocating goroutines to grab it while
+ // the scavenging is in progress. Be careful here -- just do the
+ // bare minimum to avoid stepping on our own scavenging stats.
+ p.chunkOf(ci).allocRange(base, npages)
+ p.update(addr, uintptr(npages), true, true)
+
+ // With that done, it's safe to unlock.
+ unlock(p.mheapLock)
+
+ if !p.test {
+ pageTraceScav(getg().m.p.ptr(), 0, addr, uintptr(npages))
+
+ // Only perform sys* operations if we're not in a test.
+ // It's dangerous to do so otherwise.
+ sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)
+
+ // Update global accounting only when not in test, otherwise
+ // the runtime's accounting will be wrong.
+ nbytes := int64(npages * pageSize)
+ gcController.heapReleased.add(nbytes)
+ gcController.heapFree.add(-nbytes)
+
+ stats := memstats.heapStats.acquire()
+ atomic.Xaddint64(&stats.committed, -nbytes)
+ atomic.Xaddint64(&stats.released, nbytes)
+ memstats.heapStats.release()
+ }
+
+ // Relock the heap, because now we need to make these pages
+ // available for allocation. Free them back to the page allocator.
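+ // (We dropped the lock above so that sysUnused, which can be slow,
+ // doesn't block allocation; only now do we need the lock back.)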
+ lock(p.mheapLock)
+ if b := (offAddr{addr}); b.lessThan(p.searchAddr) {
+ p.searchAddr = b
+ }
+ p.chunkOf(ci).free(base, npages)
+ p.update(addr, uintptr(npages), true, false)
+
+ // Mark the range as scavenged.
+ p.chunkOf(ci).scavenged.setRange(base, npages)
+ unlock(p.mheapLock)
+
+ return uintptr(npages) * pageSize
+ }
+ }
+ // Mark this chunk as having no free pages.
+ p.scav.index.setEmpty(ci)
+ unlock(p.mheapLock)
+
+ return 0
+}
+
+// fillAligned returns x but with all zeroes in m-aligned
+// groups of m bits set to 1 if any bit in the group is non-zero.
+//
+// For example, fillAligned(0x0100a3, 8) == 0xff00ff.
+//
+// Note that if m == 1, this is a no-op.
+//
+// m must be a power of 2 <= maxPagesPerPhysPage.
+func fillAligned(x uint64, m uint) uint64 {
+ apply := func(x uint64, c uint64) uint64 {
+ // The technique used here is derived from
+ // https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
+ // and extended for more than just bytes (like nibbles
+ // and uint16s) by using an appropriate constant.
+ //
+ // To summarize the technique, quoting from that page:
+ // "[It] works by first zeroing the high bits of the [8]
+ // bytes in the word. Subsequently, it adds a number that
+ // will result in an overflow to the high bit of a byte if
+ // any of the low bits were initially set. Next the high
+ // bits of the original word are ORed with these values;
+ // thus, the high bit of a byte is set iff any bit in the
+ // byte was set. Finally, we determine if any of these high
+ // bits are zero by ORing with ones everywhere except the
+ // high bits and inverting the result."
+ return ^((((x & c) + c) | x) | c)
+ }
+ // Transform x to contain a 1 bit at the top of each m-aligned
+ // group of m zero bits.
+ switch m {
+ case 1:
+ return x
+ case 2:
+ x = apply(x, 0x5555555555555555)
+ case 4:
+ x = apply(x, 0x7777777777777777)
+ case 8:
+ x = apply(x, 0x7f7f7f7f7f7f7f7f)
+ case 16:
+ x = apply(x, 0x7fff7fff7fff7fff)
+ case 32:
+ x = apply(x, 0x7fffffff7fffffff)
+ case 64: // == maxPagesPerPhysPage
+ x = apply(x, 0x7fffffffffffffff)
+ default:
+ throw("bad m value")
+ }
+ // Now, the top bit of each m-aligned group in x is set iff
+ // that group was all zero in the original x.
+
+ // From each group of m bits subtract 1.
+ // Because we know only the top bits of each
+ // m-aligned group are set, we know this will
+ // set each group to have all the bits set except
+ // the top bit, so just OR with the original
+ // result to set all the bits.
+ return ^((x - (x >> (m - 1))) | x)
+}
+
+// findScavengeCandidate returns a start index and a size for this pallocData
+// segment which represents a contiguous region of free and unscavenged memory.
+//
+// searchIdx indicates the page index within this chunk to start the search, but
+// note that findScavengeCandidate searches backwards through the pallocData. As
+// a result, it will return the highest scavenge candidate in address order.
+//
+// min indicates a hard minimum size and alignment for runs of pages. That is,
+// findScavengeCandidate will not return a region smaller than min pages in size,
+// or that is min pages or greater in size but not aligned to min. min must be
+// a non-zero power of 2 <= maxPagesPerPhysPage.
+//
+// max is a hint for how big of a region is desired. If max >= pallocChunkPages, then
+// findScavengeCandidate effectively returns entire free and unscavenged regions.
+// If max < pallocChunkPages, it may truncate the returned region such that size is
+// max.
+// However, findScavengeCandidate may still return a larger region if, for
+// example, it chooses to preserve huge pages, or if max is not aligned to min (it
+// will round up). That is, even if max is small, the returned size is not guaranteed
+// to be equal to max. max is allowed to be less than min, in which case it is as if
+// max == min.
+func (m *pallocData) findScavengeCandidate(searchIdx uint, minimum, max uintptr) (uint, uint) {
+ if minimum&(minimum-1) != 0 || minimum == 0 {
+ print("runtime: min = ", minimum, "\n")
+ throw("min must be a non-zero power of 2")
+ } else if minimum > maxPagesPerPhysPage {
+ print("runtime: min = ", minimum, "\n")
+ throw("min too large")
+ }
+ // max may not be min-aligned, so we might accidentally truncate to
+ // a max value which causes us to return a non-min-aligned value.
+ // To prevent this, align max up to a multiple of min (which is always
+ // a power of 2). This also prevents max from ever being less than
+ // min, unless it's zero, so handle that explicitly.
+ if max == 0 {
+ max = minimum
+ } else {
+ max = alignUp(max, minimum)
+ }
+
+ i := int(searchIdx / 64)
+ // Start by quickly skipping over blocks of non-free or scavenged pages.
+ for ; i >= 0; i-- {
+ // 1s are scavenged OR non-free => 0s are unscavenged AND free
+ x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(minimum))
+ if x != ^uint64(0) {
+ break
+ }
+ }
+ if i < 0 {
+ // Failed to find any free/unscavenged pages.
+ return 0, 0
+ }
+ // We have something in the 64-bit chunk at i, but it could
+ // extend further. Loop until we find the extent of it.
+
+ // 1s are scavenged OR non-free => 0s are unscavenged AND free
+ x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(minimum))
+ z1 := uint(sys.LeadingZeros64(^x))
+ run, end := uint(0), uint(i)*64+(64-z1)
+ if x<<z1 != 0 {
+ // After shifting out z1 bits, we still have 1s,
+ // so the run ends inside this word.
+ run = uint(sys.LeadingZeros64(x << z1))
+ } else {
+ // After shifting out z1 bits, we have no more 1s.
+ // This means the run extends to the bottom of the
+ // word, so it may extend into further words.
+ run = 64 - z1
+ for j := i - 1; j >= 0; j-- {
+ x := fillAligned(m.scavenged[j]|m.pallocBits[j], uint(minimum))
+ run += uint(sys.LeadingZeros64(x))
+ if x != 0 {
+ // The run stopped in this word.
+ break
+ }
+ }
+ }
+
+ // Split the run we found if it's larger than max but hold on to
+ // our original length, since we may need it later.
+ size := min(run, uint(max))
+ start := end - size
+
+ // Each huge page is guaranteed to fit in a single palloc chunk.
+ //
+ // TODO(mknyszek): Support larger huge page sizes.
+ // TODO(mknyszek): Consider taking pages-per-huge-page as a parameter
+ // so we can write tests for this.
+ if physHugePageSize > pageSize && physHugePageSize > physPageSize {
+ // We have huge pages, so let's ensure we don't break one by scavenging
+ // over a huge page boundary. If the range [start, start+size) overlaps with
+ // a free-and-unscavenged huge page, we want to grow the region we scavenge
+ // to include that huge page.
+
+ // Compute the huge page boundary above our candidate.
+ pagesPerHugePage := physHugePageSize / pageSize
+ hugePageAbove := uint(alignUp(uintptr(start), pagesPerHugePage))
+
+ // If that boundary is within our current candidate, then we may be breaking
+ // a huge page.
+ if hugePageAbove <= end {
+ // Compute the huge page boundary below our candidate.
+ hugePageBelow := uint(alignDown(uintptr(start), pagesPerHugePage))
+
+ if hugePageBelow >= end-run {
+ // We're in danger of breaking apart a huge page since start+size crosses
+ // a huge page boundary and rounding down start to the nearest huge
+ // page boundary is included in the full run we found. Include the entire
+ // huge page in the bound by rounding down to the huge page size.
+ size = size + (start - hugePageBelow) + start = hugePageBelow + } + } + } + return start, size +} + +// scavengeIndex is a structure for efficiently managing which pageAlloc chunks have +// memory available to scavenge. +type scavengeIndex struct { + // chunks is a scavChunkData-per-chunk structure that indicates the presence of pages + // available for scavenging. Updates to the index are serialized by the pageAlloc lock. + // + // It tracks chunk occupancy and a generation counter per chunk. If a chunk's occupancy + // never exceeds pallocChunkDensePages over the course of a single GC cycle, the chunk + // becomes eligible for scavenging on the next cycle. If a chunk ever hits this density + // threshold it immediately becomes unavailable for scavenging in the current cycle as + // well as the next. + // + // [min, max) represents the range of chunks that is safe to access (i.e. will not cause + // a fault). As an optimization minHeapIdx represents the true minimum chunk that has been + // mapped, since min is likely rounded down to include the system page containing minHeapIdx. + // + // For a chunk size of 4 MiB this structure will only use 2 MiB for a 1 TiB contiguous heap. + chunks []atomicScavChunkData + min, max atomic.Uintptr + minHeapIdx atomic.Uintptr + + // searchAddr* is the maximum address (in the offset address space, so we have a linear + // view of the address space; see mranges.go:offAddr) containing memory available to + // scavenge. It is a hint to the find operation to avoid O(n^2) behavior in repeated lookups. + // + // searchAddr* is always inclusive and should be the base address of the highest runtime + // page available for scavenging. + // + // searchAddrForce is managed by find and free. + // searchAddrBg is managed by find and nextGen. + // + // Normally, find monotonically decreases searchAddr* as it finds no more free pages to + // scavenge. However, mark, when marking a new chunk at an index greater than the current + // searchAddr, sets searchAddr to the *negative* index into chunks of that page. The trick here + // is that concurrent calls to find will fail to monotonically decrease searchAddr*, and so they + // won't barge over new memory becoming available to scavenge. Furthermore, this ensures + // that some future caller of find *must* observe the new high index. That caller + // (or any other racing with it), then makes searchAddr positive before continuing, bringing + // us back to our monotonically decreasing steady-state. + // + // A pageAlloc lock serializes updates between min, max, and searchAddr, so abs(searchAddr) + // is always guaranteed to be >= min and < max (converted to heap addresses). + // + // searchAddrBg is increased only on each new generation and is mainly used by the + // background scavenger and heap-growth scavenging. searchAddrForce is increased continuously + // as memory gets freed and is mainly used by eager memory reclaim such as debug.FreeOSMemory + // and scavenging to maintain the memory limit. + searchAddrBg atomicOffAddr + searchAddrForce atomicOffAddr + + // freeHWM is the highest address (in offset address space) that was freed + // this generation. + freeHWM offAddr + + // Generation counter. Updated by nextGen at the end of each mark phase. + gen uint32 + + // test indicates whether or not we're in a test. + test bool +} + +// init initializes the scavengeIndex. +// +// Returns the amount added to sysStat. 
+func (s *scavengeIndex) init(test bool, sysStat *sysMemStat) uintptr {
+ s.searchAddrBg.Clear()
+ s.searchAddrForce.Clear()
+ s.freeHWM = minOffAddr
+ s.test = test
+ return s.sysInit(test, sysStat)
+}
+
+// grow updates the index's backing store in response to a heap growth.
+//
+// Returns the amount of memory added to sysStat.
+func (s *scavengeIndex) grow(base, limit uintptr, sysStat *sysMemStat) uintptr {
+ // Update minHeapIdx. Note that even if there's no mapping work to do,
+ // we may still have a new, lower minimum heap address.
+ minHeapIdx := s.minHeapIdx.Load()
+ if baseIdx := uintptr(chunkIndex(base)); minHeapIdx == 0 || baseIdx < minHeapIdx {
+ s.minHeapIdx.Store(baseIdx)
+ }
+ return s.sysGrow(base, limit, sysStat)
+}
+
+// find returns the highest chunk index that may contain pages available to scavenge.
+// It also returns an offset to start searching in the highest chunk.
+func (s *scavengeIndex) find(force bool) (chunkIdx, uint) {
+ cursor := &s.searchAddrBg
+ if force {
+ cursor = &s.searchAddrForce
+ }
+ searchAddr, marked := cursor.Load()
+ if searchAddr == minOffAddr.addr() {
+ // We got a cleared search addr.
+ return 0, 0
+ }
+
+ // Starting from searchAddr's chunk, iterate until we find a chunk with pages to scavenge.
+ gen := s.gen
+ min := chunkIdx(s.minHeapIdx.Load())
+ start := chunkIndex(searchAddr)
+ // N.B. We'll never map the 0'th chunk, so minHeapIdx ensures this loop won't overflow.
+ for i := start; i >= min; i-- {
+ // Skip over chunks that shouldn't be scavenged right now.
+ if !s.chunks[i].load().shouldScavenge(gen, force) {
+ continue
+ }
+ // We're still scavenging this chunk.
+ if i == start {
+ return i, chunkPageIndex(searchAddr)
+ }
+ // Try to reduce searchAddr to newSearchAddr.
+ newSearchAddr := chunkBase(i) + pallocChunkBytes - pageSize
+ if marked {
+ // Attempt to be the first one to decrease the searchAddr
+ // after an increase. If we fail, that means there was another
+ // increase, or somebody else got to it before us. Either way,
+ // it doesn't matter. We may lose some performance having an
+ // incorrect search address, but it's far more important that
+ // we don't miss updates.
+ cursor.StoreUnmark(searchAddr, newSearchAddr)
+ } else {
+ // Decrease searchAddr.
+ cursor.StoreMin(newSearchAddr)
+ }
+ return i, pallocChunkPages - 1
+ }
+ // Clear searchAddr, because we've exhausted the heap.
+ cursor.Clear()
+ return 0, 0
+}
+
+// alloc updates metadata for chunk at index ci with the fact that
+// an allocation of npages occurred.
+//
+// alloc may only run concurrently with find.
+func (s *scavengeIndex) alloc(ci chunkIdx, npages uint) {
+ sc := s.chunks[ci].load()
+ sc.alloc(npages, s.gen)
+ // TODO(mknyszek): Consider eagerly backing memory with huge pages
+ // here and track whether we believe this chunk is backed by huge pages.
+ // In the past we've attempted to use sysHugePageCollapse (which uses
+ // MADV_COLLAPSE on Linux, and is unsupported elsewhere) for this purpose,
+ // but that caused performance issues in production environments.
+ s.chunks[ci].store(sc)
+}
+
+// free updates metadata for chunk at index ci with the fact that
+// a free of npages occurred.
+//
+// free may only run concurrently with find.
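+//
+// Because a free may land above the current search address, free also
+// pushes searchAddrForce and the free high watermark (freeHWM) upward,
+// so that a later forced find (and, after nextGen, the background
+// scavenger) is guaranteed to revisit the chunk.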
+func (s *scavengeIndex) free(ci chunkIdx, page, npages uint) { + sc := s.chunks[ci].load() + sc.free(npages, s.gen) + s.chunks[ci].store(sc) + + // Update scavenge search addresses. + addr := chunkBase(ci) + uintptr(page+npages-1)*pageSize + if s.freeHWM.lessThan(offAddr{addr}) { + s.freeHWM = offAddr{addr} + } + // N.B. Because free is serialized, it's not necessary to do a + // full CAS here. free only ever increases searchAddr, while + // find only ever decreases it. Since we only ever race with + // decreases, even if the value we loaded is stale, the actual + // value will never be larger. + searchAddr, _ := s.searchAddrForce.Load() + if (offAddr{searchAddr}).lessThan(offAddr{addr}) { + s.searchAddrForce.StoreMarked(addr) + } +} + +// nextGen moves the scavenger forward one generation. Must be called +// once per GC cycle, but may be called more often to force more memory +// to be released. +// +// nextGen may only run concurrently with find. +func (s *scavengeIndex) nextGen() { + s.gen++ + searchAddr, _ := s.searchAddrBg.Load() + if (offAddr{searchAddr}).lessThan(s.freeHWM) { + s.searchAddrBg.StoreMarked(s.freeHWM.addr()) + } + s.freeHWM = minOffAddr +} + +// setEmpty marks that the scavenger has finished looking at ci +// for now to prevent the scavenger from getting stuck looking +// at the same chunk. +// +// setEmpty may only run concurrently with find. +func (s *scavengeIndex) setEmpty(ci chunkIdx) { + val := s.chunks[ci].load() + val.setEmpty() + s.chunks[ci].store(val) +} + +// atomicScavChunkData is an atomic wrapper around a scavChunkData +// that stores it in its packed form. +type atomicScavChunkData struct { + value atomic.Uint64 +} + +// load loads and unpacks a scavChunkData. +func (sc *atomicScavChunkData) load() scavChunkData { + return unpackScavChunkData(sc.value.Load()) +} + +// store packs and writes a new scavChunkData. store must be serialized +// with other calls to store. +func (sc *atomicScavChunkData) store(ssc scavChunkData) { + sc.value.Store(ssc.pack()) +} + +// scavChunkData tracks information about a palloc chunk for +// scavenging. It packs well into 64 bits. +// +// The zero value always represents a valid newly-grown chunk. +type scavChunkData struct { + // inUse indicates how many pages in this chunk are currently + // allocated. + // + // Only the first 10 bits are used. + inUse uint16 + + // lastInUse indicates how many pages in this chunk were allocated + // when we transitioned from gen-1 to gen. + // + // Only the first 10 bits are used. + lastInUse uint16 + + // gen is the generation counter from a scavengeIndex from the + // last time this scavChunkData was updated. + gen uint32 + + // scavChunkFlags represents additional flags + // + // Note: only 6 bits are available. + scavChunkFlags +} + +// unpackScavChunkData unpacks a scavChunkData from a uint64. +func unpackScavChunkData(sc uint64) scavChunkData { + return scavChunkData{ + inUse: uint16(sc), + lastInUse: uint16(sc>>16) & scavChunkInUseMask, + gen: uint32(sc >> 32), + scavChunkFlags: scavChunkFlags(uint8(sc>>(16+logScavChunkInUseMax)) & scavChunkFlagsMask), + } +} + +// pack returns sc packed into a uint64. +func (sc scavChunkData) pack() uint64 { + return uint64(sc.inUse) | + (uint64(sc.lastInUse) << 16) | + (uint64(sc.scavChunkFlags) << (16 + logScavChunkInUseMax)) | + (uint64(sc.gen) << 32) +} + +const ( + // scavChunkHasFree indicates whether the chunk has anything left to + // scavenge. This is the opposite of "empty," used elsewhere in this + // file. 
The reason we say "HasFree" here is so the zero value is + // correct for a newly-grown chunk. (New memory is scavenged.) + scavChunkHasFree scavChunkFlags = 1 << iota + + // scavChunkMaxFlags is the maximum number of flags we can have, given how + // a scavChunkData is packed into 8 bytes. + scavChunkMaxFlags = 6 + scavChunkFlagsMask = (1 << scavChunkMaxFlags) - 1 + + // logScavChunkInUseMax is the number of bits needed to represent the number + // of pages allocated in a single chunk. This is 1 more than log2 of the + // number of pages in the chunk because we need to represent a fully-allocated + // chunk. + logScavChunkInUseMax = logPallocChunkPages + 1 + scavChunkInUseMask = (1 << logScavChunkInUseMax) - 1 +) + +// scavChunkFlags is a set of bit-flags for the scavenger for each palloc chunk. +type scavChunkFlags uint8 + +// isEmpty returns true if the hasFree flag is unset. +func (sc *scavChunkFlags) isEmpty() bool { + return (*sc)&scavChunkHasFree == 0 +} + +// setEmpty clears the hasFree flag. +func (sc *scavChunkFlags) setEmpty() { + *sc &^= scavChunkHasFree +} + +// setNonEmpty sets the hasFree flag. +func (sc *scavChunkFlags) setNonEmpty() { + *sc |= scavChunkHasFree +} + +// shouldScavenge returns true if the corresponding chunk should be interrogated +// by the scavenger. +func (sc scavChunkData) shouldScavenge(currGen uint32, force bool) bool { + if sc.isEmpty() { + // Nothing to scavenge. + return false + } + if force { + // We're forcing the memory to be scavenged. + return true + } + if sc.gen == currGen { + // In the current generation, if either the current or last generation + // is dense, then skip scavenging. Inverting that, we should scavenge + // if both the current and last generation were not dense. + return sc.inUse < scavChunkHiOccPages && sc.lastInUse < scavChunkHiOccPages + } + // If we're one or more generations ahead, we know inUse represents the current + // state of the chunk, since otherwise it would've been updated already. + return sc.inUse < scavChunkHiOccPages +} + +// alloc updates sc given that npages were allocated in the corresponding chunk. +func (sc *scavChunkData) alloc(npages uint, newGen uint32) { + if uint(sc.inUse)+npages > pallocChunkPages { + print("runtime: inUse=", sc.inUse, " npages=", npages, "\n") + throw("too many pages allocated in chunk?") + } + if sc.gen != newGen { + sc.lastInUse = sc.inUse + sc.gen = newGen + } + sc.inUse += uint16(npages) + if sc.inUse == pallocChunkPages { + // There's nothing for the scavenger to take from here. + sc.setEmpty() + } +} + +// free updates sc given that npages was freed in the corresponding chunk. +func (sc *scavChunkData) free(npages uint, newGen uint32) { + if uint(sc.inUse) < npages { + print("runtime: inUse=", sc.inUse, " npages=", npages, "\n") + throw("allocated pages below zero?") + } + if sc.gen != newGen { + sc.lastInUse = sc.inUse + sc.gen = newGen + } + sc.inUse -= uint16(npages) + // The scavenger can no longer be done with this chunk now that + // new memory has been freed into it. + sc.setNonEmpty() +} + +type piController struct { + kp float64 // Proportional constant. + ti float64 // Integral time constant. + tt float64 // Reset time. + + min, max float64 // Output boundaries. + + // PI controller state. + + errIntegral float64 // Integral of the error from t=0 to now. + + // Error flags. + errOverflow bool // Set if errIntegral ever overflowed. + inputOverflow bool // Set if an operation with the input overflowed. +} + +// next provides a new sample to the controller. 
+//
+// input is the sample, setpoint is the desired point, and period is how much
+// time (in whatever unit makes the most sense) has passed since the last sample.
+//
+// Returns a new value for the variable it's controlling, and whether the operation
+// completed successfully. One reason this might fail is if the error has been
+// growing in an unbounded manner, to the point of overflow.
+//
+// In the specific case where an error overflow occurs, the errOverflow field will be
+// set and the rest of the controller's internal state will be fully reset.
+func (c *piController) next(input, setpoint, period float64) (float64, bool) {
+ // Compute the raw output value.
+ prop := c.kp * (setpoint - input)
+ rawOutput := prop + c.errIntegral
+
+ // Clamp rawOutput into output.
+ output := rawOutput
+ if isInf(output) || isNaN(output) {
+ // The input had a large enough magnitude that either it was already
+ // overflowed, or some operation with it overflowed.
+ // Set a flag and reset. That's the safest thing to do.
+ c.reset()
+ c.inputOverflow = true
+ return c.min, false
+ }
+ if output < c.min {
+ output = c.min
+ } else if output > c.max {
+ output = c.max
+ }
+
+ // Update the controller's state.
+ if c.ti != 0 && c.tt != 0 {
+ c.errIntegral += (c.kp*period/c.ti)*(setpoint-input) + (period/c.tt)*(output-rawOutput)
+ if isInf(c.errIntegral) || isNaN(c.errIntegral) {
+ // So much error has accumulated that we managed to overflow.
+ // The assumptions around the controller have likely broken down.
+ // Set a flag and reset. That's the safest thing to do.
+ c.reset()
+ c.errOverflow = true
+ return c.min, false
+ }
+ }
+ return output, true
+}
+
+// reset resets the controller state, except for controller error flags.
+func (c *piController) reset() {
+ c.errIntegral = 0
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/mgcscavenge_test.go b/platform/dbops/binaries/go/go/src/runtime/mgcscavenge_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d7624d6d725581e8379622ea95e748f3e2d079cd
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/mgcscavenge_test.go
@@ -0,0 +1,884 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "fmt"
+ "internal/goos"
+ "math"
+ "math/rand"
+ . "runtime"
+ "runtime/internal/atomic"
+ "testing"
+ "time"
+)
+
+// makePallocData produces an initialized PallocData by setting
+// the ranges described in alloc and scavenged.
+func makePallocData(alloc, scavenged []BitRange) *PallocData {
+ b := new(PallocData)
+ for _, v := range alloc {
+ if v.N == 0 {
+ // Skip N==0. It's harmless and allocRange doesn't
+ // handle this case.
+ continue
+ }
+ b.AllocRange(v.I, v.N)
+ }
+ for _, v := range scavenged {
+ if v.N == 0 {
+ // See the previous loop.
+ continue
+ }
+ b.ScavengedSetRange(v.I, v.N)
+ }
+ return b
+}
+
+func TestFillAligned(t *testing.T) {
+ fillAlignedSlow := func(x uint64, m uint) uint64 {
+ if m == 1 {
+ return x
+ }
+ out := uint64(0)
+ for i := uint(0); i < 64; i += m {
+ for j := uint(0); j < m; j++ {
+ if x&(uint64(1)<<(i+j)) != 0 {
+ out |= ((uint64(1) << m) - 1) << i
+ break
+ }
+ }
+ }
+ return out
+ }
+ check := func(x uint64, m uint) {
+ want := fillAlignedSlow(x, m)
+ if got := FillAligned(x, m); got != want {
+ t.Logf("got: %064b", got)
+ t.Logf("want: %064b", want)
+ t.Errorf("bad fillAligned(%016x, %d)", x, m)
+ }
+ }
+ for m := uint(1); m <= 64; m *= 2 {
+ tests := []uint64{
+ 0x0000000000000000,
+ 0x00000000ffffffff,
+ 0xffffffff00000000,
+ 0x8000000000000001,
+ 0xf00000000000000f,
+ 0xf00000010050000f,
+ 0xffffffffffffffff,
+ 0x0000000000000001,
+ 0x0000000000000002,
+ 0x0000000000000008,
+ uint64(1) << (m - 1),
+ uint64(1) << m,
+ // Try a few fixed arbitrary examples.
+ 0xb02b9effcf137016,
+ 0x3975a076a9fbff18,
+ 0x0f8c88ec3b81506e,
+ 0x60f14d80ef2fa0e6,
+ }
+ for _, test := range tests {
+ check(test, m)
+ }
+ for i := 0; i < 1000; i++ {
+ // Try pseudo-random numbers.
+ check(rand.Uint64(), m)
+
+ if m > 1 {
+ // For m != 1, let's construct a slightly more interesting
+ // random test. Generate a bitmap which is either 0 or
+ // randomly set bits for each m-aligned group of m bits.
+ val := uint64(0)
+ for n := uint(0); n < 64; n += m {
+ // For each group of m bits, flip a coin:
+ // * Leave them as zero.
+ // * Set them randomly.
+ if rand.Uint64()%2 == 0 {
+ val |= (rand.Uint64() & ((1 << m) - 1)) << n
+ }
+ }
+ check(val, m)
+ }
+ }
+ }
+}
+
+func TestPallocDataFindScavengeCandidate(t *testing.T) {
+ type test struct {
+ alloc, scavenged []BitRange
+ min, max uintptr
+ want BitRange
+ }
+ tests := map[string]test{
+ "MixedMin1": {
+ alloc: []BitRange{{0, 40}, {42, PallocChunkPages - 42}},
+ scavenged: []BitRange{{0, 41}, {42, PallocChunkPages - 42}},
+ min: 1,
+ max: PallocChunkPages,
+ want: BitRange{41, 1},
+ },
+ "MultiMin1": {
+ alloc: []BitRange{{0, 63}, {65, 20}, {87, PallocChunkPages - 87}},
+ scavenged: []BitRange{{86, 1}},
+ min: 1,
+ max: PallocChunkPages,
+ want: BitRange{85, 1},
+ },
+ }
+ // Try out different page minimums.
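+ // For example, on a configuration with 8 KiB runtime pages and 16 KiB
+ // physical pages, the scavenger's effective minimum is 2 pages, so the
+ // m = 2 cases below are the ones such a configuration would exercise.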
+ for m := uintptr(1); m <= 64; m *= 2 { + suffix := fmt.Sprintf("Min%d", m) + tests["AllFree"+suffix] = test{ + min: m, + max: PallocChunkPages, + want: BitRange{0, PallocChunkPages}, + } + tests["AllScavenged"+suffix] = test{ + scavenged: []BitRange{{0, PallocChunkPages}}, + min: m, + max: PallocChunkPages, + want: BitRange{0, 0}, + } + tests["NoneFree"+suffix] = test{ + alloc: []BitRange{{0, PallocChunkPages}}, + scavenged: []BitRange{{PallocChunkPages / 2, PallocChunkPages / 2}}, + min: m, + max: PallocChunkPages, + want: BitRange{0, 0}, + } + tests["StartFree"+suffix] = test{ + alloc: []BitRange{{uint(m), PallocChunkPages - uint(m)}}, + min: m, + max: PallocChunkPages, + want: BitRange{0, uint(m)}, + } + tests["EndFree"+suffix] = test{ + alloc: []BitRange{{0, PallocChunkPages - uint(m)}}, + min: m, + max: PallocChunkPages, + want: BitRange{PallocChunkPages - uint(m), uint(m)}, + } + tests["Straddle64"+suffix] = test{ + alloc: []BitRange{{0, 64 - uint(m)}, {64 + uint(m), PallocChunkPages - (64 + uint(m))}}, + min: m, + max: 2 * m, + want: BitRange{64 - uint(m), 2 * uint(m)}, + } + tests["BottomEdge64WithFull"+suffix] = test{ + alloc: []BitRange{{64, 64}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}}, + scavenged: []BitRange{{1, 10}}, + min: m, + max: 3 * m, + want: BitRange{128, 3 * uint(m)}, + } + tests["BottomEdge64WithPocket"+suffix] = test{ + alloc: []BitRange{{64, 62}, {127, 1}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}}, + scavenged: []BitRange{{1, 10}}, + min: m, + max: 3 * m, + want: BitRange{128, 3 * uint(m)}, + } + tests["Max0"+suffix] = test{ + scavenged: []BitRange{{0, PallocChunkPages - uint(m)}}, + min: m, + max: 0, + want: BitRange{PallocChunkPages - uint(m), uint(m)}, + } + if m <= 8 { + tests["OneFree"] = test{ + alloc: []BitRange{{0, 40}, {40 + uint(m), PallocChunkPages - (40 + uint(m))}}, + min: m, + max: PallocChunkPages, + want: BitRange{40, uint(m)}, + } + tests["OneScavenged"] = test{ + alloc: []BitRange{{0, 40}, {40 + uint(m), PallocChunkPages - (40 + uint(m))}}, + scavenged: []BitRange{{40, 1}}, + min: m, + max: PallocChunkPages, + want: BitRange{0, 0}, + } + } + if m > 1 { + tests["MaxUnaligned"+suffix] = test{ + scavenged: []BitRange{{0, PallocChunkPages - uint(m*2-1)}}, + min: m, + max: m - 2, + want: BitRange{PallocChunkPages - uint(m), uint(m)}, + } + tests["SkipSmall"+suffix] = test{ + alloc: []BitRange{{0, 64 - uint(m)}, {64, 5}, {70, 11}, {82, PallocChunkPages - 82}}, + min: m, + max: m, + want: BitRange{64 - uint(m), uint(m)}, + } + tests["SkipMisaligned"+suffix] = test{ + alloc: []BitRange{{0, 64 - uint(m)}, {64, 63}, {127 + uint(m), PallocChunkPages - (127 + uint(m))}}, + min: m, + max: m, + want: BitRange{64 - uint(m), uint(m)}, + } + tests["MaxLessThan"+suffix] = test{ + scavenged: []BitRange{{0, PallocChunkPages - uint(m)}}, + min: m, + max: 1, + want: BitRange{PallocChunkPages - uint(m), uint(m)}, + } + } + } + if PhysHugePageSize > uintptr(PageSize) { + // Check hugepage preserving behavior. + bits := uint(PhysHugePageSize / uintptr(PageSize)) + if bits < PallocChunkPages { + tests["PreserveHugePageBottom"] = test{ + alloc: []BitRange{{bits + 2, PallocChunkPages - (bits + 2)}}, + min: 1, + max: 3, // Make it so that max would have us try to break the huge page. + want: BitRange{0, bits + 2}, + } + if 3*bits < PallocChunkPages { + // We need at least 3 huge pages in a chunk for this test to make sense. 
+ tests["PreserveHugePageMiddle"] = test{ + alloc: []BitRange{{0, bits - 10}, {2*bits + 10, PallocChunkPages - (2*bits + 10)}}, + min: 1, + max: 12, // Make it so that max would have us try to break the huge page. + want: BitRange{bits, bits + 10}, + } + } + tests["PreserveHugePageTop"] = test{ + alloc: []BitRange{{0, PallocChunkPages - bits}}, + min: 1, + max: 1, // Even one page would break a huge page in this case. + want: BitRange{PallocChunkPages - bits, bits}, + } + } else if bits == PallocChunkPages { + tests["PreserveHugePageAll"] = test{ + min: 1, + max: 1, // Even one page would break a huge page in this case. + want: BitRange{0, PallocChunkPages}, + } + } else { + // The huge page size is greater than pallocChunkPages, so it should + // be effectively disabled. There's no way we can possible scavenge + // a huge page out of this bitmap chunk. + tests["PreserveHugePageNone"] = test{ + min: 1, + max: 1, + want: BitRange{PallocChunkPages - 1, 1}, + } + } + } + for name, v := range tests { + v := v + t.Run(name, func(t *testing.T) { + b := makePallocData(v.alloc, v.scavenged) + start, size := b.FindScavengeCandidate(PallocChunkPages-1, v.min, v.max) + got := BitRange{start, size} + if !(got.N == 0 && v.want.N == 0) && got != v.want { + t.Fatalf("candidate mismatch: got %v, want %v", got, v.want) + } + }) + } +} + +// Tests end-to-end scavenging on a pageAlloc. +func TestPageAllocScavenge(t *testing.T) { + if GOOS == "openbsd" && testing.Short() { + t.Skip("skipping because virtual memory is limited; see #36210") + } + type test struct { + request, expect uintptr + } + minPages := PhysPageSize / PageSize + if minPages < 1 { + minPages = 1 + } + type setup struct { + beforeAlloc map[ChunkIdx][]BitRange + beforeScav map[ChunkIdx][]BitRange + expect []test + afterScav map[ChunkIdx][]BitRange + } + tests := map[string]setup{ + "AllFreeUnscavExhaust": { + beforeAlloc: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + BaseChunkIdx + 1: {}, + BaseChunkIdx + 2: {}, + }, + beforeScav: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + BaseChunkIdx + 1: {}, + BaseChunkIdx + 2: {}, + }, + expect: []test{ + {^uintptr(0), 3 * PallocChunkPages * PageSize}, + }, + afterScav: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + BaseChunkIdx + 1: {{0, PallocChunkPages}}, + BaseChunkIdx + 2: {{0, PallocChunkPages}}, + }, + }, + "NoneFreeUnscavExhaust": { + beforeAlloc: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + BaseChunkIdx + 1: {}, + BaseChunkIdx + 2: {{0, PallocChunkPages}}, + }, + beforeScav: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + BaseChunkIdx + 1: {{0, PallocChunkPages}}, + BaseChunkIdx + 2: {}, + }, + expect: []test{ + {^uintptr(0), 0}, + }, + afterScav: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + BaseChunkIdx + 1: {{0, PallocChunkPages}}, + BaseChunkIdx + 2: {}, + }, + }, + "ScavHighestPageFirst": { + beforeAlloc: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + }, + beforeScav: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}}, + }, + expect: []test{ + {1, minPages * PageSize}, + }, + afterScav: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(minPages)}}, + }, + }, + "ScavMultiple": { + beforeAlloc: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + }, + beforeScav: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}}, + }, + expect: []test{ + {minPages * PageSize, minPages * PageSize}, + {minPages * PageSize, minPages * PageSize}, + 
}, + afterScav: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + }, + }, + "ScavMultiple2": { + beforeAlloc: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + BaseChunkIdx + 1: {}, + }, + beforeScav: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}}, + BaseChunkIdx + 1: {{0, PallocChunkPages - uint(2*minPages)}}, + }, + expect: []test{ + {2 * minPages * PageSize, 2 * minPages * PageSize}, + {minPages * PageSize, minPages * PageSize}, + {minPages * PageSize, minPages * PageSize}, + }, + afterScav: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + BaseChunkIdx + 1: {{0, PallocChunkPages}}, + }, + }, + "ScavDiscontiguous": { + beforeAlloc: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + BaseChunkIdx + 0xe: {}, + }, + beforeScav: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}}, + BaseChunkIdx + 0xe: {{uint(2 * minPages), PallocChunkPages - uint(2*minPages)}}, + }, + expect: []test{ + {2 * minPages * PageSize, 2 * minPages * PageSize}, + {^uintptr(0), 2 * minPages * PageSize}, + {^uintptr(0), 0}, + }, + afterScav: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + BaseChunkIdx + 0xe: {{0, PallocChunkPages}}, + }, + }, + } + // Disable these tests on iOS since we have a small address space. + // See #46860. + if PageAlloc64Bit != 0 && goos.IsIos == 0 { + tests["ScavAllVeryDiscontiguous"] = setup{ + beforeAlloc: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + BaseChunkIdx + 0x1000: {}, + }, + beforeScav: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + BaseChunkIdx + 0x1000: {}, + }, + expect: []test{ + {^uintptr(0), 2 * PallocChunkPages * PageSize}, + {^uintptr(0), 0}, + }, + afterScav: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + BaseChunkIdx + 0x1000: {{0, PallocChunkPages}}, + }, + } + } + for name, v := range tests { + v := v + t.Run(name, func(t *testing.T) { + b := NewPageAlloc(v.beforeAlloc, v.beforeScav) + defer FreePageAlloc(b) + + for iter, h := range v.expect { + if got := b.Scavenge(h.request); got != h.expect { + t.Fatalf("bad scavenge #%d: want %d, got %d", iter+1, h.expect, got) + } + } + want := NewPageAlloc(v.beforeAlloc, v.afterScav) + defer FreePageAlloc(want) + + checkPageAlloc(t, want, b) + }) + } +} + +func TestScavenger(t *testing.T) { + // workedTime is a standard conversion of bytes of scavenge + // work to time elapsed. + workedTime := func(bytes uintptr) int64 { + return int64((bytes+4095)/4096) * int64(10*time.Microsecond) + } + + // Set up a bunch of state that we're going to track and verify + // throughout the test. + totalWork := uint64(64<<20 - 3*PhysPageSize) + var totalSlept, totalWorked atomic.Int64 + var availableWork atomic.Uint64 + var stopAt atomic.Uint64 // How much available work to stop at. + + // Set up the scavenger. + var s Scavenger + s.Sleep = func(ns int64) int64 { + totalSlept.Add(ns) + return ns + } + s.Scavenge = func(bytes uintptr) (uintptr, int64) { + avail := availableWork.Load() + if uint64(bytes) > avail { + bytes = uintptr(avail) + } + t := workedTime(bytes) + if bytes != 0 { + availableWork.Add(-int64(bytes)) + totalWorked.Add(t) + } + return bytes, t + } + s.ShouldStop = func() bool { + if availableWork.Load() <= stopAt.Load() { + return true + } + return false + } + s.GoMaxProcs = func() int32 { + return 1 + } + + // Define a helper for verifying that various properties hold. 
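+ // Specifically, the helper below checks that the total bytes released
+ // match the expected amount exactly, and that the scavenger's measured
+ // CPU fraction stays within half a percentage point of its target.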
+ verifyScavengerState := func(t *testing.T, expWork uint64) { + t.Helper() + + // Check to make sure it did the amount of work we expected. + if workDone := uint64(s.Released()); workDone != expWork { + t.Errorf("want %d bytes of work done, got %d", expWork, workDone) + } + // Check to make sure the scavenger is meeting its CPU target. + idealFraction := float64(ScavengePercent) / 100.0 + cpuFraction := float64(totalWorked.Load()) / float64(totalWorked.Load()+totalSlept.Load()) + if cpuFraction < idealFraction-0.005 || cpuFraction > idealFraction+0.005 { + t.Errorf("want %f CPU fraction, got %f", idealFraction, cpuFraction) + } + } + + // Start the scavenger. + s.Start() + + // Set up some work and let the scavenger run to completion. + availableWork.Store(totalWork) + s.Wake() + if !s.BlockUntilParked(2e9 /* 2 seconds */) { + t.Fatal("timed out waiting for scavenger to run to completion") + } + // Run a check. + verifyScavengerState(t, totalWork) + + // Now let's do it again and see what happens when we have no work to do. + // It should've gone right back to sleep. + s.Wake() + if !s.BlockUntilParked(2e9 /* 2 seconds */) { + t.Fatal("timed out waiting for scavenger to run to completion") + } + // Run another check. + verifyScavengerState(t, totalWork) + + // One more time, this time doing the same amount of work as the first time. + // Let's see if we can get the scavenger to continue. + availableWork.Store(totalWork) + s.Wake() + if !s.BlockUntilParked(2e9 /* 2 seconds */) { + t.Fatal("timed out waiting for scavenger to run to completion") + } + // Run another check. + verifyScavengerState(t, 2*totalWork) + + // This time, let's stop after a certain amount of work. + // + // Pick a stopping point such that when subtracted from totalWork + // we get a multiple of a relatively large power of 2. verifyScavengerState + // always makes an exact check, but the scavenger might go a little over, + // which is OK. If this breaks often or gets annoying to maintain, modify + // verifyScavengerState. + availableWork.Store(totalWork) + stoppingPoint := uint64(1<<20 - 3*PhysPageSize) + stopAt.Store(stoppingPoint) + s.Wake() + if !s.BlockUntilParked(2e9 /* 2 seconds */) { + t.Fatal("timed out waiting for scavenger to run to completion") + } + // Run another check. + verifyScavengerState(t, 2*totalWork+(totalWork-stoppingPoint)) + + // Clean up. + s.Stop() +} + +func TestScavengeIndex(t *testing.T) { + // This test suite tests the scavengeIndex data structure. + + // markFunc is a function that makes the address range [base, limit) + // available for scavenging in a test index. + type markFunc func(base, limit uintptr) + + // findFunc is a function that searches for the next available page + // to scavenge in the index. It asserts that the page is found in + // chunk "ci" at page "offset." + type findFunc func(ci ChunkIdx, offset uint) + + // The structure of the tests below is as follows: + // + // setup creates a fake scavengeIndex that can be mutated and queried by + // the functions it returns. Those functions capture the testing.T that + // setup is called with, so they're bound to the subtest they're created in. + // + // Tests are then organized into test cases which mark some pages as + // scavenge-able then try to find them. Tests expect that the initial + // state of the scavengeIndex has all of the chunks as dense in the last + // generation and empty to the scavenger. 
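+ //
+ // Concretely, each subtest walks the same lifecycle the runtime does:
+ // AllocRange to occupy pages, NextGen to retire a generation, FreeRange
+ // to make pages scavengable again, then Find and SetEmpty as the
+ // scavenger would.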
+ // + // There are a few additional tests that interleave mark and find operations, + // so they're defined separately, but use the same infrastructure. + setup := func(t *testing.T, force bool) (mark markFunc, find findFunc, nextGen func()) { + t.Helper() + + // Pick some reasonable bounds. We don't need a huge range just to test. + si := NewScavengeIndex(BaseChunkIdx, BaseChunkIdx+64) + + // Initialize all the chunks as dense and empty. + // + // Also, reset search addresses so that we can get page offsets. + si.AllocRange(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+64, 0)) + si.NextGen() + si.FreeRange(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+64, 0)) + for ci := BaseChunkIdx; ci < BaseChunkIdx+64; ci++ { + si.SetEmpty(ci) + } + si.ResetSearchAddrs() + + // Create and return test functions. + mark = func(base, limit uintptr) { + t.Helper() + + si.AllocRange(base, limit) + si.FreeRange(base, limit) + } + find = func(want ChunkIdx, wantOffset uint) { + t.Helper() + + got, gotOffset := si.Find(force) + if want != got { + t.Errorf("find: wanted chunk index %d, got %d", want, got) + } + if wantOffset != gotOffset { + t.Errorf("find: wanted page offset %d, got %d", wantOffset, gotOffset) + } + if t.Failed() { + t.FailNow() + } + si.SetEmpty(got) + } + nextGen = func() { + t.Helper() + + si.NextGen() + } + return + } + + // Each of these test cases calls mark and then find once. + type testCase struct { + name string + mark func(markFunc) + find func(findFunc) + } + for _, test := range []testCase{ + { + name: "Uninitialized", + mark: func(_ markFunc) {}, + find: func(_ findFunc) {}, + }, + { + name: "OnePage", + mark: func(mark markFunc) { + mark(PageBase(BaseChunkIdx, 3), PageBase(BaseChunkIdx, 4)) + }, + find: func(find findFunc) { + find(BaseChunkIdx, 3) + }, + }, + { + name: "FirstPage", + mark: func(mark markFunc) { + mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx, 1)) + }, + find: func(find findFunc) { + find(BaseChunkIdx, 0) + }, + }, + { + name: "SeveralPages", + mark: func(mark markFunc) { + mark(PageBase(BaseChunkIdx, 9), PageBase(BaseChunkIdx, 14)) + }, + find: func(find findFunc) { + find(BaseChunkIdx, 13) + }, + }, + { + name: "WholeChunk", + mark: func(mark markFunc) { + mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0)) + }, + find: func(find findFunc) { + find(BaseChunkIdx, PallocChunkPages-1) + }, + }, + { + name: "LastPage", + mark: func(mark markFunc) { + mark(PageBase(BaseChunkIdx, PallocChunkPages-1), PageBase(BaseChunkIdx+1, 0)) + }, + find: func(find findFunc) { + find(BaseChunkIdx, PallocChunkPages-1) + }, + }, + { + name: "TwoChunks", + mark: func(mark markFunc) { + mark(PageBase(BaseChunkIdx, 128), PageBase(BaseChunkIdx+1, 128)) + }, + find: func(find findFunc) { + find(BaseChunkIdx+1, 127) + find(BaseChunkIdx, PallocChunkPages-1) + }, + }, + { + name: "TwoChunksOffset", + mark: func(mark markFunc) { + mark(PageBase(BaseChunkIdx+7, 128), PageBase(BaseChunkIdx+8, 129)) + }, + find: func(find findFunc) { + find(BaseChunkIdx+8, 128) + find(BaseChunkIdx+7, PallocChunkPages-1) + }, + }, + { + name: "SevenChunksOffset", + mark: func(mark markFunc) { + mark(PageBase(BaseChunkIdx+6, 11), PageBase(BaseChunkIdx+13, 15)) + }, + find: func(find findFunc) { + find(BaseChunkIdx+13, 14) + for i := BaseChunkIdx + 12; i >= BaseChunkIdx+6; i-- { + find(i, PallocChunkPages-1) + } + }, + }, + { + name: "ThirtyTwoChunks", + mark: func(mark markFunc) { + mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+32, 0)) + }, + find: func(find findFunc) { + for i 
:= BaseChunkIdx + 31; i >= BaseChunkIdx; i-- { + find(i, PallocChunkPages-1) + } + }, + }, + { + name: "ThirtyTwoChunksOffset", + mark: func(mark markFunc) { + mark(PageBase(BaseChunkIdx+3, 0), PageBase(BaseChunkIdx+35, 0)) + }, + find: func(find findFunc) { + for i := BaseChunkIdx + 34; i >= BaseChunkIdx+3; i-- { + find(i, PallocChunkPages-1) + } + }, + }, + { + name: "Mark", + mark: func(mark markFunc) { + for i := BaseChunkIdx; i < BaseChunkIdx+32; i++ { + mark(PageBase(i, 0), PageBase(i+1, 0)) + } + }, + find: func(find findFunc) { + for i := BaseChunkIdx + 31; i >= BaseChunkIdx; i-- { + find(i, PallocChunkPages-1) + } + }, + }, + { + name: "MarkIdempotentOneChunk", + mark: func(mark markFunc) { + mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0)) + mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0)) + }, + find: func(find findFunc) { + find(BaseChunkIdx, PallocChunkPages-1) + }, + }, + { + name: "MarkIdempotentThirtyTwoChunks", + mark: func(mark markFunc) { + mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+32, 0)) + mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+32, 0)) + }, + find: func(find findFunc) { + for i := BaseChunkIdx + 31; i >= BaseChunkIdx; i-- { + find(i, PallocChunkPages-1) + } + }, + }, + { + name: "MarkIdempotentThirtyTwoChunksOffset", + mark: func(mark markFunc) { + mark(PageBase(BaseChunkIdx+4, 0), PageBase(BaseChunkIdx+31, 0)) + mark(PageBase(BaseChunkIdx+5, 0), PageBase(BaseChunkIdx+36, 0)) + }, + find: func(find findFunc) { + for i := BaseChunkIdx + 35; i >= BaseChunkIdx+4; i-- { + find(i, PallocChunkPages-1) + } + }, + }, + } { + test := test + t.Run("Bg/"+test.name, func(t *testing.T) { + mark, find, nextGen := setup(t, false) + test.mark(mark) + find(0, 0) // Make sure we find nothing at this point. + nextGen() // Move to the next generation. + test.find(find) // Now we should be able to find things. + find(0, 0) // The test should always fully exhaust the index. + }) + t.Run("Force/"+test.name, func(t *testing.T) { + mark, find, _ := setup(t, true) + test.mark(mark) + test.find(find) // Finding should always work when forced. + find(0, 0) // The test should always fully exhaust the index. + }) + } + t.Run("Bg/MarkInterleaved", func(t *testing.T) { + mark, find, nextGen := setup(t, false) + for i := BaseChunkIdx; i < BaseChunkIdx+32; i++ { + mark(PageBase(i, 0), PageBase(i+1, 0)) + nextGen() + find(i, PallocChunkPages-1) + } + find(0, 0) + }) + t.Run("Force/MarkInterleaved", func(t *testing.T) { + mark, find, _ := setup(t, true) + for i := BaseChunkIdx; i < BaseChunkIdx+32; i++ { + mark(PageBase(i, 0), PageBase(i+1, 0)) + find(i, PallocChunkPages-1) + } + find(0, 0) + }) +} + +func TestScavChunkDataPack(t *testing.T) { + if !CheckPackScavChunkData(1918237402, 512, 512, 0b11) { + t.Error("failed pack/unpack check for scavChunkData 1") + } + if !CheckPackScavChunkData(^uint32(0), 12, 0, 0b00) { + t.Error("failed pack/unpack check for scavChunkData 2") + } +} + +func FuzzPIController(f *testing.F) { + isNormal := func(x float64) bool { + return !math.IsInf(x, 0) && !math.IsNaN(x) + } + isPositive := func(x float64) bool { + return isNormal(x) && x > 0 + } + // Seed with constants from controllers in the runtime. + // It's not critical that we keep these in sync, they're just + // reasonable seed inputs. + f.Add(0.3375, 3.2e6, 1e9, 0.001, 1000.0, 0.01) + f.Add(0.9, 4.0, 1000.0, -1000.0, 1000.0, 0.84) + f.Fuzz(func(t *testing.T, kp, ti, tt, min, max, setPoint float64) { + // Ignore uninteresting invalid parameters. 
These parameters
+		// are constant, so in practice surprising values will be documented
+		// or will be otherwise immediately visible.
+		//
+		// We just want to make sure that given a non-Inf, non-NaN input,
+		// we always get a non-Inf, non-NaN output.
+		if !isPositive(kp) || !isPositive(ti) || !isPositive(tt) {
+			return
+		}
+		if !isNormal(min) || !isNormal(max) || min > max {
+			return
+		}
+		// Use a random source, but make it deterministic.
+		rs := rand.New(rand.NewSource(800))
+		randFloat64 := func() float64 {
+			return math.Float64frombits(rs.Uint64())
+		}
+		p := NewPIController(kp, ti, tt, min, max)
+		state := float64(0)
+		for i := 0; i < 100; i++ {
+			input := randFloat64()
+			// Ignore the "ok" parameter. We're just trying to break it.
+			// state is intentionally completely uncorrelated with the input.
+			var ok bool
+			state, ok = p.Next(input, setPoint, 1.0)
+			if !isNormal(state) {
+				t.Fatalf("got NaN or Inf result from controller: %f %v", state, ok)
+			}
+		}
+	})
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/mgcstack.go b/platform/dbops/binaries/go/go/src/runtime/mgcstack.go
new file mode 100644
index 0000000000000000000000000000000000000000..f4a83f5f59c27ab350895faec0980687444e7880
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/mgcstack.go
@@ -0,0 +1,348 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector: stack objects and stack tracing
+// See the design doc at https://docs.google.com/document/d/1un-Jn47yByHL7I0aVIP_uVCMxjdM5mpelJhiKlIqxkE/edit?usp=sharing
+// Also see issue 22350.
+
+// Stack tracing solves the problem of determining which parts of the
+// stack are live and should be scanned. It runs as part of scanning
+// a single goroutine stack.
+//
+// Normally determining which parts of the stack are live is easy to
+// do statically, as user code has explicit references (reads and
+// writes) to stack variables. The compiler can do a simple dataflow
+// analysis to determine liveness of stack variables at every point in
+// the code. See cmd/compile/internal/gc/plive.go for that analysis.
+//
+// However, when we take the address of a stack variable, determining
+// whether that variable is still live is less clear. We can still
+// look for static accesses, but accesses through a pointer to the
+// variable are difficult in general to track statically. That pointer
+// can be passed among functions on the stack, conditionally retained,
+// etc.
+//
+// Instead, we will track pointers to stack variables dynamically.
+// All pointers to stack-allocated variables will themselves be on the
+// stack somewhere (or in associated locations, like defer records), so
+// we can find them all efficiently.
+//
+// Stack tracing is organized as a mini garbage collection tracing
+// pass. The objects in this garbage collection are all the variables
+// on the stack whose address is taken, and which themselves contain a
+// pointer. We call these variables "stack objects".
+//
+// We begin by determining all the stack objects on the stack and all
+// the statically live pointers that may point into the stack. We then
+// process each pointer to see if it points to a stack object. If it
+// does, we scan that stack object. It may contain pointers into the
+// heap, in which case those pointers are passed to the main garbage
+// collection.
It may also contain pointers into the stack, in which +// case we add them to our set of stack pointers. +// +// Once we're done processing all the pointers (including the ones we +// added during processing), we've found all the stack objects that +// are live. Any dead stack objects are not scanned and their contents +// will not keep heap objects live. Unlike the main garbage +// collection, we can't sweep the dead stack objects; they live on in +// a moribund state until the stack frame that contains them is +// popped. +// +// A stack can look like this: +// +// +----------+ +// | foo() | +// | +------+ | +// | | A | | <---\ +// | +------+ | | +// | | | +// | +------+ | | +// | | B | | | +// | +------+ | | +// | | | +// +----------+ | +// | bar() | | +// | +------+ | | +// | | C | | <-\ | +// | +----|-+ | | | +// | | | | | +// | +----v-+ | | | +// | | D ---------/ +// | +------+ | | +// | | | +// +----------+ | +// | baz() | | +// | +------+ | | +// | | E -------/ +// | +------+ | +// | ^ | +// | F: --/ | +// | | +// +----------+ +// +// foo() calls bar() calls baz(). Each has a frame on the stack. +// foo() has stack objects A and B. +// bar() has stack objects C and D, with C pointing to D and D pointing to A. +// baz() has a stack object E pointing to C, and a local variable F pointing to E. +// +// Starting from the pointer in local variable F, we will eventually +// scan all of E, C, D, and A (in that order). B is never scanned +// because there is no live pointer to it. If B is also statically +// dead (meaning that foo() never accesses B again after it calls +// bar()), then B's pointers into the heap are not considered live. + +package runtime + +import ( + "internal/goarch" + "runtime/internal/sys" + "unsafe" +) + +const stackTraceDebug = false + +// Buffer for pointers found during stack tracing. +// Must be smaller than or equal to workbuf. +type stackWorkBuf struct { + _ sys.NotInHeap + stackWorkBufHdr + obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr +} + +// Header declaration must come after the buf declaration above, because of issue #14620. +type stackWorkBufHdr struct { + _ sys.NotInHeap + workbufhdr + next *stackWorkBuf // linked list of workbufs + // Note: we could theoretically repurpose lfnode.next as this next pointer. + // It would save 1 word, but that probably isn't worth busting open + // the lfnode API. +} + +// Buffer for stack objects found on a goroutine stack. +// Must be smaller than or equal to workbuf. +type stackObjectBuf struct { + _ sys.NotInHeap + stackObjectBufHdr + obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject +} + +type stackObjectBufHdr struct { + _ sys.NotInHeap + workbufhdr + next *stackObjectBuf +} + +func init() { + if unsafe.Sizeof(stackWorkBuf{}) > unsafe.Sizeof(workbuf{}) { + panic("stackWorkBuf too big") + } + if unsafe.Sizeof(stackObjectBuf{}) > unsafe.Sizeof(workbuf{}) { + panic("stackObjectBuf too big") + } +} + +// A stackObject represents a variable on the stack that has had +// its address taken. +type stackObject struct { + _ sys.NotInHeap + off uint32 // offset above stack.lo + size uint32 // size of object + r *stackObjectRecord // info of the object (for ptr/nonptr bits). nil if object has been scanned. + left *stackObject // objects with lower addresses + right *stackObject // objects with higher addresses +} + +// obj.r = r, but with no write barrier. 
+// +//go:nowritebarrier +func (obj *stackObject) setRecord(r *stackObjectRecord) { + // Types of stack objects are always in read-only memory, not the heap. + // So not using a write barrier is ok. + *(*uintptr)(unsafe.Pointer(&obj.r)) = uintptr(unsafe.Pointer(r)) +} + +// A stackScanState keeps track of the state used during the GC walk +// of a goroutine. +type stackScanState struct { + // stack limits + stack stack + + // conservative indicates that the next frame must be scanned conservatively. + // This applies only to the innermost frame at an async safe-point. + conservative bool + + // buf contains the set of possible pointers to stack objects. + // Organized as a LIFO linked list of buffers. + // All buffers except possibly the head buffer are full. + buf *stackWorkBuf + freeBuf *stackWorkBuf // keep around one free buffer for allocation hysteresis + + // cbuf contains conservative pointers to stack objects. If + // all pointers to a stack object are obtained via + // conservative scanning, then the stack object may be dead + // and may contain dead pointers, so it must be scanned + // defensively. + cbuf *stackWorkBuf + + // list of stack objects + // Objects are in increasing address order. + head *stackObjectBuf + tail *stackObjectBuf + nobjs int + + // root of binary tree for fast object lookup by address + // Initialized by buildIndex. + root *stackObject +} + +// Add p as a potential pointer to a stack object. +// p must be a stack address. +func (s *stackScanState) putPtr(p uintptr, conservative bool) { + if p < s.stack.lo || p >= s.stack.hi { + throw("address not a stack address") + } + head := &s.buf + if conservative { + head = &s.cbuf + } + buf := *head + if buf == nil { + // Initial setup. + buf = (*stackWorkBuf)(unsafe.Pointer(getempty())) + buf.nobj = 0 + buf.next = nil + *head = buf + } else if buf.nobj == len(buf.obj) { + if s.freeBuf != nil { + buf = s.freeBuf + s.freeBuf = nil + } else { + buf = (*stackWorkBuf)(unsafe.Pointer(getempty())) + } + buf.nobj = 0 + buf.next = *head + *head = buf + } + buf.obj[buf.nobj] = p + buf.nobj++ +} + +// Remove and return a potential pointer to a stack object. +// Returns 0 if there are no more pointers available. +// +// This prefers non-conservative pointers so we scan stack objects +// precisely if there are any non-conservative pointers to them. +func (s *stackScanState) getPtr() (p uintptr, conservative bool) { + for _, head := range []**stackWorkBuf{&s.buf, &s.cbuf} { + buf := *head + if buf == nil { + // Never had any data. + continue + } + if buf.nobj == 0 { + if s.freeBuf != nil { + // Free old freeBuf. + putempty((*workbuf)(unsafe.Pointer(s.freeBuf))) + } + // Move buf to the freeBuf. + s.freeBuf = buf + buf = buf.next + *head = buf + if buf == nil { + // No more data in this list. + continue + } + } + buf.nobj-- + return buf.obj[buf.nobj], head == &s.cbuf + } + // No more data in either list. + if s.freeBuf != nil { + putempty((*workbuf)(unsafe.Pointer(s.freeBuf))) + s.freeBuf = nil + } + return 0, false +} + +// addObject adds a stack object at addr of type typ to the set of stack objects. 
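+//
+// Objects must be added in increasing address order and must not overlap;
+// the check below throws otherwise. That ordering is what allows buildIndex
+// to turn the buffer list directly into a balanced binary search tree.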
+func (s *stackScanState) addObject(addr uintptr, r *stackObjectRecord) { + x := s.tail + if x == nil { + // initial setup + x = (*stackObjectBuf)(unsafe.Pointer(getempty())) + x.next = nil + s.head = x + s.tail = x + } + if x.nobj > 0 && uint32(addr-s.stack.lo) < x.obj[x.nobj-1].off+x.obj[x.nobj-1].size { + throw("objects added out of order or overlapping") + } + if x.nobj == len(x.obj) { + // full buffer - allocate a new buffer, add to end of linked list + y := (*stackObjectBuf)(unsafe.Pointer(getempty())) + y.next = nil + x.next = y + s.tail = y + x = y + } + obj := &x.obj[x.nobj] + x.nobj++ + obj.off = uint32(addr - s.stack.lo) + obj.size = uint32(r.size) + obj.setRecord(r) + // obj.left and obj.right will be initialized by buildIndex before use. + s.nobjs++ +} + +// buildIndex initializes s.root to a binary search tree. +// It should be called after all addObject calls but before +// any call of findObject. +func (s *stackScanState) buildIndex() { + s.root, _, _ = binarySearchTree(s.head, 0, s.nobjs) +} + +// Build a binary search tree with the n objects in the list +// x.obj[idx], x.obj[idx+1], ..., x.next.obj[0], ... +// Returns the root of that tree, and the buf+idx of the nth object after x.obj[idx]. +// (The first object that was not included in the binary search tree.) +// If n == 0, returns nil, x. +func binarySearchTree(x *stackObjectBuf, idx int, n int) (root *stackObject, restBuf *stackObjectBuf, restIdx int) { + if n == 0 { + return nil, x, idx + } + var left, right *stackObject + left, x, idx = binarySearchTree(x, idx, n/2) + root = &x.obj[idx] + idx++ + if idx == len(x.obj) { + x = x.next + idx = 0 + } + right, x, idx = binarySearchTree(x, idx, n-n/2-1) + root.left = left + root.right = right + return root, x, idx +} + +// findObject returns the stack object containing address a, if any. +// Must have called buildIndex previously. +func (s *stackScanState) findObject(a uintptr) *stackObject { + off := uint32(a - s.stack.lo) + obj := s.root + for { + if obj == nil { + return nil + } + if off < obj.off { + obj = obj.left + continue + } + if off >= obj.off+obj.size { + obj = obj.right + continue + } + return obj + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mgcsweep.go b/platform/dbops/binaries/go/go/src/runtime/mgcsweep.go new file mode 100644 index 0000000000000000000000000000000000000000..35be7949472f8c45d0cd41f20a1e7ad6c9caa0fe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mgcsweep.go @@ -0,0 +1,1000 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Garbage collector: sweeping + +// The sweeper consists of two different algorithms: +// +// * The object reclaimer finds and frees unmarked slots in spans. It +// can free a whole span if none of the objects are marked, but that +// isn't its goal. This can be driven either synchronously by +// mcentral.cacheSpan for mcentral spans, or asynchronously by +// sweepone, which looks at all the mcentral lists. +// +// * The span reclaimer looks for spans that contain no marked objects +// and frees whole spans. This is a separate algorithm because +// freeing whole spans is the hardest task for the object reclaimer, +// but is critical when allocating new spans. The entry point for +// this is mheap_.reclaim and it's driven by a sequential scan of +// the page marks bitmap in the heap arenas. +// +// Both algorithms ultimately call mspan.sweep, which sweeps a single +// heap span. 
+ +package runtime + +import ( + "internal/abi" + "internal/goexperiment" + "runtime/internal/atomic" + "unsafe" +) + +var sweep sweepdata + +// State of background sweep. +type sweepdata struct { + lock mutex + g *g + parked bool + + // active tracks outstanding sweepers and the sweep + // termination condition. + active activeSweep + + // centralIndex is the current unswept span class. + // It represents an index into the mcentral span + // sets. Accessed and updated via its load and + // update methods. Not protected by a lock. + // + // Reset at mark termination. + // Used by mheap.nextSpanForSweep. + centralIndex sweepClass +} + +// sweepClass is a spanClass and one bit to represent whether we're currently +// sweeping partial or full spans. +type sweepClass uint32 + +const ( + numSweepClasses = numSpanClasses * 2 + sweepClassDone sweepClass = sweepClass(^uint32(0)) +) + +func (s *sweepClass) load() sweepClass { + return sweepClass(atomic.Load((*uint32)(s))) +} + +func (s *sweepClass) update(sNew sweepClass) { + // Only update *s if its current value is less than sNew, + // since *s increases monotonically. + sOld := s.load() + for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) { + sOld = s.load() + } + // TODO(mknyszek): This isn't the only place we have + // an atomic monotonically increasing counter. It would + // be nice to have an "atomic max" which is just implemented + // as the above on most architectures. Some architectures + // like RISC-V however have native support for an atomic max. +} + +func (s *sweepClass) clear() { + atomic.Store((*uint32)(s), 0) +} + +// split returns the underlying span class as well as +// whether we're interested in the full or partial +// unswept lists for that class, indicated as a boolean +// (true means "full"). +func (s sweepClass) split() (spc spanClass, full bool) { + return spanClass(s >> 1), s&1 == 0 +} + +// nextSpanForSweep finds and pops the next span for sweeping from the +// central sweep buffers. It returns ownership of the span to the caller. +// Returns nil if no such span exists. +func (h *mheap) nextSpanForSweep() *mspan { + sg := h.sweepgen + for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ { + spc, full := sc.split() + c := &h.central[spc].mcentral + var s *mspan + if full { + s = c.fullUnswept(sg).pop() + } else { + s = c.partialUnswept(sg).pop() + } + if s != nil { + // Write down that we found something so future sweepers + // can start from here. + sweep.centralIndex.update(sc) + return s + } + } + // Write down that we found nothing. + sweep.centralIndex.update(sweepClassDone) + return nil +} + +const sweepDrainedMask = 1 << 31 + +// activeSweep is a type that captures whether sweeping +// is done, and whether there are any outstanding sweepers. +// +// Every potential sweeper must call begin() before they look +// for work, and end() after they've finished sweeping. +type activeSweep struct { + // state is divided into two parts. + // + // The top bit (masked by sweepDrainedMask) is a boolean + // value indicating whether all the sweep work has been + // drained from the queue. + // + // The rest of the bits are a counter, indicating the + // number of outstanding concurrent sweepers. + state atomic.Uint32 +} + +// begin registers a new sweeper. Returns a sweepLocker +// for acquiring spans for sweeping. Any outstanding sweeper blocks +// sweep termination. 
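+//
+// A typical caller pairs begin with end; an illustrative sketch of the
+// pattern sweepone uses below:
+//
+//	sl := sweep.active.begin()
+//	if sl.valid {
+//		// ... pop a span and sl.tryAcquire it, then sweep ...
+//		sweep.active.end(sl)
+//	}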
+// +// If the sweepLocker is invalid, the caller can be sure that all +// outstanding sweep work has been drained, so there is nothing left +// to sweep. Note that there may be sweepers currently running, so +// this does not indicate that all sweeping has completed. +// +// Even if the sweepLocker is invalid, its sweepGen is always valid. +func (a *activeSweep) begin() sweepLocker { + for { + state := a.state.Load() + if state&sweepDrainedMask != 0 { + return sweepLocker{mheap_.sweepgen, false} + } + if a.state.CompareAndSwap(state, state+1) { + return sweepLocker{mheap_.sweepgen, true} + } + } +} + +// end deregisters a sweeper. Must be called once for each time +// begin is called if the sweepLocker is valid. +func (a *activeSweep) end(sl sweepLocker) { + if sl.sweepGen != mheap_.sweepgen { + throw("sweeper left outstanding across sweep generations") + } + for { + state := a.state.Load() + if (state&^sweepDrainedMask)-1 >= sweepDrainedMask { + throw("mismatched begin/end of activeSweep") + } + if a.state.CompareAndSwap(state, state-1) { + if state != sweepDrainedMask { + return + } + if debug.gcpacertrace > 0 { + live := gcController.heapLive.Load() + print("pacer: sweep done at heap size ", live>>20, "MB; allocated ", (live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n") + } + return + } + } +} + +// markDrained marks the active sweep cycle as having drained +// all remaining work. This is safe to be called concurrently +// with all other methods of activeSweep, though may race. +// +// Returns true if this call was the one that actually performed +// the mark. +func (a *activeSweep) markDrained() bool { + for { + state := a.state.Load() + if state&sweepDrainedMask != 0 { + return false + } + if a.state.CompareAndSwap(state, state|sweepDrainedMask) { + return true + } + } +} + +// sweepers returns the current number of active sweepers. +func (a *activeSweep) sweepers() uint32 { + return a.state.Load() &^ sweepDrainedMask +} + +// isDone returns true if all sweep work has been drained and no more +// outstanding sweepers exist. That is, when the sweep phase is +// completely done. +func (a *activeSweep) isDone() bool { + return a.state.Load() == sweepDrainedMask +} + +// reset sets up the activeSweep for the next sweep cycle. +// +// The world must be stopped. +func (a *activeSweep) reset() { + assertWorldStopped() + a.state.Store(0) +} + +// finishsweep_m ensures that all spans are swept. +// +// The world must be stopped. This ensures there are no sweeps in +// progress. +// +//go:nowritebarrier +func finishsweep_m() { + assertWorldStopped() + + // Sweeping must be complete before marking commences, so + // sweep any unswept spans. If this is a concurrent GC, there + // shouldn't be any spans left to sweep, so this should finish + // instantly. If GC was forced before the concurrent sweep + // finished, there may be spans to sweep. + for sweepone() != ^uintptr(0) { + } + + // Make sure there aren't any outstanding sweepers left. + // At this point, with the world stopped, it means one of two + // things. Either we were able to preempt a sweeper, or that + // a sweeper didn't call sweep.active.end when it should have. + // Both cases indicate a bug, so throw. + if sweep.active.sweepers() != 0 { + throw("active sweepers found at start of mark phase") + } + + // Reset all the unswept buffers, which should be empty. 
+ // Do this in sweep termination as opposed to mark termination + // so that we can catch unswept spans and reclaim blocks as + // soon as possible. + sg := mheap_.sweepgen + for i := range mheap_.central { + c := &mheap_.central[i].mcentral + c.partialUnswept(sg).reset() + c.fullUnswept(sg).reset() + } + + // Sweeping is done, so there won't be any new memory to + // scavenge for a bit. + // + // If the scavenger isn't already awake, wake it up. There's + // definitely work for it to do at this point. + scavenger.wake() + + nextMarkBitArenaEpoch() +} + +func bgsweep(c chan int) { + sweep.g = getg() + + lockInit(&sweep.lock, lockRankSweep) + lock(&sweep.lock) + sweep.parked = true + c <- 1 + goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceBlockGCSweep, 1) + + for { + // bgsweep attempts to be a "low priority" goroutine by intentionally + // yielding time. It's OK if it doesn't run, because goroutines allocating + // memory will sweep and ensure that all spans are swept before the next + // GC cycle. We really only want to run when we're idle. + // + // However, calling Gosched after each span swept produces a tremendous + // amount of tracing events, sometimes up to 50% of events in a trace. It's + // also inefficient to call into the scheduler so much because sweeping a + // single span is in general a very fast operation, taking as little as 30 ns + // on modern hardware. (See #54767.) + // + // As a result, bgsweep sweeps in batches, and only calls into the scheduler + // at the end of every batch. Furthermore, it only yields its time if there + // isn't spare idle time available on other cores. If there's available idle + // time, helping to sweep can reduce allocation latencies by getting ahead of + // the proportional sweeper and having spans ready to go for allocation. + const sweepBatchSize = 10 + nSwept := 0 + for sweepone() != ^uintptr(0) { + nSwept++ + if nSwept%sweepBatchSize == 0 { + goschedIfBusy() + } + } + for freeSomeWbufs(true) { + // N.B. freeSomeWbufs is already batched internally. + goschedIfBusy() + } + lock(&sweep.lock) + if !isSweepDone() { + // This can happen if a GC runs between + // gosweepone returning ^0 above + // and the lock being acquired. + unlock(&sweep.lock) + continue + } + sweep.parked = true + goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceBlockGCSweep, 1) + } +} + +// sweepLocker acquires sweep ownership of spans. +type sweepLocker struct { + // sweepGen is the sweep generation of the heap. + sweepGen uint32 + valid bool +} + +// sweepLocked represents sweep ownership of a span. +type sweepLocked struct { + *mspan +} + +// tryAcquire attempts to acquire sweep ownership of span s. If it +// successfully acquires ownership, it blocks sweep completion. +func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) { + if !l.valid { + throw("use of invalid sweepLocker") + } + // Check before attempting to CAS. + if atomic.Load(&s.sweepgen) != l.sweepGen-2 { + return sweepLocked{}, false + } + // Attempt to acquire sweep ownership of s. + if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) { + return sweepLocked{}, false + } + return sweepLocked{s}, true +} + +// sweepone sweeps some unswept heap span and returns the number of pages returned +// to the heap, or ^uintptr(0) if there was nothing to sweep. 
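+//
+// Callers usually drain the queue in a loop, as finishsweep_m and bgsweep
+// do above:
+//
+//	for sweepone() != ^uintptr(0) {
+//	}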
+func sweepone() uintptr { + gp := getg() + + // Increment locks to ensure that the goroutine is not preempted + // in the middle of sweep thus leaving the span in an inconsistent state for next GC + gp.m.locks++ + + // TODO(austin): sweepone is almost always called in a loop; + // lift the sweepLocker into its callers. + sl := sweep.active.begin() + if !sl.valid { + gp.m.locks-- + return ^uintptr(0) + } + + // Find a span to sweep. + npages := ^uintptr(0) + var noMoreWork bool + for { + s := mheap_.nextSpanForSweep() + if s == nil { + noMoreWork = sweep.active.markDrained() + break + } + if state := s.state.get(); state != mSpanInUse { + // This can happen if direct sweeping already + // swept this span, but in that case the sweep + // generation should always be up-to-date. + if !(s.sweepgen == sl.sweepGen || s.sweepgen == sl.sweepGen+3) { + print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sl.sweepGen, "\n") + throw("non in-use span in unswept list") + } + continue + } + if s, ok := sl.tryAcquire(s); ok { + // Sweep the span we found. + npages = s.npages + if s.sweep(false) { + // Whole span was freed. Count it toward the + // page reclaimer credit since these pages can + // now be used for span allocation. + mheap_.reclaimCredit.Add(npages) + } else { + // Span is still in-use, so this returned no + // pages to the heap and the span needs to + // move to the swept in-use list. + npages = 0 + } + break + } + } + sweep.active.end(sl) + + if noMoreWork { + // The sweep list is empty. There may still be + // concurrent sweeps running, but we're at least very + // close to done sweeping. + + // Move the scavenge gen forward (signaling + // that there's new work to do) and wake the scavenger. + // + // The scavenger is signaled by the last sweeper because once + // sweeping is done, we will definitely have useful work for + // the scavenger to do, since the scavenger only runs over the + // heap once per GC cycle. This update is not done during sweep + // termination because in some cases there may be a long delay + // between sweep done and sweep termination (e.g. not enough + // allocations to trigger a GC) which would be nice to fill in + // with scavenging work. + if debug.scavtrace > 0 { + systemstack(func() { + lock(&mheap_.lock) + + // Get released stats. + releasedBg := mheap_.pages.scav.releasedBg.Load() + releasedEager := mheap_.pages.scav.releasedEager.Load() + + // Print the line. + printScavTrace(releasedBg, releasedEager, false) + + // Update the stats. + mheap_.pages.scav.releasedBg.Add(-releasedBg) + mheap_.pages.scav.releasedEager.Add(-releasedEager) + unlock(&mheap_.lock) + }) + } + scavenger.ready() + } + + gp.m.locks-- + return npages +} + +// isSweepDone reports whether all spans are swept. +// +// Note that this condition may transition from false to true at any +// time as the sweeper runs. It may transition from true to false if a +// GC runs; to prevent that the caller must be non-preemptible or must +// somehow block GC progress. +func isSweepDone() bool { + return sweep.active.isDone() +} + +// Returns only when span s has been swept. +// +//go:nowritebarrier +func (s *mspan) ensureSwept() { + // Caller must disable preemption. + // Otherwise when this function returns the span can become unswept again + // (if GC is triggered on another goroutine). 
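+	//
+	// For reference, relative to h = mheap_.sweepgen (see the sweepgen
+	// comment in mheap.go): h-2 means the span needs sweeping, h-1 means
+	// it is being swept, h means it is swept, and h+1/h+3 are the cached
+	// equivalents of h-2/h. The wait loop below spins until the span
+	// reaches a swept state.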
+ gp := getg() + if gp.m.locks == 0 && gp.m.mallocing == 0 && gp != gp.m.g0 { + throw("mspan.ensureSwept: m is not locked") + } + + // If this operation fails, then that means that there are + // no more spans to be swept. In this case, either s has already + // been swept, or is about to be acquired for sweeping and swept. + sl := sweep.active.begin() + if sl.valid { + // The caller must be sure that the span is a mSpanInUse span. + if s, ok := sl.tryAcquire(s); ok { + s.sweep(false) + sweep.active.end(sl) + return + } + sweep.active.end(sl) + } + + // Unfortunately we can't sweep the span ourselves. Somebody else + // got to it first. We don't have efficient means to wait, but that's + // OK, it will be swept fairly soon. + for { + spangen := atomic.Load(&s.sweepgen) + if spangen == sl.sweepGen || spangen == sl.sweepGen+3 { + break + } + osyield() + } +} + +// sweep frees or collects finalizers for blocks not marked in the mark phase. +// It clears the mark bits in preparation for the next GC round. +// Returns true if the span was returned to heap. +// If preserve=true, don't return it to heap nor relink in mcentral lists; +// caller takes care of it. +func (sl *sweepLocked) sweep(preserve bool) bool { + // It's critical that we enter this function with preemption disabled, + // GC must not start while we are in the middle of this function. + gp := getg() + if gp.m.locks == 0 && gp.m.mallocing == 0 && gp != gp.m.g0 { + throw("mspan.sweep: m is not locked") + } + + s := sl.mspan + if !preserve { + // We'll release ownership of this span. Nil it out to + // prevent the caller from accidentally using it. + sl.mspan = nil + } + + sweepgen := mheap_.sweepgen + if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 { + print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n") + throw("mspan.sweep: bad span state") + } + + trace := traceAcquire() + if trace.ok() { + trace.GCSweepSpan(s.npages * _PageSize) + traceRelease(trace) + } + + mheap_.pagesSwept.Add(int64(s.npages)) + + spc := s.spanclass + size := s.elemsize + + // The allocBits indicate which unmarked objects don't need to be + // processed since they were free at the end of the last GC cycle + // and were not allocated since then. + // If the allocBits index is >= s.freeindex and the bit + // is not marked then the object remains unallocated + // since the last GC. + // This situation is analogous to being on a freelist. + + // Unlink & free special records for any objects we're about to free. + // Two complications here: + // 1. An object can have both finalizer and profile special records. + // In such case we need to queue finalizer for execution, + // mark the object as live and preserve the profile special. + // 2. A tiny object can have several finalizers setup for different offsets. + // If such object is not marked, we need to queue all finalizers at once. + // Both 1 and 2 are possible at the same time. + hadSpecials := s.specials != nil + siter := newSpecialsIter(s) + for siter.valid() { + // A finalizer can be set for an inner byte of an object, find object beginning. + objIndex := uintptr(siter.s.offset) / size + p := s.base() + objIndex*size + mbits := s.markBitsForIndex(objIndex) + if !mbits.isMarked() { + // This object is not marked and has at least one special record. + // Pass 1: see if it has at least one finalizer. 
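+			// If one is found, the object is resurrected: its mark bit is set
+			// below so it survives this cycle while the finalizer is queued.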
+ hasFin := false + endOffset := p - s.base() + size + for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next { + if tmp.kind == _KindSpecialFinalizer { + // Stop freeing of object if it has a finalizer. + mbits.setMarkedNonAtomic() + hasFin = true + break + } + } + // Pass 2: queue all finalizers _or_ handle profile record. + for siter.valid() && uintptr(siter.s.offset) < endOffset { + // Find the exact byte for which the special was setup + // (as opposed to object beginning). + special := siter.s + p := s.base() + uintptr(special.offset) + if special.kind == _KindSpecialFinalizer || !hasFin { + siter.unlinkAndNext() + freeSpecial(special, unsafe.Pointer(p), size) + } else { + // The object has finalizers, so we're keeping it alive. + // All other specials only apply when an object is freed, + // so just keep the special record. + siter.next() + } + } + } else { + // object is still live + if siter.s.kind == _KindSpecialReachable { + special := siter.unlinkAndNext() + (*specialReachable)(unsafe.Pointer(special)).reachable = true + freeSpecial(special, unsafe.Pointer(p), size) + } else { + // keep special record + siter.next() + } + } + } + if hadSpecials && s.specials == nil { + spanHasNoSpecials(s) + } + + if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled || asanenabled { + // Find all newly freed objects. This doesn't have to + // efficient; allocfreetrace has massive overhead. + mbits := s.markBitsForBase() + abits := s.allocBitsForIndex(0) + for i := uintptr(0); i < uintptr(s.nelems); i++ { + if !mbits.isMarked() && (abits.index < uintptr(s.freeindex) || abits.isMarked()) { + x := s.base() + i*s.elemsize + if debug.allocfreetrace != 0 { + tracefree(unsafe.Pointer(x), size) + } + if debug.clobberfree != 0 { + clobberfree(unsafe.Pointer(x), size) + } + // User arenas are handled on explicit free. + if raceenabled && !s.isUserArenaChunk { + racefree(unsafe.Pointer(x), size) + } + if msanenabled && !s.isUserArenaChunk { + msanfree(unsafe.Pointer(x), size) + } + if asanenabled && !s.isUserArenaChunk { + asanpoison(unsafe.Pointer(x), size) + } + } + mbits.advance() + abits.advance() + } + } + + // Check for zombie objects. + if s.freeindex < s.nelems { + // Everything < freeindex is allocated and hence + // cannot be zombies. + // + // Check the first bitmap byte, where we have to be + // careful with freeindex. + obj := uintptr(s.freeindex) + if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 { + s.reportZombies() + } + // Check remaining bytes. + for i := obj/8 + 1; i < divRoundUp(uintptr(s.nelems), 8); i++ { + if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 { + s.reportZombies() + } + } + } + + // Count the number of free objects in this span. + nalloc := uint16(s.countAlloc()) + nfreed := s.allocCount - nalloc + if nalloc > s.allocCount { + // The zombie check above should have caught this in + // more detail. + print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n") + throw("sweep increased allocation count") + } + + s.allocCount = nalloc + s.freeindex = 0 // reset allocation index to start of span. + s.freeIndexForScan = 0 + if traceEnabled() { + getg().m.p.ptr().trace.reclaimed += uintptr(nfreed) * s.elemsize + } + + // gcmarkBits becomes the allocBits. 
+ // get a fresh cleared gcmarkBits in preparation for next GC + s.allocBits = s.gcmarkBits + s.gcmarkBits = newMarkBits(uintptr(s.nelems)) + + // refresh pinnerBits if they exists + if s.pinnerBits != nil { + s.refreshPinnerBits() + } + + // Initialize alloc bits cache. + s.refillAllocCache(0) + + // The span must be in our exclusive ownership until we update sweepgen, + // check for potential races. + if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 { + print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n") + throw("mspan.sweep: bad span state after sweep") + } + if s.sweepgen == sweepgen+1 || s.sweepgen == sweepgen+3 { + throw("swept cached span") + } + + // We need to set s.sweepgen = h.sweepgen only when all blocks are swept, + // because of the potential for a concurrent free/SetFinalizer. + // + // But we need to set it before we make the span available for allocation + // (return it to heap or mcentral), because allocation code assumes that a + // span is already swept if available for allocation. + // + // Serialization point. + // At this point the mark bits are cleared and allocation ready + // to go so release the span. + atomic.Store(&s.sweepgen, sweepgen) + + if s.isUserArenaChunk { + if preserve { + // This is a case that should never be handled by a sweeper that + // preserves the span for reuse. + throw("sweep: tried to preserve a user arena span") + } + if nalloc > 0 { + // There still exist pointers into the span or the span hasn't been + // freed yet. It's not ready to be reused. Put it back on the + // full swept list for the next cycle. + mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s) + return false + } + + // It's only at this point that the sweeper doesn't actually need to look + // at this arena anymore, so subtract from pagesInUse now. + mheap_.pagesInUse.Add(-s.npages) + s.state.set(mSpanDead) + + // The arena is ready to be recycled. Remove it from the quarantine list + // and place it on the ready list. Don't add it back to any sweep lists. + systemstack(func() { + // It's the arena code's responsibility to get the chunk on the quarantine + // list by the time all references to the chunk are gone. + if s.list != &mheap_.userArena.quarantineList { + throw("user arena span is on the wrong list") + } + lock(&mheap_.lock) + mheap_.userArena.quarantineList.remove(s) + mheap_.userArena.readyList.insert(s) + unlock(&mheap_.lock) + }) + return false + } + + if spc.sizeclass() != 0 { + // Handle spans for small objects. + if nfreed > 0 { + // Only mark the span as needing zeroing if we've freed any + // objects, because a fresh span that had been allocated into, + // wasn't totally filled, but then swept, still has all of its + // free slots zeroed. + s.needzero = 1 + stats := memstats.heapStats.acquire() + atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed)) + memstats.heapStats.release() + + // Count the frees in the inconsistent, internal stats. + gcController.totalFree.Add(int64(nfreed) * int64(s.elemsize)) + } + if !preserve { + // The caller may not have removed this span from whatever + // unswept set its on but taken ownership of the span for + // sweeping by updating sweepgen. If this span still is in + // an unswept set, then the mcentral will pop it off the + // set, check its sweepgen, and ignore it. + if nalloc == 0 { + // Free totally free span directly back to the heap. + mheap_.freeSpan(s) + return true + } + // Return span back to the right mcentral list. 
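+			// Fully allocated spans go on the full swept set; spans with free
+			// slots go on the partial swept set, where mcentral.cacheSpan can
+			// find them to serve allocations.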
+ if nalloc == s.nelems { + mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s) + } else { + mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s) + } + } + } else if !preserve { + // Handle spans for large objects. + if nfreed != 0 { + // Free large object span to heap. + + // Count the free in the consistent, external stats. + // + // Do this before freeSpan, which might update heapStats' inHeap + // value. If it does so, then metrics that subtract object footprint + // from inHeap might overflow. See #67019. + stats := memstats.heapStats.acquire() + atomic.Xadd64(&stats.largeFreeCount, 1) + atomic.Xadd64(&stats.largeFree, int64(size)) + memstats.heapStats.release() + + // Count the free in the inconsistent, internal stats. + gcController.totalFree.Add(int64(size)) + + // NOTE(rsc,dvyukov): The original implementation of efence + // in CL 22060046 used sysFree instead of sysFault, so that + // the operating system would eventually give the memory + // back to us again, so that an efence program could run + // longer without running out of memory. Unfortunately, + // calling sysFree here without any kind of adjustment of the + // heap data structures means that when the memory does + // come back to us, we have the wrong metadata for it, either in + // the mspan structures or in the garbage collection bitmap. + // Using sysFault here means that the program will run out of + // memory fairly quickly in efence mode, but at least it won't + // have mysterious crashes due to confused memory reuse. + // It should be possible to switch back to sysFree if we also + // implement and then call some kind of mheap.deleteSpan. + if debug.efence > 0 { + s.limit = 0 // prevent mlookup from finding this span + sysFault(unsafe.Pointer(s.base()), size) + } else { + mheap_.freeSpan(s) + } + if goexperiment.AllocHeaders && s.largeType != nil && s.largeType.TFlag&abi.TFlagUnrolledBitmap != 0 { + // In the allocheaders experiment, the unrolled GCProg bitmap is allocated separately. + // Free the space for the unrolled bitmap. + systemstack(func() { + s := spanOf(uintptr(unsafe.Pointer(s.largeType))) + mheap_.freeManual(s, spanAllocPtrScalarBits) + }) + // Make sure to zero this pointer without putting the old + // value in a write buffer, as the old value might be an + // invalid pointer. See arena.go:(*mheap).allocUserArenaChunk. + *(*uintptr)(unsafe.Pointer(&s.largeType)) = 0 + } + return true + } + + // Add a large span directly onto the full+swept list. + mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s) + } + return false +} + +// reportZombies reports any marked but free objects in s and throws. +// +// This generally means one of the following: +// +// 1. User code converted a pointer to a uintptr and then back +// unsafely, and a GC ran while the uintptr was the only reference to +// an object. +// +// 2. User code (or a compiler bug) constructed a bad pointer that +// points to a free slot, often a past-the-end pointer. +// +// 3. The GC two cycles ago missed a pointer and freed a live object, +// but it was still live in the last cycle, so this GC cycle found a +// pointer to that object and marked it. +func (s *mspan) reportZombies() { + printlock() + print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? 
try -d=checkptr)\n") + mbits := s.markBitsForBase() + abits := s.allocBitsForIndex(0) + for i := uintptr(0); i < uintptr(s.nelems); i++ { + addr := s.base() + i*s.elemsize + print(hex(addr)) + alloc := i < uintptr(s.freeindex) || abits.isMarked() + if alloc { + print(" alloc") + } else { + print(" free ") + } + if mbits.isMarked() { + print(" marked ") + } else { + print(" unmarked") + } + zombie := mbits.isMarked() && !alloc + if zombie { + print(" zombie") + } + print("\n") + if zombie { + length := s.elemsize + if length > 1024 { + length = 1024 + } + hexdumpWords(addr, addr+length, nil) + } + mbits.advance() + abits.advance() + } + throw("found pointer to free object") +} + +// deductSweepCredit deducts sweep credit for allocating a span of +// size spanBytes. This must be performed *before* the span is +// allocated to ensure the system has enough credit. If necessary, it +// performs sweeping to prevent going in to debt. If the caller will +// also sweep pages (e.g., for a large allocation), it can pass a +// non-zero callerSweepPages to leave that many pages unswept. +// +// deductSweepCredit makes a worst-case assumption that all spanBytes +// bytes of the ultimately allocated span will be available for object +// allocation. +// +// deductSweepCredit is the core of the "proportional sweep" system. +// It uses statistics gathered by the garbage collector to perform +// enough sweeping so that all pages are swept during the concurrent +// sweep phase between GC cycles. +// +// mheap_ must NOT be locked. +func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) { + if mheap_.sweepPagesPerByte == 0 { + // Proportional sweep is done or disabled. + return + } + + trace := traceAcquire() + if trace.ok() { + trace.GCSweepStart() + traceRelease(trace) + } + + // Fix debt if necessary. +retry: + sweptBasis := mheap_.pagesSweptBasis.Load() + live := gcController.heapLive.Load() + liveBasis := mheap_.sweepHeapLiveBasis + newHeapLive := spanBytes + if liveBasis < live { + // Only do this subtraction when we don't overflow. Otherwise, pagesTarget + // might be computed as something really huge, causing us to get stuck + // sweeping here until the next mark phase. + // + // Overflow can happen here if gcPaceSweeper is called concurrently with + // sweeping (i.e. not during a STW, like it usually is) because this code + // is intentionally racy. A concurrent call to gcPaceSweeper can happen + // if a GC tuning parameter is modified and we read an older value of + // heapLive than what was used to set the basis. + // + // This state should be transient, so it's fine to just let newHeapLive + // be a relatively small number. We'll probably just skip this attempt to + // sweep. + // + // See issue #57523. + newHeapLive += uintptr(live - liveBasis) + } + pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages) + for pagesTarget > int64(mheap_.pagesSwept.Load()-sweptBasis) { + if sweepone() == ^uintptr(0) { + mheap_.sweepPagesPerByte = 0 + break + } + if mheap_.pagesSweptBasis.Load() != sweptBasis { + // Sweep pacing changed. Recompute debt. + goto retry + } + } + + trace = traceAcquire() + if trace.ok() { + trace.GCSweepDone() + traceRelease(trace) + } +} + +// clobberfree sets the memory content at x to bad content, for debugging +// purposes. +func clobberfree(x unsafe.Pointer, size uintptr) { + // size (span.elemsize) is always a multiple of 4. 
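+	// Stamping one uint32 at a time therefore tiles the object exactly.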
+ for i := uintptr(0); i < size; i += 4 { + *(*uint32)(add(x, i)) = 0xdeadbeef + } +} + +// gcPaceSweeper updates the sweeper's pacing parameters. +// +// Must be called whenever the GC's pacing is updated. +// +// The world must be stopped, or mheap_.lock must be held. +func gcPaceSweeper(trigger uint64) { + assertWorldStoppedOrLockHeld(&mheap_.lock) + + // Update sweep pacing. + if isSweepDone() { + mheap_.sweepPagesPerByte = 0 + } else { + // Concurrent sweep needs to sweep all of the in-use + // pages by the time the allocated heap reaches the GC + // trigger. Compute the ratio of in-use pages to sweep + // per byte allocated, accounting for the fact that + // some might already be swept. + heapLiveBasis := gcController.heapLive.Load() + heapDistance := int64(trigger) - int64(heapLiveBasis) + // Add a little margin so rounding errors and + // concurrent sweep are less likely to leave pages + // unswept when GC starts. + heapDistance -= 1024 * 1024 + if heapDistance < _PageSize { + // Avoid setting the sweep ratio extremely high + heapDistance = _PageSize + } + pagesSwept := mheap_.pagesSwept.Load() + pagesInUse := mheap_.pagesInUse.Load() + sweepDistancePages := int64(pagesInUse) - int64(pagesSwept) + if sweepDistancePages <= 0 { + mheap_.sweepPagesPerByte = 0 + } else { + mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance) + mheap_.sweepHeapLiveBasis = heapLiveBasis + // Write pagesSweptBasis last, since this + // signals concurrent sweeps to recompute + // their debt. + mheap_.pagesSweptBasis.Store(pagesSwept) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mgcwork.go b/platform/dbops/binaries/go/go/src/runtime/mgcwork.go new file mode 100644 index 0000000000000000000000000000000000000000..7ab89754d42a2668964159d4e1ad05252ff64bd6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mgcwork.go @@ -0,0 +1,489 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/goarch" + "runtime/internal/atomic" + "runtime/internal/sys" + "unsafe" +) + +const ( + _WorkbufSize = 2048 // in bytes; larger values result in less contention + + // workbufAlloc is the number of bytes to allocate at a time + // for new workbufs. This must be a multiple of pageSize and + // should be a multiple of _WorkbufSize. + // + // Larger values reduce workbuf allocation overhead. Smaller + // values reduce heap fragmentation. + workbufAlloc = 32 << 10 +) + +func init() { + if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 { + throw("bad workbufAlloc") + } +} + +// Garbage collector work pool abstraction. +// +// This implements a producer/consumer model for pointers to grey +// objects. A grey object is one that is marked and on a work +// queue. A black object is marked and not on a work queue. +// +// Write barriers, root discovery, stack scanning, and object scanning +// produce pointers to grey objects. Scanning consumes pointers to +// grey objects, thus blackening them, and then scans them, +// potentially producing new pointers to grey objects. + +// A gcWork provides the interface to produce and consume work for the +// garbage collector. +// +// A gcWork can be used on the stack as follows: +// +// (preemption must be disabled) +// gcw := &getg().m.p.ptr().gcw +// .. call gcw.put() to produce and gcw.tryGet() to consume .. 
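+//
+// A slightly fuller, illustrative version of the same sketch:
+//
+//	mp := acquirem() // disable preemption
+//	gcw := &mp.p.ptr().gcw
+//	gcw.put(obj)
+//	for p := gcw.tryGet(); p != 0; p = gcw.tryGet() {
+//		// ... scan the grey object at p ...
+//	}
+//	releasem(mp)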
+// +// It's important that any use of gcWork during the mark phase prevent +// the garbage collector from transitioning to mark termination since +// gcWork may locally hold GC work buffers. This can be done by +// disabling preemption (systemstack or acquirem). +type gcWork struct { + // wbuf1 and wbuf2 are the primary and secondary work buffers. + // + // This can be thought of as a stack of both work buffers' + // pointers concatenated. When we pop the last pointer, we + // shift the stack up by one work buffer by bringing in a new + // full buffer and discarding an empty one. When we fill both + // buffers, we shift the stack down by one work buffer by + // bringing in a new empty buffer and discarding a full one. + // This way we have one buffer's worth of hysteresis, which + // amortizes the cost of getting or putting a work buffer over + // at least one buffer of work and reduces contention on the + // global work lists. + // + // wbuf1 is always the buffer we're currently pushing to and + // popping from and wbuf2 is the buffer that will be discarded + // next. + // + // Invariant: Both wbuf1 and wbuf2 are nil or neither are. + wbuf1, wbuf2 *workbuf + + // Bytes marked (blackened) on this gcWork. This is aggregated + // into work.bytesMarked by dispose. + bytesMarked uint64 + + // Heap scan work performed on this gcWork. This is aggregated into + // gcController by dispose and may also be flushed by callers. + // Other types of scan work are flushed immediately. + heapScanWork int64 + + // flushedWork indicates that a non-empty work buffer was + // flushed to the global work list since the last gcMarkDone + // termination check. Specifically, this indicates that this + // gcWork may have communicated work to another gcWork. + flushedWork bool +} + +// Most of the methods of gcWork are go:nowritebarrierrec because the +// write barrier itself can invoke gcWork methods but the methods are +// not generally re-entrant. Hence, if a gcWork method invoked the +// write barrier while the gcWork was in an inconsistent state, and +// the write barrier in turn invoked a gcWork method, it could +// permanently corrupt the gcWork. + +func (w *gcWork) init() { + w.wbuf1 = getempty() + wbuf2 := trygetfull() + if wbuf2 == nil { + wbuf2 = getempty() + } + w.wbuf2 = wbuf2 +} + +// put enqueues a pointer for the garbage collector to trace. +// obj must point to the beginning of a heap object or an oblet. +// +//go:nowritebarrierrec +func (w *gcWork) put(obj uintptr) { + flushed := false + wbuf := w.wbuf1 + // Record that this may acquire the wbufSpans or heap lock to + // allocate a workbuf. + lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans) + lockWithRankMayAcquire(&mheap_.lock, lockRankMheap) + if wbuf == nil { + w.init() + wbuf = w.wbuf1 + // wbuf is empty at this point. + } else if wbuf.nobj == len(wbuf.obj) { + w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1 + wbuf = w.wbuf1 + if wbuf.nobj == len(wbuf.obj) { + putfull(wbuf) + w.flushedWork = true + wbuf = getempty() + w.wbuf1 = wbuf + flushed = true + } + } + + wbuf.obj[wbuf.nobj] = obj + wbuf.nobj++ + + // If we put a buffer on full, let the GC controller know so + // it can encourage more workers to run. We delay this until + // the end of put so that w is in a consistent state, since + // enlistWorker may itself manipulate w. + if flushed && gcphase == _GCmark { + gcController.enlistWorker() + } +} + +// putFast does a put and reports whether it can be done quickly +// otherwise it returns false and the caller needs to call put. 
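+//
+// Callers typically try the fast path first and fall back, e.g.:
+//
+//	if !gcw.putFast(obj) {
+//		gcw.put(obj)
+//	}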
+// +//go:nowritebarrierrec +func (w *gcWork) putFast(obj uintptr) bool { + wbuf := w.wbuf1 + if wbuf == nil || wbuf.nobj == len(wbuf.obj) { + return false + } + + wbuf.obj[wbuf.nobj] = obj + wbuf.nobj++ + return true +} + +// putBatch performs a put on every pointer in obj. See put for +// constraints on these pointers. +// +//go:nowritebarrierrec +func (w *gcWork) putBatch(obj []uintptr) { + if len(obj) == 0 { + return + } + + flushed := false + wbuf := w.wbuf1 + if wbuf == nil { + w.init() + wbuf = w.wbuf1 + } + + for len(obj) > 0 { + for wbuf.nobj == len(wbuf.obj) { + putfull(wbuf) + w.flushedWork = true + w.wbuf1, w.wbuf2 = w.wbuf2, getempty() + wbuf = w.wbuf1 + flushed = true + } + n := copy(wbuf.obj[wbuf.nobj:], obj) + wbuf.nobj += n + obj = obj[n:] + } + + if flushed && gcphase == _GCmark { + gcController.enlistWorker() + } +} + +// tryGet dequeues a pointer for the garbage collector to trace. +// +// If there are no pointers remaining in this gcWork or in the global +// queue, tryGet returns 0. Note that there may still be pointers in +// other gcWork instances or other caches. +// +//go:nowritebarrierrec +func (w *gcWork) tryGet() uintptr { + wbuf := w.wbuf1 + if wbuf == nil { + w.init() + wbuf = w.wbuf1 + // wbuf is empty at this point. + } + if wbuf.nobj == 0 { + w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1 + wbuf = w.wbuf1 + if wbuf.nobj == 0 { + owbuf := wbuf + wbuf = trygetfull() + if wbuf == nil { + return 0 + } + putempty(owbuf) + w.wbuf1 = wbuf + } + } + + wbuf.nobj-- + return wbuf.obj[wbuf.nobj] +} + +// tryGetFast dequeues a pointer for the garbage collector to trace +// if one is readily available. Otherwise it returns 0 and +// the caller is expected to call tryGet(). +// +//go:nowritebarrierrec +func (w *gcWork) tryGetFast() uintptr { + wbuf := w.wbuf1 + if wbuf == nil || wbuf.nobj == 0 { + return 0 + } + + wbuf.nobj-- + return wbuf.obj[wbuf.nobj] +} + +// dispose returns any cached pointers to the global queue. +// The buffers are being put on the full queue so that the +// write barriers will not simply reacquire them before the +// GC can inspect them. This helps reduce the mutator's +// ability to hide pointers during the concurrent mark phase. +// +//go:nowritebarrierrec +func (w *gcWork) dispose() { + if wbuf := w.wbuf1; wbuf != nil { + if wbuf.nobj == 0 { + putempty(wbuf) + } else { + putfull(wbuf) + w.flushedWork = true + } + w.wbuf1 = nil + + wbuf = w.wbuf2 + if wbuf.nobj == 0 { + putempty(wbuf) + } else { + putfull(wbuf) + w.flushedWork = true + } + w.wbuf2 = nil + } + if w.bytesMarked != 0 { + // dispose happens relatively infrequently. If this + // atomic becomes a problem, we should first try to + // dispose less and if necessary aggregate in a per-P + // counter. + atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked)) + w.bytesMarked = 0 + } + if w.heapScanWork != 0 { + gcController.heapScanWork.Add(w.heapScanWork) + w.heapScanWork = 0 + } +} + +// balance moves some work that's cached in this gcWork back on the +// global queue. +// +//go:nowritebarrierrec +func (w *gcWork) balance() { + if w.wbuf1 == nil { + return + } + if wbuf := w.wbuf2; wbuf.nobj != 0 { + putfull(wbuf) + w.flushedWork = true + w.wbuf2 = getempty() + } else if wbuf := w.wbuf1; wbuf.nobj > 4 { + w.wbuf1 = handoff(wbuf) + w.flushedWork = true // handoff did putfull + } else { + return + } + // We flushed a buffer to the full list, so wake a worker. + if gcphase == _GCmark { + gcController.enlistWorker() + } +} + +// empty reports whether w has no mark work available. 
+// +//go:nowritebarrierrec +func (w *gcWork) empty() bool { + return w.wbuf1 == nil || (w.wbuf1.nobj == 0 && w.wbuf2.nobj == 0) +} + +// Internally, the GC work pool is kept in arrays in work buffers. +// The gcWork interface caches a work buffer until full (or empty) to +// avoid contending on the global work buffer lists. + +type workbufhdr struct { + node lfnode // must be first + nobj int +} + +type workbuf struct { + _ sys.NotInHeap + workbufhdr + // account for the above fields + obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr +} + +// workbuf factory routines. These funcs are used to manage the +// workbufs. +// If the GC asks for some work these are the only routines that +// make wbufs available to the GC. + +func (b *workbuf) checknonempty() { + if b.nobj == 0 { + throw("workbuf is empty") + } +} + +func (b *workbuf) checkempty() { + if b.nobj != 0 { + throw("workbuf is not empty") + } +} + +// getempty pops an empty work buffer off the work.empty list, +// allocating new buffers if none are available. +// +//go:nowritebarrier +func getempty() *workbuf { + var b *workbuf + if work.empty != 0 { + b = (*workbuf)(work.empty.pop()) + if b != nil { + b.checkempty() + } + } + // Record that this may acquire the wbufSpans or heap lock to + // allocate a workbuf. + lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans) + lockWithRankMayAcquire(&mheap_.lock, lockRankMheap) + if b == nil { + // Allocate more workbufs. + var s *mspan + if work.wbufSpans.free.first != nil { + lock(&work.wbufSpans.lock) + s = work.wbufSpans.free.first + if s != nil { + work.wbufSpans.free.remove(s) + work.wbufSpans.busy.insert(s) + } + unlock(&work.wbufSpans.lock) + } + if s == nil { + systemstack(func() { + s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf) + }) + if s == nil { + throw("out of memory") + } + // Record the new span in the busy list. + lock(&work.wbufSpans.lock) + work.wbufSpans.busy.insert(s) + unlock(&work.wbufSpans.lock) + } + // Slice up the span into new workbufs. Return one and + // put the rest on the empty list. + for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize { + newb := (*workbuf)(unsafe.Pointer(s.base() + i)) + newb.nobj = 0 + lfnodeValidate(&newb.node) + if i == 0 { + b = newb + } else { + putempty(newb) + } + } + } + return b +} + +// putempty puts a workbuf onto the work.empty list. +// Upon entry this goroutine owns b. The lfstack.push relinquishes ownership. +// +//go:nowritebarrier +func putempty(b *workbuf) { + b.checkempty() + work.empty.push(&b.node) +} + +// putfull puts the workbuf on the work.full list for the GC. +// putfull accepts partially full buffers so the GC can avoid competing +// with the mutators for ownership of partially full buffers. +// +//go:nowritebarrier +func putfull(b *workbuf) { + b.checknonempty() + work.full.push(&b.node) +} + +// trygetfull tries to get a full or partially empty workbuffer. +// If one is not immediately available return nil. +// +//go:nowritebarrier +func trygetfull() *workbuf { + b := (*workbuf)(work.full.pop()) + if b != nil { + b.checknonempty() + return b + } + return b +} + +//go:nowritebarrier +func handoff(b *workbuf) *workbuf { + // Make new buffer with half of b's pointers. + b1 := getempty() + n := b.nobj / 2 + b.nobj -= n + b1.nobj = n + memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0])) + + // Put b on full list - let first half of b get stolen. 
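+	// (After the memmove above, b keeps its first b.nobj pointers and
+	// b1 owns the n pointers copied out of b's tail, so publishing b
+	// below cannot race with the caller's continued use of b1.)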
+ putfull(b) + return b1 +} + +// prepareFreeWorkbufs moves busy workbuf spans to free list so they +// can be freed to the heap. This must only be called when all +// workbufs are on the empty list. +func prepareFreeWorkbufs() { + lock(&work.wbufSpans.lock) + if work.full != 0 { + throw("cannot free workbufs when work.full != 0") + } + // Since all workbufs are on the empty list, we don't care + // which ones are in which spans. We can wipe the entire empty + // list and move all workbuf spans to the free list. + work.empty = 0 + work.wbufSpans.free.takeAll(&work.wbufSpans.busy) + unlock(&work.wbufSpans.lock) +} + +// freeSomeWbufs frees some workbufs back to the heap and returns +// true if it should be called again to free more. +func freeSomeWbufs(preemptible bool) bool { + const batchSize = 64 // ~1–2 µs per span. + lock(&work.wbufSpans.lock) + if gcphase != _GCoff || work.wbufSpans.free.isEmpty() { + unlock(&work.wbufSpans.lock) + return false + } + systemstack(func() { + gp := getg().m.curg + for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ { + span := work.wbufSpans.free.first + if span == nil { + break + } + work.wbufSpans.free.remove(span) + mheap_.freeManual(span, spanAllocWorkBuf) + } + }) + more := !work.wbufSpans.free.isEmpty() + unlock(&work.wbufSpans.lock) + return more +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mheap.go b/platform/dbops/binaries/go/go/src/runtime/mheap.go new file mode 100644 index 0000000000000000000000000000000000000000..00693283467b4e6dae5dcae047b588243f67e754 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mheap.go @@ -0,0 +1,2261 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Page heap. +// +// See malloc.go for overview. + +package runtime + +import ( + "internal/cpu" + "internal/goarch" + "internal/goexperiment" + "runtime/internal/atomic" + "runtime/internal/sys" + "unsafe" +) + +const ( + // minPhysPageSize is a lower-bound on the physical page size. The + // true physical page size may be larger than this. In contrast, + // sys.PhysPageSize is an upper-bound on the physical page size. + minPhysPageSize = 4096 + + // maxPhysPageSize is the maximum page size the runtime supports. + maxPhysPageSize = 512 << 10 + + // maxPhysHugePageSize sets an upper-bound on the maximum huge page size + // that the runtime supports. + maxPhysHugePageSize = pallocChunkBytes + + // pagesPerReclaimerChunk indicates how many pages to scan from the + // pageInUse bitmap at a time. Used by the page reclaimer. + // + // Higher values reduce contention on scanning indexes (such as + // h.reclaimIndex), but increase the minimum latency of the + // operation. + // + // The time required to scan this many pages can vary a lot depending + // on how many spans are actually freed. Experimentally, it can + // scan for pages at ~300 GB/ms on a 2.6GHz Core i7, but can only + // free spans at ~32 MB/ms. Using 512 pages bounds this at + // roughly 100µs. + // + // Must be a multiple of the pageInUse bitmap element size and + // must also evenly divide pagesPerArena. + pagesPerReclaimerChunk = 512 + + // physPageAlignedStacks indicates whether stack allocations must be + // physical page aligned. This is a requirement for MAP_STACK on + // OpenBSD. + physPageAlignedStacks = GOOS == "openbsd" +) + +// Main malloc heap. +// The heap itself is the "free" and "scav" treaps, +// but all the other global data is here too. 
+//
+// mheap must not be heap-allocated because it contains mSpanLists,
+// which must not be heap-allocated.
+type mheap struct {
+	_ sys.NotInHeap
+
+	// lock must only be acquired on the system stack, otherwise a g
+	// could self-deadlock if its stack grows with the lock held.
+	lock mutex
+
+	pages pageAlloc // page allocation data structure
+
+	sweepgen uint32 // sweep generation, see comment in mspan; written during STW
+
+	// allspans is a slice of all mspans ever created. Each mspan
+	// appears exactly once.
+	//
+	// The memory for allspans is manually managed and can be
+	// reallocated and moved as the heap grows.
+	//
+	// In general, allspans is protected by mheap_.lock, which
+	// prevents concurrent access as well as freeing the backing
+	// store. Accesses during STW might not hold the lock, but
+	// must ensure that allocation cannot happen around the
+	// access (since that may free the backing store).
+	allspans []*mspan // all spans out there
+
+	// Proportional sweep
+	//
+	// These parameters represent a linear function from gcController.heapLive
+	// to page sweep count. The proportional sweep system works to
+	// stay in the black by keeping the current page sweep count
+	// above this line at the current gcController.heapLive.
+	//
+	// The line has slope sweepPagesPerByte and passes through a
+	// basis point at (sweepHeapLiveBasis, pagesSweptBasis). At
+	// any given time, the system is at (gcController.heapLive,
+	// pagesSwept) in this space.
+	//
+	// It is important that the line pass through a point we
+	// control rather than simply starting at a 0,0 origin
+	// because that lets us adjust sweep pacing at any time while
+	// accounting for current progress. If we could only adjust
+	// the slope, it would create a discontinuity in debt if any
+	// progress has already been made.
+	pagesInUse         atomic.Uintptr // pages of spans in stats mSpanInUse
+	pagesSwept         atomic.Uint64  // pages swept this cycle
+	pagesSweptBasis    atomic.Uint64  // pagesSwept to use as the origin of the sweep ratio
+	sweepHeapLiveBasis uint64         // value of gcController.heapLive to use as the origin of sweep ratio; written with lock, read without
+	sweepPagesPerByte  float64        // proportional sweep ratio; written with lock, read without
+
+	// Page reclaimer state
+
+	// reclaimIndex is the page index in allArenas of the next page to
+	// reclaim. Specifically, it refers to page (i %
+	// pagesPerArena) of arena allArenas[i / pagesPerArena].
+	//
+	// If this is >= 1<<63, the page reclaimer is done scanning
+	// the page marks.
+	reclaimIndex atomic.Uint64
+
+	// reclaimCredit is spare credit for extra pages swept. Since
+	// the page reclaimer works in large chunks, it may reclaim
+	// more than requested. Any spare pages released go to this
+	// credit pool.
+	reclaimCredit atomic.Uintptr
+
+	_ cpu.CacheLinePad // prevents false-sharing between arenas and preceding variables
+
+	// arenas is the heap arena map. It points to the metadata for
+	// the heap for every arena frame of the entire usable virtual
+	// address space.
+	//
+	// Use arenaIndex to compute indexes into this array.
+	//
+	// For regions of the address space that are not backed by the
+	// Go heap, the arena map contains nil.
+	//
+	// Modifications are protected by mheap_.lock. Reads can be
+	// performed without locking; however, a given entry can
+	// transition from nil to non-nil at any time when the lock
+	// isn't held. (Entries never transition back to nil.)
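+	//
+	// An illustrative lock-free read for an address p (a sketch of
+	// what spanOf below does), assuming ai := arenaIndex(p):
+	//
+	//	if l2 := mheap_.arenas[ai.l1()]; l2 != nil {
+	//		ha := l2[ai.l2()] // may still be nil
+	//		// ... use ha ...
+	//	}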
+ // + // In general, this is a two-level mapping consisting of an L1 + // map and possibly many L2 maps. This saves space when there + // are a huge number of arena frames. However, on many + // platforms (even 64-bit), arenaL1Bits is 0, making this + // effectively a single-level map. In this case, arenas[0] + // will never be nil. + arenas [1 << arenaL1Bits]*[1 << arenaL2Bits]*heapArena + + // arenasHugePages indicates whether arenas' L2 entries are eligible + // to be backed by huge pages. + arenasHugePages bool + + // heapArenaAlloc is pre-reserved space for allocating heapArena + // objects. This is only used on 32-bit, where we pre-reserve + // this space to avoid interleaving it with the heap itself. + heapArenaAlloc linearAlloc + + // arenaHints is a list of addresses at which to attempt to + // add more heap arenas. This is initially populated with a + // set of general hint addresses, and grown with the bounds of + // actual heap arena ranges. + arenaHints *arenaHint + + // arena is a pre-reserved space for allocating heap arenas + // (the actual arenas). This is only used on 32-bit. + arena linearAlloc + + // allArenas is the arenaIndex of every mapped arena. This can + // be used to iterate through the address space. + // + // Access is protected by mheap_.lock. However, since this is + // append-only and old backing arrays are never freed, it is + // safe to acquire mheap_.lock, copy the slice header, and + // then release mheap_.lock. + allArenas []arenaIdx + + // sweepArenas is a snapshot of allArenas taken at the + // beginning of the sweep cycle. This can be read safely by + // simply blocking GC (by disabling preemption). + sweepArenas []arenaIdx + + // markArenas is a snapshot of allArenas taken at the beginning + // of the mark cycle. Because allArenas is append-only, neither + // this slice nor its contents will change during the mark, so + // it can be read safely. + markArenas []arenaIdx + + // curArena is the arena that the heap is currently growing + // into. This should always be physPageSize-aligned. + curArena struct { + base, end uintptr + } + + // central free lists for small size classes. + // the padding makes sure that the mcentrals are + // spaced CacheLinePadSize bytes apart, so that each mcentral.lock + // gets its own cache line. + // central is indexed by spanClass. + central [numSpanClasses]struct { + mcentral mcentral + pad [(cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte + } + + spanalloc fixalloc // allocator for span* + cachealloc fixalloc // allocator for mcache* + specialfinalizeralloc fixalloc // allocator for specialfinalizer* + specialprofilealloc fixalloc // allocator for specialprofile* + specialReachableAlloc fixalloc // allocator for specialReachable + specialPinCounterAlloc fixalloc // allocator for specialPinCounter + speciallock mutex // lock for special record allocators. + arenaHintAlloc fixalloc // allocator for arenaHints + + // User arena state. + // + // Protected by mheap_.lock. + userArena struct { + // arenaHints is a list of addresses at which to attempt to + // add more heap arenas for user arena chunks. This is initially + // populated with a set of general hint addresses, and grown with + // the bounds of actual heap arena ranges. + arenaHints *arenaHint + + // quarantineList is a list of user arena spans that have been set to fault, but + // are waiting for all pointers into them to go away. 
Sweeping handles + // identifying when this is true, and moves the span to the ready list. + quarantineList mSpanList + + // readyList is a list of empty user arena spans that are ready for reuse. + readyList mSpanList + } + + unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF +} + +var mheap_ mheap + +// A heapArena stores metadata for a heap arena. heapArenas are stored +// outside of the Go heap and accessed via the mheap_.arenas index. +type heapArena struct { + _ sys.NotInHeap + + // heapArenaPtrScalar contains pointer/scalar data about the heap for this heap arena. + heapArenaPtrScalar + + // spans maps from virtual address page ID within this arena to *mspan. + // For allocated spans, their pages map to the span itself. + // For free spans, only the lowest and highest pages map to the span itself. + // Internal pages map to an arbitrary span. + // For pages that have never been allocated, spans entries are nil. + // + // Modifications are protected by mheap.lock. Reads can be + // performed without locking, but ONLY from indexes that are + // known to contain in-use or stack spans. This means there + // must not be a safe-point between establishing that an + // address is live and looking it up in the spans array. + spans [pagesPerArena]*mspan + + // pageInUse is a bitmap that indicates which spans are in + // state mSpanInUse. This bitmap is indexed by page number, + // but only the bit corresponding to the first page in each + // span is used. + // + // Reads and writes are atomic. + pageInUse [pagesPerArena / 8]uint8 + + // pageMarks is a bitmap that indicates which spans have any + // marked objects on them. Like pageInUse, only the bit + // corresponding to the first page in each span is used. + // + // Writes are done atomically during marking. Reads are + // non-atomic and lock-free since they only occur during + // sweeping (and hence never race with writes). + // + // This is used to quickly find whole spans that can be freed. + // + // TODO(austin): It would be nice if this was uint64 for + // faster scanning, but we don't have 64-bit atomic bit + // operations. + pageMarks [pagesPerArena / 8]uint8 + + // pageSpecials is a bitmap that indicates which spans have + // specials (finalizers or other). Like pageInUse, only the bit + // corresponding to the first page in each span is used. + // + // Writes are done atomically whenever a special is added to + // a span and whenever the last special is removed from a span. + // Reads are done atomically to find spans containing specials + // during marking. + pageSpecials [pagesPerArena / 8]uint8 + + // checkmarks stores the debug.gccheckmark state. It is only + // used if debug.gccheckmark > 0. + checkmarks *checkmarksMap + + // zeroedBase marks the first byte of the first page in this + // arena which hasn't been used yet and is therefore already + // zero. zeroedBase is relative to the arena base. + // Increases monotonically until it hits heapArenaBytes. + // + // This field is sufficient to determine if an allocation + // needs to be zeroed because the page allocator follows an + // address-ordered first-fit policy. + // + // Read atomically and written with an atomic CAS. + zeroedBase uintptr +} + +// arenaHint is a hint for where to grow the heap arenas. See +// mheap_.arenaHints. +type arenaHint struct { + _ sys.NotInHeap + addr uintptr + down bool + next *arenaHint +} + +// An mspan is a run of pages. 
+// +// When a mspan is in the heap free treap, state == mSpanFree +// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span. +// If the mspan is in the heap scav treap, then in addition to the +// above scavenged == true. scavenged == false in all other cases. +// +// When a mspan is allocated, state == mSpanInUse or mSpanManual +// and heapmap(i) == span for all s->start <= i < s->start+s->npages. + +// Every mspan is in one doubly-linked list, either in the mheap's +// busy list or one of the mcentral's span lists. + +// An mspan representing actual memory has state mSpanInUse, +// mSpanManual, or mSpanFree. Transitions between these states are +// constrained as follows: +// +// - A span may transition from free to in-use or manual during any GC +// phase. +// +// - During sweeping (gcphase == _GCoff), a span may transition from +// in-use to free (as a result of sweeping) or manual to free (as a +// result of stacks being freed). +// +// - During GC (gcphase != _GCoff), a span *must not* transition from +// manual or in-use to free. Because concurrent GC may read a pointer +// and then look up its span, the span state must be monotonic. +// +// Setting mspan.state to mSpanInUse or mSpanManual must be done +// atomically and only after all other span fields are valid. +// Likewise, if inspecting a span is contingent on it being +// mSpanInUse, the state should be loaded atomically and checked +// before depending on other fields. This allows the garbage collector +// to safely deal with potentially invalid pointers, since resolving +// such pointers may race with a span being allocated. +type mSpanState uint8 + +const ( + mSpanDead mSpanState = iota + mSpanInUse // allocated for garbage collected heap + mSpanManual // allocated for manual management (e.g., stack allocator) +) + +// mSpanStateNames are the names of the span states, indexed by +// mSpanState. +var mSpanStateNames = []string{ + "mSpanDead", + "mSpanInUse", + "mSpanManual", +} + +// mSpanStateBox holds an atomic.Uint8 to provide atomic operations on +// an mSpanState. This is a separate type to disallow accidental comparison +// or assignment with mSpanState. +type mSpanStateBox struct { + s atomic.Uint8 +} + +// It is nosplit to match get, below. + +//go:nosplit +func (b *mSpanStateBox) set(s mSpanState) { + b.s.Store(uint8(s)) +} + +// It is nosplit because it's called indirectly by typedmemclr, +// which must not be preempted. + +//go:nosplit +func (b *mSpanStateBox) get() mSpanState { + return mSpanState(b.s.Load()) +} + +// mSpanList heads a linked list of spans. +type mSpanList struct { + _ sys.NotInHeap + first *mspan // first span in list, or nil if none + last *mspan // last span in list, or nil if none +} + +type mspan struct { + _ sys.NotInHeap + next *mspan // next span in list, or nil if none + prev *mspan // previous span in list, or nil if none + list *mSpanList // For debugging. + + startAddr uintptr // address of first byte of span aka s.base() + npages uintptr // number of pages in span + + manualFreeList gclinkptr // list of free objects in mSpanManual spans + + // freeindex is the slot index between 0 and nelems at which to begin scanning + // for the next free object in this span. + // Each allocation scans allocBits starting at freeindex until it encounters a 0 + // indicating a free object. freeindex is then adjusted so that subsequent scans begin + // just past the newly discovered free object. + // + // If freeindex == nelem, this span has no free objects. 
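+	// For example (illustrative), with nelems == 8 and allocBits ==
+	// 0b00001111, objects 0-3 are allocated, freeindex is 4, and the
+	// next allocation returns slot 4.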
+	//
+	// allocBits is a bitmap of objects in this span.
+	// If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
+	// then object n is free;
+	// otherwise, object n is allocated. Bits starting at nelems are
+	// undefined and should never be referenced.
+	//
+	// Object n starts at address n*elemsize + (start << pageShift).
+	freeindex uint16
+	// TODO: Look up nelems from sizeclass and remove this field if it
+	// helps performance.
+	nelems uint16 // number of objects in the span.
+	// freeIndexForScan is like freeindex, except that freeindex is
+	// used by the allocator whereas freeIndexForScan is used by the
+	// GC scanner. They are two fields so that the GC sees the object
+	// is allocated only when the object and the heap bits are
+	// initialized (see also the assignment of freeIndexForScan in
+	// mallocgc, and issue 54596).
+	freeIndexForScan uint16
+
+	// Cache of the allocBits at freeindex. allocCache is shifted
+	// such that the lowest bit corresponds to the bit freeindex.
+	// allocCache holds the complement of allocBits, thus allowing
+	// ctz (count trailing zero) to use it directly.
+	// allocCache may contain bits beyond s.nelems; the caller must ignore
+	// these.
+	allocCache uint64
+
+	// allocBits and gcmarkBits hold pointers to a span's mark and
+	// allocation bits. The pointers are 8 byte aligned.
+	// There are four arenas where this data is held.
+	// free: Dirty arenas that are no longer accessed
+	//       and can be reused.
+	// next: Holds information to be used in the next GC cycle.
+	// current: Information being used during this GC cycle.
+	// previous: Information being used during the last GC cycle.
+	// A new GC cycle starts with the call to finishsweep_m.
+	// finishsweep_m moves the previous arena to the free arena,
+	// the current arena to the previous arena, and
+	// the next arena to the current arena.
+	// The next arena is populated as the spans request
+	// memory to hold gcmarkBits for the next GC cycle as well
+	// as allocBits for newly allocated spans.
+	//
+	// The pointer arithmetic is done "by hand" instead of using
+	// arrays to avoid bounds checks along critical performance
+	// paths.
+	// The sweep will free the old allocBits and set allocBits to the
+	// gcmarkBits. The gcmarkBits are replaced with fresh, zeroed-out
+	// memory.
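+	//
+	// (Illustrative next-free-slot scan over allocCache, as done by
+	// the allocator's fast path, nextFreeFast in malloc.go:
+	//
+	//	bit := sys.TrailingZeros64(s.allocCache) // next clear allocBits bit
+	//	slot := uintptr(s.freeindex) + uintptr(bit)
+	//
+	// valid while bit < 64 and slot < uintptr(s.nelems).)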
+ allocBits *gcBits + gcmarkBits *gcBits + pinnerBits *gcBits // bitmap for pinned objects; accessed atomically + + // sweep generation: + // if sweepgen == h->sweepgen - 2, the span needs sweeping + // if sweepgen == h->sweepgen - 1, the span is currently being swept + // if sweepgen == h->sweepgen, the span is swept and ready to use + // if sweepgen == h->sweepgen + 1, the span was cached before sweep began and is still cached, and needs sweeping + // if sweepgen == h->sweepgen + 3, the span was swept and then cached and is still cached + // h->sweepgen is incremented by 2 after every GC + + sweepgen uint32 + divMul uint32 // for divide by elemsize + allocCount uint16 // number of allocated objects + spanclass spanClass // size class and noscan (uint8) + state mSpanStateBox // mSpanInUse etc; accessed atomically (get/set methods) + needzero uint8 // needs to be zeroed before allocation + isUserArenaChunk bool // whether or not this span represents a user arena + allocCountBeforeCache uint16 // a copy of allocCount that is stored just before this span is cached + elemsize uintptr // computed from sizeclass or from npages + limit uintptr // end of data in span + speciallock mutex // guards specials list and changes to pinnerBits + specials *special // linked list of special records sorted by offset. + userArenaChunkFree addrRange // interval for managing chunk allocation + largeType *_type // malloc header for large objects. +} + +func (s *mspan) base() uintptr { + return s.startAddr +} + +func (s *mspan) layout() (size, n, total uintptr) { + total = s.npages << _PageShift + size = s.elemsize + if size > 0 { + n = total / size + } + return +} + +// recordspan adds a newly allocated span to h.allspans. +// +// This only happens the first time a span is allocated from +// mheap.spanalloc (it is not called when a span is reused). +// +// Write barriers are disallowed here because it can be called from +// gcWork when allocating new workbufs. However, because it's an +// indirect call from the fixalloc initializer, the compiler can't see +// this. +// +// The heap lock must be held. +// +//go:nowritebarrierrec +func recordspan(vh unsafe.Pointer, p unsafe.Pointer) { + h := (*mheap)(vh) + s := (*mspan)(p) + + assertLockHeld(&h.lock) + + if len(h.allspans) >= cap(h.allspans) { + n := 64 * 1024 / goarch.PtrSize + if n < cap(h.allspans)*3/2 { + n = cap(h.allspans) * 3 / 2 + } + var new []*mspan + sp := (*slice)(unsafe.Pointer(&new)) + sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys) + if sp.array == nil { + throw("runtime: cannot allocate memory") + } + sp.len = len(h.allspans) + sp.cap = n + if len(h.allspans) > 0 { + copy(new, h.allspans) + } + oldAllspans := h.allspans + *(*notInHeapSlice)(unsafe.Pointer(&h.allspans)) = *(*notInHeapSlice)(unsafe.Pointer(&new)) + if len(oldAllspans) != 0 { + sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys) + } + } + h.allspans = h.allspans[:len(h.allspans)+1] + h.allspans[len(h.allspans)-1] = s +} + +// A spanClass represents the size class and noscan-ness of a span. +// +// Each size class has a noscan spanClass and a scan spanClass. The +// noscan spanClass contains only noscan objects, which do not contain +// pointers and thus do not need to be scanned by the garbage +// collector. 
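+//
+// For example (illustrative), makeSpanClass(5, true) below packs size
+// class 5 and the noscan bit into spanClass 11 (5<<1 | 1), so
+// sc.sizeclass() == 5 and sc.noscan() == true.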
+type spanClass uint8
+
+const (
+	numSpanClasses = _NumSizeClasses << 1
+	tinySpanClass  = spanClass(tinySizeClass<<1 | 1)
+)
+
+func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
+	return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
+}
+
+//go:nosplit
+func (sc spanClass) sizeclass() int8 {
+	return int8(sc >> 1)
+}
+
+//go:nosplit
+func (sc spanClass) noscan() bool {
+	return sc&1 != 0
+}
+
+// arenaIndex returns the index into mheap_.arenas of the arena
+// containing metadata for p. This index combines an index into the
+// L1 map and an index into the L2 map and should be used as
+// mheap_.arenas[ai.l1()][ai.l2()].
+//
+// If p is outside the range of valid heap addresses, either l1() or
+// l2() will be out of bounds.
+//
+// It is nosplit because it's called by spanOf and several other
+// nosplit functions.
+//
+//go:nosplit
+func arenaIndex(p uintptr) arenaIdx {
+	return arenaIdx((p - arenaBaseOffset) / heapArenaBytes)
+}
+
+// arenaBase returns the low address of the region covered by heap
+// arena i.
+func arenaBase(i arenaIdx) uintptr {
+	return uintptr(i)*heapArenaBytes + arenaBaseOffset
+}
+
+type arenaIdx uint
+
+// l1 returns the "l1" portion of an arenaIdx.
+//
+// Marked nosplit because it's called by spanOf and other nosplit
+// functions.
+//
+//go:nosplit
+func (i arenaIdx) l1() uint {
+	if arenaL1Bits == 0 {
+		// Let the compiler optimize this away if there's no
+		// L1 map.
+		return 0
+	} else {
+		return uint(i) >> arenaL1Shift
+	}
+}
+
+// l2 returns the "l2" portion of an arenaIdx.
+//
+// Marked nosplit because it's called by spanOf and other nosplit
+// functions.
+//
+//go:nosplit
+func (i arenaIdx) l2() uint {
+	if arenaL1Bits == 0 {
+		return uint(i)
+	} else {
+		return uint(i) & (1<<arenaL2Bits - 1)
+	}
+}
+
+// spanOf returns the span of p. If p does not point into the heap
+// arena or no span has ever contained p, spanOf returns nil.
+//
+// If p does not point to allocated memory, this may return a non-nil
+// span that does *not* contain p. If this is a possibility, the
+// caller should either call spanOfHeap or check the span bounds
+// explicitly.
+//
+// Must be nosplit because it has callers that are nosplit.
+//
+//go:nosplit
+func spanOf(p uintptr) *mspan {
+	ri := arenaIndex(p)
+	if arenaL1Bits == 0 {
+		// If there's no L1, then ri.l1() can't be out of bounds but ri.l2() can.
+		if ri.l2() >= uint(len(mheap_.arenas[0])) {
+			return nil
+		}
+	} else {
+		// If there's an L1, then ri.l1() can be out of bounds but ri.l2() can't.
+		if ri.l1() >= uint(len(mheap_.arenas)) {
+			return nil
+		}
+	}
+	l2 := mheap_.arenas[ri.l1()]
+	if arenaL1Bits != 0 && l2 == nil { // Should never happen if there's no L1.
+		return nil
+	}
+	ha := l2[ri.l2()]
+	if ha == nil {
+		return nil
+	}
+	return ha.spans[(p/pageSize)%pagesPerArena]
+}
+
+// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
+// that p points into an allocated heap arena.
+//
+// Must be nosplit because it has callers that are nosplit.
+//
+//go:nosplit
+func spanOfUnchecked(p uintptr) *mspan {
+	ai := arenaIndex(p)
+	return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
+}
+
+// spanOfHeap is like spanOf, but returns nil if p does not point to a
+// heap object.
+//
+// Must be nosplit because it has callers that are nosplit.
+//
+//go:nosplit
+func spanOfHeap(p uintptr) *mspan {
+	s := spanOf(p)
+	// s is nil if it's never been allocated. Otherwise, we check
+	// its state first because we don't trust this pointer, so we
+	// have to synchronize with span initialization. Then, it's
+	// still possible we picked up a stale span pointer, so we
+	// have to check the span's bounds.
+	if s == nil || s.state.get() != mSpanInUse || p < s.base() || p >= s.limit {
+		return nil
+	}
+	return s
+}
+
+// pageIndexOf returns the arena, page index, and page mask for pointer p.
+// The caller must ensure p is in the heap.
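+//
+// Illustrative use (this is how initSpan below publishes a span to the
+// page sweeper):
+//
+//	arena, pageIdx, pageMask := pageIndexOf(s.base())
+//	atomic.Or8(&arena.pageInUse[pageIdx], pageMask)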
+func pageIndexOf(p uintptr) (arena *heapArena, pageIdx uintptr, pageMask uint8) { + ai := arenaIndex(p) + arena = mheap_.arenas[ai.l1()][ai.l2()] + pageIdx = ((p / pageSize) / 8) % uintptr(len(arena.pageInUse)) + pageMask = byte(1 << ((p / pageSize) % 8)) + return +} + +// Initialize the heap. +func (h *mheap) init() { + lockInit(&h.lock, lockRankMheap) + lockInit(&h.speciallock, lockRankMheapSpecial) + + h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys) + h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys) + h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys) + h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys) + h.specialReachableAlloc.init(unsafe.Sizeof(specialReachable{}), nil, nil, &memstats.other_sys) + h.specialPinCounterAlloc.init(unsafe.Sizeof(specialPinCounter{}), nil, nil, &memstats.other_sys) + h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys) + + // Don't zero mspan allocations. Background sweeping can + // inspect a span concurrently with allocating it, so it's + // important that the span's sweepgen survive across freeing + // and re-allocating a span to prevent background sweeping + // from improperly cas'ing it from 0. + // + // This is safe because mspan contains no heap pointers. + h.spanalloc.zero = false + + // h->mapcache needs no init + + for i := range h.central { + h.central[i].mcentral.init(spanClass(i)) + } + + h.pages.init(&h.lock, &memstats.gcMiscSys, false) +} + +// reclaim sweeps and reclaims at least npage pages into the heap. +// It is called before allocating npage pages to keep growth in check. +// +// reclaim implements the page-reclaimer half of the sweeper. +// +// h.lock must NOT be held. +func (h *mheap) reclaim(npage uintptr) { + // TODO(austin): Half of the time spent freeing spans is in + // locking/unlocking the heap (even with low contention). We + // could make the slow path here several times faster by + // batching heap frees. + + // Bail early if there's no more reclaim work. + if h.reclaimIndex.Load() >= 1<<63 { + return + } + + // Disable preemption so the GC can't start while we're + // sweeping, so we can read h.sweepArenas, and so + // traceGCSweepStart/Done pair on the P. + mp := acquirem() + + trace := traceAcquire() + if trace.ok() { + trace.GCSweepStart() + traceRelease(trace) + } + + arenas := h.sweepArenas + locked := false + for npage > 0 { + // Pull from accumulated credit first. + if credit := h.reclaimCredit.Load(); credit > 0 { + take := credit + if take > npage { + // Take only what we need. + take = npage + } + if h.reclaimCredit.CompareAndSwap(credit, credit-take) { + npage -= take + } + continue + } + + // Claim a chunk of work. + idx := uintptr(h.reclaimIndex.Add(pagesPerReclaimerChunk) - pagesPerReclaimerChunk) + if idx/pagesPerArena >= uintptr(len(arenas)) { + // Page reclaiming is done. + h.reclaimIndex.Store(1 << 63) + break + } + + if !locked { + // Lock the heap for reclaimChunk. + lock(&h.lock) + locked = true + } + + // Scan this chunk. + nfound := h.reclaimChunk(arenas, idx, pagesPerReclaimerChunk) + if nfound <= npage { + npage -= nfound + } else { + // Put spare pages toward global credit. 
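+			// (nfound > npage here, so the nfound-npage surplus pages
+			// feed the credit pool consumed by the CAS loop above.)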
+			h.reclaimCredit.Add(nfound - npage)
+			npage = 0
+		}
+	}
+	if locked {
+		unlock(&h.lock)
+	}
+
+	trace = traceAcquire()
+	if trace.ok() {
+		trace.GCSweepDone()
+		traceRelease(trace)
+	}
+	releasem(mp)
+}
+
+// reclaimChunk sweeps unmarked spans that start at page indexes [pageIdx, pageIdx+n).
+// It returns the number of pages returned to the heap.
+//
+// h.lock must be held and the caller must be non-preemptible. Note: h.lock may be
+// temporarily unlocked and re-locked in order to do sweeping or if tracing is
+// enabled.
+func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
+	// The heap lock must be held because this accesses the
+	// heapArena.spans arrays using potentially non-live pointers.
+	// In particular, if a span were freed and merged concurrently
+	// with this probing heapArena.spans, it would be possible to
+	// observe arbitrary, stale span pointers.
+	assertLockHeld(&h.lock)
+
+	n0 := n
+	var nFreed uintptr
+	sl := sweep.active.begin()
+	if !sl.valid {
+		return 0
+	}
+	for n > 0 {
+		ai := arenas[pageIdx/pagesPerArena]
+		ha := h.arenas[ai.l1()][ai.l2()]
+
+		// Get a chunk of the bitmap to work on.
+		arenaPage := uint(pageIdx % pagesPerArena)
+		inUse := ha.pageInUse[arenaPage/8:]
+		marked := ha.pageMarks[arenaPage/8:]
+		if uintptr(len(inUse)) > n/8 {
+			inUse = inUse[:n/8]
+			marked = marked[:n/8]
+		}
+
+		// Scan this bitmap chunk for spans that are in-use
+		// but have no marked objects on them.
+		for i := range inUse {
+			inUseUnmarked := atomic.Load8(&inUse[i]) &^ marked[i]
+			if inUseUnmarked == 0 {
+				continue
+			}
+
+			for j := uint(0); j < 8; j++ {
+				if inUseUnmarked&(1<<j) != 0 {
+					s := ha.spans[arenaPage+uint(i)*8+j]
+					if s, ok := sl.tryAcquire(s); ok {
+						npages := s.npages
+						unlock(&h.lock)
+						if s.sweep(false) {
+							nFreed += npages
+						}
+						lock(&h.lock)
+						// Reload inUse. It's possible nearby
+						// spans were freed when we dropped the
+						// lock and they don't affect this bit.
+						inUseUnmarked = atomic.Load8(&inUse[i]) &^ marked[i]
+					}
+				}
+			}
+		}
+
+		// Advance.
+		pageIdx += uintptr(len(inUse) * 8)
+		n -= uintptr(len(inUse) * 8)
+	}
+	sweep.active.end(sl)
+	trace := traceAcquire()
+	if trace.ok() {
+		unlock(&h.lock)
+		// Account for pages scanned but not reclaimed.
+		trace.GCSweepSpan((n0 - nFreed) * pageSize)
+		traceRelease(trace)
+		lock(&h.lock)
+	}
+
+	assertLockHeld(&h.lock) // Must be locked on return.
+	return nFreed
+}
+
+// spanAllocType represents the type of allocation to make, or
+// the type of allocation to be freed.
+type spanAllocType uint8
+
+const (
+	spanAllocHeap          spanAllocType = iota // heap span
+	spanAllocStack                              // stack span
+	spanAllocPtrScalarBits                      // unrolled GC prog bitmap span
+	spanAllocWorkBuf                            // work buf span
+)
+
+// manual returns true if the span allocation is manually managed.
+func (typ spanAllocType) manual() bool {
+	return typ != spanAllocHeap
+}
+
+// alloc allocates a new span of npage pages from the GC'd heap.
+//
+// spanclass indicates the span's size class and scannability.
+//
+// Returns a span that has been fully initialized. span.needzero indicates
+// whether the span has been zeroed. Note that it may not be.
+func (h *mheap) alloc(npages uintptr, spanclass spanClass) *mspan {
+	// Don't do any operations that lock the heap on the G stack.
+	// It might trigger stack growth, and the stack growth code needs
+	// to be able to allocate heap.
+	var s *mspan
+	systemstack(func() {
+		// To prevent excessive heap growth, before allocating n pages
+		// we need to sweep and reclaim at least n pages.
+		if !isSweepDone() {
+			h.reclaim(npages)
+		}
+		s = h.allocSpan(npages, spanAllocHeap, spanclass)
+	})
+	return s
+}
+
+// allocManual allocates a manually-managed span of npage pages.
+// allocManual returns nil if allocation fails.
+//
+// allocManual adds the bytes used to *stat, which should be a
+// memstats in-use field. Unlike allocations in the GC'd heap, the
+// allocation does *not* count toward heapInUse.
+//
+// The memory backing the returned span may not be zeroed if
+// span.needzero is set.
+//
+// allocManual must be called on the system stack because it may
+// acquire the heap lock via allocSpan. See mheap for details.
+//
+// If new code is written to call allocManual, do NOT use an
+// existing spanAllocType value and instead declare a new one.
+//
+//go:systemstack
+func (h *mheap) allocManual(npages uintptr, typ spanAllocType) *mspan {
+	if !typ.manual() {
+		throw("manual span allocation called with non-manually-managed type")
+	}
+	return h.allocSpan(npages, typ, 0)
+}
+
+// setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
+// is s.
+func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
+	p := base / pageSize
+	ai := arenaIndex(base)
+	ha := h.arenas[ai.l1()][ai.l2()]
+	for n := uintptr(0); n < npage; n++ {
+		i := (p + n) % pagesPerArena
+		if i == 0 {
+			ai = arenaIndex(base + n*pageSize)
+			ha = h.arenas[ai.l1()][ai.l2()]
+		}
+		ha.spans[i] = s
+	}
+}
+
+// allocNeedsZero checks if the region of address space [base, base+npage*pageSize),
+// assumed to be allocated, needs to be zeroed, updating heap arena metadata for
+// future allocations.
+//
+// This must be called each time pages are allocated from the heap, even if the page
+// allocator can otherwise prove the memory it's allocating is already zero because
+// they're fresh from the operating system. It updates heapArena metadata that is
+// critical for future page allocations.
+//
+// There are no locking constraints on this method.
+func (h *mheap) allocNeedsZero(base, npage uintptr) (needZero bool) {
+	for npage > 0 {
+		ai := arenaIndex(base)
+		ha := h.arenas[ai.l1()][ai.l2()]
+
+		zeroedBase := atomic.Loaduintptr(&ha.zeroedBase)
+		arenaBase := base % heapArenaBytes
+		if arenaBase < zeroedBase {
+			// We extended into the non-zeroed part of the
+			// arena, so this region needs to be zeroed before use.
+			//
+			// zeroedBase is monotonically increasing, so if we see this now then
+			// we can be sure we need to zero this memory region.
+			//
+			// We still need to update zeroedBase for this arena, and
+			// potentially more arenas.
+			needZero = true
+		}
+		// We may observe arenaBase > zeroedBase if we're racing with one or more
+		// allocations which are acquiring memory directly before us in the address
+		// space. But, because we know no one else is acquiring *this* memory, it's
+		// still safe to not zero.
+
+		// Compute how far we extend into the arena, capped
+		// at heapArenaBytes.
+		arenaLimit := arenaBase + npage*pageSize
+		if arenaLimit > heapArenaBytes {
+			arenaLimit = heapArenaBytes
+		}
+		// Increase ha.zeroedBase so it's >= arenaLimit.
+		// We may be racing with other updates.
+		for arenaLimit > zeroedBase {
+			if atomic.Casuintptr(&ha.zeroedBase, zeroedBase, arenaLimit) {
+				break
+			}
+			zeroedBase = atomic.Loaduintptr(&ha.zeroedBase)
+			// Double check basic conditions of zeroedBase.
+			if zeroedBase <= arenaLimit && zeroedBase > arenaBase {
+				// The zeroedBase moved into the space we were trying to
+				// claim. That's very bad, and indicates someone allocated
+				// the same region we did.
+				throw("potentially overlapping in-use allocations detected")
+			}
+		}
+
+		// Move base forward and subtract from npage to move into
+		// the next arena, or finish.
+		base += arenaLimit - arenaBase
+		npage -= (arenaLimit - arenaBase) / pageSize
+	}
+	return
+}
+
+// tryAllocMSpan attempts to allocate an mspan object from
+// the P-local cache, but may fail.
+//
+// h.lock need not be held.
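+//
+// (Illustrative: each P keeps a small fixed-size stack of *mspan;
+// tryAllocMSpan pops from it, and allocMSpanLocked below refills it
+// from spanalloc when it runs dry.)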
+// +// This caller must ensure that its P won't change underneath +// it during this function. Currently to ensure that we enforce +// that the function is run on the system stack, because that's +// the only place it is used now. In the future, this requirement +// may be relaxed if its use is necessary elsewhere. +// +//go:systemstack +func (h *mheap) tryAllocMSpan() *mspan { + pp := getg().m.p.ptr() + // If we don't have a p or the cache is empty, we can't do + // anything here. + if pp == nil || pp.mspancache.len == 0 { + return nil + } + // Pull off the last entry in the cache. + s := pp.mspancache.buf[pp.mspancache.len-1] + pp.mspancache.len-- + return s +} + +// allocMSpanLocked allocates an mspan object. +// +// h.lock must be held. +// +// allocMSpanLocked must be called on the system stack because +// its caller holds the heap lock. See mheap for details. +// Running on the system stack also ensures that we won't +// switch Ps during this function. See tryAllocMSpan for details. +// +//go:systemstack +func (h *mheap) allocMSpanLocked() *mspan { + assertLockHeld(&h.lock) + + pp := getg().m.p.ptr() + if pp == nil { + // We don't have a p so just do the normal thing. + return (*mspan)(h.spanalloc.alloc()) + } + // Refill the cache if necessary. + if pp.mspancache.len == 0 { + const refillCount = len(pp.mspancache.buf) / 2 + for i := 0; i < refillCount; i++ { + pp.mspancache.buf[i] = (*mspan)(h.spanalloc.alloc()) + } + pp.mspancache.len = refillCount + } + // Pull off the last entry in the cache. + s := pp.mspancache.buf[pp.mspancache.len-1] + pp.mspancache.len-- + return s +} + +// freeMSpanLocked free an mspan object. +// +// h.lock must be held. +// +// freeMSpanLocked must be called on the system stack because +// its caller holds the heap lock. See mheap for details. +// Running on the system stack also ensures that we won't +// switch Ps during this function. See tryAllocMSpan for details. +// +//go:systemstack +func (h *mheap) freeMSpanLocked(s *mspan) { + assertLockHeld(&h.lock) + + pp := getg().m.p.ptr() + // First try to free the mspan directly to the cache. + if pp != nil && pp.mspancache.len < len(pp.mspancache.buf) { + pp.mspancache.buf[pp.mspancache.len] = s + pp.mspancache.len++ + return + } + // Failing that (or if we don't have a p), just free it to + // the heap. + h.spanalloc.free(unsafe.Pointer(s)) +} + +// allocSpan allocates an mspan which owns npages worth of memory. +// +// If typ.manual() == false, allocSpan allocates a heap span of class spanclass +// and updates heap accounting. If manual == true, allocSpan allocates a +// manually-managed span (spanclass is ignored), and the caller is +// responsible for any accounting related to its use of the span. Either +// way, allocSpan will atomically add the bytes in the newly allocated +// span to *sysStat. +// +// The returned span is fully initialized. +// +// h.lock must not be held. +// +// allocSpan must be called on the system stack both because it acquires +// the heap lock and because it must block GC transitions. +// +//go:systemstack +func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) { + // Function-global state. + gp := getg() + base, scav := uintptr(0), uintptr(0) + growth := uintptr(0) + + // On some platforms we need to provide physical page aligned stack + // allocations. Where the page size is less than the physical page + // size, we already manage to do this by default. 
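+	// (When alignment is needed, the code below overallocates by
+	// physPageSize/pageSize pages and aligns base with alignUp.)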
+ needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize + + // If the allocation is small enough, try the page cache! + // The page cache does not support aligned allocations, so we cannot use + // it if we need to provide a physical page aligned stack allocation. + pp := gp.m.p.ptr() + if !needPhysPageAlign && pp != nil && npages < pageCachePages/4 { + c := &pp.pcache + + // If the cache is empty, refill it. + if c.empty() { + lock(&h.lock) + *c = h.pages.allocToCache() + unlock(&h.lock) + } + + // Try to allocate from the cache. + base, scav = c.alloc(npages) + if base != 0 { + s = h.tryAllocMSpan() + if s != nil { + goto HaveSpan + } + // We have a base but no mspan, so we need + // to lock the heap. + } + } + + // For one reason or another, we couldn't get the + // whole job done without the heap lock. + lock(&h.lock) + + if needPhysPageAlign { + // Overallocate by a physical page to allow for later alignment. + extraPages := physPageSize / pageSize + + // Find a big enough region first, but then only allocate the + // aligned portion. We can't just allocate and then free the + // edges because we need to account for scavenged memory, and + // that's difficult with alloc. + // + // Note that we skip updates to searchAddr here. It's OK if + // it's stale and higher than normal; it'll operate correctly, + // just come with a performance cost. + base, _ = h.pages.find(npages + extraPages) + if base == 0 { + var ok bool + growth, ok = h.grow(npages + extraPages) + if !ok { + unlock(&h.lock) + return nil + } + base, _ = h.pages.find(npages + extraPages) + if base == 0 { + throw("grew heap, but no adequate free space found") + } + } + base = alignUp(base, physPageSize) + scav = h.pages.allocRange(base, npages) + } + + if base == 0 { + // Try to acquire a base address. + base, scav = h.pages.alloc(npages) + if base == 0 { + var ok bool + growth, ok = h.grow(npages) + if !ok { + unlock(&h.lock) + return nil + } + base, scav = h.pages.alloc(npages) + if base == 0 { + throw("grew heap, but no adequate free space found") + } + } + } + if s == nil { + // We failed to get an mspan earlier, so grab + // one now that we have the heap lock. + s = h.allocMSpanLocked() + } + unlock(&h.lock) + +HaveSpan: + // Decide if we need to scavenge in response to what we just allocated. + // Specifically, we track the maximum amount of memory to scavenge of all + // the alternatives below, assuming that the maximum satisfies *all* + // conditions we check (e.g. if we need to scavenge X to satisfy the + // memory limit and Y to satisfy heap-growth scavenging, and Y > X, then + // it's fine to pick Y, because the memory limit is still satisfied). + // + // It's fine to do this after allocating because we expect any scavenged + // pages not to get touched until we return. Simultaneously, it's important + // to do this before calling sysUsed because that may commit address space. + bytesToScavenge := uintptr(0) + forceScavenge := false + if limit := gcController.memoryLimit.Load(); !gcCPULimiter.limiting() { + // Assist with scavenging to maintain the memory limit by the amount + // that we expect to page in. + inuse := gcController.mappedReady.Load() + // Be careful about overflow, especially with uintptrs. Even on 32-bit platforms + // someone can set a really big memory limit that isn't maxInt64. 
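+		// (That's why the comparison below is done in uint64: scav and
+		// inuse are both bounded by the address space, so their uint64
+		// sum cannot wrap even on 32-bit platforms.)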
+		if uint64(scav)+inuse > uint64(limit) {
+			bytesToScavenge = uintptr(uint64(scav) + inuse - uint64(limit))
+			forceScavenge = true
+		}
+	}
+	if goal := scavenge.gcPercentGoal.Load(); goal != ^uint64(0) && growth > 0 {
+		// We just caused a heap growth, so scavenge down what will soon be used.
+		// By scavenging inline we deal with the failure to allocate out of
+		// memory fragments by scavenging the memory fragments that are least
+		// likely to be re-used.
+		//
+		// Only bother with this because we're not using a memory limit. We don't
+		// care about heap growths as long as we're under the memory limit, and the
+		// previous check for scavenging already handles that.
+		if retained := heapRetained(); retained+uint64(growth) > goal {
+			// The scavenging algorithm requires the heap lock to be dropped so it
+			// can acquire it only sparingly. This is a potentially expensive operation
+			// so it frees up other goroutines to allocate in the meanwhile. In fact,
+			// they can make use of the growth we just created.
+			todo := growth
+			if overage := uintptr(retained + uint64(growth) - goal); todo > overage {
+				todo = overage
+			}
+			if todo > bytesToScavenge {
+				bytesToScavenge = todo
+			}
+		}
+	}
+	// There are a few very limited circumstances where we won't have a P here.
+	// It's OK to simply skip scavenging in these cases. Something else will notice
+	// and pick up the tab.
+	var now int64
+	if pp != nil && bytesToScavenge > 0 {
+		// Measure how long we spent scavenging and add that measurement to the assist
+		// time so we can track it for the GC CPU limiter.
+		//
+		// Limiter event tracking might be disabled if we end up here
+		// while on a mark worker.
+		start := nanotime()
+		track := pp.limiterEvent.start(limiterEventScavengeAssist, start)
+
+		// Scavenge, but back out if the limiter turns on.
+		released := h.pages.scavenge(bytesToScavenge, func() bool {
+			return gcCPULimiter.limiting()
+		}, forceScavenge)
+
+		mheap_.pages.scav.releasedEager.Add(released)
+
+		// Finish up accounting.
+		now = nanotime()
+		if track {
+			pp.limiterEvent.stop(limiterEventScavengeAssist, now)
+		}
+		scavenge.assistTime.Add(now - start)
+	}
+
+	// Initialize the span.
+	h.initSpan(s, typ, spanclass, base, npages)
+
+	// Commit and account for any scavenged memory that the span now owns.
+	nbytes := npages * pageSize
+	if scav != 0 {
+		// sysUsed all the pages that are actually available
+		// in the span since some of them might be scavenged.
+		sysUsed(unsafe.Pointer(base), nbytes, scav)
+		gcController.heapReleased.add(-int64(scav))
+	}
+	// Update stats.
+	gcController.heapFree.add(-int64(nbytes - scav))
+	if typ == spanAllocHeap {
+		gcController.heapInUse.add(int64(nbytes))
+	}
+	// Update consistent stats.
+	stats := memstats.heapStats.acquire()
+	atomic.Xaddint64(&stats.committed, int64(scav))
+	atomic.Xaddint64(&stats.released, -int64(scav))
+	switch typ {
+	case spanAllocHeap:
+		atomic.Xaddint64(&stats.inHeap, int64(nbytes))
+	case spanAllocStack:
+		atomic.Xaddint64(&stats.inStacks, int64(nbytes))
+	case spanAllocPtrScalarBits:
+		atomic.Xaddint64(&stats.inPtrScalarBits, int64(nbytes))
+	case spanAllocWorkBuf:
+		atomic.Xaddint64(&stats.inWorkBufs, int64(nbytes))
+	}
+	memstats.heapStats.release()
+
+	pageTraceAlloc(pp, now, base, npages)
+	return s
+}
+
+// initSpan initializes a blank span s which will represent the range
+// [base, base+npages*pageSize). typ is the type of span being allocated.
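+//
+// (Illustrative: for a one-page, 8 KiB noscan span of 48-byte elements,
+// initSpan computes nelems == 170, since 8192/48 == 170 with 32 bytes
+// of tail waste.)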
+func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages uintptr) { + // At this point, both s != nil and base != 0, and the heap + // lock is no longer held. Initialize the span. + s.init(base, npages) + if h.allocNeedsZero(base, npages) { + s.needzero = 1 + } + nbytes := npages * pageSize + if typ.manual() { + s.manualFreeList = 0 + s.nelems = 0 + s.limit = s.base() + s.npages*pageSize + s.state.set(mSpanManual) + } else { + // We must set span properties before the span is published anywhere + // since we're not holding the heap lock. + s.spanclass = spanclass + if sizeclass := spanclass.sizeclass(); sizeclass == 0 { + s.elemsize = nbytes + s.nelems = 1 + s.divMul = 0 + } else { + s.elemsize = uintptr(class_to_size[sizeclass]) + if goexperiment.AllocHeaders && !s.spanclass.noscan() && heapBitsInSpan(s.elemsize) { + // In the allocheaders experiment, reserve space for the pointer/scan bitmap at the end. + s.nelems = uint16((nbytes - (nbytes / goarch.PtrSize / 8)) / s.elemsize) + } else { + s.nelems = uint16(nbytes / s.elemsize) + } + s.divMul = class_to_divmagic[sizeclass] + } + + // Initialize mark and allocation structures. + s.freeindex = 0 + s.freeIndexForScan = 0 + s.allocCache = ^uint64(0) // all 1s indicating all free. + s.gcmarkBits = newMarkBits(uintptr(s.nelems)) + s.allocBits = newAllocBits(uintptr(s.nelems)) + + // It's safe to access h.sweepgen without the heap lock because it's + // only ever updated with the world stopped and we run on the + // systemstack which blocks a STW transition. + atomic.Store(&s.sweepgen, h.sweepgen) + + // Now that the span is filled in, set its state. This + // is a publication barrier for the other fields in + // the span. While valid pointers into this span + // should never be visible until the span is returned, + // if the garbage collector finds an invalid pointer, + // access to the span may race with initialization of + // the span. We resolve this race by atomically + // setting the state after the span is fully + // initialized, and atomically checking the state in + // any situation where a pointer is suspect. + s.state.set(mSpanInUse) + } + + // Publish the span in various locations. + + // This is safe to call without the lock held because the slots + // related to this span will only ever be read or modified by + // this thread until pointers into the span are published (and + // we execute a publication barrier at the end of this function + // before that happens) or pageInUse is updated. + h.setSpans(s.base(), npages, s) + + if !typ.manual() { + // Mark in-use span in arena page bitmap. + // + // This publishes the span to the page sweeper, so + // it's imperative that the span be completely initialized + // prior to this line. + arena, pageIdx, pageMask := pageIndexOf(s.base()) + atomic.Or8(&arena.pageInUse[pageIdx], pageMask) + + // Update related page sweeper stats. + h.pagesInUse.Add(npages) + } + + // Make sure the newly allocated span will be observed + // by the GC before pointers into the span are published. + publicationBarrier() +} + +// Try to add at least npage pages of memory to the heap, +// returning how much the heap grew by and whether it worked. +// +// h.lock must be held. +func (h *mheap) grow(npage uintptr) (uintptr, bool) { + assertLockHeld(&h.lock) + + // We must grow the heap in whole palloc chunks. 
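+	// (A palloc chunk is pallocChunkPages pages of address space; with
+	// the default 8 KiB page size that is 4 MiB per chunk.)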
+ // We call sysMap below but note that because we + // round up to pallocChunkPages which is on the order + // of MiB (generally >= to the huge page size) we + // won't be calling it too much. + ask := alignUp(npage, pallocChunkPages) * pageSize + + totalGrowth := uintptr(0) + // This may overflow because ask could be very large + // and is otherwise unrelated to h.curArena.base. + end := h.curArena.base + ask + nBase := alignUp(end, physPageSize) + if nBase > h.curArena.end || /* overflow */ end < h.curArena.base { + // Not enough room in the current arena. Allocate more + // arena space. This may not be contiguous with the + // current arena, so we have to request the full ask. + av, asize := h.sysAlloc(ask, &h.arenaHints, true) + if av == nil { + inUse := gcController.heapFree.load() + gcController.heapReleased.load() + gcController.heapInUse.load() + print("runtime: out of memory: cannot allocate ", ask, "-byte block (", inUse, " in use)\n") + return 0, false + } + + if uintptr(av) == h.curArena.end { + // The new space is contiguous with the old + // space, so just extend the current space. + h.curArena.end = uintptr(av) + asize + } else { + // The new space is discontiguous. Track what + // remains of the current space and switch to + // the new space. This should be rare. + if size := h.curArena.end - h.curArena.base; size != 0 { + // Transition this space from Reserved to Prepared and mark it + // as released since we'll be able to start using it after updating + // the page allocator and releasing the lock at any time. + sysMap(unsafe.Pointer(h.curArena.base), size, &gcController.heapReleased) + // Update stats. + stats := memstats.heapStats.acquire() + atomic.Xaddint64(&stats.released, int64(size)) + memstats.heapStats.release() + // Update the page allocator's structures to make this + // space ready for allocation. + h.pages.grow(h.curArena.base, size) + totalGrowth += size + } + // Switch to the new space. + h.curArena.base = uintptr(av) + h.curArena.end = uintptr(av) + asize + } + + // Recalculate nBase. + // We know this won't overflow, because sysAlloc returned + // a valid region starting at h.curArena.base which is at + // least ask bytes in size. + nBase = alignUp(h.curArena.base+ask, physPageSize) + } + + // Grow into the current arena. + v := h.curArena.base + h.curArena.base = nBase + + // Transition the space we're going to use from Reserved to Prepared. + // + // The allocation is always aligned to the heap arena + // size which is always > physPageSize, so its safe to + // just add directly to heapReleased. + sysMap(unsafe.Pointer(v), nBase-v, &gcController.heapReleased) + + // The memory just allocated counts as both released + // and idle, even though it's not yet backed by spans. + stats := memstats.heapStats.acquire() + atomic.Xaddint64(&stats.released, int64(nBase-v)) + memstats.heapStats.release() + + // Update the page allocator's structures to make this + // space ready for allocation. + h.pages.grow(v, nBase-v) + totalGrowth += nBase - v + return totalGrowth, true +} + +// Free the span back into the heap. +func (h *mheap) freeSpan(s *mspan) { + systemstack(func() { + pageTraceFree(getg().m.p.ptr(), 0, s.base(), s.npages) + + lock(&h.lock) + if msanenabled { + // Tell msan that this entire span is no longer in use. + base := unsafe.Pointer(s.base()) + bytes := s.npages << _PageShift + msanfree(base, bytes) + } + if asanenabled { + // Tell asan that this entire span is no longer in use. 
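+		// (asanpoison below mirrors the msanfree call above, covering
+		// the span's full page range so later accesses are reported.)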
+ base := unsafe.Pointer(s.base()) + bytes := s.npages << _PageShift + asanpoison(base, bytes) + } + h.freeSpanLocked(s, spanAllocHeap) + unlock(&h.lock) + }) +} + +// freeManual frees a manually-managed span returned by allocManual. +// typ must be the same as the spanAllocType passed to the allocManual that +// allocated s. +// +// This must only be called when gcphase == _GCoff. See mSpanState for +// an explanation. +// +// freeManual must be called on the system stack because it acquires +// the heap lock. See mheap for details. +// +//go:systemstack +func (h *mheap) freeManual(s *mspan, typ spanAllocType) { + pageTraceFree(getg().m.p.ptr(), 0, s.base(), s.npages) + + s.needzero = 1 + lock(&h.lock) + h.freeSpanLocked(s, typ) + unlock(&h.lock) +} + +func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) { + assertLockHeld(&h.lock) + + switch s.state.get() { + case mSpanManual: + if s.allocCount != 0 { + throw("mheap.freeSpanLocked - invalid stack free") + } + case mSpanInUse: + if s.isUserArenaChunk { + throw("mheap.freeSpanLocked - invalid free of user arena chunk") + } + if s.allocCount != 0 || s.sweepgen != h.sweepgen { + print("mheap.freeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n") + throw("mheap.freeSpanLocked - invalid free") + } + h.pagesInUse.Add(-s.npages) + + // Clear in-use bit in arena page bitmap. + arena, pageIdx, pageMask := pageIndexOf(s.base()) + atomic.And8(&arena.pageInUse[pageIdx], ^pageMask) + default: + throw("mheap.freeSpanLocked - invalid span state") + } + + // Update stats. + // + // Mirrors the code in allocSpan. + nbytes := s.npages * pageSize + gcController.heapFree.add(int64(nbytes)) + if typ == spanAllocHeap { + gcController.heapInUse.add(-int64(nbytes)) + } + // Update consistent stats. + stats := memstats.heapStats.acquire() + switch typ { + case spanAllocHeap: + atomic.Xaddint64(&stats.inHeap, -int64(nbytes)) + case spanAllocStack: + atomic.Xaddint64(&stats.inStacks, -int64(nbytes)) + case spanAllocPtrScalarBits: + atomic.Xaddint64(&stats.inPtrScalarBits, -int64(nbytes)) + case spanAllocWorkBuf: + atomic.Xaddint64(&stats.inWorkBufs, -int64(nbytes)) + } + memstats.heapStats.release() + + // Mark the space as free. + h.pages.free(s.base(), s.npages) + + // Free the span structure. We no longer have a use for it. + s.state.set(mSpanDead) + h.freeMSpanLocked(s) +} + +// scavengeAll acquires the heap lock (blocking any additional +// manipulation of the page allocator) and iterates over the whole +// heap, scavenging every free page available. +// +// Must run on the system stack because it acquires the heap lock. +// +//go:systemstack +func (h *mheap) scavengeAll() { + // Disallow malloc or panic while holding the heap lock. We do + // this here because this is a non-mallocgc entry-point to + // the mheap API. + gp := getg() + gp.m.mallocing++ + + // Force scavenge everything. + released := h.pages.scavenge(^uintptr(0), nil, true) + + gp.m.mallocing-- + + if debug.scavtrace > 0 { + printScavTrace(0, released, true) + } +} + +//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory +func runtime_debug_freeOSMemory() { + GC() + systemstack(func() { mheap_.scavengeAll() }) +} + +// Initialize a new span with the given start and npages. +func (span *mspan) init(base uintptr, npages uintptr) { + // span is *not* zeroed. 
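+	// (Zeroing is disabled for spanalloc in mheap.init, and mspan
+	// objects are recycled, so each field below is reset explicitly.)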
+ span.next = nil + span.prev = nil + span.list = nil + span.startAddr = base + span.npages = npages + span.allocCount = 0 + span.spanclass = 0 + span.elemsize = 0 + span.speciallock.key = 0 + span.specials = nil + span.needzero = 0 + span.freeindex = 0 + span.freeIndexForScan = 0 + span.allocBits = nil + span.gcmarkBits = nil + span.pinnerBits = nil + span.state.set(mSpanDead) + lockInit(&span.speciallock, lockRankMspanSpecial) +} + +func (span *mspan) inList() bool { + return span.list != nil +} + +// Initialize an empty doubly-linked list. +func (list *mSpanList) init() { + list.first = nil + list.last = nil +} + +func (list *mSpanList) remove(span *mspan) { + if span.list != list { + print("runtime: failed mSpanList.remove span.npages=", span.npages, + " span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n") + throw("mSpanList.remove") + } + if list.first == span { + list.first = span.next + } else { + span.prev.next = span.next + } + if list.last == span { + list.last = span.prev + } else { + span.next.prev = span.prev + } + span.next = nil + span.prev = nil + span.list = nil +} + +func (list *mSpanList) isEmpty() bool { + return list.first == nil +} + +func (list *mSpanList) insert(span *mspan) { + if span.next != nil || span.prev != nil || span.list != nil { + println("runtime: failed mSpanList.insert", span, span.next, span.prev, span.list) + throw("mSpanList.insert") + } + span.next = list.first + if list.first != nil { + // The list contains at least one span; link it in. + // The last span in the list doesn't change. + list.first.prev = span + } else { + // The list contains no spans, so this is also the last span. + list.last = span + } + list.first = span + span.list = list +} + +func (list *mSpanList) insertBack(span *mspan) { + if span.next != nil || span.prev != nil || span.list != nil { + println("runtime: failed mSpanList.insertBack", span, span.next, span.prev, span.list) + throw("mSpanList.insertBack") + } + span.prev = list.last + if list.last != nil { + // The list contains at least one span. + list.last.next = span + } else { + // The list contains no spans, so this is also the first span. + list.first = span + } + list.last = span + span.list = list +} + +// takeAll removes all spans from other and inserts them at the front +// of list. +func (list *mSpanList) takeAll(other *mSpanList) { + if other.isEmpty() { + return + } + + // Reparent everything in other to list. + for s := other.first; s != nil; s = s.next { + s.list = list + } + + // Concatenate the lists. + if list.isEmpty() { + *list = *other + } else { + // Neither list is empty. Put other before list. + other.last.next = list.first + list.first.prev = other.last + list.first = other.first + } + + other.first, other.last = nil, nil +} + +const ( + _KindSpecialFinalizer = 1 + _KindSpecialProfile = 2 + // _KindSpecialReachable is a special used for tracking + // reachability during testing. + _KindSpecialReachable = 3 + // _KindSpecialPinCounter is a special used for objects that are pinned + // multiple times + _KindSpecialPinCounter = 4 + // Note: The finalizer special must be first because if we're freeing + // an object, a finalizer special will cause the freeing operation + // to abort, and we want to keep the other special records around + // if that happens. 
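+ // For example, if a dying object carries both a finalizer and a
+ // heap profile special, sweep must see the finalizer special
+ // first: it resurrects the object and aborts the free, and the
+ // profile record must stay linked so it can still be reported
+ // when the object is eventually freed for real.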
+) + +type special struct { + _ sys.NotInHeap + next *special // linked list in span + offset uint16 // span offset of object + kind byte // kind of special +} + +// spanHasSpecials marks a span as having specials in the arena bitmap. +func spanHasSpecials(s *mspan) { + arenaPage := (s.base() / pageSize) % pagesPerArena + ai := arenaIndex(s.base()) + ha := mheap_.arenas[ai.l1()][ai.l2()] + atomic.Or8(&ha.pageSpecials[arenaPage/8], uint8(1)<<(arenaPage%8)) +} + +// spanHasNoSpecials marks a span as having no specials in the arena bitmap. +func spanHasNoSpecials(s *mspan) { + arenaPage := (s.base() / pageSize) % pagesPerArena + ai := arenaIndex(s.base()) + ha := mheap_.arenas[ai.l1()][ai.l2()] + atomic.And8(&ha.pageSpecials[arenaPage/8], ^(uint8(1) << (arenaPage % 8))) +} + +// Adds the special record s to the list of special records for +// the object p. All fields of s should be filled in except for +// offset & next, which this routine will fill in. +// Returns true if the special was successfully added, false otherwise. +// (The add will fail only if a record with the same p and s->kind +// already exists.) +func addspecial(p unsafe.Pointer, s *special) bool { + span := spanOfHeap(uintptr(p)) + if span == nil { + throw("addspecial on invalid pointer") + } + + // Ensure that the span is swept. + // Sweeping accesses the specials list w/o locks, so we have + // to synchronize with it. And it's just much safer. + mp := acquirem() + span.ensureSwept() + + offset := uintptr(p) - span.base() + kind := s.kind + + lock(&span.speciallock) + + // Find splice point, check for existing record. + iter, exists := span.specialFindSplicePoint(offset, kind) + if !exists { + // Splice in record, fill in offset. + s.offset = uint16(offset) + s.next = *iter + *iter = s + spanHasSpecials(span) + } + + unlock(&span.speciallock) + releasem(mp) + return !exists // already exists +} + +// Removes the Special record of the given kind for the object p. +// Returns the record if the record existed, nil otherwise. +// The caller must FixAlloc_Free the result. +func removespecial(p unsafe.Pointer, kind uint8) *special { + span := spanOfHeap(uintptr(p)) + if span == nil { + throw("removespecial on invalid pointer") + } + + // Ensure that the span is swept. + // Sweeping accesses the specials list w/o locks, so we have + // to synchronize with it. And it's just much safer. + mp := acquirem() + span.ensureSwept() + + offset := uintptr(p) - span.base() + + var result *special + lock(&span.speciallock) + + iter, exists := span.specialFindSplicePoint(offset, kind) + if exists { + s := *iter + *iter = s.next + result = s + } + if span.specials == nil { + spanHasNoSpecials(span) + } + unlock(&span.speciallock) + releasem(mp) + return result +} + +// Find a splice point in the sorted list and check for an already existing +// record. Returns a pointer to the next-reference in the list predecessor. +// Returns true, if the referenced item is an exact match. +func (span *mspan) specialFindSplicePoint(offset uintptr, kind byte) (**special, bool) { + // Find splice point, check for existing record. + iter := &span.specials + found := false + for { + s := *iter + if s == nil { + break + } + if offset == uintptr(s.offset) && kind == s.kind { + found = true + break + } + if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && kind < s.kind) { + break + } + iter = &s.next + } + return iter, found +} + +// The described object has a finalizer set for it. 
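+// (This is the special behind runtime.SetFinalizer. At most one
+// finalizer special can exist per object, which is what the
+// exists check in addspecial enforces.)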
+// +// specialfinalizer is allocated from non-GC'd memory, so any heap +// pointers must be specially handled. +type specialfinalizer struct { + _ sys.NotInHeap + special special + fn *funcval // May be a heap pointer. + nret uintptr + fint *_type // May be a heap pointer, but always live. + ot *ptrtype // May be a heap pointer, but always live. +} + +// Adds a finalizer to the object p. Returns true if it succeeded. +func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool { + lock(&mheap_.speciallock) + s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc()) + unlock(&mheap_.speciallock) + s.special.kind = _KindSpecialFinalizer + s.fn = f + s.nret = nret + s.fint = fint + s.ot = ot + if addspecial(p, &s.special) { + // This is responsible for maintaining the same + // GC-related invariants as markrootSpans in any + // situation where it's possible that markrootSpans + // has already run but mark termination hasn't yet. + if gcphase != _GCoff { + base, span, _ := findObject(uintptr(p), 0, 0) + mp := acquirem() + gcw := &mp.p.ptr().gcw + // Mark everything reachable from the object + // so it's retained for the finalizer. + if !span.spanclass.noscan() { + scanobject(base, gcw) + } + // Mark the finalizer itself, since the + // special isn't part of the GC'd heap. + scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil) + releasem(mp) + } + return true + } + + // There was an old finalizer + lock(&mheap_.speciallock) + mheap_.specialfinalizeralloc.free(unsafe.Pointer(s)) + unlock(&mheap_.speciallock) + return false +} + +// Removes the finalizer (if any) from the object p. +func removefinalizer(p unsafe.Pointer) { + s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer))) + if s == nil { + return // there wasn't a finalizer to remove + } + lock(&mheap_.speciallock) + mheap_.specialfinalizeralloc.free(unsafe.Pointer(s)) + unlock(&mheap_.speciallock) +} + +// The described object is being heap profiled. +type specialprofile struct { + _ sys.NotInHeap + special special + b *bucket +} + +// Set the heap profile bucket associated with addr to b. +func setprofilebucket(p unsafe.Pointer, b *bucket) { + lock(&mheap_.speciallock) + s := (*specialprofile)(mheap_.specialprofilealloc.alloc()) + unlock(&mheap_.speciallock) + s.special.kind = _KindSpecialProfile + s.b = b + if !addspecial(p, &s.special) { + throw("setprofilebucket: profile already set") + } +} + +// specialReachable tracks whether an object is reachable on the next +// GC cycle. This is used by testing. +type specialReachable struct { + special special + done bool + reachable bool +} + +// specialPinCounter tracks whether an object is pinned multiple times. +type specialPinCounter struct { + special special + counter uintptr +} + +// specialsIter helps iterate over specials lists. +type specialsIter struct { + pprev **special + s *special +} + +func newSpecialsIter(span *mspan) specialsIter { + return specialsIter{&span.specials, span.specials} +} + +func (i *specialsIter) valid() bool { + return i.s != nil +} + +func (i *specialsIter) next() { + i.pprev = &i.s.next + i.s = *i.pprev +} + +// unlinkAndNext removes the current special from the list and moves +// the iterator to the next special. It returns the unlinked special. +func (i *specialsIter) unlinkAndNext() *special { + cur := i.s + i.s = cur.next + *i.pprev = i.s + return cur +} + +// freeSpecial performs any cleanup on special s and deallocates it. 
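+// A typical caller is the sweeper, which walks a span's list with a
+// specialsIter and passes each entry it unlinks here; roughly (a
+// sketch, with shouldFree standing in for the real sweep logic):
+//
+//	for iter := newSpecialsIter(span); iter.valid(); {
+//		if shouldFree(iter.s) {
+//			s := iter.unlinkAndNext()
+//			freeSpecial(s, p, size)
+//		} else {
+//			iter.next()
+//		}
+//	}
+//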
+// s must already be unlinked from the specials list. +func freeSpecial(s *special, p unsafe.Pointer, size uintptr) { + switch s.kind { + case _KindSpecialFinalizer: + sf := (*specialfinalizer)(unsafe.Pointer(s)) + queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot) + lock(&mheap_.speciallock) + mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf)) + unlock(&mheap_.speciallock) + case _KindSpecialProfile: + sp := (*specialprofile)(unsafe.Pointer(s)) + mProf_Free(sp.b, size) + lock(&mheap_.speciallock) + mheap_.specialprofilealloc.free(unsafe.Pointer(sp)) + unlock(&mheap_.speciallock) + case _KindSpecialReachable: + sp := (*specialReachable)(unsafe.Pointer(s)) + sp.done = true + // The creator frees these. + case _KindSpecialPinCounter: + lock(&mheap_.speciallock) + mheap_.specialPinCounterAlloc.free(unsafe.Pointer(s)) + unlock(&mheap_.speciallock) + default: + throw("bad special kind") + panic("not reached") + } +} + +// gcBits is an alloc/mark bitmap. This is always used as gcBits.x. +type gcBits struct { + _ sys.NotInHeap + x uint8 +} + +// bytep returns a pointer to the n'th byte of b. +func (b *gcBits) bytep(n uintptr) *uint8 { + return addb(&b.x, n) +} + +// bitp returns a pointer to the byte containing bit n and a mask for +// selecting that bit from *bytep. +func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) { + return b.bytep(n / 8), 1 << (n % 8) +} + +const gcBitsChunkBytes = uintptr(64 << 10) +const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{}) + +type gcBitsHeader struct { + free uintptr // free is the index into bits of the next free byte. + next uintptr // *gcBits triggers recursive type bug. (issue 14620) +} + +type gcBitsArena struct { + _ sys.NotInHeap + // gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand. + free uintptr // free is the index into bits of the next free byte; read/write atomically + next *gcBitsArena + bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits +} + +var gcBitsArenas struct { + lock mutex + free *gcBitsArena + next *gcBitsArena // Read atomically. Write atomically under lock. + current *gcBitsArena + previous *gcBitsArena +} + +// tryAlloc allocates from b or returns nil if b does not have enough room. +// This is safe to call concurrently. +func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits { + if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) { + return nil + } + // Try to allocate from this block. + end := atomic.Xadduintptr(&b.free, bytes) + if end > uintptr(len(b.bits)) { + return nil + } + // There was enough room. + start := end - bytes + return &b.bits[start] +} + +// newMarkBits returns a pointer to 8 byte aligned bytes +// to be used for a span's mark bits. +func newMarkBits(nelems uintptr) *gcBits { + blocksNeeded := (nelems + 63) / 64 + bytesNeeded := blocksNeeded * 8 + + // Try directly allocating from the current head arena. + head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next))) + if p := head.tryAlloc(bytesNeeded); p != nil { + return p + } + + // There's not enough room in the head arena. We may need to + // allocate a new arena. + lock(&gcBitsArenas.lock) + // Try the head arena again, since it may have changed. Now + // that we hold the lock, the list head can't change, but its + // free position still can. + if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil { + unlock(&gcBitsArenas.lock) + return p + } + + // Allocate a new arena. This may temporarily drop the lock. 
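+ // (The lock is dropped only around sysAlloc, so another goroutine
+ // may install a new head arena in the meantime; that is why the
+ // allocation from gcBitsArenas.next is retried below.)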
+ fresh := newArenaMayUnlock() + // If newArenaMayUnlock dropped the lock, another thread may + // have put a fresh arena on the "next" list. Try allocating + // from next again. + if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil { + // Put fresh back on the free list. + // TODO: Mark it "already zeroed" + fresh.next = gcBitsArenas.free + gcBitsArenas.free = fresh + unlock(&gcBitsArenas.lock) + return p + } + + // Allocate from the fresh arena. We haven't linked it in yet, so + // this cannot race and is guaranteed to succeed. + p := fresh.tryAlloc(bytesNeeded) + if p == nil { + throw("markBits overflow") + } + + // Add the fresh arena to the "next" list. + fresh.next = gcBitsArenas.next + atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh)) + + unlock(&gcBitsArenas.lock) + return p +} + +// newAllocBits returns a pointer to 8 byte aligned bytes +// to be used for this span's alloc bits. +// newAllocBits is used to provide newly initialized spans +// allocation bits. For spans not being initialized the +// mark bits are repurposed as allocation bits when +// the span is swept. +func newAllocBits(nelems uintptr) *gcBits { + return newMarkBits(nelems) +} + +// nextMarkBitArenaEpoch establishes a new epoch for the arenas +// holding the mark bits. The arenas are named relative to the +// current GC cycle which is demarcated by the call to finishweep_m. +// +// All current spans have been swept. +// During that sweep each span allocated room for its gcmarkBits in +// gcBitsArenas.next block. gcBitsArenas.next becomes the gcBitsArenas.current +// where the GC will mark objects and after each span is swept these bits +// will be used to allocate objects. +// gcBitsArenas.current becomes gcBitsArenas.previous where the span's +// gcAllocBits live until all the spans have been swept during this GC cycle. +// The span's sweep extinguishes all the references to gcBitsArenas.previous +// by pointing gcAllocBits into the gcBitsArenas.current. +// The gcBitsArenas.previous is released to the gcBitsArenas.free list. +func nextMarkBitArenaEpoch() { + lock(&gcBitsArenas.lock) + if gcBitsArenas.previous != nil { + if gcBitsArenas.free == nil { + gcBitsArenas.free = gcBitsArenas.previous + } else { + // Find end of previous arenas. + last := gcBitsArenas.previous + for last = gcBitsArenas.previous; last.next != nil; last = last.next { + } + last.next = gcBitsArenas.free + gcBitsArenas.free = gcBitsArenas.previous + } + } + gcBitsArenas.previous = gcBitsArenas.current + gcBitsArenas.current = gcBitsArenas.next + atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), nil) // newMarkBits calls newArena when needed + unlock(&gcBitsArenas.lock) +} + +// newArenaMayUnlock allocates and zeroes a gcBits arena. +// The caller must hold gcBitsArena.lock. This may temporarily release it. +func newArenaMayUnlock() *gcBitsArena { + var result *gcBitsArena + if gcBitsArenas.free == nil { + unlock(&gcBitsArenas.lock) + result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys)) + if result == nil { + throw("runtime: cannot allocate memory") + } + lock(&gcBitsArenas.lock) + } else { + result = gcBitsArenas.free + gcBitsArenas.free = gcBitsArenas.free.next + memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes) + } + result.next = nil + // If result.bits is not 8 byte aligned adjust index so + // that &result.bits[result.free] is 8 byte aligned. 
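+ // For example, if the header makes &result.bits[0] end in ...5,
+ // then addr&7 == 5 and result.free starts at 8-5 = 3, so that
+ // &result.bits[3] is 8-byte aligned.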
+ if unsafe.Offsetof(gcBitsArena{}.bits)&7 == 0 { + result.free = 0 + } else { + result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7) + } + return result +} diff --git a/platform/dbops/binaries/go/go/src/runtime/minmax.go b/platform/dbops/binaries/go/go/src/runtime/minmax.go new file mode 100644 index 0000000000000000000000000000000000000000..e5efc65c1d2600079e8db72fe7e5d12d9f648546 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/minmax.go @@ -0,0 +1,72 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func strmin(x, y string) string { + if y < x { + return y + } + return x +} + +func strmax(x, y string) string { + if y > x { + return y + } + return x +} + +func fmin32(x, y float32) float32 { return fmin(x, y) } +func fmin64(x, y float64) float64 { return fmin(x, y) } +func fmax32(x, y float32) float32 { return fmax(x, y) } +func fmax64(x, y float64) float64 { return fmax(x, y) } + +type floaty interface{ ~float32 | ~float64 } + +func fmin[F floaty](x, y F) F { + if y != y || y < x { + return y + } + if x != x || x < y || x != 0 { + return x + } + // x and y are both ±0 + // if either is -0, return -0; else return +0 + return forbits(x, y) +} + +func fmax[F floaty](x, y F) F { + if y != y || y > x { + return y + } + if x != x || x > y || x != 0 { + return x + } + // x and y are both ±0 + // if both are -0, return -0; else return +0 + return fandbits(x, y) +} + +func forbits[F floaty](x, y F) F { + switch unsafe.Sizeof(x) { + case 4: + *(*uint32)(unsafe.Pointer(&x)) |= *(*uint32)(unsafe.Pointer(&y)) + case 8: + *(*uint64)(unsafe.Pointer(&x)) |= *(*uint64)(unsafe.Pointer(&y)) + } + return x +} + +func fandbits[F floaty](x, y F) F { + switch unsafe.Sizeof(x) { + case 4: + *(*uint32)(unsafe.Pointer(&x)) &= *(*uint32)(unsafe.Pointer(&y)) + case 8: + *(*uint64)(unsafe.Pointer(&x)) &= *(*uint64)(unsafe.Pointer(&y)) + } + return x +} diff --git a/platform/dbops/binaries/go/go/src/runtime/minmax_test.go b/platform/dbops/binaries/go/go/src/runtime/minmax_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e0bc28fbf62b608bc40eca80259ab7fa107a380a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/minmax_test.go @@ -0,0 +1,129 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
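+
+// A note on the ±0 handling in minmax.go above: -0 and +0 compare
+// equal, so fmin and fmax fall back to bit operations for the
+// zero/zero case. The sign bit is the only bit set in -0, so OR-ing
+// the operands' bit patterns (forbits) yields -0 if either operand
+// is -0, and AND-ing them (fandbits) yields -0 only if both are,
+// which is exactly the min/max semantics the tests below check:
+//
+//	fmin(math.Copysign(0, -1), 0.0) // -0: sign bits OR to 1
+//	fmax(math.Copysign(0, -1), 0.0) // +0: sign bits AND to 0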
+
+package runtime_test
+
+import (
+ "math"
+ "strings"
+ "testing"
+ "unsafe"
+)
+
+var (
+ zero = math.Copysign(0, +1)
+ negZero = math.Copysign(0, -1)
+ inf = math.Inf(+1)
+ negInf = math.Inf(-1)
+ nan = math.NaN()
+)
+
+var tests = []struct{ min, max float64 }{
+ {1, 2},
+ {-2, 1},
+ {negZero, zero},
+ {zero, inf},
+ {negInf, zero},
+ {negInf, inf},
+ {1, inf},
+ {negInf, 1},
+}
+
+var all = []float64{1, 2, -1, -2, zero, negZero, inf, negInf, nan}
+
+func eq(x, y float64) bool {
+ return x == y && math.Signbit(x) == math.Signbit(y)
+}
+
+func TestMinFloat(t *testing.T) {
+ for _, tt := range tests {
+ if z := min(tt.min, tt.max); !eq(z, tt.min) {
+ t.Errorf("min(%v, %v) = %v, want %v", tt.min, tt.max, z, tt.min)
+ }
+ if z := min(tt.max, tt.min); !eq(z, tt.min) {
+ t.Errorf("min(%v, %v) = %v, want %v", tt.max, tt.min, z, tt.min)
+ }
+ }
+ for _, x := range all {
+ if z := min(nan, x); !math.IsNaN(z) {
+ t.Errorf("min(%v, %v) = %v, want %v", nan, x, z, nan)
+ }
+ if z := min(x, nan); !math.IsNaN(z) {
+ t.Errorf("min(%v, %v) = %v, want %v", x, nan, z, nan)
+ }
+ }
+}
+
+func TestMaxFloat(t *testing.T) {
+ for _, tt := range tests {
+ if z := max(tt.min, tt.max); !eq(z, tt.max) {
+ t.Errorf("max(%v, %v) = %v, want %v", tt.min, tt.max, z, tt.max)
+ }
+ if z := max(tt.max, tt.min); !eq(z, tt.max) {
+ t.Errorf("max(%v, %v) = %v, want %v", tt.max, tt.min, z, tt.max)
+ }
+ }
+ for _, x := range all {
+ if z := max(nan, x); !math.IsNaN(z) {
+ t.Errorf("max(%v, %v) = %v, want %v", nan, x, z, nan)
+ }
+ if z := max(x, nan); !math.IsNaN(z) {
+ t.Errorf("max(%v, %v) = %v, want %v", x, nan, z, nan)
+ }
+ }
+}
+
+// testMinMax tests that min/max behave correctly on every pair of
+// values in vals.
+//
+// vals should be a sequence of values in strictly ascending order.
+func testMinMax[T int | uint8 | string](t *testing.T, vals ...T) {
+ for i, x := range vals {
+ for _, y := range vals[i+1:] {
+ if !(x < y) {
+ t.Fatalf("values out of order: !(%v < %v)", x, y)
+ }
+
+ if z := min(x, y); z != x {
+ t.Errorf("min(%v, %v) = %v, want %v", x, y, z, x)
+ }
+ if z := min(y, x); z != x {
+ t.Errorf("min(%v, %v) = %v, want %v", y, x, z, x)
+ }
+
+ if z := max(x, y); z != y {
+ t.Errorf("max(%v, %v) = %v, want %v", x, y, z, y)
+ }
+ if z := max(y, x); z != y {
+ t.Errorf("max(%v, %v) = %v, want %v", y, x, z, y)
+ }
+ }
+ }
+}
+
+func TestMinMaxInt(t *testing.T) { testMinMax[int](t, -7, 0, 9) }
+func TestMinMaxUint8(t *testing.T) { testMinMax[uint8](t, 0, 1, 2, 4, 7) }
+func TestMinMaxString(t *testing.T) { testMinMax[string](t, "a", "b", "c") }
+
+// TestMinMaxStringTies ensures that min(a, b) (and likewise max)
+// returns its first argument when the operands are equal.
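+// Equal strings compare equal, so the result cannot be identified by
+// comparison; instead the test compares unsafe.StringData pointers,
+// which tell apart the three distinct backing arrays produced by
+// strings.Split.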
+func TestMinMaxStringTies(t *testing.T) { + s := "xxx" + x := strings.Split(s, "") + + test := func(i, j, k int) { + if z := min(x[i], x[j], x[k]); unsafe.StringData(z) != unsafe.StringData(x[i]) { + t.Errorf("min(x[%v], x[%v], x[%v]) = %p, want %p", i, j, k, unsafe.StringData(z), unsafe.StringData(x[i])) + } + if z := max(x[i], x[j], x[k]); unsafe.StringData(z) != unsafe.StringData(x[i]) { + t.Errorf("max(x[%v], x[%v], x[%v]) = %p, want %p", i, j, k, unsafe.StringData(z), unsafe.StringData(x[i])) + } + } + + test(0, 1, 2) + test(0, 2, 1) + test(1, 0, 2) + test(1, 2, 0) + test(2, 0, 1) + test(2, 1, 0) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mkduff.go b/platform/dbops/binaries/go/go/src/runtime/mkduff.go new file mode 100644 index 0000000000000000000000000000000000000000..b7f07b5087caf9353cb0adae0c5070177850e425 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mkduff.go @@ -0,0 +1,286 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +// runtime·duffzero is a Duff's device for zeroing memory. +// The compiler jumps to computed addresses within +// the routine to zero chunks of memory. +// Do not change duffzero without also +// changing the uses in cmd/compile/internal/*/*.go. + +// runtime·duffcopy is a Duff's device for copying memory. +// The compiler jumps to computed addresses within +// the routine to copy chunks of memory. +// Source and destination must not overlap. +// Do not change duffcopy without also +// changing the uses in cmd/compile/internal/*/*.go. + +// See the zero* and copy* generators below +// for architecture-specific comments. + +// mkduff generates duff_*.s. +package main + +import ( + "bytes" + "fmt" + "io" + "log" + "os" +) + +func main() { + gen("amd64", notags, zeroAMD64, copyAMD64) + gen("386", notags, zero386, copy386) + gen("arm", notags, zeroARM, copyARM) + gen("arm64", notags, zeroARM64, copyARM64) + gen("loong64", notags, zeroLOONG64, copyLOONG64) + gen("ppc64x", tagsPPC64x, zeroPPC64x, copyPPC64x) + gen("mips64x", tagsMIPS64x, zeroMIPS64x, copyMIPS64x) + gen("riscv64", notags, zeroRISCV64, copyRISCV64) +} + +func gen(arch string, tags, zero, copy func(io.Writer)) { + var buf bytes.Buffer + + fmt.Fprintln(&buf, "// Code generated by mkduff.go; DO NOT EDIT.") + fmt.Fprintln(&buf, "// Run go generate from src/runtime to update.") + fmt.Fprintln(&buf, "// See mkduff.go for comments.") + tags(&buf) + fmt.Fprintln(&buf, "#include \"textflag.h\"") + fmt.Fprintln(&buf) + zero(&buf) + fmt.Fprintln(&buf) + copy(&buf) + + if err := os.WriteFile("duff_"+arch+".s", buf.Bytes(), 0644); err != nil { + log.Fatalln(err) + } +} + +func notags(w io.Writer) { fmt.Fprintln(w) } + +func zeroAMD64(w io.Writer) { + // X15: zero + // DI: ptr to memory to be zeroed + // DI is updated as a side effect. + fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0") + for i := 0; i < 16; i++ { + fmt.Fprintln(w, "\tMOVUPS\tX15,(DI)") + fmt.Fprintln(w, "\tMOVUPS\tX15,16(DI)") + fmt.Fprintln(w, "\tMOVUPS\tX15,32(DI)") + fmt.Fprintln(w, "\tMOVUPS\tX15,48(DI)") + fmt.Fprintln(w, "\tLEAQ\t64(DI),DI") // We use lea instead of add, to avoid clobbering flags + fmt.Fprintln(w) + } + fmt.Fprintln(w, "\tRET") +} + +func copyAMD64(w io.Writer) { + // SI: ptr to source memory + // DI: ptr to destination memory + // SI and DI are updated as a side effect. 
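+ //
+ // As with duffzero above, the compiler jumps to a computed
+ // address inside this routine: each block below copies 16 bytes,
+ // so copying n bytes means entering n/16 blocks before the RET.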
+ //
+ // This is equivalent to a sequence of MOVSQ but
+ // for some reason that is 3.5x slower than this code.
+ fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0")
+ for i := 0; i < 64; i++ {
+ fmt.Fprintln(w, "\tMOVUPS\t(SI), X0")
+ fmt.Fprintln(w, "\tADDQ\t$16, SI")
+ fmt.Fprintln(w, "\tMOVUPS\tX0, (DI)")
+ fmt.Fprintln(w, "\tADDQ\t$16, DI")
+ fmt.Fprintln(w)
+ }
+ fmt.Fprintln(w, "\tRET")
+}
+
+func zero386(w io.Writer) {
+ // AX: zero
+ // DI: ptr to memory to be zeroed
+ // DI is updated as a side effect.
+ fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT, $0-0")
+ for i := 0; i < 128; i++ {
+ fmt.Fprintln(w, "\tSTOSL")
+ }
+ fmt.Fprintln(w, "\tRET")
+}
+
+func copy386(w io.Writer) {
+ // SI: ptr to source memory
+ // DI: ptr to destination memory
+ // SI and DI are updated as a side effect.
+ //
+ // This is equivalent to a sequence of MOVSL but
+ // for some reason MOVSL is really slow.
+ fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT, $0-0")
+ for i := 0; i < 128; i++ {
+ fmt.Fprintln(w, "\tMOVL\t(SI), CX")
+ fmt.Fprintln(w, "\tADDL\t$4, SI")
+ fmt.Fprintln(w, "\tMOVL\tCX, (DI)")
+ fmt.Fprintln(w, "\tADDL\t$4, DI")
+ fmt.Fprintln(w)
+ }
+ fmt.Fprintln(w, "\tRET")
+}
+
+func zeroARM(w io.Writer) {
+ // R0: zero
+ // R1: ptr to memory to be zeroed
+ // R1 is updated as a side effect.
+ fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT, $0-0")
+ for i := 0; i < 128; i++ {
+ fmt.Fprintln(w, "\tMOVW.P\tR0, 4(R1)")
+ }
+ fmt.Fprintln(w, "\tRET")
+}
+
+func copyARM(w io.Writer) {
+ // R0: scratch space
+ // R1: ptr to source memory
+ // R2: ptr to destination memory
+ // R1 and R2 are updated as a side effect
+ fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT, $0-0")
+ for i := 0; i < 128; i++ {
+ fmt.Fprintln(w, "\tMOVW.P\t4(R1), R0")
+ fmt.Fprintln(w, "\tMOVW.P\tR0, 4(R2)")
+ fmt.Fprintln(w)
+ }
+ fmt.Fprintln(w, "\tRET")
+}
+
+func zeroARM64(w io.Writer) {
+ // ZR: always zero
+ // R20: ptr to memory to be zeroed
+ // On return, R20 points to the last zeroed dword.
+ fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0")
+ for i := 0; i < 63; i++ {
+ fmt.Fprintln(w, "\tSTP.P\t(ZR, ZR), 16(R20)")
+ }
+ fmt.Fprintln(w, "\tSTP\t(ZR, ZR), (R20)")
+ fmt.Fprintln(w, "\tRET")
+}
+
+func copyARM64(w io.Writer) {
+ // R20: ptr to source memory
+ // R21: ptr to destination memory
+ // R26, R27 (aka REGTMP): scratch space
+ // R20 and R21 are updated as a side effect
+ fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0")
+
+ for i := 0; i < 64; i++ {
+ fmt.Fprintln(w, "\tLDP.P\t16(R20), (R26, R27)")
+ fmt.Fprintln(w, "\tSTP.P\t(R26, R27), 16(R21)")
+ fmt.Fprintln(w)
+ }
+ fmt.Fprintln(w, "\tRET")
+}
+
+func zeroLOONG64(w io.Writer) {
+ // R0: always zero
+ // R20: ptr to memory to be zeroed
+ // R20 is updated as a side effect.
+ fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0")
+ for i := 0; i < 128; i++ {
+ fmt.Fprintln(w, "\tMOVV\tR0, (R20)")
+ fmt.Fprintln(w, "\tADDV\t$8, R20")
+ }
+ fmt.Fprintln(w, "\tRET")
+}
+
+func copyLOONG64(w io.Writer) {
+ fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0")
+ for i := 0; i < 128; i++ {
+ fmt.Fprintln(w, "\tMOVV\t(R20), R30")
+ fmt.Fprintln(w, "\tADDV\t$8, R20")
+ fmt.Fprintln(w, "\tMOVV\tR30, (R21)")
+ fmt.Fprintln(w, "\tADDV\t$8, R21")
+ fmt.Fprintln(w)
+ }
+ fmt.Fprintln(w, "\tRET")
+}
+
+func tagsPPC64x(w io.Writer) {
+ fmt.Fprintln(w)
+ fmt.Fprintln(w, "//go:build ppc64 || ppc64le")
+ fmt.Fprintln(w)
+}
+
+func zeroPPC64x(w io.Writer) {
+ // R0: always zero
+ // R20: ptr to memory to be zeroed - 8
+ // On return, R20 points to the last zeroed dword.
+ fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0")
+ for i := 0; i < 128; i++ {
+ fmt.Fprintln(w, "\tMOVDU\tR0, 8(R20)")
+ }
+ fmt.Fprintln(w, "\tRET")
+}
+
+func copyPPC64x(w io.Writer) {
+ // duffcopy is not used on PPC64.
+ fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0")
+ for i := 0; i < 128; i++ {
+ fmt.Fprintln(w, "\tMOVDU\t8(R20), R5")
+ fmt.Fprintln(w, "\tMOVDU\tR5, 8(R21)")
+ }
+ fmt.Fprintln(w, "\tRET")
+}
+
+func tagsMIPS64x(w io.Writer) {
+ fmt.Fprintln(w)
+ fmt.Fprintln(w, "//go:build mips64 || mips64le")
+ fmt.Fprintln(w)
+}
+
+func zeroMIPS64x(w io.Writer) {
+ // R0: always zero
+ // R1 (aka REGRT1): ptr to memory to be zeroed - 8
+ // On return, R1 points to the last zeroed dword.
+ fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0")
+ for i := 0; i < 128; i++ {
+ fmt.Fprintln(w, "\tMOVV\tR0, 8(R1)")
+ fmt.Fprintln(w, "\tADDV\t$8, R1")
+ }
+ fmt.Fprintln(w, "\tRET")
+}
+
+func copyMIPS64x(w io.Writer) {
+ fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0")
+ for i := 0; i < 128; i++ {
+ fmt.Fprintln(w, "\tMOVV\t(R1), R23")
+ fmt.Fprintln(w, "\tADDV\t$8, R1")
+ fmt.Fprintln(w, "\tMOVV\tR23, (R2)")
+ fmt.Fprintln(w, "\tADDV\t$8, R2")
+ fmt.Fprintln(w)
+ }
+ fmt.Fprintln(w, "\tRET")
+}
+
+func zeroRISCV64(w io.Writer) {
+ // ZERO: always zero
+ // X25: ptr to memory to be zeroed
+ // X25 is updated as a side effect.
+ fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0")
+ for i := 0; i < 128; i++ {
+ fmt.Fprintln(w, "\tMOV\tZERO, (X25)")
+ fmt.Fprintln(w, "\tADD\t$8, X25")
+ }
+ fmt.Fprintln(w, "\tRET")
+}
+
+func copyRISCV64(w io.Writer) {
+ // X24: ptr to source memory
+ // X25: ptr to destination memory
+ // X24 and X25 are updated as a side effect
+ fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0")
+ for i := 0; i < 128; i++ {
+ fmt.Fprintln(w, "\tMOV\t(X24), X31")
+ fmt.Fprintln(w, "\tADD\t$8, X24")
+ fmt.Fprintln(w, "\tMOV\tX31, (X25)")
+ fmt.Fprintln(w, "\tADD\t$8, X25")
+ fmt.Fprintln(w)
+ }
+ fmt.Fprintln(w, "\tRET")
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/mkfastlog2table.go b/platform/dbops/binaries/go/go/src/runtime/mkfastlog2table.go
new file mode 100644
index 0000000000000000000000000000000000000000..614d1f7e03fac02d75951d3824a1752b7974815f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/mkfastlog2table.go
@@ -0,0 +1,109 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+// fastlog2Table contains log2 approximations for 5 binary digits.
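+// That is, 1<<fastlogNumBits+1 = 33 entries holding log2(1 + i/32)
+// for i = 0..32; a lookup takes the top 5 mantissa bits of the
+// argument as the index, interpolates between adjacent entries, and
+// adds the unbiased exponent.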
+// This is used to implement fastlog2, which is used for heap sampling.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "math"
+ "os"
+)
+
+func main() {
+ var buf bytes.Buffer
+
+ fmt.Fprintln(&buf, "// Code generated by mkfastlog2table.go; DO NOT EDIT.")
+ fmt.Fprintln(&buf, "// Run go generate from src/runtime to update.")
+ fmt.Fprintln(&buf, "// See mkfastlog2table.go for comments.")
+ fmt.Fprintln(&buf)
+ fmt.Fprintln(&buf, "package runtime")
+ fmt.Fprintln(&buf)
+ fmt.Fprintln(&buf, "const fastlogNumBits =", fastlogNumBits)
+ fmt.Fprintln(&buf)
+
+ fmt.Fprintln(&buf, "var fastlog2Table = [1<<fastlogNumBits + 1]float64{")
+ table := computeTable()
+ for _, t := range table {
+ fmt.Fprintf(&buf, "\t%v,\n", t)
+ }
+ fmt.Fprintln(&buf, "}")
+
+ if err := os.WriteFile("fastlog2table.go", buf.Bytes(), 0644); err != nil {
+ log.Fatalln(err)
+ }
+}
+
+const fastlogNumBits = 5
+
+func computeTable() []float64 {
+ fastlog2Table := make([]float64, 1<<fastlogNumBits+1)
+ for i := 0; i <= (1 << fastlogNumBits); i++ {
+ fastlog2Table[i] = log2(1.0 + float64(i)/(1<<fastlogNumBits))
+ }
+ return fastlog2Table
+}
+ g, err := dag.Parse(ranks)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var out []byte
+ if *flagDot {
+ var b bytes.Buffer
+ g.TransitiveReduction()
+ // Add cyclic edges for visualization.
+ for k := range cyclicRanks {
+ g.AddEdge(k, k)
+ }
+ // Reverse the graph. It's much easier to read this as
+ // a "<" partial order than a ">" partial order. This
+ // way, locks are acquired from the top going down
+ // and time moves forward over the edges instead of
+ // backward.
+ g.Transpose()
+ generateDot(&b, g)
+ out = b.Bytes()
+ } else {
+ var b bytes.Buffer
+ generateGo(&b, g)
+ out, err = format.Source(b.Bytes())
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ if *flagO != "" {
+ err = os.WriteFile(*flagO, out, 0666)
+ } else {
+ _, err = os.Stdout.Write(out)
+ }
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func generateGo(w io.Writer, g *dag.Graph) {
+ fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.
+
+package runtime
+
+type lockRank int
+
+`)
+
+ // Create numeric ranks.
+ topo := g.Topo()
+ for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
+ topo[i], topo[j] = topo[j], topo[i]
+ }
+ fmt.Fprintf(w, `
+// Constants representing the ranks of all non-leaf runtime locks, in rank order.
+// Locks with lower rank must be taken before locks with higher rank,
+// in addition to satisfying the partial order in lockPartialOrder.
+// A few ranks allow self-cycles, which are specified in lockPartialOrder.
+const (
+ lockRankUnknown lockRank = iota
+
+`)
+ for _, rank := range topo {
+ if isPseudo(rank) {
+ fmt.Fprintf(w, "\t// %s\n", rank)
+ } else {
+ fmt.Fprintf(w, "\t%s\n", cname(rank))
+ }
+ }
+ fmt.Fprintf(w, `)
+
+// lockRankLeafRank is the rank of a lock that does not have a declared rank,
+// and hence is a leaf lock.
+const lockRankLeafRank lockRank = 1000
+`)
+
+ // Create string table.
+ fmt.Fprintf(w, `
+// lockNames gives the names associated with each of the above ranks.
+var lockNames = []string{
+`)
+ for _, rank := range topo {
+ if !isPseudo(rank) {
+ fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
+ }
+ }
+ fmt.Fprintf(w, `}
+
+func (rank lockRank) String() string {
+ if rank == 0 {
+ return "UNKNOWN"
+ }
+ if rank == lockRankLeafRank {
+ return "LEAF"
+ }
+ if rank < 0 || int(rank) >= len(lockNames) {
+ return "BAD RANK"
+ }
+ return lockNames[rank]
+}
+`)
+
+ // Create partial order structure.
+ fmt.Fprintf(w, `
+// lockPartialOrder is the transitive closure of the lock rank graph.
+// An entry for rank X lists all of the ranks that can already be held
+// when rank X is acquired.
+//
+// Lock ranks that allow self-cycles list themselves.
+var lockPartialOrder [][]lockRank = [][]lockRank{
+`)
+ for _, rank := range topo {
+ if isPseudo(rank) {
+ continue
+ }
+ list := []string{}
+ for _, before := range g.Edges(rank) {
+ if !isPseudo(before) {
+ list = append(list, cname(before))
+ }
+ }
+ if cyclicRanks[rank] {
+ list = append(list, cname(rank))
+ }
+
+ fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
+ }
+ fmt.Fprintf(w, "}\n")
+}
+
+// cname returns the Go const name for the given lock rank label.
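+// For example, cname("assistQueue") == "lockRankAssistQueue".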
+func cname(label string) string { + return "lockRank" + strings.ToUpper(label[:1]) + label[1:] +} + +func isPseudo(label string) bool { + return strings.ToUpper(label) == label +} + +// generateDot emits a Graphviz dot representation of g to w. +func generateDot(w io.Writer, g *dag.Graph) { + fmt.Fprintf(w, "digraph g {\n") + + // Define all nodes. + for _, node := range g.Nodes { + fmt.Fprintf(w, "%q;\n", node) + } + + // Create edges. + for _, node := range g.Nodes { + for _, to := range g.Edges(node) { + fmt.Fprintf(w, "%q -> %q;\n", node, to) + } + } + + fmt.Fprintf(w, "}\n") +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mkpreempt.go b/platform/dbops/binaries/go/go/src/runtime/mkpreempt.go new file mode 100644 index 0000000000000000000000000000000000000000..17544d6b21532c3ea9a40107c234e4b1fc62c867 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mkpreempt.go @@ -0,0 +1,630 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +// mkpreempt generates the asyncPreempt functions for each +// architecture. +package main + +import ( + "flag" + "fmt" + "io" + "log" + "os" + "strings" +) + +// Copied from cmd/compile/internal/ssa/gen/*Ops.go + +var regNames386 = []string{ + "AX", + "CX", + "DX", + "BX", + "SP", + "BP", + "SI", + "DI", + "X0", + "X1", + "X2", + "X3", + "X4", + "X5", + "X6", + "X7", +} + +var regNamesAMD64 = []string{ + "AX", + "CX", + "DX", + "BX", + "SP", + "BP", + "SI", + "DI", + "R8", + "R9", + "R10", + "R11", + "R12", + "R13", + "R14", + "R15", + "X0", + "X1", + "X2", + "X3", + "X4", + "X5", + "X6", + "X7", + "X8", + "X9", + "X10", + "X11", + "X12", + "X13", + "X14", + "X15", +} + +var out io.Writer + +var arches = map[string]func(){ + "386": gen386, + "amd64": genAMD64, + "arm": genARM, + "arm64": genARM64, + "loong64": genLoong64, + "mips64x": func() { genMIPS(true) }, + "mipsx": func() { genMIPS(false) }, + "ppc64x": genPPC64, + "riscv64": genRISCV64, + "s390x": genS390X, + "wasm": genWasm, +} +var beLe = map[string]bool{"mips64x": true, "mipsx": true, "ppc64x": true} + +func main() { + flag.Parse() + if flag.NArg() > 0 { + out = os.Stdout + for _, arch := range flag.Args() { + gen, ok := arches[arch] + if !ok { + log.Fatalf("unknown arch %s", arch) + } + header(arch) + gen() + } + return + } + + for arch, gen := range arches { + f, err := os.Create(fmt.Sprintf("preempt_%s.s", arch)) + if err != nil { + log.Fatal(err) + } + out = f + header(arch) + gen() + if err := f.Close(); err != nil { + log.Fatal(err) + } + } +} + +func header(arch string) { + fmt.Fprintf(out, "// Code generated by mkpreempt.go; DO NOT EDIT.\n\n") + if beLe[arch] { + base := arch[:len(arch)-1] + fmt.Fprintf(out, "//go:build %s || %sle\n\n", base, base) + } + fmt.Fprintf(out, "#include \"go_asm.h\"\n") + if arch == "amd64" { + fmt.Fprintf(out, "#include \"asm_amd64.h\"\n") + } + fmt.Fprintf(out, "#include \"textflag.h\"\n\n") + fmt.Fprintf(out, "TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0\n") +} + +func p(f string, args ...any) { + fmted := fmt.Sprintf(f, args...) 
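+ // Indent each line of the formatted text by one tab so that
+ // multi-instruction save/restore sequences stay aligned in the
+ // generated assembly.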
+ fmt.Fprintf(out, "\t%s\n", strings.ReplaceAll(fmted, "\n", "\n\t")) +} + +func label(l string) { + fmt.Fprintf(out, "%s\n", l) +} + +type layout struct { + stack int + regs []regPos + sp string // stack pointer register +} + +type regPos struct { + pos int + + saveOp string + restoreOp string + reg string + + // If this register requires special save and restore, these + // give those operations with a %d placeholder for the stack + // offset. + save, restore string +} + +func (l *layout) add(op, reg string, size int) { + l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack}) + l.stack += size +} + +func (l *layout) add2(sop, rop, reg string, size int) { + l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack}) + l.stack += size +} + +func (l *layout) addSpecial(save, restore string, size int) { + l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack}) + l.stack += size +} + +func (l *layout) save() { + for _, reg := range l.regs { + if reg.save != "" { + p(reg.save, reg.pos) + } else { + p("%s %s, %d(%s)", reg.saveOp, reg.reg, reg.pos, l.sp) + } + } +} + +func (l *layout) restore() { + for i := len(l.regs) - 1; i >= 0; i-- { + reg := l.regs[i] + if reg.restore != "" { + p(reg.restore, reg.pos) + } else { + p("%s %d(%s), %s", reg.restoreOp, reg.pos, l.sp, reg.reg) + } + } +} + +func gen386() { + p("PUSHFL") + // Save general purpose registers. + var l = layout{sp: "SP"} + for _, reg := range regNames386 { + if reg == "SP" || strings.HasPrefix(reg, "X") { + continue + } + l.add("MOVL", reg, 4) + } + + softfloat := "GO386_softfloat" + + // Save SSE state only if supported. + lSSE := layout{stack: l.stack, sp: "SP"} + for i := 0; i < 8; i++ { + lSSE.add("MOVUPS", fmt.Sprintf("X%d", i), 16) + } + + p("ADJSP $%d", lSSE.stack) + p("NOP SP") + l.save() + p("#ifndef %s", softfloat) + lSSE.save() + p("#endif") + p("CALL ·asyncPreempt2(SB)") + p("#ifndef %s", softfloat) + lSSE.restore() + p("#endif") + l.restore() + p("ADJSP $%d", -lSSE.stack) + + p("POPFL") + p("RET") +} + +func genAMD64() { + // Assign stack offsets. + var l = layout{sp: "SP"} + for _, reg := range regNamesAMD64 { + if reg == "SP" || reg == "BP" { + continue + } + if !strings.HasPrefix(reg, "X") { + l.add("MOVQ", reg, 8) + } + } + lSSE := layout{stack: l.stack, sp: "SP"} + for _, reg := range regNamesAMD64 { + if strings.HasPrefix(reg, "X") { + lSSE.add("MOVUPS", reg, 16) + } + } + + // TODO: MXCSR register? + + p("PUSHQ BP") + p("MOVQ SP, BP") + p("// Save flags before clobbering them") + p("PUSHFQ") + p("// obj doesn't understand ADD/SUB on SP, but does understand ADJSP") + p("ADJSP $%d", lSSE.stack) + p("// But vet doesn't know ADJSP, so suppress vet stack checking") + p("NOP SP") + + l.save() + + // Apparently, the signal handling code path in darwin kernel leaves + // the upper bits of Y registers in a dirty state, which causes + // many SSE operations (128-bit and narrower) become much slower. + // Clear the upper bits to get to a clean state. See issue #37174. + // It is safe here as Go code don't use the upper bits of Y registers. + p("#ifdef GOOS_darwin") + p("#ifndef hasAVX") + p("CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $0") + p("JE 2(PC)") + p("#endif") + p("VZEROUPPER") + p("#endif") + + lSSE.save() + p("CALL ·asyncPreempt2(SB)") + lSSE.restore() + l.restore() + p("ADJSP $%d", -lSSE.stack) + p("POPFQ") + p("POPQ BP") + p("RET") +} + +func genARM() { + // Add integer registers R0-R12. 
+ // R13 (SP), R14 (LR), R15 (PC) are special and not saved here. + var l = layout{sp: "R13", stack: 4} // add LR slot + for i := 0; i <= 12; i++ { + reg := fmt.Sprintf("R%d", i) + if i == 10 { + continue // R10 is g register, no need to save/restore + } + l.add("MOVW", reg, 4) + } + // Add flag register. + l.addSpecial( + "MOVW CPSR, R0\nMOVW R0, %d(R13)", + "MOVW %d(R13), R0\nMOVW R0, CPSR", + 4) + + // Add floating point registers F0-F15 and flag register. + var lfp = layout{stack: l.stack, sp: "R13"} + lfp.addSpecial( + "MOVW FPCR, R0\nMOVW R0, %d(R13)", + "MOVW %d(R13), R0\nMOVW R0, FPCR", + 4) + for i := 0; i <= 15; i++ { + reg := fmt.Sprintf("F%d", i) + lfp.add("MOVD", reg, 8) + } + + p("MOVW.W R14, -%d(R13)", lfp.stack) // allocate frame, save LR + l.save() + p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp") // test goarmsoftfp, and skip FP registers if goarmsoftfp!=0. + lfp.save() + label("nofp:") + p("CALL ·asyncPreempt2(SB)") + p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp2") // test goarmsoftfp, and skip FP registers if goarmsoftfp!=0. + lfp.restore() + label("nofp2:") + l.restore() + + p("MOVW %d(R13), R14", lfp.stack) // sigctxt.pushCall pushes LR on stack, restore it + p("MOVW.P %d(R13), R15", lfp.stack+4) // load PC, pop frame (including the space pushed by sigctxt.pushCall) + p("UNDEF") // shouldn't get here +} + +func genARM64() { + // Add integer registers R0-R26 + // R27 (REGTMP), R28 (g), R29 (FP), R30 (LR), R31 (SP) are special + // and not saved here. + var l = layout{sp: "RSP", stack: 8} // add slot to save PC of interrupted instruction + for i := 0; i < 26; i += 2 { + if i == 18 { + i-- + continue // R18 is not used, skip + } + reg := fmt.Sprintf("(R%d, R%d)", i, i+1) + l.add2("STP", "LDP", reg, 16) + } + // Add flag registers. + l.addSpecial( + "MOVD NZCV, R0\nMOVD R0, %d(RSP)", + "MOVD %d(RSP), R0\nMOVD R0, NZCV", + 8) + l.addSpecial( + "MOVD FPSR, R0\nMOVD R0, %d(RSP)", + "MOVD %d(RSP), R0\nMOVD R0, FPSR", + 8) + // TODO: FPCR? I don't think we'll change it, so no need to save. + // Add floating point registers F0-F31. + for i := 0; i < 31; i += 2 { + reg := fmt.Sprintf("(F%d, F%d)", i, i+1) + l.add2("FSTPD", "FLDPD", reg, 16) + } + if l.stack%16 != 0 { + l.stack += 8 // SP needs 16-byte alignment + } + + // allocate frame, save PC of interrupted instruction (in LR) + p("MOVD R30, %d(RSP)", -l.stack) + p("SUB $%d, RSP", l.stack) + p("MOVD R29, -8(RSP)") // save frame pointer (only used on Linux) + p("SUB $8, RSP, R29") // set up new frame pointer + // On iOS, save the LR again after decrementing SP. We run the + // signal handler on the G stack (as it doesn't support sigaltstack), + // so any writes below SP may be clobbered. 
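+ // Storing the extra copy at (RSP), inside the frame allocated
+ // above, keeps it at or above the stack pointer where it cannot
+ // be clobbered.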
+ p("#ifdef GOOS_ios") + p("MOVD R30, (RSP)") + p("#endif") + + l.save() + p("CALL ·asyncPreempt2(SB)") + l.restore() + + p("MOVD %d(RSP), R30", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it + p("MOVD -8(RSP), R29") // restore frame pointer + p("MOVD (RSP), R27") // load PC to REGTMP + p("ADD $%d, RSP", l.stack+16) // pop frame (including the space pushed by sigctxt.pushCall) + p("JMP (R27)") +} + +func genMIPS(_64bit bool) { + mov := "MOVW" + movf := "MOVF" + add := "ADD" + sub := "SUB" + r28 := "R28" + regsize := 4 + softfloat := "GOMIPS_softfloat" + if _64bit { + mov = "MOVV" + movf = "MOVD" + add = "ADDV" + sub = "SUBV" + r28 = "RSB" + regsize = 8 + softfloat = "GOMIPS64_softfloat" + } + + // Add integer registers R1-R22, R24-R25, R28 + // R0 (zero), R23 (REGTMP), R29 (SP), R30 (g), R31 (LR) are special, + // and not saved here. R26 and R27 are reserved by kernel and not used. + var l = layout{sp: "R29", stack: regsize} // add slot to save PC of interrupted instruction (in LR) + for i := 1; i <= 25; i++ { + if i == 23 { + continue // R23 is REGTMP + } + reg := fmt.Sprintf("R%d", i) + l.add(mov, reg, regsize) + } + l.add(mov, r28, regsize) + l.addSpecial( + mov+" HI, R1\n"+mov+" R1, %d(R29)", + mov+" %d(R29), R1\n"+mov+" R1, HI", + regsize) + l.addSpecial( + mov+" LO, R1\n"+mov+" R1, %d(R29)", + mov+" %d(R29), R1\n"+mov+" R1, LO", + regsize) + + // Add floating point control/status register FCR31 (FCR0-FCR30 are irrelevant) + var lfp = layout{sp: "R29", stack: l.stack} + lfp.addSpecial( + mov+" FCR31, R1\n"+mov+" R1, %d(R29)", + mov+" %d(R29), R1\n"+mov+" R1, FCR31", + regsize) + // Add floating point registers F0-F31. + for i := 0; i <= 31; i++ { + reg := fmt.Sprintf("F%d", i) + lfp.add(movf, reg, regsize) + } + + // allocate frame, save PC of interrupted instruction (in LR) + p(mov+" R31, -%d(R29)", lfp.stack) + p(sub+" $%d, R29", lfp.stack) + + l.save() + p("#ifndef %s", softfloat) + lfp.save() + p("#endif") + p("CALL ·asyncPreempt2(SB)") + p("#ifndef %s", softfloat) + lfp.restore() + p("#endif") + l.restore() + + p(mov+" %d(R29), R31", lfp.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it + p(mov + " (R29), R23") // load PC to REGTMP + p(add+" $%d, R29", lfp.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall) + p("JMP (R23)") +} + +func genLoong64() { + mov := "MOVV" + movf := "MOVD" + add := "ADDV" + sub := "SUBV" + regsize := 8 + + // Add integer registers r4-r21 r23-r29 r31 + // R0 (zero), R30 (REGTMP), R2 (tp), R3 (SP), R22 (g), R1 (LR) are special, + var l = layout{sp: "R3", stack: regsize} // add slot to save PC of interrupted instruction (in LR) + for i := 4; i <= 31; i++ { + if i == 22 || i == 30 { + continue + } + reg := fmt.Sprintf("R%d", i) + l.add(mov, reg, regsize) + } + + // Add floating point registers F0-F31. 
+ for i := 0; i <= 31; i++ { + reg := fmt.Sprintf("F%d", i) + l.add(movf, reg, regsize) + } + + // save/restore FCC0 + l.addSpecial( + mov+" FCC0, R4\n"+mov+" R4, %d(R3)", + mov+" %d(R3), R4\n"+mov+" R4, FCC0", + regsize) + + // allocate frame, save PC of interrupted instruction (in LR) + p(mov+" R1, -%d(R3)", l.stack) + p(sub+" $%d, R3", l.stack) + + l.save() + p("CALL ·asyncPreempt2(SB)") + l.restore() + + p(mov+" %d(R3), R1", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it + p(mov + " (R3), R30") // load PC to REGTMP + p(add+" $%d, R3", l.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall) + p("JMP (R30)") +} + +func genPPC64() { + // Add integer registers R3-R29 + // R0 (zero), R1 (SP), R30 (g) are special and not saved here. + // R2 (TOC pointer in PIC mode), R12 (function entry address in PIC mode) have been saved in sigctxt.pushCall. + // R31 (REGTMP) will be saved manually. + var l = layout{sp: "R1", stack: 32 + 8} // MinFrameSize on PPC64, plus one word for saving R31 + for i := 3; i <= 29; i++ { + if i == 12 || i == 13 { + // R12 has been saved in sigctxt.pushCall. + // R13 is TLS pointer, not used by Go code. we must NOT + // restore it, otherwise if we parked and resumed on a + // different thread we'll mess up TLS addresses. + continue + } + reg := fmt.Sprintf("R%d", i) + l.add("MOVD", reg, 8) + } + l.addSpecial( + "MOVW CR, R31\nMOVW R31, %d(R1)", + "MOVW %d(R1), R31\nMOVFL R31, $0xff", // this is MOVW R31, CR + 8) // CR is 4-byte wide, but just keep the alignment + l.addSpecial( + "MOVD XER, R31\nMOVD R31, %d(R1)", + "MOVD %d(R1), R31\nMOVD R31, XER", + 8) + // Add floating point registers F0-F31. + for i := 0; i <= 31; i++ { + reg := fmt.Sprintf("F%d", i) + l.add("FMOVD", reg, 8) + } + // Add floating point control/status register FPSCR. + l.addSpecial( + "MOVFL FPSCR, F0\nFMOVD F0, %d(R1)", + "FMOVD %d(R1), F0\nMOVFL F0, FPSCR", + 8) + + p("MOVD R31, -%d(R1)", l.stack-32) // save R31 first, we'll use R31 for saving LR + p("MOVD LR, R31") + p("MOVDU R31, -%d(R1)", l.stack) // allocate frame, save PC of interrupted instruction (in LR) + + l.save() + p("CALL ·asyncPreempt2(SB)") + l.restore() + + p("MOVD %d(R1), R31", l.stack) // sigctxt.pushCall has pushed LR, R2, R12 (at interrupt) on stack, restore them + p("MOVD R31, LR") + p("MOVD %d(R1), R2", l.stack+8) + p("MOVD %d(R1), R12", l.stack+16) + p("MOVD (R1), R31") // load PC to CTR + p("MOVD R31, CTR") + p("MOVD 32(R1), R31") // restore R31 + p("ADD $%d, R1", l.stack+32) // pop frame (including the space pushed by sigctxt.pushCall) + p("JMP (CTR)") +} + +func genRISCV64() { + // X0 (zero), X1 (LR), X2 (SP), X3 (GP), X4 (TP), X27 (g), X31 (TMP) are special. + var l = layout{sp: "X2", stack: 8} + + // Add integer registers (X5-X26, X28-30). + for i := 5; i < 31; i++ { + if i == 27 { + continue + } + reg := fmt.Sprintf("X%d", i) + l.add("MOV", reg, 8) + } + + // Add floating point registers (F0-F31). + for i := 0; i <= 31; i++ { + reg := fmt.Sprintf("F%d", i) + l.add("MOVD", reg, 8) + } + + p("MOV X1, -%d(X2)", l.stack) + p("SUB $%d, X2", l.stack) + l.save() + p("CALL ·asyncPreempt2(SB)") + l.restore() + p("MOV %d(X2), X1", l.stack) + p("MOV (X2), X31") + p("ADD $%d, X2", l.stack+8) + p("JMP (X31)") +} + +func genS390X() { + // Add integer registers R0-R12 + // R13 (g), R14 (LR), R15 (SP) are special, and not saved here. + // Saving R10 (REGTMP) is not necessary, but it is saved anyway. 
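+ //
+ // Note the flag handling below: ADD clobbers the s390x condition
+ // code, so IPM captures it into R10 before the frame is set up,
+ // and TMLH restores it just before the final jump.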
+ var l = layout{sp: "R15", stack: 16} // add slot to save PC of interrupted instruction and flags
+ l.addSpecial(
+ "STMG R0, R12, %d(R15)",
+ "LMG %d(R15), R0, R12",
+ 13*8)
+ // Add floating point registers F0-F15.
+ for i := 0; i <= 15; i++ {
+ reg := fmt.Sprintf("F%d", i)
+ l.add("FMOVD", reg, 8)
+ }
+
+ // allocate frame, save PC of interrupted instruction (in LR) and flags (condition code)
+ p("IPM R10") // save flags upfront, as ADD will clobber flags
+ p("MOVD R14, -%d(R15)", l.stack)
+ p("ADD $-%d, R15", l.stack)
+ p("MOVW R10, 8(R15)") // save flags
+
+ l.save()
+ p("CALL ·asyncPreempt2(SB)")
+ l.restore()
+
+ p("MOVD %d(R15), R14", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
+ p("ADD $%d, R15", l.stack+8) // pop frame (including the space pushed by sigctxt.pushCall)
+ p("MOVWZ -%d(R15), R10", l.stack) // load flags to REGTMP
+ p("TMLH R10, $(3<<12)") // restore flags
+ p("MOVD -%d(R15), R10", l.stack+8) // load PC to REGTMP
+ p("JMP (R10)")
+}
+
+func genWasm() {
+ p("// No async preemption on wasm")
+ p("UNDEF")
+}
+
+func notImplemented() {
+ p("// Not implemented yet")
+ p("JMP ·abort(SB)")
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/mksizeclasses.go b/platform/dbops/binaries/go/go/src/runtime/mksizeclasses.go
new file mode 100644
index 0000000000000000000000000000000000000000..26ca49e6eb13d6985722e05344003227e99bffd0
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/mksizeclasses.go
@@ -0,0 +1,355 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+// Generate tables for small malloc size classes.
+//
+// See malloc.go for overview.
+//
+// The size classes are chosen so that rounding an allocation
+// request up to the next size class wastes at most 12.5% (1.125x).
+//
+// Each size class has its own page count that gets allocated
+// and chopped up when new objects of the size class are needed.
+// That page count is chosen so that chopping up the run of
+// pages into objects of the given size wastes at most 12.5% (1.125x)
+// of the memory. It is not necessary that the cutoff here be
+// the same as above.
+//
+// The two sources of waste multiply, so the worst possible case
+// for the above constraints would be that allocations of some
+// size might have a 26.6% (1.266x) overhead.
+// In practice, only one of the wastes comes into play for a
+// given size (sizes < 512 waste mainly on the round-up,
+// sizes > 512 waste mainly on the page chopping).
+// For really small sizes, alignment constraints force the
+// overhead higher.
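+//
+// As a worked example of the bounds above: a 513-byte request rounds
+// up to the 576-byte class (63/513, about 12.3% waste), and a span
+// chopped with a tail of at most 1/8 of its bytes wastes at most
+// 12.5% more; 1.125 * 1.125 = 1.2656, hence the 26.6% figure.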
+ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io" + "log" + "math" + "math/bits" + "os" +) + +// Generate msize.go + +var stdout = flag.Bool("stdout", false, "write to stdout instead of sizeclasses.go") + +func main() { + flag.Parse() + + var b bytes.Buffer + fmt.Fprintln(&b, "// Code generated by mksizeclasses.go; DO NOT EDIT.") + fmt.Fprintln(&b, "//go:generate go run mksizeclasses.go") + fmt.Fprintln(&b) + fmt.Fprintln(&b, "package runtime") + classes := makeClasses() + + printComment(&b, classes) + + printClasses(&b, classes) + + out, err := format.Source(b.Bytes()) + if err != nil { + log.Fatal(err) + } + if *stdout { + _, err = os.Stdout.Write(out) + } else { + err = os.WriteFile("sizeclasses.go", out, 0666) + } + if err != nil { + log.Fatal(err) + } +} + +const ( + // Constants that we use and will transfer to the runtime. + maxSmallSize = 32 << 10 + smallSizeDiv = 8 + smallSizeMax = 1024 + largeSizeDiv = 128 + pageShift = 13 + + // Derived constants. + pageSize = 1 << pageShift +) + +type class struct { + size int // max size + npages int // number of pages +} + +func powerOfTwo(x int) bool { + return x != 0 && x&(x-1) == 0 +} + +func makeClasses() []class { + var classes []class + + classes = append(classes, class{}) // class #0 is a dummy entry + + align := 8 + for size := align; size <= maxSmallSize; size += align { + if powerOfTwo(size) { // bump alignment once in a while + if size >= 2048 { + align = 256 + } else if size >= 128 { + align = size / 8 + } else if size >= 32 { + align = 16 // heap bitmaps assume 16 byte alignment for allocations >= 32 bytes. + } + } + if !powerOfTwo(align) { + panic("incorrect alignment") + } + + // Make the allocnpages big enough that + // the leftover is less than 1/8 of the total, + // so wasted space is at most 12.5%. + allocsize := pageSize + for allocsize%size > allocsize/8 { + allocsize += pageSize + } + npages := allocsize / pageSize + + // If the previous sizeclass chose the same + // allocation size and fit the same number of + // objects into the page, we might as well + // use just this size instead of having two + // different sizes. + if len(classes) > 1 && npages == classes[len(classes)-1].npages && allocsize/size == allocsize/classes[len(classes)-1].size { + classes[len(classes)-1].size = size + continue + } + classes = append(classes, class{size: size, npages: npages}) + } + + // Increase object sizes if we can fit the same number of larger objects + // into the same number of pages. For example, we choose size 8448 above + // with 6 objects in 7 pages. But we can well use object size 9472, + // which is also 6 objects in 7 pages but +1024 bytes (+12.12%). + // We need to preserve at least largeSizeDiv alignment otherwise + // sizeToClass won't work. + for i := range classes { + if i == 0 { + continue + } + c := &classes[i] + psize := c.npages * pageSize + new_size := (psize / (psize / c.size)) &^ (largeSizeDiv - 1) + if new_size > c.size { + c.size = new_size + } + } + + if len(classes) != 68 { + panic("number of size classes has changed") + } + + for i := range classes { + computeDivMagic(&classes[i]) + } + + return classes +} + +// computeDivMagic checks that the division required to compute object +// index from span offset can be computed using 32-bit multiplication. 
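+// (For instance, the 48-byte class uses m = ^uint32(0)/48 + 1 =
+// 89478486, and (1000*m)>>32 == 20 == 1000/48.)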
+// n / c.size is implemented as (n * (^uint32(0)/uint32(c.size) + 1)) >> 32
+// for all 0 <= n <= c.npages * pageSize
+func computeDivMagic(c *class) {
+	// divisor
+	d := c.size
+	if d == 0 {
+		return
+	}
+
+	// maximum input value for which the formula needs to work.
+	max := c.npages * pageSize
+
+	// As reported in [1], if n and d are unsigned N-bit integers, we
+	// can compute n / d as ⌊n * c / 2^F⌋, where c is ⌈2^F / d⌉ and F is
+	// computed with:
+	//
+	//	Algorithm 2: Algorithm to select the number of fractional bits
+	//	and the scaled approximate reciprocal in the case of unsigned
+	//	integers.
+	//
+	//	if d is a power of two then
+	//		Let F ← log₂(d) and c = 1.
+	//	else
+	//		Let F ← N + L where L is the smallest integer
+	//		such that d ≤ (2^(N+L) mod d) + 2^L.
+	//	end if
+	//
+	// [1] "Faster Remainder by Direct Computation: Applications to
+	// Compilers and Software Libraries" Daniel Lemire, Owen Kaser,
+	// Nathan Kurz arXiv:1902.01961
+	//
+	// To minimize the risk of introducing errors, we implement the
+	// algorithm exactly as stated, rather than trying to adapt it to
+	// fit typical Go idioms.
+	N := bits.Len(uint(max))
+	var F int
+	if powerOfTwo(d) {
+		F = int(math.Log2(float64(d)))
+		if d != 1<<F {
+			panic("imprecise log2")
+		}
+	} else {
+		for L := 0; ; L++ {
+			if d <= (1<<(N+L))%d+(1<<L) {
+				F = N + L
+				break
+			}
+		}
+	}
+
+	// The multiplier below is ^uint32(0)/d + 1, i.e. ⌈2^32 / d⌉, so the
+	// formula is only exact if F fits in 32 bits.
+	if F > 32 {
+		fmt.Printf("d=%d max=%d N=%d F=%d\n", c.size, max, N, F)
+		panic("size class requires more than 32 bits of precision")
+	}
+
+	// Brute force double-check with the exact computation that will be
+	// done by the runtime.
+	m := ^uint32(0)/uint32(c.size) + 1
+	for n := 0; n <= max; n++ {
+		if uint32((uint64(n)*uint64(m))>>32) != uint32(n/c.size) {
+			fmt.Printf("d=%d max=%d m=%d n=%d\n", d, max, m, n)
+			panic("bad 32-bit multiply magic")
+		}
+	}
+}
+
+func printComment(w io.Writer, classes []class) {
+	fmt.Fprintf(w, "// %-5s %-9s %-10s %-7s %-10s %-9s %-9s\n", "class", "bytes/obj", "bytes/span", "objects", "tail waste", "max waste", "min align")
+	prevSize := 0
+	var minAligns [pageShift + 1]int
+	for i, c := range classes {
+		if i == 0 {
+			continue
+		}
+		spanSize := c.npages * pageSize
+		objects := spanSize / c.size
+		tailWaste := spanSize - c.size*(spanSize/c.size)
+		maxWaste := float64((c.size-prevSize-1)*objects+tailWaste) / float64(spanSize)
+		alignBits := bits.TrailingZeros(uint(c.size))
+		if alignBits > pageShift {
+			// object alignment is capped at page alignment
+			alignBits = pageShift
+		}
+		for i := range minAligns {
+			if i > alignBits {
+				minAligns[i] = 0
+			} else if minAligns[i] == 0 {
+				minAligns[i] = c.size
+			}
+		}
+		prevSize = c.size
+		fmt.Fprintf(w, "// %5d %9d %10d %7d %10d %8.2f%% %9d\n", i, c.size, spanSize, objects, tailWaste, 100*maxWaste, 1<<alignBits)
+	}
+	fmt.Fprintf(w, "//\n")
+	fmt.Fprintf(w, "// %-9s %-4s %-12s\n", "alignment", "bits", "min obj size")
+	for bits, size := range minAligns {
+		if size == 0 {
+			break
+		}
+		if bits+1 < len(minAligns) && size == minAligns[bits+1] {
+			continue
+		}
+		fmt.Fprintf(w, "// %9d %4d %12d\n", 1<<bits, bits, size)
+	}
+	fmt.Fprintf(w, "\n")
+}
+
+func maxObjsPerSpan(classes []class) int {
+	most := 0
+	for _, c := range classes[1:] {
+		n := c.npages * pageSize / c.size
+		if n > most {
+			most = n
+		}
+	}
+	return most
+}
+
+func printClasses(w io.Writer, classes []class) {
+	fmt.Fprintln(w, "const (")
+	fmt.Fprintf(w, "_MaxSmallSize = %d\n", maxSmallSize)
+	fmt.Fprintf(w, "smallSizeDiv = %d\n", smallSizeDiv)
+	fmt.Fprintf(w, "smallSizeMax = %d\n", smallSizeMax)
+	fmt.Fprintf(w, "largeSizeDiv = %d\n", largeSizeDiv)
+	fmt.Fprintf(w, "_NumSizeClasses = %d\n", len(classes))
+	fmt.Fprintf(w, "_PageShift = %d\n", pageShift)
+	fmt.Fprintf(w, "maxObjsPerSpan = %d\n", maxObjsPerSpan(classes))
+	fmt.Fprintln(w, ")")
+
+	fmt.Fprint(w, "var class_to_size = [_NumSizeClasses]uint16 {")
+	for _, c := range classes {
+		fmt.Fprintf(w, "%d,", c.size)
+	}
+	fmt.Fprintln(w, "}")
+
+	fmt.Fprint(w, "var class_to_allocnpages = [_NumSizeClasses]uint8 {")
+	for _, c := range classes {
+		fmt.Fprintf(w, "%d,", c.npages)
+	}
+	fmt.Fprintln(w, "}")
+
+	fmt.Fprint(w, "var class_to_divmagic = [_NumSizeClasses]uint32 {")
+	for _, c := range classes {
+		if c.size == 0 {
+			fmt.Fprintf(w, "0,")
+			continue
+		}
+		fmt.Fprintf(w, "^uint32(0)/%d+1,", c.size)
+	}
+	fmt.Fprintln(w, "}")
+
+	// map from size to size class, for small sizes.
+	sc := make([]int, smallSizeMax/smallSizeDiv+1)
+	for i := range sc {
+		size := i * smallSizeDiv
+		for j, c := range classes {
+			if c.size >= size {
+				sc[i] = j
+				break
+			}
+		}
+	}
+	fmt.Fprint(w, "var size_to_class8 = [smallSizeMax/smallSizeDiv+1]uint8 {")
+	for _, v := range sc {
+		fmt.Fprintf(w, "%d,", v)
+	}
+	fmt.Fprintln(w, "}")
+
+	// map from size to size class, for large sizes.
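+	// (For the constants above this gives 1024/8+1 = 129 entries in
+	// size_to_class8 and (32768-1024)/128+1 = 249 entries in the
+	// size_to_class128 table built below; illustrative arithmetic only.)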
+ sc = make([]int, (maxSmallSize-smallSizeMax)/largeSizeDiv+1) + for i := range sc { + size := smallSizeMax + i*largeSizeDiv + for j, c := range classes { + if c.size >= size { + sc[i] = j + break + } + } + } + fmt.Fprint(w, "var size_to_class128 = [(_MaxSmallSize-smallSizeMax)/largeSizeDiv+1]uint8 {") + for _, v := range sc { + fmt.Fprintf(w, "%d,", v) + } + fmt.Fprintln(w, "}") +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mmap.go b/platform/dbops/binaries/go/go/src/runtime/mmap.go new file mode 100644 index 0000000000000000000000000000000000000000..9a7b2985623d685ea2dcd2e87b3f389e29c4dfb9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mmap.go @@ -0,0 +1,19 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !aix && !darwin && !js && !((linux && (amd64 || arm64 || loong64)) || (freebsd && amd64)) && !openbsd && !plan9 && !solaris && !windows + +package runtime + +import "unsafe" + +// mmap calls the mmap system call. It is implemented in assembly. +// We only pass the lower 32 bits of file offset to the +// assembly routine; the higher bits (if required), should be provided +// by the assembly routine as 0. +// The err result is an OS error code such as ENOMEM. +func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int) + +// munmap calls the munmap system call. It is implemented in assembly. +func munmap(addr unsafe.Pointer, n uintptr) diff --git a/platform/dbops/binaries/go/go/src/runtime/mpagealloc.go b/platform/dbops/binaries/go/go/src/runtime/mpagealloc.go new file mode 100644 index 0000000000000000000000000000000000000000..d533f84180fb73b541794b9886b2991a0ad4e092 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mpagealloc.go @@ -0,0 +1,1076 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Page allocator. +// +// The page allocator manages mapped pages (defined by pageSize, NOT +// physPageSize) for allocation and re-use. It is embedded into mheap. +// +// Pages are managed using a bitmap that is sharded into chunks. +// In the bitmap, 1 means in-use, and 0 means free. The bitmap spans the +// process's address space. Chunks are managed in a sparse-array-style structure +// similar to mheap.arenas, since the bitmap may be large on some systems. +// +// The bitmap is efficiently searched by using a radix tree in combination +// with fast bit-wise intrinsics. Allocation is performed using an address-ordered +// first-fit approach. +// +// Each entry in the radix tree is a summary that describes three properties of +// a particular region of the address space: the number of contiguous free pages +// at the start and end of the region it represents, and the maximum number of +// contiguous free pages found anywhere in that region. +// +// Each level of the radix tree is stored as one contiguous array, which represents +// a different granularity of subdivision of the processes' address space. Thus, this +// radix tree is actually implicit in these large arrays, as opposed to having explicit +// dynamically-allocated pointer-based node structures. Naturally, these arrays may be +// quite large for system with large address spaces, so in these cases they are mapped +// into memory as needed. The leaf summaries of the tree correspond to a bitmap chunk. 
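+//
+// (Scale illustration, assuming the 48-bit configuration described below:
+// with 8 KiB pages and 512-page chunks, one chunk covers 4 MiB and its
+// bitmap occupies 64 bytes; covering a full 48-bit address space with a
+// single flat bitmap would need 4 GiB, which is why the chunks are kept
+// in a sparse array instead.)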
+// +// The root level (referred to as L0 and index 0 in pageAlloc.summary) has each +// summary represent the largest section of address space (16 GiB on 64-bit systems), +// with each subsequent level representing successively smaller subsections until we +// reach the finest granularity at the leaves, a chunk. +// +// More specifically, each summary in each level (except for leaf summaries) +// represents some number of entries in the following level. For example, each +// summary in the root level may represent a 16 GiB region of address space, +// and in the next level there could be 8 corresponding entries which represent 2 +// GiB subsections of that 16 GiB region, each of which could correspond to 8 +// entries in the next level which each represent 256 MiB regions, and so on. +// +// Thus, this design only scales to heaps so large, but can always be extended to +// larger heaps by simply adding levels to the radix tree, which mostly costs +// additional virtual address space. The choice of managing large arrays also means +// that a large amount of virtual address space may be reserved by the runtime. + +package runtime + +import ( + "runtime/internal/atomic" + "unsafe" +) + +const ( + // The size of a bitmap chunk, i.e. the amount of bits (that is, pages) to consider + // in the bitmap at once. + pallocChunkPages = 1 << logPallocChunkPages + pallocChunkBytes = pallocChunkPages * pageSize + logPallocChunkPages = 9 + logPallocChunkBytes = logPallocChunkPages + pageShift + + // The number of radix bits for each level. + // + // The value of 3 is chosen such that the block of summaries we need to scan at + // each level fits in 64 bytes (2^3 summaries * 8 bytes per summary), which is + // close to the L1 cache line width on many systems. Also, a value of 3 fits 4 tree + // levels perfectly into the 21-bit pallocBits summary field at the root level. + // + // The following equation explains how each of the constants relate: + // summaryL0Bits + (summaryLevels-1)*summaryLevelBits + logPallocChunkBytes = heapAddrBits + // + // summaryLevels is an architecture-dependent value defined in mpagealloc_*.go. + summaryLevelBits = 3 + summaryL0Bits = heapAddrBits - logPallocChunkBytes - (summaryLevels-1)*summaryLevelBits + + // pallocChunksL2Bits is the number of bits of the chunk index number + // covered by the second level of the chunks map. + // + // See (*pageAlloc).chunks for more details. Update the documentation + // there should this change. + pallocChunksL2Bits = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits + pallocChunksL1Shift = pallocChunksL2Bits +) + +// maxSearchAddr returns the maximum searchAddr value, which indicates +// that the heap has no free space. +// +// This function exists just to make it clear that this is the maximum address +// for the page allocator's search space. See maxOffAddr for details. +// +// It's a function (rather than a variable) because it needs to be +// usable before package runtime's dynamic initialization is complete. +// See #51913 for details. +func maxSearchAddr() offAddr { return maxOffAddr } + +// Global chunk index. +// +// Represents an index into the leaf level of the radix tree. +// Similar to arenaIndex, except instead of arenas, it divides the address +// space into chunks. +type chunkIdx uint + +// chunkIndex returns the global index of the palloc chunk containing the +// pointer p. 
+func chunkIndex(p uintptr) chunkIdx {
+	return chunkIdx((p - arenaBaseOffset) / pallocChunkBytes)
+}
+
+// chunkBase returns the base address of the palloc chunk at index ci.
+func chunkBase(ci chunkIdx) uintptr {
+	return uintptr(ci)*pallocChunkBytes + arenaBaseOffset
+}
+
+// chunkPageIndex computes the index of the page that contains p,
+// relative to the chunk which contains p.
+func chunkPageIndex(p uintptr) uint {
+	return uint(p % pallocChunkBytes / pageSize)
+}
+
+// l1 returns the index into the first level of (*pageAlloc).chunks.
+func (i chunkIdx) l1() uint {
+	if pallocChunksL1Bits == 0 {
+		// Let the compiler optimize this away if there's no
+		// L1 map.
+		return 0
+	} else {
+		return uint(i) >> pallocChunksL1Shift
+	}
+}
+
+// l2 returns the index into the second level of (*pageAlloc).chunks.
+func (i chunkIdx) l2() uint {
+	if pallocChunksL1Bits == 0 {
+		return uint(i)
+	} else {
+		return uint(i) & (1<<pallocChunksL2Bits - 1)
+	}
+}
+
+// offAddrToLevelIndex converts an address in the offset address space
+// to the index into summary[level] containing addr.
+func offAddrToLevelIndex(level int, addr offAddr) int {
+	return int((addr.a - arenaBaseOffset) >> levelShift[level])
+}
+
+// levelIndexToOffAddr converts an index into summary[level] into
+// the corresponding address in the offset address space.
+func levelIndexToOffAddr(level, idx int) offAddr {
+	return offAddr{(uintptr(idx) << levelShift[level]) + arenaBaseOffset}
+}
+
+// addrsToSummaryRange converts base and limit pointers into a range
+// of entries for the given summary level.
+//
+// The returned range is inclusive on the lower bound and exclusive on
+// the upper bound.
+func addrsToSummaryRange(level int, base, limit uintptr) (lo int, hi int) {
+	// This is slightly more nuanced than just a shift for the exclusive
+	// upper-bound. Note that the exclusive upper bound may be within a
+	// summary at this level, meaning if we just do the obvious computation
+	// hi will end up being an inclusive upper bound. Unfortunately, just
+	// adding 1 to that is too broad since we might be on the very edge
+	// of a summary's max page count boundary for this level
+	// (1 << levelLogPages[level]). So, make limit an inclusive upper bound
+	// then shift, then add 1, so we get an exclusive upper bound at the end.
+	lo = int((base - arenaBaseOffset) >> levelShift[level])
+	hi = int(((limit-1)-arenaBaseOffset)>>levelShift[level]) + 1
+	return
+}
+
+// blockAlignSummaryRange aligns indices into the given level to that
+// level's block width (1 << levelBits[level]). It assumes lo is inclusive
+// and hi is exclusive, and so aligns them down and up respectively.
+func blockAlignSummaryRange(level int, lo, hi int) (int, int) {
+	e := uintptr(1) << levelBits[level]
+	return int(alignDown(uintptr(lo), e)), int(alignUp(uintptr(hi), e))
+}
+
+type pageAlloc struct {
+	// Radix tree of summaries.
+	//
+	// Each slice's cap represents the whole memory reservation.
+	// Each slice's len reflects the allocator's maximum known
+	// mapped heap address for that level.
+	//
+	// The backing store of each summary level is reserved in init
+	// and may or may not be committed in grow (small address spaces
+	// may commit all the memory in init).
+	//
+	// The purpose of keeping len <= cap is to enforce bounds checks
+	// on the top end of the slice so that instead of an unknown
+	// runtime segmentation fault, we get a much friendlier out-of-bounds
+	// error.
+	//
+	// To iterate over a summary level, use inUse to determine which ranges
+	// are currently available. Otherwise one might try to access
+	// memory which is only Reserved which may result in a hard fault.
+	//
+	// We may still get segmentation faults < len since some of that
+	// memory may not be committed yet.
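+	// (Size illustration, assuming the 48-bit configuration: the five
+	// levels hold 2^14, 2^17, 2^20, 2^23, and 2^26 summaries of 8 bytes
+	// each, so the leaf level alone reserves 512 MiB of address space.)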
+ summary [summaryLevels][]pallocSum + + // chunks is a slice of bitmap chunks. + // + // The total size of chunks is quite large on most 64-bit platforms + // (O(GiB) or more) if flattened, so rather than making one large mapping + // (which has problems on some platforms, even when PROT_NONE) we use a + // two-level sparse array approach similar to the arena index in mheap. + // + // To find the chunk containing a memory address `a`, do: + // chunkOf(chunkIndex(a)) + // + // Below is a table describing the configuration for chunks for various + // heapAddrBits supported by the runtime. + // + // heapAddrBits | L1 Bits | L2 Bits | L2 Entry Size + // ------------------------------------------------ + // 32 | 0 | 10 | 128 KiB + // 33 (iOS) | 0 | 11 | 256 KiB + // 48 | 13 | 13 | 1 MiB + // + // There's no reason to use the L1 part of chunks on 32-bit, the + // address space is small so the L2 is small. For platforms with a + // 48-bit address space, we pick the L1 such that the L2 is 1 MiB + // in size, which is a good balance between low granularity without + // making the impact on BSS too high (note the L1 is stored directly + // in pageAlloc). + // + // To iterate over the bitmap, use inUse to determine which ranges + // are currently available. Otherwise one might iterate over unused + // ranges. + // + // Protected by mheapLock. + // + // TODO(mknyszek): Consider changing the definition of the bitmap + // such that 1 means free and 0 means in-use so that summaries and + // the bitmaps align better on zero-values. + chunks [1 << pallocChunksL1Bits]*[1 << pallocChunksL2Bits]pallocData + + // The address to start an allocation search with. It must never + // point to any memory that is not contained in inUse, i.e. + // inUse.contains(searchAddr.addr()) must always be true. The one + // exception to this rule is that it may take on the value of + // maxOffAddr to indicate that the heap is exhausted. + // + // We guarantee that all valid heap addresses below this value + // are allocated and not worth searching. + searchAddr offAddr + + // start and end represent the chunk indices + // which pageAlloc knows about. It assumes + // chunks in the range [start, end) are + // currently ready to use. + start, end chunkIdx + + // inUse is a slice of ranges of address space which are + // known by the page allocator to be currently in-use (passed + // to grow). + // + // We care much more about having a contiguous heap in these cases + // and take additional measures to ensure that, so in nearly all + // cases this should have just 1 element. + // + // All access is protected by the mheapLock. + inUse addrRanges + + // scav stores the scavenger state. + scav struct { + // index is an efficient index of chunks that have pages available to + // scavenge. + index scavengeIndex + + // releasedBg is the amount of memory released in the background this + // scavenge cycle. + releasedBg atomic.Uintptr + + // releasedEager is the amount of memory released eagerly this scavenge + // cycle. + releasedEager atomic.Uintptr + } + + // mheap_.lock. This level of indirection makes it possible + // to test pageAlloc independently of the runtime allocator. + mheapLock *mutex + + // sysStat is the runtime memstat to update when new system + // memory is committed by the pageAlloc for allocation metadata. + sysStat *sysMemStat + + // summaryMappedReady is the number of bytes mapped in the Ready state + // in the summary structure. Used only for testing currently. + // + // Protected by mheapLock. 
+	summaryMappedReady uintptr
+
+	// chunkHugePages indicates whether page bitmap chunks should be backed
+	// by huge pages.
+	chunkHugePages bool
+
+	// Whether or not this struct is being used in tests.
+	test bool
+}
+
+func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat, test bool) {
+	if levelLogPages[0] > logMaxPackedValue {
+		// We can't represent 1<<levelLogPages[0] pages, the maximum number
+		// of pages we need to represent at the root level, in a summary,
+		// which is a big problem. Throw.
+		print("runtime: root level max pages = ", 1<<levelLogPages[0], "\n")
+		print("runtime: summary max pages = ", maxPackedValue, "\n")
+		throw("root level max pages doesn't fit in summary")
+	}
+	p.sysStat = sysStat
+
+	// Initialize p.inUse.
+	p.inUse.init(sysStat)
+
+	// System-dependent initialization.
+	p.sysInit(test)
+
+	// Start with the searchAddr in a state indicating there's no free memory.
+	p.searchAddr = maxSearchAddr()
+
+	// Set the mheapLock.
+	p.mheapLock = mheapLock
+
+	// Initialize the scavenge index.
+	p.summaryMappedReady += p.scav.index.sysInit(test, sysStat)
+
+	// Set if we're in a test.
+	p.test = test
+}
+
+// chunkOf returns the chunk at the given chunk index.
+//
+// The chunk index must be valid or this method may throw.
+func (p *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
+	return &p.chunks[ci.l1()][ci.l2()]
+}
+
+// grow sets up the metadata for the address range [base, base+size).
+// It may allocate metadata, in which case *p.sysStat will be updated.
+//
+// p.mheapLock must be held.
+func (p *pageAlloc) grow(base, size uintptr) {
+	assertLockHeld(p.mheapLock)
+
+	// Round up to chunks, since we can't deal with increments smaller
+	// than chunks. Also, sysGrow expects aligned values.
+	limit := alignUp(base+size, pallocChunkBytes)
+	base = alignDown(base, pallocChunkBytes)
+
+	// Grow the summary levels in a system-dependent manner.
+	// We just update a bunch of additional metadata here.
+	p.sysGrow(base, limit)
+
+	// Update p.start and p.end.
+	// If no growth happened yet, start == 0. This is generally
+	// safe since the zero page is unmapped.
+	firstGrowth := p.start == 0
+	start, end := chunkIndex(base), chunkIndex(limit)
+	if firstGrowth || start < p.start {
+		p.start = start
+	}
+	if end > p.end {
+		p.end = end
+	}
+	// Note that [base, limit) will never overlap with any existing
+	// range inUse because grow only ever adds never-used memory
+	// regions to the page allocator.
+	p.inUse.add(makeAddrRange(base, limit))
+
+	// A grow operation is a lot like a free operation, so if our
+	// chunk ends up below p.searchAddr, update p.searchAddr to the
+	// new address, just like in free.
+	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
+		p.searchAddr = b
+	}
+
+	// Add entries into chunks, which is sparse, if needed. Then,
+	// initialize the bitmap.
+	//
+	// Newly-grown memory is always considered scavenged.
+	// Set all the bits in the scavenged bitmaps high.
+	for c := chunkIndex(base); c < chunkIndex(limit); c++ {
+		if p.chunks[c.l1()] == nil {
+			// Create the necessary l2 entry.
+			const l2Size = unsafe.Sizeof(*p.chunks[0])
+			r := sysAlloc(l2Size, p.sysStat)
+			if r == nil {
+				throw("pageAlloc: out of memory")
+			}
+			if !p.test {
+				// Make the chunk mapping eligible or ineligible
+				// for huge pages, depending on what our current
+				// state is.
+				if p.chunkHugePages {
+					sysHugePage(r, l2Size)
+				} else {
+					sysNoHugePage(r, l2Size)
+				}
+			}
+			// Store the new chunk block but avoid a write barrier.
+			// grow is used in call chains that disallow write barriers.
+			*(*uintptr)(unsafe.Pointer(&p.chunks[c.l1()])) = uintptr(r)
+		}
+		p.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
+	}
+
+	// Update summaries accordingly. The grow acts like a free, so
+	// we need to ensure this newly-free memory is visible in the
+	// summaries.
+	p.update(base, size/pageSize, true, false)
+}
+
+// enableChunkHugePages enables huge pages for the chunk bitmap mappings (disabled by default).
+//
+// This function is idempotent.
+//
+// A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
+// time, but may take time proportional to the size of the mapped heap beyond that.
+//
+// The heap lock must not be held over this operation, since it will briefly acquire
+// the heap lock.
+//
+// Must be called on the system stack because it acquires the heap lock.
+//
+//go:systemstack
+func (p *pageAlloc) enableChunkHugePages() {
+	// Grab the heap lock to turn on huge pages for new chunks and clone the current
+	// heap address space ranges.
+	//
+	// After the lock is released, we can be sure that bitmaps for any new chunks may
+	// be backed with huge pages, and we have the address space for the rest of the
+	// chunks. At the end of this function, all chunk metadata should be backed by huge
+	// pages.
+	lock(&mheap_.lock)
+	if p.chunkHugePages {
+		unlock(&mheap_.lock)
+		return
+	}
+	p.chunkHugePages = true
+	var inUse addrRanges
+	inUse.sysStat = p.sysStat
+	p.inUse.cloneInto(&inUse)
+	unlock(&mheap_.lock)
+
+	// This might seem like a lot of work, but all these loops are for generality.
+	//
+	// For a 1 GiB contiguous heap, a 48-bit address space, 13 L1 bits, a palloc chunk size
+	// of 4 MiB, and adherence to the default set of heap address hints, this will result in
+	// exactly 1 call to sysHugePage.
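+	// (Arithmetic behind that claim: 1 GiB of heap is 256 of the 4 MiB
+	// chunks, and with 13 L2 bits each L1 slot covers 2^13 chunks, so a
+	// contiguous 1 GiB heap normally touches a single L1 slot and hence
+	// needs only one sysHugePage call.)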
+	for _, r := range p.inUse.ranges {
+		for i := chunkIndex(r.base.addr()).l1(); i < chunkIndex(r.limit.addr()-1).l1(); i++ {
+			// N.B. We can assume that p.chunks[i] is non-nil and in a mapped part of p.chunks
+			// because it's derived from inUse, which never shrinks.
+			sysHugePage(unsafe.Pointer(p.chunks[i]), unsafe.Sizeof(*p.chunks[0]))
+		}
+	}
+}
+
+// update updates heap metadata. It must be called each time the bitmap
+// is updated.
+//
+// If contig is true, update does some optimizations assuming that there was
+// a contiguous allocation or free between addr and addr+npages. alloc indicates
+// whether the operation performed was an allocation or a free.
+//
+// p.mheapLock must be held.
+func (p *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
+	assertLockHeld(p.mheapLock)
+
+	// base, limit, start, and end are inclusive.
+	limit := base + npages*pageSize - 1
+	sc, ec := chunkIndex(base), chunkIndex(limit)
+
+	// Handle updating the lowest level first.
+	if sc == ec {
+		// Fast path: the allocation doesn't span more than one chunk,
+		// so update this one and if the summary didn't change, return.
+		x := p.summary[len(p.summary)-1][sc]
+		y := p.chunkOf(sc).summarize()
+		if x == y {
+			return
+		}
+		p.summary[len(p.summary)-1][sc] = y
+	} else if contig {
+		// Slow contiguous path: the allocation spans more than one chunk
+		// and at least one summary is guaranteed to change.
+		summary := p.summary[len(p.summary)-1]
+
+		// Update the summary for chunk sc.
+		summary[sc] = p.chunkOf(sc).summarize()
+
+		// Update the summaries for chunks in between, which are
+		// either totally allocated or freed.
+		whole := p.summary[len(p.summary)-1][sc+1 : ec]
+		if alloc {
+			// Should optimize into a memclr.
+			for i := range whole {
+				whole[i] = 0
+			}
+		} else {
+			for i := range whole {
+				whole[i] = freeChunkSum
+			}
+		}
+
+		// Update the summary for chunk ec.
+		summary[ec] = p.chunkOf(ec).summarize()
+	} else {
+		// Slow general path: the allocation spans more than one chunk
+		// and at least one summary is guaranteed to change.
+		//
+		// We can't assume a contiguous allocation happened, so walk over
+		// every chunk in the range and manually recompute the summary.
+		summary := p.summary[len(p.summary)-1]
+		for c := sc; c <= ec; c++ {
+			summary[c] = p.chunkOf(c).summarize()
+		}
+	}
+
+	// Walk up the radix tree and update the summaries appropriately.
+	changed := true
+	for l := len(p.summary) - 2; l >= 0 && changed; l-- {
+		// Update summaries at level l from summaries at level l+1.
+		changed = false
+
+		// "Constants" for the previous level which we
+		// need to compute the summary from that level.
+		logEntriesPerBlock := levelBits[l+1]
+		logMaxPages := levelLogPages[l+1]
+
+		// lo and hi describe all the parts of the level we need to look at.
+		lo, hi := addrsToSummaryRange(l, base, limit+1)
+
+		// Iterate over each block, updating the corresponding summary in the less-granular level.
+		for i := lo; i < hi; i++ {
+			children := p.summary[l+1][i<<logEntriesPerBlock : (i+1)<<logEntriesPerBlock]
+			sum := mergeSummaries(children, logMaxPages)
+			old := p.summary[l][i]
+			if old != sum {
+				changed = true
+				p.summary[l][i] = sum
+			}
+		}
+	}
+}
+
+// allocRange marks the range of memory [base, base+npages*pageSize) as
+// allocated. It also updates the summaries to reflect the newly-updated
+// bitmap.
+//
+// Returns the amount of scavenged memory in bytes present in the
+// allocated range.
+//
+// p.mheapLock must be held.
+func (p *pageAlloc) allocRange(base, npages uintptr) uintptr {
+	assertLockHeld(p.mheapLock)
+
+	limit := base + npages*pageSize - 1
+	sc, ec := chunkIndex(base), chunkIndex(limit)
+	si, ei := chunkPageIndex(base), chunkPageIndex(limit)
+
+	scav := uintptr(0)
+	if sc == ec {
+		// The range doesn't cross any chunk boundaries.
+		chunk := p.chunkOf(sc)
+		scav += chunk.scavenged.popcntRange(si, ei+1-si)
+		chunk.allocRange(si, ei+1-si)
+		p.scav.index.alloc(sc, ei+1-si)
+	} else {
+		// The range crosses at least one chunk boundary.
+		chunk := p.chunkOf(sc)
+		scav += chunk.scavenged.popcntRange(si, pallocChunkPages-si)
+		chunk.allocRange(si, pallocChunkPages-si)
+		p.scav.index.alloc(sc, pallocChunkPages-si)
+		for c := sc + 1; c < ec; c++ {
+			chunk := p.chunkOf(c)
+			scav += chunk.scavenged.popcntRange(0, pallocChunkPages)
+			chunk.allocAll()
+			p.scav.index.alloc(c, pallocChunkPages)
+		}
+		chunk = p.chunkOf(ec)
+		scav += chunk.scavenged.popcntRange(0, ei+1)
+		chunk.allocRange(0, ei+1)
+		p.scav.index.alloc(ec, ei+1)
+	}
+	p.update(base, npages, true, true)
+	return scav
+}
+
+// findMappedAddr returns the smallest mapped offAddr that is
+// >= addr. That is, if addr refers to mapped memory, then it is
+// returned. If addr is higher than any mapped region, then
+// it returns maxOffAddr.
+//
+// p.mheapLock must be held.
+func (p *pageAlloc) findMappedAddr(addr offAddr) offAddr {
+	assertLockHeld(p.mheapLock)
+
+	// If we're not in a test, validate first by checking mheap_.arenas.
+	// This is a fast path which is only safe to use outside of testing.
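+	// (In other words, mheap_.arenas is consulted only as a cheap
+	// existence check here; the authoritative answer comes from the
+	// p.inUse search below.)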
+ ai := arenaIndex(addr.addr()) + if p.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil { + vAddr, ok := p.inUse.findAddrGreaterEqual(addr.addr()) + if ok { + return offAddr{vAddr} + } else { + // The candidate search address is greater than any + // known address, which means we definitely have no + // free memory left. + return maxOffAddr + } + } + return addr +} + +// find searches for the first (address-ordered) contiguous free region of +// npages in size and returns a base address for that region. +// +// It uses p.searchAddr to prune its search and assumes that no palloc chunks +// below chunkIndex(p.searchAddr) contain any free memory at all. +// +// find also computes and returns a candidate p.searchAddr, which may or +// may not prune more of the address space than p.searchAddr already does. +// This candidate is always a valid p.searchAddr. +// +// find represents the slow path and the full radix tree search. +// +// Returns a base address of 0 on failure, in which case the candidate +// searchAddr returned is invalid and must be ignored. +// +// p.mheapLock must be held. +func (p *pageAlloc) find(npages uintptr) (uintptr, offAddr) { + assertLockHeld(p.mheapLock) + + // Search algorithm. + // + // This algorithm walks each level l of the radix tree from the root level + // to the leaf level. It iterates over at most 1 << levelBits[l] of entries + // in a given level in the radix tree, and uses the summary information to + // find either: + // 1) That a given subtree contains a large enough contiguous region, at + // which point it continues iterating on the next level, or + // 2) That there are enough contiguous boundary-crossing bits to satisfy + // the allocation, at which point it knows exactly where to start + // allocating from. + // + // i tracks the index into the current level l's structure for the + // contiguous 1 << levelBits[l] entries we're actually interested in. + // + // NOTE: Technically this search could allocate a region which crosses + // the arenaBaseOffset boundary, which when arenaBaseOffset != 0, is + // a discontinuity. However, the only way this could happen is if the + // page at the zero address is mapped, and this is impossible on + // every system we support where arenaBaseOffset != 0. So, the + // discontinuity is already encoded in the fact that the OS will never + // map the zero page for us, and this function doesn't try to handle + // this case in any way. + + // i is the beginning of the block of entries we're searching at the + // current level. + i := 0 + + // firstFree is the region of address space that we are certain to + // find the first free page in the heap. base and bound are the inclusive + // bounds of this window, and both are addresses in the linearized, contiguous + // view of the address space (with arenaBaseOffset pre-added). At each level, + // this window is narrowed as we find the memory region containing the + // first free page of memory. To begin with, the range reflects the + // full process address space. + // + // firstFree is updated by calling foundFree each time free space in the + // heap is discovered. + // + // At the end of the search, base.addr() is the best new + // searchAddr we could deduce in this search. + firstFree := struct { + base, bound offAddr + }{ + base: minOffAddr, + bound: maxOffAddr, + } + // foundFree takes the given address range [addr, addr+size) and + // updates firstFree if it is a narrower range. 
The input range must
+	// either be fully contained within firstFree or not overlap with it
+	// at all.
+	//
+	// This way, we'll record the first summary we find with any free
+	// pages on the root level and narrow that down if we descend into
+	// that summary. But as soon as we need to iterate beyond that summary
+	// in a level to find a large enough range, we'll stop narrowing.
+	foundFree := func(addr offAddr, size uintptr) {
+		if firstFree.base.lessEqual(addr) && addr.add(size-1).lessEqual(firstFree.bound) {
+			// This range fits within the current firstFree window, so narrow
+			// down the firstFree window to the base and bound of this range.
+			firstFree.base = addr
+			firstFree.bound = addr.add(size - 1)
+		} else if !(addr.add(size-1).lessThan(firstFree.base) || firstFree.bound.lessThan(addr)) {
+			// This range only partially overlaps with the firstFree range,
+			// so throw.
+			print("runtime: addr = ", hex(addr.addr()), ", size = ", size, "\n")
+			print("runtime: base = ", hex(firstFree.base.addr()), ", bound = ", hex(firstFree.bound.addr()), "\n")
+			throw("range partially overlaps")
+		}
+	}
+
+	// lastSum is the summary which we saw on the previous level that made us
+	// move on to the next level. Used to print additional information in the
+	// case of a catastrophic failure.
+	// lastSumIdx is that summary's index in the previous level.
+	lastSum := packPallocSum(0, 0, 0)
+	lastSumIdx := -1
+
+nextLevel:
+	for l := 0; l < len(p.summary); l++ {
+		// For the root level, entriesPerBlock is the whole level.
+		entriesPerBlock := 1 << levelBits[l]
+		logMaxPages := levelLogPages[l]
+
+		// We've moved into a new level, so let's update i to our new
+		// starting index. This is a no-op for level 0.
+		i <<= levelBits[l]
+
+		// Slice out the block of entries we care about.
+		entries := p.summary[l][i : i+entriesPerBlock]
+
+		// Determine j0, the first index we should start iterating from.
+		// The searchAddr may help us eliminate iterations if we followed the
+		// searchAddr on the previous level or we're on the root level, in which
+		// case the searchAddr should be the same as i after levelShift.
+		j0 := 0
+		if searchIdx := offAddrToLevelIndex(l, p.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
+			j0 = searchIdx & (entriesPerBlock - 1)
+		}
+
+		// Run over the level entries looking for
+		// a contiguous run of at least npages either
+		// within an entry or across entries.
+		//
+		// base contains the page index (relative to
+		// the first entry's first page) of the currently
+		// considered run of consecutive pages.
+		//
+		// size contains the size of the currently considered
+		// run of consecutive pages.
+		var base, size uint
+		for j := j0; j < len(entries); j++ {
+			sum := entries[j]
+			if sum == 0 {
+				// A full entry means we broke any streak and
+				// that we should skip it altogether.
+				size = 0
+				continue
+			}
+
+			// We've encountered a non-zero summary which means
+			// free memory, so update firstFree.
+			foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)
+
+			s := sum.start()
+			if size+s >= uint(npages) {
+				// If size == 0 we don't have a run yet,
+				// which means base isn't valid. So, set
+				// base to the first page in this block.
+				if size == 0 {
+					base = uint(j) << logMaxPages
+				}
+				// We hit npages; we're done!
+				size += s
+				break
+			}
+			if sum.max() >= uint(npages) {
+				// The entry itself contains npages contiguous
+				// free pages, so continue on the next level
+				// to find that run.
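+				// (Note that i is advanced to this entry first, so the
+				// i <<= levelBits[l] at the top of the loop lands exactly
+				// on this entry's children in the next level.)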
+				i += j
+				lastSumIdx = i
+				lastSum = sum
+				continue nextLevel
+			}
+			if size == 0 || s < 1<<logMaxPages {
+				// We either don't have a current run started, or this
+				// entry isn't totally free (meaning we can't continue the
+				// current one), so try to begin a new run starting from
+				// this entry's free tail.
+				size = sum.end()
+				base = uint(j+1)<<logMaxPages - size
+				continue
+			}
+			// The entry is completely free, so continue the run.
+			size += 1 << logMaxPages
+		}
+		if size >= uint(npages) {
+			// We found a sufficiently large run of free pages straddling
+			// some boundary, so compute the address and return it.
+			addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
+			return addr, p.findMappedAddr(firstFree.base)
+		}
+		if l == 0 {
+			// We're at level zero, so that means we've exhausted our search.
+			return 0, maxSearchAddr()
+		}
+
+		// We're not at level zero, and we exhausted the level we were looking in.
+		// This means that either our calculations were wrong or the level above
+		// lied to us. In either case, dump some useful state and throw.
+		print("runtime: summary[", l-1, "][", lastSumIdx, "] = ", lastSum.start(), ", ", lastSum.max(), ", ", lastSum.end(), "\n")
+		print("runtime: level = ", l, ", npages = ", npages, ", j0 = ", j0, "\n")
+		print("runtime: p.searchAddr = ", hex(p.searchAddr.addr()), ", i = ", i, "\n")
+		print("runtime: levelShift[level] = ", levelShift[l], ", levelBits[level] = ", levelBits[l], "\n")
+		for j := 0; j < len(entries); j++ {
+			sum := entries[j]
+			print("runtime: summary[", l, "][", i+j, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
+		}
+		throw("bad summary data")
+	}
+
+	// Since we've gotten to this point, that means we haven't found a
+	// sufficiently-sized free region straddling some boundary (chunk or larger).
+	// This means the last summary we inspected must have had a large enough "max"
+	// value, so look inside the chunk to find a suitable run.
+	//
+	// After iterating over all levels, i must contain a chunk index which
+	// is what the final level represents.
+	ci := chunkIdx(i)
+	j, searchIdx := p.chunkOf(ci).find(npages, 0)
+	if j == ^uint(0) {
+		// We couldn't find any space in this chunk despite the summaries telling
+		// us it should be there. There's likely a bug, so dump some state and throw.
+		sum := p.summary[len(p.summary)-1][i]
+		print("runtime: summary[", len(p.summary)-1, "][", i, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
+		print("runtime: npages = ", npages, "\n")
+		throw("bad summary data")
+	}
+
+	// Compute the address at which the free space starts.
+	addr := chunkBase(ci) + uintptr(j)*pageSize
+
+	// Since we actually searched the chunk, we may have
+	// found an even narrower free window.
+	searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
+	foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr)
+	return addr, p.findMappedAddr(firstFree.base)
+}
+
+// alloc allocates npages worth of memory from the page heap, returning the base
+// address for the allocation and the amount of scavenged memory in bytes
+// contained in the region [base address, base address + npages*pageSize).
+//
+// Returns a 0 base address on failure, in which case other returned values
+// should be ignored.
+//
+// p.mheapLock must be held.
+//
+// Must run on the system stack because p.mheapLock must be held.
+//
+//go:systemstack
+func (p *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
+	assertLockHeld(p.mheapLock)
+
+	// If the searchAddr refers to a region which has a higher address than
+	// any known chunk, then we know we're out of memory.
+	if chunkIndex(p.searchAddr.addr()) >= p.end {
+		return 0, 0
+	}
+
+	// If npages has a chance of fitting in the chunk where the searchAddr is,
+	// search it directly.
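+	// (The fast path below can only help when the run would not cross a
+	// chunk boundary: pallocChunkPages - chunkPageIndex(p.searchAddr) is
+	// the number of pages remaining in searchAddr's chunk.)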
+	searchAddr := minOffAddr
+	if pallocChunkPages-chunkPageIndex(p.searchAddr.addr()) >= uint(npages) {
+		// npages is guaranteed to be no greater than pallocChunkPages here.
+		i := chunkIndex(p.searchAddr.addr())
+		if max := p.summary[len(p.summary)-1][i].max(); max >= uint(npages) {
+			j, searchIdx := p.chunkOf(i).find(npages, chunkPageIndex(p.searchAddr.addr()))
+			if j == ^uint(0) {
+				print("runtime: max = ", max, ", npages = ", npages, "\n")
+				print("runtime: searchIdx = ", chunkPageIndex(p.searchAddr.addr()), ", p.searchAddr = ", hex(p.searchAddr.addr()), "\n")
+				throw("bad summary data")
+			}
+			addr = chunkBase(i) + uintptr(j)*pageSize
+			searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
+			goto Found
+		}
+	}
+	// We failed to use a searchAddr for one reason or another, so try
+	// the slow path.
+	addr, searchAddr = p.find(npages)
+	if addr == 0 {
+		if npages == 1 {
+			// We failed to find a single free page, the smallest unit
+			// of allocation. This means we know the heap is completely
+			// exhausted. Otherwise, the heap still might have free
+			// space in it, just not enough contiguous space to
+			// accommodate npages.
+			p.searchAddr = maxSearchAddr()
+		}
+		return 0, 0
+	}
+Found:
+	// Go ahead and actually mark the bits now that we have an address.
+	scav = p.allocRange(addr, npages)
+
+	// If we found a higher searchAddr, we know that all the
+	// heap memory before that searchAddr in an offset address space is
+	// allocated, so bump p.searchAddr up to the new one.
+	if p.searchAddr.lessThan(searchAddr) {
+		p.searchAddr = searchAddr
+	}
+	return addr, scav
+}
+
+// free returns npages worth of memory starting at base back to the page heap.
+//
+// p.mheapLock must be held.
+//
+// Must run on the system stack because p.mheapLock must be held.
+//
+//go:systemstack
+func (p *pageAlloc) free(base, npages uintptr) {
+	assertLockHeld(p.mheapLock)
+
+	// If we're freeing pages below the p.searchAddr, update searchAddr.
+	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
+		p.searchAddr = b
+	}
+	limit := base + npages*pageSize - 1
+	if npages == 1 {
+		// Fast path: we're clearing a single bit, and we know exactly
+		// where it is, so mark it directly.
+		i := chunkIndex(base)
+		pi := chunkPageIndex(base)
+		p.chunkOf(i).free1(pi)
+		p.scav.index.free(i, pi, 1)
+	} else {
+		// Slow path: we're clearing more bits so we may need to iterate.
+		sc, ec := chunkIndex(base), chunkIndex(limit)
+		si, ei := chunkPageIndex(base), chunkPageIndex(limit)
+
+		if sc == ec {
+			// The range doesn't cross any chunk boundaries.
+			p.chunkOf(sc).free(si, ei+1-si)
+			p.scav.index.free(sc, si, ei+1-si)
+		} else {
+			// The range crosses at least one chunk boundary.
+			p.chunkOf(sc).free(si, pallocChunkPages-si)
+			p.scav.index.free(sc, si, pallocChunkPages-si)
+			for c := sc + 1; c < ec; c++ {
+				p.chunkOf(c).freeAll()
+				p.scav.index.free(c, 0, pallocChunkPages)
+			}
+			p.chunkOf(ec).free(0, ei+1)
+			p.scav.index.free(ec, 0, ei+1)
+		}
+	}
+	p.update(base, npages, true, false)
+}
+
+const (
+	pallocSumBytes = unsafe.Sizeof(pallocSum(0))
+
+	// maxPackedValue is the maximum value that any of the three fields in
+	// the pallocSum may take on.
+	maxPackedValue    = 1 << logMaxPackedValue
+	logMaxPackedValue = logPallocChunkPages + (summaryLevels-1)*summaryLevelBits
+
+	freeChunkSum = pallocSum(uint64(pallocChunkPages) |
+		uint64(pallocChunkPages<<logMaxPackedValue) |
+		uint64(pallocChunkPages<<(2*logMaxPackedValue)))
+)
+
+// pallocSum is a packed summary type which packs three numbers: start, max,
+// and end into a single 8-byte value. Each of these values are a summary of
+// a bitmap and are thus counts, each of which may have a maximum value of
+// 2^21 - 1, or all three may be equal to 2^21. The latter case is represented
+// by just setting the 64th bit.
+type pallocSum uint64
+
+// packPallocSum takes a start, max, and end value and produces a pallocSum.
+func packPallocSum(start, max, end uint) pallocSum {
+	if max == maxPackedValue {
+		return pallocSum(uint64(1 << 63))
+	}
+	return pallocSum((uint64(start) & (maxPackedValue - 1)) |
+		((uint64(max) & (maxPackedValue - 1)) << logMaxPackedValue) |
+		((uint64(end) & (maxPackedValue - 1)) << (2 * logMaxPackedValue)))
+}
+
+// start extracts the start value from a packed sum.
+func (p pallocSum) start() uint {
+	if uint64(p)&uint64(1<<63) != 0 {
+		return maxPackedValue
+	}
+	return uint(uint64(p) & (maxPackedValue - 1))
+}
+
+// max extracts the max value from a packed sum.
+func (p pallocSum) max() uint {
+	if uint64(p)&uint64(1<<63) != 0 {
+		return maxPackedValue
+	}
+	return uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1))
+}
+
+// end extracts the end value from a packed sum.
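+// (It sits in bits 42..62: with the 64-bit parameters above,
+// logMaxPackedValue is 9 + 4*3 = 21, so a sum packs
+// start | max<<21 | end<<42, and bit 63 flags the all-free case.)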
+func (p pallocSum) end() uint {
+	if uint64(p)&uint64(1<<63) != 0 {
+		return maxPackedValue
+	}
+	return uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
+}
+
+// unpack unpacks all three values from the summary.
+func (p pallocSum) unpack() (uint, uint, uint) {
+	if uint64(p)&uint64(1<<63) != 0 {
+		return maxPackedValue, maxPackedValue, maxPackedValue
+	}
+	return uint(uint64(p) & (maxPackedValue - 1)),
+		uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1)),
+		uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
+}
+
+// mergeSummaries merges consecutive summaries which may each represent at
+// most 1 << logMaxPagesPerSum pages each together into one.
+func mergeSummaries(sums []pallocSum, logMaxPagesPerSum uint) pallocSum {
+	// Merge the summaries in sums into one.
+	//
+	// We do this by keeping a running summary representing the merged
+	// summaries of sums[:i] in start, most, and end.
+	start, most, end := sums[0].unpack()
+	for i := 1; i < len(sums); i++ {
+		// Merge in sums[i].
+		si, mi, ei := sums[i].unpack()
+
+		// Merge in sums[i].start only if the running summary is
+		// completely free, otherwise this summary's start
+		// plays no role in the combined sum.
+		if start == uint(i)<<logMaxPagesPerSum {
+			start += si
+		}
+
+		// Recompute the max by considering the running max, sums[i]'s
+		// max, and the run formed by the running end concatenated with
+		// sums[i]'s start.
+		if end+si > most {
+			most = end + si
+		}
+		if mi > most {
+			most = mi
+		}
+
+		// Merge in the end: if sums[i] is completely free, the running
+		// end run extends by the whole summary, otherwise it restarts
+		// at sums[i]'s end.
+		if ei == 1<<logMaxPagesPerSum {
+			end += 1 << logMaxPagesPerSum
+		} else {
+			end = ei
+		}
+	}
+	return packPallocSum(start, most, end)
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/mpagealloc_32bit.go b/platform/dbops/binaries/go/go/src/runtime/mpagealloc_32bit.go
new file mode 100644
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/mpagealloc_32bit.go
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || arm || mips || mipsle || wasm
+
+package runtime
+
+import "unsafe"
+
+const (
+	// The number of levels in the radix tree.
+	summaryLevels = 4
+
+	// Constants for testing.
+	pageAlloc32Bit = 1
+	pageAlloc64Bit = 0
+
+	// Number of bits needed to represent all indices into the L1 of the
+	// chunks map.
+	//
+	// See (*pageAlloc).chunks for more details. Update the documentation
+	// there should this number change.
+	pallocChunksL1Bits = 0
+)
+
+// See comments in mpagealloc_64bit.go.
+var levelBits = [summaryLevels]uint{
+	summaryL0Bits,
+	summaryLevelBits,
+	summaryLevelBits,
+	summaryLevelBits,
+}
+
+// See comments in mpagealloc_64bit.go.
+var levelShift = [summaryLevels]uint{
+	heapAddrBits - summaryL0Bits,
+	heapAddrBits - summaryL0Bits - 1*summaryLevelBits,
+	heapAddrBits - summaryL0Bits - 2*summaryLevelBits,
+	heapAddrBits - summaryL0Bits - 3*summaryLevelBits,
+}
+
+// See comments in mpagealloc_64bit.go.
+var levelLogPages = [summaryLevels]uint{
+	logPallocChunkPages + 3*summaryLevelBits,
+	logPallocChunkPages + 2*summaryLevelBits,
+	logPallocChunkPages + 1*summaryLevelBits,
+	logPallocChunkPages,
+}
+
+// scavengeIndexArray is the backing store for p.scav.index.chunks.
+// On 32-bit platforms, it's small enough to just be a global.
+var scavengeIndexArray [(1 << heapAddrBits) / pallocChunkBytes]atomicScavChunkData
+
+// See mpagealloc_64bit.go for details.
+func (p *pageAlloc) sysInit(test bool) {
+	// Calculate how much memory all our entries will take up.
+	//
+	// This should be around 12 KiB or less.
+	totalSize := uintptr(0)
+	for l := 0; l < summaryLevels; l++ {
+		totalSize += (uintptr(1) << (heapAddrBits - levelShift[l])) * pallocSumBytes
+	}
+	totalSize = alignUp(totalSize, physPageSize)
+
+	// Reserve memory for all levels in one go. There shouldn't be much for 32-bit.
+	reservation := sysReserve(nil, totalSize)
+	if reservation == nil {
+		throw("failed to reserve page summary memory")
+	}
+
+	// There isn't much, so just map it and mark it as used immediately.
+	sysMap(reservation, totalSize, p.sysStat)
+	sysUsed(reservation, totalSize, totalSize)
+	p.summaryMappedReady += totalSize
+
+	// Iterate over the reservation and cut it up into slices.
+	//
+	// Maintain offset as the byte offset from reservation where
+	// the next level's slice should start.
+	offset := uintptr(0)
+	for l, shift := range levelShift {
+		entries := 1 << (heapAddrBits - shift)
+
+		// Compute the slice for this level.
+		sl := notInHeapSlice{(*notInHeap)(add(reservation, offset)), 0, entries}
+		p.summary[l] = *(*[]pallocSum)(unsafe.Pointer(&sl))
+
+		// Update the offset for the next level.
+		offset += uintptr(entries) * pallocSumBytes
+	}
+}
+
+// See mpagealloc_64bit.go for details.
+func (p *pageAlloc) sysGrow(base, limit uintptr) {
+	if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
+		print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
+		throw("sysGrow bounds not aligned to pallocChunkBytes")
+	}
+
+	// Walk up the radix tree and map summaries in as needed.
+	for l := len(p.summary) - 1; l >= 0; l-- {
+		// Figure out what part of the summary array this new address space needs.
+		// Note that we need to align the ranges to the block width (1<<levelBits[l])
+		// at each level to avoid a mistake where we may not map enough for the
+		// summary at the level above.
+		lo, hi := addrsToSummaryRange(l, base, limit)
+		_, hi = blockAlignSummaryRange(l, lo, hi)
+		if hi > len(p.summary[l]) {
+			p.summary[l] = p.summary[l][:hi]
+		}
+	}
+}
+
+// sysInit initializes the scavengeIndex' chunks array.
+//
+// Returns the amount of memory added to sysStat.
+func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) (mappedReady uintptr) {
+	if test {
+		// Set up the scavenge index via sysAlloc so the test can free it later.
+		scavIndexSize := uintptr(len(scavengeIndexArray)) * unsafe.Sizeof(atomicScavChunkData{})
+		s.chunks = ((*[(1 << heapAddrBits) / pallocChunkBytes]atomicScavChunkData)(sysAlloc(scavIndexSize, sysStat)))[:]
+		mappedReady = scavIndexSize
+	} else {
+		// Set up the scavenge index.
+		s.chunks = scavengeIndexArray[:]
+	}
+	s.min.Store(1) // The 0th chunk is never going to be mapped for the heap.
+	s.max.Store(uintptr(len(s.chunks)))
+	return
+}
+
+// sysGrow is a no-op on 32-bit platforms.
+func (s *scavengeIndex) sysGrow(base, limit uintptr, sysStat *sysMemStat) uintptr {
+	return 0
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/mpagealloc_64bit.go b/platform/dbops/binaries/go/go/src/runtime/mpagealloc_64bit.go
new file mode 100644
index 0000000000000000000000000000000000000000..36cd222360c3bad880f97c2dc18913c4c2efb1d2
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/mpagealloc_64bit.go
@@ -0,0 +1,255 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x
+
+package runtime
+
+import (
+	"unsafe"
+)
+
+const (
+	// The number of levels in the radix tree.
+	summaryLevels = 5
+
+	// Constants for testing.
+	pageAlloc32Bit = 0
+	pageAlloc64Bit = 1
+
+	// Number of bits needed to represent all indices into the L1 of the
+	// chunks map.
+	//
+	// See (*pageAlloc).chunks for more details. Update the documentation
+	// there should this number change.
+	pallocChunksL1Bits = 13
+)
+
+// levelBits is the number of bits in the radix for a given level in the super summary
+// structure.
+// +// The sum of all the entries of levelBits should equal heapAddrBits. +var levelBits = [summaryLevels]uint{ + summaryL0Bits, + summaryLevelBits, + summaryLevelBits, + summaryLevelBits, + summaryLevelBits, +} + +// levelShift is the number of bits to shift to acquire the radix for a given level +// in the super summary structure. +// +// With levelShift, one can compute the index of the summary at level l related to a +// pointer p by doing: +// +// p >> levelShift[l] +var levelShift = [summaryLevels]uint{ + heapAddrBits - summaryL0Bits, + heapAddrBits - summaryL0Bits - 1*summaryLevelBits, + heapAddrBits - summaryL0Bits - 2*summaryLevelBits, + heapAddrBits - summaryL0Bits - 3*summaryLevelBits, + heapAddrBits - summaryL0Bits - 4*summaryLevelBits, +} + +// levelLogPages is log2 the maximum number of runtime pages in the address space +// a summary in the given level represents. +// +// The leaf level always represents exactly log2 of 1 chunk's worth of pages. +var levelLogPages = [summaryLevels]uint{ + logPallocChunkPages + 4*summaryLevelBits, + logPallocChunkPages + 3*summaryLevelBits, + logPallocChunkPages + 2*summaryLevelBits, + logPallocChunkPages + 1*summaryLevelBits, + logPallocChunkPages, +} + +// sysInit performs architecture-dependent initialization of fields +// in pageAlloc. pageAlloc should be uninitialized except for sysStat +// if any runtime statistic should be updated. +func (p *pageAlloc) sysInit(test bool) { + // Reserve memory for each level. This will get mapped in + // as R/W by setArenas. + for l, shift := range levelShift { + entries := 1 << (heapAddrBits - shift) + + // Reserve b bytes of memory anywhere in the address space. + b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize) + r := sysReserve(nil, b) + if r == nil { + throw("failed to reserve page summary memory") + } + + // Put this reservation into a slice. + sl := notInHeapSlice{(*notInHeap)(r), 0, entries} + p.summary[l] = *(*[]pallocSum)(unsafe.Pointer(&sl)) + } +} + +// sysGrow performs architecture-dependent operations on heap +// growth for the page allocator, such as mapping in new memory +// for summaries. It also updates the length of the slices in +// p.summary. +// +// base is the base of the newly-added heap memory and limit is +// the first address past the end of the newly-added heap memory. +// Both must be aligned to pallocChunkBytes. +// +// The caller must update p.start and p.end after calling sysGrow. +func (p *pageAlloc) sysGrow(base, limit uintptr) { + if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 { + print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n") + throw("sysGrow bounds not aligned to pallocChunkBytes") + } + + // addrRangeToSummaryRange converts a range of addresses into a range + // of summary indices which must be mapped to support those addresses + // in the summary range. + addrRangeToSummaryRange := func(level int, r addrRange) (int, int) { + sumIdxBase, sumIdxLimit := addrsToSummaryRange(level, r.base.addr(), r.limit.addr()) + return blockAlignSummaryRange(level, sumIdxBase, sumIdxLimit) + } + + // summaryRangeToSumAddrRange converts a range of indices in any + // level of p.summary into page-aligned addresses which cover that + // range of indices. 
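+// (The indices are rounded out to physPageSize boundaries so the result
+// can be handed directly to sysMap and sysUsed below.)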
+ summaryRangeToSumAddrRange := func(level, sumIdxBase, sumIdxLimit int) addrRange { + baseOffset := alignDown(uintptr(sumIdxBase)*pallocSumBytes, physPageSize) + limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize) + base := unsafe.Pointer(&p.summary[level][0]) + return addrRange{ + offAddr{uintptr(add(base, baseOffset))}, + offAddr{uintptr(add(base, limitOffset))}, + } + } + + // addrRangeToSumAddrRange is a convenience function that converts + // an address range r to the address range of the given summary level + // that stores the summaries for r. + addrRangeToSumAddrRange := func(level int, r addrRange) addrRange { + sumIdxBase, sumIdxLimit := addrRangeToSummaryRange(level, r) + return summaryRangeToSumAddrRange(level, sumIdxBase, sumIdxLimit) + } + + // Find the first inUse index which is strictly greater than base. + // + // Because this function will never be asked remap the same memory + // twice, this index is effectively the index at which we would insert + // this new growth, and base will never overlap/be contained within + // any existing range. + // + // This will be used to look at what memory in the summary array is already + // mapped before and after this new range. + inUseIndex := p.inUse.findSucc(base) + + // Walk up the radix tree and map summaries in as needed. + for l := range p.summary { + // Figure out what part of the summary array this new address space needs. + needIdxBase, needIdxLimit := addrRangeToSummaryRange(l, makeAddrRange(base, limit)) + + // Update the summary slices with a new upper-bound. This ensures + // we get tight bounds checks on at least the top bound. + // + // We must do this regardless of whether we map new memory. + if needIdxLimit > len(p.summary[l]) { + p.summary[l] = p.summary[l][:needIdxLimit] + } + + // Compute the needed address range in the summary array for level l. + need := summaryRangeToSumAddrRange(l, needIdxBase, needIdxLimit) + + // Prune need down to what needs to be newly mapped. Some parts of it may + // already be mapped by what inUse describes due to page alignment requirements + // for mapping. Because this function will never be asked to remap the same + // memory twice, it should never be possible to prune in such a way that causes + // need to be split. + if inUseIndex > 0 { + need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex-1])) + } + if inUseIndex < len(p.inUse.ranges) { + need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex])) + } + // It's possible that after our pruning above, there's nothing new to map. + if need.size() == 0 { + continue + } + + // Map and commit need. + sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat) + sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size()) + p.summaryMappedReady += need.size() + } + + // Update the scavenge index. + p.summaryMappedReady += p.scav.index.sysGrow(base, limit, p.sysStat) +} + +// sysGrow increases the index's backing store in response to a heap growth. +// +// Returns the amount of memory added to sysStat. +func (s *scavengeIndex) sysGrow(base, limit uintptr, sysStat *sysMemStat) uintptr { + if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 { + print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n") + throw("sysGrow bounds not aligned to pallocChunkBytes") + } + scSize := unsafe.Sizeof(atomicScavChunkData{}) + // Map and commit the pieces of chunks that we need. 
+	//
+	// We always map the full range of the minimum heap address to the
+	// maximum heap address. We don't do this for the summary structure
+	// because it's quite large and a discontiguous heap could cause a
+	// lot of memory to be used. In this situation, the worst case overhead
+	// is in the single-digit MiB if we map the whole thing.
+	//
+	// The base address of the backing store is always page-aligned,
+	// because it comes from the OS, so it's sufficient to align the
+	// index.
+	haveMin := s.min.Load()
+	haveMax := s.max.Load()
+	needMin := alignDown(uintptr(chunkIndex(base)), physPageSize/scSize)
+	needMax := alignUp(uintptr(chunkIndex(limit)), physPageSize/scSize)
+
+	// We need a contiguous range, so extend the range if there's no overlap.
+	if needMax < haveMin {
+		needMax = haveMin
+	}
+	if haveMax != 0 && needMin > haveMax {
+		needMin = haveMax
+	}
+
+	// Avoid a panic from indexing one past the last element.
+	chunksBase := uintptr(unsafe.Pointer(&s.chunks[0]))
+	have := makeAddrRange(chunksBase+haveMin*scSize, chunksBase+haveMax*scSize)
+	need := makeAddrRange(chunksBase+needMin*scSize, chunksBase+needMax*scSize)
+
+	// Subtract any overlap from rounding. We can't re-map memory because
+	// it'll be zeroed.
+	need = need.subtract(have)
+
+	// If we've got something to map, map it, and update the slice bounds.
+	if need.size() != 0 {
+		sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat)
+		sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
+		// Update the indices only after the new memory is valid.
+		if haveMax == 0 || needMin < haveMin {
+			s.min.Store(needMin)
+		}
+		if needMax > haveMax {
+			s.max.Store(needMax)
+		}
+	}
+	return need.size()
+}
+
+// sysInit initializes the scavengeIndex' chunks array.
+//
+// Returns the amount of memory added to sysStat.
+func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) uintptr {
+	n := uintptr(1<<heapAddrBits) / pallocChunkBytes
+	nbytes := n * unsafe.Sizeof(atomicScavChunkData{})
+	r := sysReserve(nil, nbytes)
+	sl := notInHeapSlice{(*notInHeap)(r), int(n), int(n)}
+	s.chunks = *(*[]atomicScavChunkData)(unsafe.Pointer(&sl))
+	return 0 // All memory above is mapped Reserved.
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/mpagealloc_test.go b/platform/dbops/binaries/go/go/src/runtime/mpagealloc_test.go
new file mode 100644
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/mpagealloc_test.go
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+	"fmt"
+	. "runtime"
+	"testing"
+)
+
+func checkPageAlloc(t *testing.T, want, got *PageAlloc) {
+	// Ensure start and end are correct.
+	wantStart, wantEnd := want.Bounds()
+	gotStart, gotEnd := got.Bounds()
+	if gotStart != wantStart {
+		t.Fatalf("start values not equal: got %d, want %d", gotStart, wantStart)
+	}
+	if gotEnd != wantEnd {
+		t.Fatalf("end values not equal: got %d, want %d", gotEnd, wantEnd)
+	}
+
+	for i := gotStart; i < gotEnd; i++ {
+		// Check the bitmaps. Note that we may have nil data.
+		gb, wb := got.PallocData(i), want.PallocData(i)
+		if gb == nil && wb == nil {
+			continue
+		}
+		if (gb == nil && wb != nil) || (gb != nil && wb == nil) {
+			t.Errorf("chunk %d nilness mismatch", i)
+		}
+		if !checkPallocBits(t, gb.PallocBits(), wb.PallocBits()) {
+			t.Logf("in chunk %d (mallocBits)", i)
+		}
+		if !checkPallocBits(t, gb.Scavenged(), wb.Scavenged()) {
+			t.Logf("in chunk %d (scavenged)", i)
+		}
+	}
+}
+
+func TestPageAllocExhaust(t *testing.T) {
+	if GOOS == "openbsd" && testing.Short() {
+		t.Skip("skipping because virtual memory is limited; see #36210")
+	}
+	for _, npages := range []uintptr{1, 2, 3, 4, 5, 8, 16, 64, 1024, 1025, 2048, 2049} {
+		npages := npages
+		t.Run(fmt.Sprintf("%d", npages), func(t *testing.T) {
+			// Construct b.
+			bDesc := make(map[ChunkIdx][]BitRange)
+			for i := ChunkIdx(0); i < 4; i++ {
+				bDesc[BaseChunkIdx+i] = []BitRange{}
+			}
+			b := NewPageAlloc(bDesc, nil)
+			defer FreePageAlloc(b)
+
+			// Allocate into b with npages until we've exhausted the heap.
+			nAlloc := (PallocChunkPages * 4) / int(npages)
+			for i := 0; i < nAlloc; i++ {
+				addr := PageBase(BaseChunkIdx, uint(i)*uint(npages))
+				if a, _ := b.Alloc(npages); a != addr {
+					t.Fatalf("bad alloc #%d: want 0x%x, got 0x%x", i+1, addr, a)
+				}
+			}
+
+			// Check to make sure the next allocation fails.
+			if a, _ := b.Alloc(npages); a != 0 {
+				t.Fatalf("bad alloc #%d: want 0, got 0x%x", nAlloc+1, a)
+			}
+
+			// Construct what we want the heap to look like now.
+			allocPages := nAlloc * int(npages)
+			wantDesc := make(map[ChunkIdx][]BitRange)
+			for i := ChunkIdx(0); i < 4; i++ {
+				if allocPages >= PallocChunkPages {
+					wantDesc[BaseChunkIdx+i] = []BitRange{{0, PallocChunkPages}}
+					allocPages -= PallocChunkPages
+				} else if allocPages > 0 {
+					wantDesc[BaseChunkIdx+i] = []BitRange{{0, uint(allocPages)}}
+					allocPages = 0
+				} else {
+					wantDesc[BaseChunkIdx+i] = []BitRange{}
+				}
+			}
+			want := NewPageAlloc(wantDesc, nil)
+			defer FreePageAlloc(want)
+
+			// Check to make sure the heap b matches what we want.
+ checkPageAlloc(t, want, b) + }) + } +} + +func TestPageAllocFree(t *testing.T) { + if GOOS == "openbsd" && testing.Short() { + t.Skip("skipping because virtual memory is limited; see #36210") + } + tests := map[string]struct { + before map[ChunkIdx][]BitRange + after map[ChunkIdx][]BitRange + npages uintptr + frees []uintptr + }{ + "Free1": { + npages: 1, + before: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + }, + frees: []uintptr{ + PageBase(BaseChunkIdx, 0), + PageBase(BaseChunkIdx, 1), + PageBase(BaseChunkIdx, 2), + PageBase(BaseChunkIdx, 3), + PageBase(BaseChunkIdx, 4), + }, + after: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{5, PallocChunkPages - 5}}, + }, + }, + "ManyArena1": { + npages: 1, + before: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + BaseChunkIdx + 1: {{0, PallocChunkPages}}, + BaseChunkIdx + 2: {{0, PallocChunkPages}}, + }, + frees: []uintptr{ + PageBase(BaseChunkIdx, PallocChunkPages/2), + PageBase(BaseChunkIdx+1, 0), + PageBase(BaseChunkIdx+2, PallocChunkPages-1), + }, + after: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages / 2}, {PallocChunkPages/2 + 1, PallocChunkPages/2 - 1}}, + BaseChunkIdx + 1: {{1, PallocChunkPages - 1}}, + BaseChunkIdx + 2: {{0, PallocChunkPages - 1}}, + }, + }, + "Free2": { + npages: 2, + before: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + }, + frees: []uintptr{ + PageBase(BaseChunkIdx, 0), + PageBase(BaseChunkIdx, 2), + PageBase(BaseChunkIdx, 4), + PageBase(BaseChunkIdx, 6), + PageBase(BaseChunkIdx, 8), + }, + after: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{10, PallocChunkPages - 10}}, + }, + }, + "Straddle2": { + npages: 2, + before: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{PallocChunkPages - 1, 1}}, + BaseChunkIdx + 1: {{0, 1}}, + }, + frees: []uintptr{ + PageBase(BaseChunkIdx, PallocChunkPages-1), + }, + after: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + BaseChunkIdx + 1: {}, + }, + }, + "Free5": { + npages: 5, + before: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + }, + frees: []uintptr{ + PageBase(BaseChunkIdx, 0), + PageBase(BaseChunkIdx, 5), + PageBase(BaseChunkIdx, 10), + PageBase(BaseChunkIdx, 15), + PageBase(BaseChunkIdx, 20), + }, + after: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{25, PallocChunkPages - 25}}, + }, + }, + "Free64": { + npages: 64, + before: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + }, + frees: []uintptr{ + PageBase(BaseChunkIdx, 0), + PageBase(BaseChunkIdx, 64), + PageBase(BaseChunkIdx, 128), + }, + after: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{192, PallocChunkPages - 192}}, + }, + }, + "Free65": { + npages: 65, + before: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + }, + frees: []uintptr{ + PageBase(BaseChunkIdx, 0), + PageBase(BaseChunkIdx, 65), + PageBase(BaseChunkIdx, 130), + }, + after: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{195, PallocChunkPages - 195}}, + }, + }, + "FreePallocChunkPages": { + npages: PallocChunkPages, + before: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + }, + frees: []uintptr{ + PageBase(BaseChunkIdx, 0), + }, + after: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + }, + }, + "StraddlePallocChunkPages": { + npages: PallocChunkPages, + before: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{PallocChunkPages / 2, PallocChunkPages / 2}}, + BaseChunkIdx + 1: {{0, PallocChunkPages / 2}}, + }, + frees: []uintptr{ + PageBase(BaseChunkIdx, PallocChunkPages/2), + }, + after: map[ChunkIdx][]BitRange{ + BaseChunkIdx: 
{}, + BaseChunkIdx + 1: {}, + }, + }, + "StraddlePallocChunkPages+1": { + npages: PallocChunkPages + 1, + before: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + BaseChunkIdx + 1: {{0, PallocChunkPages}}, + }, + frees: []uintptr{ + PageBase(BaseChunkIdx, PallocChunkPages/2), + }, + after: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages / 2}}, + BaseChunkIdx + 1: {{PallocChunkPages/2 + 1, PallocChunkPages/2 - 1}}, + }, + }, + "FreePallocChunkPages*2": { + npages: PallocChunkPages * 2, + before: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + BaseChunkIdx + 1: {{0, PallocChunkPages}}, + }, + frees: []uintptr{ + PageBase(BaseChunkIdx, 0), + }, + after: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + BaseChunkIdx + 1: {}, + }, + }, + "StraddlePallocChunkPages*2": { + npages: PallocChunkPages * 2, + before: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + BaseChunkIdx + 1: {{0, PallocChunkPages}}, + BaseChunkIdx + 2: {{0, PallocChunkPages}}, + }, + frees: []uintptr{ + PageBase(BaseChunkIdx, PallocChunkPages/2), + }, + after: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages / 2}}, + BaseChunkIdx + 1: {}, + BaseChunkIdx + 2: {{PallocChunkPages / 2, PallocChunkPages / 2}}, + }, + }, + "AllFreePallocChunkPages*7+5": { + npages: PallocChunkPages*7 + 5, + before: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {{0, PallocChunkPages}}, + BaseChunkIdx + 1: {{0, PallocChunkPages}}, + BaseChunkIdx + 2: {{0, PallocChunkPages}}, + BaseChunkIdx + 3: {{0, PallocChunkPages}}, + BaseChunkIdx + 4: {{0, PallocChunkPages}}, + BaseChunkIdx + 5: {{0, PallocChunkPages}}, + BaseChunkIdx + 6: {{0, PallocChunkPages}}, + BaseChunkIdx + 7: {{0, PallocChunkPages}}, + }, + frees: []uintptr{ + PageBase(BaseChunkIdx, 0), + }, + after: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + BaseChunkIdx + 1: {}, + BaseChunkIdx + 2: {}, + BaseChunkIdx + 3: {}, + BaseChunkIdx + 4: {}, + BaseChunkIdx + 5: {}, + BaseChunkIdx + 6: {}, + BaseChunkIdx + 7: {{5, PallocChunkPages - 5}}, + }, + }, + } + for name, v := range tests { + v := v + t.Run(name, func(t *testing.T) { + b := NewPageAlloc(v.before, nil) + defer FreePageAlloc(b) + + for _, addr := range v.frees { + b.Free(addr, v.npages) + } + want := NewPageAlloc(v.after, nil) + defer FreePageAlloc(want) + + checkPageAlloc(t, want, b) + }) + } +} + +func TestPageAllocAllocAndFree(t *testing.T) { + if GOOS == "openbsd" && testing.Short() { + t.Skip("skipping because virtual memory is limited; see #36210") + } + type hit struct { + alloc bool + npages uintptr + base uintptr + } + tests := map[string]struct { + init map[ChunkIdx][]BitRange + hits []hit + }{ + // TODO(mknyszek): Write more tests here. 
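+		// Chunks8 allocates the whole eight-chunk heap, frees it, and
+		// repeats (with a single-page allocation in between), checking
+		// that allocation always restarts at the base address.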
+ "Chunks8": { + init: map[ChunkIdx][]BitRange{ + BaseChunkIdx: {}, + BaseChunkIdx + 1: {}, + BaseChunkIdx + 2: {}, + BaseChunkIdx + 3: {}, + BaseChunkIdx + 4: {}, + BaseChunkIdx + 5: {}, + BaseChunkIdx + 6: {}, + BaseChunkIdx + 7: {}, + }, + hits: []hit{ + {true, PallocChunkPages * 8, PageBase(BaseChunkIdx, 0)}, + {false, PallocChunkPages * 8, PageBase(BaseChunkIdx, 0)}, + {true, PallocChunkPages * 8, PageBase(BaseChunkIdx, 0)}, + {false, PallocChunkPages * 8, PageBase(BaseChunkIdx, 0)}, + {true, PallocChunkPages * 8, PageBase(BaseChunkIdx, 0)}, + {false, PallocChunkPages * 8, PageBase(BaseChunkIdx, 0)}, + {true, 1, PageBase(BaseChunkIdx, 0)}, + {false, 1, PageBase(BaseChunkIdx, 0)}, + {true, PallocChunkPages * 8, PageBase(BaseChunkIdx, 0)}, + }, + }, + } + for name, v := range tests { + v := v + t.Run(name, func(t *testing.T) { + b := NewPageAlloc(v.init, nil) + defer FreePageAlloc(b) + + for iter, i := range v.hits { + if i.alloc { + if a, _ := b.Alloc(i.npages); a != i.base { + t.Fatalf("bad alloc #%d: want 0x%x, got 0x%x", iter+1, i.base, a) + } + } else { + b.Free(i.base, i.npages) + } + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mpagecache.go b/platform/dbops/binaries/go/go/src/runtime/mpagecache.go new file mode 100644 index 0000000000000000000000000000000000000000..245b0cbfef2cf40165338cd71377c1dc24345b8d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mpagecache.go @@ -0,0 +1,183 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "runtime/internal/sys" + "unsafe" +) + +const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache) + +// pageCache represents a per-p cache of pages the allocator can +// allocate from without a lock. More specifically, it represents +// a pageCachePages*pageSize chunk of memory with 0 or more free +// pages in it. +type pageCache struct { + base uintptr // base address of the chunk + cache uint64 // 64-bit bitmap representing free pages (1 means free) + scav uint64 // 64-bit bitmap representing scavenged pages (1 means scavenged) +} + +// empty reports whether the page cache has no free pages. +func (c *pageCache) empty() bool { + return c.cache == 0 +} + +// alloc allocates npages from the page cache and is the main entry +// point for allocation. +// +// Returns a base address and the amount of scavenged memory in the +// allocated region in bytes. +// +// Returns a base address of zero on failure, in which case the +// amount of scavenged memory should be ignored. +func (c *pageCache) alloc(npages uintptr) (uintptr, uintptr) { + if c.cache == 0 { + return 0, 0 + } + if npages == 1 { + i := uintptr(sys.TrailingZeros64(c.cache)) + scav := (c.scav >> i) & 1 + c.cache &^= 1 << i // set bit to mark in-use + c.scav &^= 1 << i // clear bit to mark unscavenged + return c.base + i*pageSize, uintptr(scav) * pageSize + } + return c.allocN(npages) +} + +// allocN is a helper which attempts to allocate npages worth of pages +// from the cache. It represents the general case for allocating from +// the page cache. +// +// Returns a base address and the amount of scavenged memory in the +// allocated region in bytes. 
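+// (Hypothetical walk-through, assuming a cache word 0b1110 and npages = 2:
+// findBitRange64 returns bit index 1, so the pages at base+pageSize and
+// base+2*pageSize are handed out and their cache bits cleared.)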
+func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
+	i := findBitRange64(c.cache, uint(npages))
+	if i >= 64 {
+		return 0, 0
+	}
+	mask := ((uint64(1) << npages) - 1) << i
+	scav := sys.OnesCount64(c.scav & mask)
+	c.cache &^= mask // mark in-use bits
+	c.scav &^= mask  // clear scavenged bits
+	return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
+}
+
+// flush empties out unallocated free pages in the given cache
+// into p. Then, it clears the cache, such that empty returns
+// true.
+//
+// p.mheapLock must be held.
+//
+// Must run on the system stack because p.mheapLock must be held.
+//
+//go:systemstack
+func (c *pageCache) flush(p *pageAlloc) {
+	assertLockHeld(p.mheapLock)
+
+	if c.empty() {
+		return
+	}
+	ci := chunkIndex(c.base)
+	pi := chunkPageIndex(c.base)
+
+	// This method is called very infrequently, so just do the
+	// slower, safer thing by iterating over each bit individually.
+	for i := uint(0); i < 64; i++ {
+		if c.cache&(1<<i) != 0 {
+			p.chunkOf(ci).free1(pi + i)
+
+			// Update density statistics.
+			p.scav.index.free(ci, pi+i, 1)
+		}
+		if c.scav&(1<<i) != 0 {
+			p.chunkOf(ci).scavenged.setRange(pi+i, 1)
+		}
+	}
+
+	// Since this is a lot like a free, we need to make sure
+	// we update the searchAddr just like free does.
+	if b := (offAddr{c.base}); b.lessThan(p.searchAddr) {
+		p.searchAddr = b
+	}
+	p.update(c.base, pageCachePages, false, false)
+	*c = pageCache{}
+}
+
+// allocToCache acquires a pageCachePages-aligned chunk of free pages which
+// may not be contiguous, and returns a pageCache structure which owns the
+// chunk.
+//
+// p.mheapLock must be held.
+//
+// Must run on the system stack because p.mheapLock must be held.
+//
+//go:systemstack
+func (p *pageAlloc) allocToCache() pageCache {
+	assertLockHeld(p.mheapLock)
+
+	// If the searchAddr refers to a region which has a higher address than
+	// any known chunk, then we know we're out of memory.
+	if chunkIndex(p.searchAddr.addr()) >= p.end {
+		return pageCache{}
+	}
+	c := pageCache{}
+	ci := chunkIndex(p.searchAddr.addr()) // chunk index
+	var chunk *pallocData
+	if p.summary[len(p.summary)-1][ci] != 0 {
+		// Fast path: there's free pages at or near the searchAddr address.
+		chunk = p.chunkOf(ci)
+		j, _ := chunk.find(1, chunkPageIndex(p.searchAddr.addr()))
+		if j == ^uint(0) {
+			throw("bad summary data")
+		}
+		c = pageCache{
+			base:  chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
+			cache: ^chunk.pages64(j),
+			scav:  chunk.scavenged.block64(j),
+		}
+	} else {
+		// Slow path: the searchAddr address had nothing there, so go find
+		// the first free page the slow way.
+		addr, _ := p.find(1)
+		if addr == 0 {
+			// We failed to find adequate free space, so mark the searchAddr as OoM
+			// and return an empty pageCache.
+			p.searchAddr = maxSearchAddr()
+			return pageCache{}
+		}
+		ci = chunkIndex(addr)
+		chunk = p.chunkOf(ci)
+		c = pageCache{
+			base:  alignDown(addr, 64*pageSize),
+			cache: ^chunk.pages64(chunkPageIndex(addr)),
+			scav:  chunk.scavenged.block64(chunkPageIndex(addr)),
+		}
+	}
+
+	// Set the page bits as allocated and clear the scavenged bits, but
+	// be careful to only set and clear the relevant bits.
+	cpi := chunkPageIndex(c.base)
+	chunk.allocPages64(cpi, c.cache)
+	chunk.scavenged.clearBlock64(cpi, c.cache&c.scav /* free and scavenged */)
+
+	// Update as an allocation, but note that it's not contiguous.
+	p.update(c.base, pageCachePages, false, true)
+
+	// Update density statistics.
+	p.scav.index.alloc(ci, uint(sys.OnesCount64(c.cache)))
+
+	// Set the search address to the last page represented by the cache.
+	// Since all of the pages in this block are going to the cache, and we
+	// searched for the first free page, we can confidently start at the
+	// next page.
+	//
+	// However, p.searchAddr is not allowed to point into unmapped heap memory
+	// unless it is maxSearchAddr, so make it the last page as opposed to
+	// the page after.
+	p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
+	return c
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/mpagecache_test.go b/platform/dbops/binaries/go/go/src/runtime/mpagecache_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..6cb0620f7b883c7c27eeec9cedf73d6027dc8c7b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/mpagecache_test.go
@@ -0,0 +1,424 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+	"internal/goos"
+	"math/rand"
+	. "runtime"
+	"testing"
+)
+
+func checkPageCache(t *testing.T, got, want PageCache) {
+	if got.Base() != want.Base() {
+		t.Errorf("bad pageCache base: got 0x%x, want 0x%x", got.Base(), want.Base())
+	}
+	if got.Cache() != want.Cache() {
+		t.Errorf("bad pageCache bits: got %016x, want %016x", got.Cache(), want.Cache())
+	}
+	if got.Scav() != want.Scav() {
+		t.Errorf("bad pageCache scav: got %016x, want %016x", got.Scav(), want.Scav())
+	}
+}
+
+func TestPageCacheAlloc(t *testing.T) {
+	base := PageBase(BaseChunkIdx, 0)
+	type hit struct {
+		npages uintptr
+		base   uintptr
+		scav   uintptr
+	}
+	tests := map[string]struct {
+		cache PageCache
+		hits  []hit
+	}{
+		"Empty": {
+			cache: NewPageCache(base, 0, 0),
+			hits: []hit{
+				{1, 0, 0},
+				{2, 0, 0},
+				{3, 0, 0},
+				{4, 0, 0},
+				{5, 0, 0},
+				{11, 0, 0},
+				{12, 0, 0},
+				{16, 0, 0},
+				{27, 0, 0},
+				{32, 0, 0},
+				{43, 0, 0},
+				{57, 0, 0},
+				{64, 0, 0},
+				{121, 0, 0},
+			},
+		},
+		"Lo1": {
+			cache: NewPageCache(base, 0x1, 0x1),
+			hits: []hit{
+				{1, base, PageSize},
+				{1, 0, 0},
+				{10, 0, 0},
+			},
+		},
+		"Hi1": {
+			cache: NewPageCache(base, 0x1<<63, 0x1),
+			hits: []hit{
+				{1, base + 63*PageSize, 0},
+				{1, 0, 0},
+				{10, 0, 0},
+			},
+		},
+		"Swiss1": {
+			cache: NewPageCache(base, 0x20005555, 0x5505),
+			hits: []hit{
+				{2, 0, 0},
+				{1, base, PageSize},
+				{1, base + 2*PageSize, PageSize},
+				{1, base + 4*PageSize, 0},
+				{1, base + 6*PageSize, 0},
+				{1, base + 8*PageSize, PageSize},
+				{1, base + 10*PageSize, PageSize},
+				{1, base + 12*PageSize, PageSize},
+				{1, base + 14*PageSize, PageSize},
+				{1, base + 29*PageSize, 0},
+				{1, 0, 0},
+				{10, 0, 0},
+			},
+		},
+		"Lo2": {
+			cache: NewPageCache(base, 0x3, 0x2<<62),
+			hits: []hit{
+				{2, base, 0},
+				{2, 0, 0},
+				{1, 0, 0},
+			},
+		},
+		"Hi2": {
+			cache: NewPageCache(base, 0x3<<62, 0x3<<62),
+			hits: []hit{
+				{2, base + 62*PageSize, 2 * PageSize},
+				{2, 0, 0},
+				{1, 0, 0},
+			},
+		},
+		"Swiss2": {
+			cache: NewPageCache(base, 0x3333<<31, 0x3030<<31),
+			hits: []hit{
+				{2, base + 31*PageSize, 0},
+				{2, base + 35*PageSize, 2 * PageSize},
+				{2, base + 39*PageSize, 0},
+				{2, base + 43*PageSize, 2 * PageSize},
+				{2, 0, 0},
+			},
+		},
+		"Hi53": {
+			cache: NewPageCache(base, ((uint64(1)<<53)-1)<<10, ((uint64(1)<<16)-1)<<10),
+			hits: []hit{
+				{53, base + 10*PageSize, 16 * PageSize},
+				{53, 0, 0},
+				{1, 0, 0},
+			},
+		},
+		"Full53": {
+			cache: NewPageCache(base, ^uint64(0), ((uint64(1)<<16)-1)<<10),
+			hits: []hit{
+				{53, base, 16 * PageSize},
+				{53, 0, 0},
+				{1, base + 53*PageSize, 0},
+			},
+		},
+		"Full64": {
+			cache: NewPageCache(base, ^uint64(0), ^uint64(0)),
+			hits: []hit{
+				{64, base, 64 * PageSize},
+				{64, 0, 0},
+				{1, 0, 0},
+			},
+		},
+		"FullMixed": {
+			cache: NewPageCache(base, ^uint64(0), ^uint64(0)),
+			hits: []hit{
+				{5, base, 5 * PageSize},
+				{7, base + 5*PageSize, 7 * PageSize},
+				{1, base + 12*PageSize, 1 * PageSize},
+				{23, base + 13*PageSize, 23 * PageSize},
+				{63, 0, 0},
+				{3, base + 36*PageSize, 3 * PageSize},
+				{3, base + 39*PageSize, 3 * PageSize},
+				{3, base + 42*PageSize, 3 * PageSize},
+				{12, base + 45*PageSize, 12 * PageSize},
+				{11, 0, 0},
+				{4, base + 57*PageSize, 4 * PageSize},
+				{4, 0, 0},
+				{6, 0, 0},
+				{36, 0, 0},
+				{2, base + 61*PageSize, 2 * PageSize},
+				{3, 0, 0},
+				{1, base + 63*PageSize, 1 * PageSize},
+				{4, 0, 0},
+				{2, 0, 0},
+				{62, 0, 0},
+				{1, 0, 0},
+			},
+		},
+	}
+	for name, test := range tests {
+		test := test
+		t.Run(name, func(t *testing.T) {
+			c := test.cache
+			for i, h := range test.hits {
+				b, s := c.Alloc(h.npages)
+				if b != h.base {
+					t.Fatalf("bad alloc base #%d: got 0x%x, want 0x%x", i, b, h.base)
+				}
+				if s != h.scav {
+					t.Fatalf("bad alloc scav #%d: got %d, want %d", i, s, h.scav)
+				}
+			}
+		})
+	}
+}
+
+func TestPageCacheFlush(t *testing.T) {
+	if GOOS == "openbsd" && testing.Short() {
+		t.Skip("skipping because virtual memory is limited; see #36210")
+	}
+	bits64ToBitRanges := func(bits uint64, base uint) []BitRange {
+		var ranges []BitRange
+		start, size := uint(0), uint(0)
+		for i := 0; i < 64; i++ {
+			if bits&(1<<i) != 0 {
+				if size == 0 {
+					start = uint(i) + base
+				}
+				size++
+			} else {
+				if size != 0 {
+					ranges = append(ranges, BitRange{start, size})
+					size = 0
+				}
+			}
+		}
+		if size != 0 {
+			ranges = append(ranges, BitRange{start, size})
+		}
+		return ranges
+	}
[...]
+// pageBits is a bitmap representing one bit per page in a palloc chunk.
+type pageBits [pallocChunkPages / 64]uint64
+
+// get returns the value of the i'th bit in the bitmap.
+func (b *pageBits) get(i uint) uint {
+	return uint((b[i/64] >> (i % 64)) & 1)
+}
+
+// block64 returns the 64-bit aligned block of bits containing the i'th bit.
+func (b *pageBits) block64(i uint) uint64 {
+	return b[i/64]
+}
+
+// set sets bit i of pageBits.
+func (b *pageBits) set(i uint) {
+	b[i/64] |= 1 << (i % 64)
+}
+
+// setRange sets bits in the range [i, i+n).
+func (b *pageBits) setRange(i, n uint) {
+	_ = b[i/64]
+	if n == 1 {
+		// Fast path for the n == 1 case.
+		b.set(i)
+		return
+	}
+	// Set bits [i, j].
+	j := i + n - 1
+	if i/64 == j/64 {
+		b[i/64] |= ((uint64(1) << n) - 1) << (i % 64)
+		return
+	}
+	_ = b[j/64]
+	// Set leading bits.
+	b[i/64] |= ^uint64(0) << (i % 64)
+	for k := i/64 + 1; k < j/64; k++ {
+		b[k] = ^uint64(0)
+	}
+	// Set trailing bits.
+	b[j/64] |= (uint64(1) << (j%64 + 1)) - 1
+}
+
+// setAll sets all the bits of b.
+func (b *pageBits) setAll() {
+	for i := range b {
+		b[i] = ^uint64(0)
+	}
+}
+
+// setBlock64 sets, in the 64-bit aligned block of bits containing the i'th
+// bit, the bits that are set in v.
+func (b *pageBits) setBlock64(i uint, v uint64) {
+	b[i/64] |= v
+}
+
+// clear clears bit i of pageBits.
+func (b *pageBits) clear(i uint) {
+	b[i/64] &^= 1 << (i % 64)
+}
+
+// clearRange clears bits in the range [i, i+n).
+func (b *pageBits) clearRange(i, n uint) {
+	_ = b[i/64]
+	if n == 1 {
+		// Fast path for the n == 1 case.
+		b.clear(i)
+		return
+	}
+	// Clear bits [i, j].
+	j := i + n - 1
+	if i/64 == j/64 {
+		b[i/64] &^= ((uint64(1) << n) - 1) << (i % 64)
+		return
+	}
+	_ = b[j/64]
+	// Clear leading bits.
+	b[i/64] &^= ^uint64(0) << (i % 64)
+	for k := i/64 + 1; k < j/64; k++ {
+		b[k] = 0
+	}
+	// Clear trailing bits.
+	b[j/64] &^= (uint64(1) << (j%64 + 1)) - 1
+}
+
+// clearAll clears all the bits of b.
+func (b *pageBits) clearAll() {
+	for i := range b {
+		b[i] = 0
+	}
+}
+
+// clearBlock64 clears, in the 64-bit aligned block of bits containing the
+// i'th bit, the bits that are set in v.
+func (b *pageBits) clearBlock64(i uint, v uint64) {
+	b[i/64] &^= v
+}
+
+// popcntRange counts the number of set bits in the
+// range [i, i+n).
+func (b *pageBits) popcntRange(i, n uint) (s uint) {
+	if n == 1 {
+		return uint((b[i/64] >> (i % 64)) & 1)
+	}
+	_ = b[i/64]
+	j := i + n - 1
+	if i/64 == j/64 {
+		return uint(sys.OnesCount64((b[i/64] >> (i % 64)) & ((1 << n) - 1)))
+	}
+	_ = b[j/64]
+	s += uint(sys.OnesCount64(b[i/64] >> (i % 64)))
+	for k := i/64 + 1; k < j/64; k++ {
+		s += uint(sys.OnesCount64(b[k]))
+	}
+	s += uint(sys.OnesCount64(b[j/64] & ((1 << (j%64 + 1)) - 1)))
+	return
+}
+
+// pallocBits is a bitmap that tracks page allocations for at most one
+// palloc chunk.
+//
+// The precise representation is an implementation detail, but for the
+// sake of documentation, 0s are free pages and 1s are allocated pages.
+type pallocBits pageBits
+
+// summarize returns a packed summary of the bitmap in pallocBits.
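To make the packed summary concrete, here is a sketch in terms of the exported
test hooks exercised later in mpallocbits_test.go (PallocBits, AllocRange,
Summarize); the numbers are illustrative, not part of the patch:

	// In a runtime_test file:
	var b PallocBits      // all pages initially free
	b.AllocRange(10, 100) // allocate pages [10, 110)
	sum := b.Summarize()
	// sum.Start() == 10                     (leading free pages)
	// sum.Max() == PallocChunkPages - 110   (largest free run: the tail)
	// sum.End() == PallocChunkPages - 110   (trailing free pages)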
+func (b *pallocBits) summarize() pallocSum { + var start, most, cur uint + const notSetYet = ^uint(0) // sentinel for start value + start = notSetYet + for i := 0; i < len(b); i++ { + x := b[i] + if x == 0 { + cur += 64 + continue + } + t := uint(sys.TrailingZeros64(x)) + l := uint(sys.LeadingZeros64(x)) + + // Finish any region spanning the uint64s + cur += t + if start == notSetYet { + start = cur + } + most = max(most, cur) + // Final region that might span to next uint64 + cur = l + } + if start == notSetYet { + // Made it all the way through without finding a single 1 bit. + const n = uint(64 * len(b)) + return packPallocSum(n, n, n) + } + most = max(most, cur) + + if most >= 64-2 { + // There is no way an internal run of zeros could beat max. + return packPallocSum(start, most, cur) + } + // Now look inside each uint64 for runs of zeros. + // All uint64s must be nonzero, or we would have aborted above. +outer: + for i := 0; i < len(b); i++ { + x := b[i] + + // Look inside this uint64. We have a pattern like + // 000000 1xxxxx1 000000 + // We need to look inside the 1xxxxx1 for any contiguous + // region of zeros. + + // We already know the trailing zeros are no larger than max. Remove them. + x >>= sys.TrailingZeros64(x) & 63 + if x&(x+1) == 0 { // no more zeros (except at the top). + continue + } + + // Strategy: shrink all runs of zeros by max. If any runs of zero + // remain, then we've identified a larger maximum zero run. + p := most // number of zeros we still need to shrink by. + k := uint(1) // current minimum length of runs of ones in x. + for { + // Shrink all runs of zeros by p places (except the top zeros). + for p > 0 { + if p <= k { + // Shift p ones down into the top of each run of zeros. + x |= x >> (p & 63) + if x&(x+1) == 0 { // no more zeros (except at the top). + continue outer + } + break + } + // Shift k ones down into the top of each run of zeros. + x |= x >> (k & 63) + if x&(x+1) == 0 { // no more zeros (except at the top). + continue outer + } + p -= k + // We've just doubled the minimum length of 1-runs. + // This allows us to shift farther in the next iteration. + k *= 2 + } + + // The length of the lowest-order zero run is an increment to our maximum. + j := uint(sys.TrailingZeros64(^x)) // count contiguous trailing ones + x >>= j & 63 // remove trailing ones + j = uint(sys.TrailingZeros64(x)) // count contiguous trailing zeros + x >>= j & 63 // remove zeros + most += j // we have a new maximum! + if x&(x+1) == 0 { // no more zeros (except at the top). + continue outer + } + p = j // remove j more zeros from each zero run. + } + } + return packPallocSum(start, most, cur) +} + +// find searches for npages contiguous free pages in pallocBits and returns +// the index where that run starts, as well as the index of the first free page +// it found in the search. searchIdx represents the first known free page and +// where to begin the next search from. +// +// If find fails to find any free space, it returns an index of ^uint(0) and +// the new searchIdx should be ignored. +// +// Note that if npages == 1, the two returned values will always be identical. +func (b *pallocBits) find(npages uintptr, searchIdx uint) (uint, uint) { + if npages == 1 { + addr := b.find1(searchIdx) + return addr, addr + } else if npages <= 64 { + return b.findSmallN(npages, searchIdx) + } + return b.findLargeN(npages, searchIdx) +} + +// find1 is a helper for find which searches for a single free page +// in the pallocBits and returns the index. 
+//
+// See find for an explanation of the searchIdx parameter.
+func (b *pallocBits) find1(searchIdx uint) uint {
+	_ = b[0] // lift nil check out of loop
+	for i := searchIdx / 64; i < uint(len(b)); i++ {
+		x := b[i]
+		if ^x == 0 {
+			continue
+		}
+		return i*64 + uint(sys.TrailingZeros64(^x))
+	}
+	return ^uint(0)
+}
+
+// findSmallN is a helper for find which searches for npages contiguous free pages
+// in this pallocBits and returns the index where that run of contiguous pages
+// starts as well as the index of the first free page it finds in its search.
+//
+// See find for an explanation of the searchIdx parameter.
+//
+// Returns a ^uint(0) index on failure and the new searchIdx should be ignored.
+//
+// findSmallN assumes npages <= 64, where any such contiguous run of pages
+// crosses at most one aligned 64-bit boundary in the bits.
+func (b *pallocBits) findSmallN(npages uintptr, searchIdx uint) (uint, uint) {
+	end, newSearchIdx := uint(0), ^uint(0)
+	for i := searchIdx / 64; i < uint(len(b)); i++ {
+		bi := b[i]
+		if ^bi == 0 {
+			end = 0
+			continue
+		}
+		// First see if we can pack our allocation in the trailing
+		// zeros plus the end of the last 64 bits.
+		if newSearchIdx == ^uint(0) {
+			// The new searchIdx is going to be at these 64 bits after any
+			// 1s we find, so count trailing 1s.
+			newSearchIdx = i*64 + uint(sys.TrailingZeros64(^bi))
+		}
+		start := uint(sys.TrailingZeros64(bi))
+		if end+start >= uint(npages) {
+			return i*64 - end, newSearchIdx
+		}
+		// Next, check the interior of the 64-bit chunk.
+		j := findBitRange64(^bi, uint(npages))
+		if j < 64 {
+			return i*64 + j, newSearchIdx
+		}
+		end = uint(sys.LeadingZeros64(bi))
+	}
+	return ^uint(0), newSearchIdx
+}
+
+// findLargeN is a helper for find which searches for npages contiguous free pages
+// in this pallocBits and returns the index where that run starts, as well as the
+// index of the first free page it found in its search.
+//
+// See find for an explanation of the searchIdx parameter.
+//
+// Returns a ^uint(0) index on failure and the new searchIdx should be ignored.
+//
+// findLargeN assumes npages > 64, where any such run of free pages
+// crosses at least one aligned 64-bit boundary in the bits.
+func (b *pallocBits) findLargeN(npages uintptr, searchIdx uint) (uint, uint) {
+	start, size, newSearchIdx := ^uint(0), uint(0), ^uint(0)
+	for i := searchIdx / 64; i < uint(len(b)); i++ {
+		x := b[i]
+		if x == ^uint64(0) {
+			size = 0
+			continue
+		}
+		if newSearchIdx == ^uint(0) {
+			// The new searchIdx is going to be at these 64 bits after any
+			// 1s we find, so count trailing 1s.
+			newSearchIdx = i*64 + uint(sys.TrailingZeros64(^x))
+		}
+		if size == 0 {
+			size = uint(sys.LeadingZeros64(x))
+			start = i*64 + 64 - size
+			continue
+		}
+		s := uint(sys.TrailingZeros64(x))
+		if s+size >= uint(npages) {
+			size += s
+			return start, newSearchIdx
+		}
+		if s < 64 {
+			size = uint(sys.LeadingZeros64(x))
+			start = i*64 + 64 - size
+			continue
+		}
+		size += 64
+	}
+	if size < uint(npages) {
+		return ^uint(0), newSearchIdx
+	}
+	return start, newSearchIdx
+}
+
+// allocRange allocates the range [i, i+n).
+func (b *pallocBits) allocRange(i, n uint) {
+	(*pageBits)(b).setRange(i, n)
+}
+
+// allocAll allocates all the bits of b.
+func (b *pallocBits) allocAll() {
+	(*pageBits)(b).setAll()
+}
+
+// free1 frees a single page in the pallocBits at i.
+func (b *pallocBits) free1(i uint) {
+	(*pageBits)(b).clear(i)
+}
+
+// free frees the range [i, i+n) of pages in the pallocBits.
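As a quick round-trip illustration using the exported test wrappers that
mpallocbits_test.go relies on below (Find, AllocRange, Free); the values are
illustrative only:

	b := new(PallocBits)
	addr, _ := b.Find(4, 0) // 0: an empty bitmap satisfies any 4-page run
	b.AllocRange(addr, 4)   // pages [0, 4) are now in-use
	b.Free(addr, 4)         // the bitmap is all-free again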
+func (b *pallocBits) free(i, n uint) { + (*pageBits)(b).clearRange(i, n) +} + +// freeAll frees all the bits of b. +func (b *pallocBits) freeAll() { + (*pageBits)(b).clearAll() +} + +// pages64 returns a 64-bit bitmap representing a block of 64 pages aligned +// to 64 pages. The returned block of pages is the one containing the i'th +// page in this pallocBits. Each bit represents whether the page is in-use. +func (b *pallocBits) pages64(i uint) uint64 { + return (*pageBits)(b).block64(i) +} + +// allocPages64 allocates a 64-bit block of 64 pages aligned to 64 pages according +// to the bits set in alloc. The block set is the one containing the i'th page. +func (b *pallocBits) allocPages64(i uint, alloc uint64) { + (*pageBits)(b).setBlock64(i, alloc) +} + +// findBitRange64 returns the bit index of the first set of +// n consecutive 1 bits. If no consecutive set of 1 bits of +// size n may be found in c, then it returns an integer >= 64. +// n must be > 0. +func findBitRange64(c uint64, n uint) uint { + // This implementation is based on shrinking the length of + // runs of contiguous 1 bits. We remove the top n-1 1 bits + // from each run of 1s, then look for the first remaining 1 bit. + p := n - 1 // number of 1s we want to remove. + k := uint(1) // current minimum width of runs of 0 in c. + for p > 0 { + if p <= k { + // Shift p 0s down into the top of each run of 1s. + c &= c >> (p & 63) + break + } + // Shift k 0s down into the top of each run of 1s. + c &= c >> (k & 63) + if c == 0 { + return 64 + } + p -= k + // We've just doubled the minimum length of 0-runs. + // This allows us to shift farther in the next iteration. + k *= 2 + } + // Find first remaining 1. + // Since we shrunk from the top down, the first 1 is in + // its correct original position. + return uint(sys.TrailingZeros64(c)) +} + +// pallocData encapsulates pallocBits and a bitmap for +// whether or not a given page is scavenged in a single +// structure. It's effectively a pallocBits with +// additional functionality. +// +// Update the comment on (*pageAlloc).chunks should this +// structure change. +type pallocData struct { + pallocBits + scavenged pageBits +} + +// allocRange sets bits [i, i+n) in the bitmap to 1 and +// updates the scavenged bits appropriately. +func (m *pallocData) allocRange(i, n uint) { + // Clear the scavenged bits when we alloc the range. + m.pallocBits.allocRange(i, n) + m.scavenged.clearRange(i, n) +} + +// allocAll sets every bit in the bitmap to 1 and updates +// the scavenged bits appropriately. +func (m *pallocData) allocAll() { + // Clear the scavenged bits when we alloc the range. + m.pallocBits.allocAll() + m.scavenged.clearAll() +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mpallocbits_test.go b/platform/dbops/binaries/go/go/src/runtime/mpallocbits_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5095e24220e80dd19869ed840bec3c1b3c57f5bc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mpallocbits_test.go @@ -0,0 +1,551 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "fmt" + "math/rand" + . "runtime" + "testing" +) + +// Ensures that got and want are the same, and if not, reports +// detailed diff information. 
+func checkPallocBits(t *testing.T, got, want *PallocBits) bool { + d := DiffPallocBits(got, want) + if len(d) != 0 { + t.Errorf("%d range(s) different", len(d)) + for _, bits := range d { + t.Logf("\t@ bit index %d", bits.I) + t.Logf("\t| got: %s", StringifyPallocBits(got, bits)) + t.Logf("\t| want: %s", StringifyPallocBits(want, bits)) + } + return false + } + return true +} + +// makePallocBits produces an initialized PallocBits by setting +// the ranges in s to 1 and the rest to zero. +func makePallocBits(s []BitRange) *PallocBits { + b := new(PallocBits) + for _, v := range s { + b.AllocRange(v.I, v.N) + } + return b +} + +// Ensures that PallocBits.AllocRange works, which is a fundamental +// method used for testing and initialization since it's used by +// makePallocBits. +func TestPallocBitsAllocRange(t *testing.T) { + test := func(t *testing.T, i, n uint, want *PallocBits) { + checkPallocBits(t, makePallocBits([]BitRange{{i, n}}), want) + } + t.Run("OneLow", func(t *testing.T) { + want := new(PallocBits) + want[0] = 0x1 + test(t, 0, 1, want) + }) + t.Run("OneHigh", func(t *testing.T) { + want := new(PallocBits) + want[PallocChunkPages/64-1] = 1 << 63 + test(t, PallocChunkPages-1, 1, want) + }) + t.Run("Inner", func(t *testing.T) { + want := new(PallocBits) + want[2] = 0x3e + test(t, 129, 5, want) + }) + t.Run("Aligned", func(t *testing.T) { + want := new(PallocBits) + want[2] = ^uint64(0) + want[3] = ^uint64(0) + test(t, 128, 128, want) + }) + t.Run("Begin", func(t *testing.T) { + want := new(PallocBits) + want[0] = ^uint64(0) + want[1] = ^uint64(0) + want[2] = ^uint64(0) + want[3] = ^uint64(0) + want[4] = ^uint64(0) + want[5] = 0x1 + test(t, 0, 321, want) + }) + t.Run("End", func(t *testing.T) { + want := new(PallocBits) + want[PallocChunkPages/64-1] = ^uint64(0) + want[PallocChunkPages/64-2] = ^uint64(0) + want[PallocChunkPages/64-3] = ^uint64(0) + want[PallocChunkPages/64-4] = 1 << 63 + test(t, PallocChunkPages-(64*3+1), 64*3+1, want) + }) + t.Run("All", func(t *testing.T) { + want := new(PallocBits) + for i := range want { + want[i] = ^uint64(0) + } + test(t, 0, PallocChunkPages, want) + }) +} + +// Inverts every bit in the PallocBits. +func invertPallocBits(b *PallocBits) { + for i := range b { + b[i] = ^b[i] + } +} + +// Ensures two packed summaries are identical, and reports a detailed description +// of the difference if they're not. +func checkPallocSum(t testing.TB, got, want PallocSum) { + if got.Start() != want.Start() { + t.Errorf("inconsistent start: got %d, want %d", got.Start(), want.Start()) + } + if got.Max() != want.Max() { + t.Errorf("inconsistent max: got %d, want %d", got.Max(), want.Max()) + } + if got.End() != want.End() { + t.Errorf("inconsistent end: got %d, want %d", got.End(), want.End()) + } +} + +func TestMallocBitsPopcntRange(t *testing.T) { + type test struct { + i, n uint // bit range to popcnt over. + want uint // expected popcnt result on that range. + } + tests := map[string]struct { + init []BitRange // bit ranges to set to 1 in the bitmap. + tests []test // a set of popcnt tests to run over the bitmap. 
+ }{ + "None": { + tests: []test{ + {0, 1, 0}, + {5, 3, 0}, + {2, 11, 0}, + {PallocChunkPages/4 + 1, PallocChunkPages / 2, 0}, + {0, PallocChunkPages, 0}, + }, + }, + "All": { + init: []BitRange{{0, PallocChunkPages}}, + tests: []test{ + {0, 1, 1}, + {5, 3, 3}, + {2, 11, 11}, + {PallocChunkPages/4 + 1, PallocChunkPages / 2, PallocChunkPages / 2}, + {0, PallocChunkPages, PallocChunkPages}, + }, + }, + "Half": { + init: []BitRange{{PallocChunkPages / 2, PallocChunkPages / 2}}, + tests: []test{ + {0, 1, 0}, + {5, 3, 0}, + {2, 11, 0}, + {PallocChunkPages/2 - 1, 1, 0}, + {PallocChunkPages / 2, 1, 1}, + {PallocChunkPages/2 + 10, 1, 1}, + {PallocChunkPages/2 - 1, 2, 1}, + {PallocChunkPages / 4, PallocChunkPages / 4, 0}, + {PallocChunkPages / 4, PallocChunkPages/4 + 1, 1}, + {PallocChunkPages/4 + 1, PallocChunkPages / 2, PallocChunkPages/4 + 1}, + {0, PallocChunkPages, PallocChunkPages / 2}, + }, + }, + "OddBound": { + init: []BitRange{{0, 111}}, + tests: []test{ + {0, 1, 1}, + {5, 3, 3}, + {2, 11, 11}, + {110, 2, 1}, + {99, 50, 12}, + {110, 1, 1}, + {111, 1, 0}, + {99, 1, 1}, + {120, 1, 0}, + {PallocChunkPages / 2, PallocChunkPages / 2, 0}, + {0, PallocChunkPages, 111}, + }, + }, + "Scattered": { + init: []BitRange{ + {1, 3}, {5, 1}, {7, 1}, {10, 2}, {13, 1}, {15, 4}, + {21, 1}, {23, 1}, {26, 2}, {30, 5}, {36, 2}, {40, 3}, + {44, 6}, {51, 1}, {53, 2}, {58, 3}, {63, 1}, {67, 2}, + {71, 10}, {84, 1}, {89, 7}, {99, 2}, {103, 1}, {107, 2}, + {111, 1}, {113, 1}, {115, 1}, {118, 1}, {120, 2}, {125, 5}, + }, + tests: []test{ + {0, 11, 6}, + {0, 64, 39}, + {13, 64, 40}, + {64, 64, 34}, + {0, 128, 73}, + {1, 128, 74}, + {0, PallocChunkPages, 75}, + }, + }, + } + for name, v := range tests { + v := v + t.Run(name, func(t *testing.T) { + b := makePallocBits(v.init) + for _, h := range v.tests { + if got := b.PopcntRange(h.i, h.n); got != h.want { + t.Errorf("bad popcnt (i=%d, n=%d): got %d, want %d", h.i, h.n, got, h.want) + } + } + }) + } +} + +// Ensures computing bit summaries works as expected by generating random +// bitmaps and checking against a reference implementation. +func TestPallocBitsSummarizeRandom(t *testing.T) { + b := new(PallocBits) + for i := 0; i < 1000; i++ { + // Randomize bitmap. + for i := range b { + b[i] = rand.Uint64() + } + // Check summary against reference implementation. + checkPallocSum(t, b.Summarize(), SummarizeSlow(b)) + } +} + +// Ensures computing bit summaries works as expected. +func TestPallocBitsSummarize(t *testing.T) { + var emptySum = PackPallocSum(PallocChunkPages, PallocChunkPages, PallocChunkPages) + type test struct { + free []BitRange // Ranges of free (zero) bits. 
+		hits []PallocSum
+	}
+	tests := make(map[string]test)
+	tests["NoneFree"] = test{
+		free: []BitRange{},
+		hits: []PallocSum{
+			PackPallocSum(0, 0, 0),
+		},
+	}
+	tests["OnlyStart"] = test{
+		free: []BitRange{{0, 10}},
+		hits: []PallocSum{
+			PackPallocSum(10, 10, 0),
+		},
+	}
+	tests["OnlyEnd"] = test{
+		free: []BitRange{{PallocChunkPages - 40, 40}},
+		hits: []PallocSum{
+			PackPallocSum(0, 40, 40),
+		},
+	}
+	tests["StartAndEnd"] = test{
+		free: []BitRange{{0, 11}, {PallocChunkPages - 23, 23}},
+		hits: []PallocSum{
+			PackPallocSum(11, 23, 23),
+		},
+	}
+	tests["StartMaxEnd"] = test{
+		free: []BitRange{{0, 4}, {50, 100}, {PallocChunkPages - 4, 4}},
+		hits: []PallocSum{
+			PackPallocSum(4, 100, 4),
+		},
+	}
+	tests["OnlyMax"] = test{
+		free: []BitRange{{1, 20}, {35, 241}, {PallocChunkPages - 50, 30}},
+		hits: []PallocSum{
+			PackPallocSum(0, 241, 0),
+		},
+	}
+	tests["MultiMax"] = test{
+		free: []BitRange{{35, 2}, {40, 5}, {100, 5}},
+		hits: []PallocSum{
+			PackPallocSum(0, 5, 0),
+		},
+	}
+	tests["One"] = test{
+		free: []BitRange{{2, 1}},
+		hits: []PallocSum{
+			PackPallocSum(0, 1, 0),
+		},
+	}
+	tests["AllFree"] = test{
+		free: []BitRange{{0, PallocChunkPages}},
+		hits: []PallocSum{
+			emptySum,
+		},
+	}
+	for name, v := range tests {
+		v := v
+		t.Run(name, func(t *testing.T) {
+			b := makePallocBits(v.free)
+			// In the PallocBits we create, 1s represent free spots, but in the
+			// actual PallocBits a 1 means not free, so invert.
+			invertPallocBits(b)
+			for _, h := range v.hits {
+				checkPallocSum(t, b.Summarize(), h)
+			}
+		})
+	}
+}
+
+// Benchmarks how quickly we can summarize a PallocBits.
+func BenchmarkPallocBitsSummarize(b *testing.B) {
+	patterns := []uint64{
+		0,
+		^uint64(0),
+		0xaa,
+		0xaaaaaaaaaaaaaaaa,
+		0x80000000aaaaaaaa,
+		0xaaaaaaaa00000001,
+		0xbbbbbbbbbbbbbbbb,
+		0x80000000bbbbbbbb,
+		0xbbbbbbbb00000001,
+		0xcccccccccccccccc,
+		0x4444444444444444,
+		0x4040404040404040,
+		0x4000400040004000,
+		0x1000404044ccaaff,
+	}
+	for _, p := range patterns {
+		buf := new(PallocBits)
+		for i := 0; i < len(buf); i++ {
+			buf[i] = p
+		}
+		b.Run(fmt.Sprintf("Unpacked%02X", p), func(b *testing.B) {
+			checkPallocSum(b, buf.Summarize(), SummarizeSlow(buf))
+			for i := 0; i < b.N; i++ {
+				buf.Summarize()
+			}
+		})
+	}
+}
+
+// Ensures page allocation works.
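In each case below, a hit is the index that Find is expected to return;
^uint(0) marks an expected failure, in which case nothing is allocated before
the next hit runs.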
+func TestPallocBitsAlloc(t *testing.T) { + tests := map[string]struct { + before []BitRange + after []BitRange + npages uintptr + hits []uint + }{ + "AllFree1": { + npages: 1, + hits: []uint{0, 1, 2, 3, 4, 5}, + after: []BitRange{{0, 6}}, + }, + "AllFree2": { + npages: 2, + hits: []uint{0, 2, 4, 6, 8, 10}, + after: []BitRange{{0, 12}}, + }, + "AllFree5": { + npages: 5, + hits: []uint{0, 5, 10, 15, 20}, + after: []BitRange{{0, 25}}, + }, + "AllFree64": { + npages: 64, + hits: []uint{0, 64, 128}, + after: []BitRange{{0, 192}}, + }, + "AllFree65": { + npages: 65, + hits: []uint{0, 65, 130}, + after: []BitRange{{0, 195}}, + }, + "SomeFree64": { + before: []BitRange{{0, 32}, {64, 32}, {100, PallocChunkPages - 100}}, + npages: 64, + hits: []uint{^uint(0)}, + after: []BitRange{{0, 32}, {64, 32}, {100, PallocChunkPages - 100}}, + }, + "NoneFree1": { + before: []BitRange{{0, PallocChunkPages}}, + npages: 1, + hits: []uint{^uint(0), ^uint(0)}, + after: []BitRange{{0, PallocChunkPages}}, + }, + "NoneFree2": { + before: []BitRange{{0, PallocChunkPages}}, + npages: 2, + hits: []uint{^uint(0), ^uint(0)}, + after: []BitRange{{0, PallocChunkPages}}, + }, + "NoneFree5": { + before: []BitRange{{0, PallocChunkPages}}, + npages: 5, + hits: []uint{^uint(0), ^uint(0)}, + after: []BitRange{{0, PallocChunkPages}}, + }, + "NoneFree65": { + before: []BitRange{{0, PallocChunkPages}}, + npages: 65, + hits: []uint{^uint(0), ^uint(0)}, + after: []BitRange{{0, PallocChunkPages}}, + }, + "ExactFit1": { + before: []BitRange{{0, PallocChunkPages/2 - 3}, {PallocChunkPages/2 - 2, PallocChunkPages/2 + 2}}, + npages: 1, + hits: []uint{PallocChunkPages/2 - 3, ^uint(0)}, + after: []BitRange{{0, PallocChunkPages}}, + }, + "ExactFit2": { + before: []BitRange{{0, PallocChunkPages/2 - 3}, {PallocChunkPages/2 - 1, PallocChunkPages/2 + 1}}, + npages: 2, + hits: []uint{PallocChunkPages/2 - 3, ^uint(0)}, + after: []BitRange{{0, PallocChunkPages}}, + }, + "ExactFit5": { + before: []BitRange{{0, PallocChunkPages/2 - 3}, {PallocChunkPages/2 + 2, PallocChunkPages/2 - 2}}, + npages: 5, + hits: []uint{PallocChunkPages/2 - 3, ^uint(0)}, + after: []BitRange{{0, PallocChunkPages}}, + }, + "ExactFit65": { + before: []BitRange{{0, PallocChunkPages/2 - 31}, {PallocChunkPages/2 + 34, PallocChunkPages/2 - 34}}, + npages: 65, + hits: []uint{PallocChunkPages/2 - 31, ^uint(0)}, + after: []BitRange{{0, PallocChunkPages}}, + }, + "SomeFree161": { + before: []BitRange{{0, 185}, {331, 1}}, + npages: 161, + hits: []uint{332}, + after: []BitRange{{0, 185}, {331, 162}}, + }, + } + for name, v := range tests { + v := v + t.Run(name, func(t *testing.T) { + b := makePallocBits(v.before) + for iter, i := range v.hits { + a, _ := b.Find(v.npages, 0) + if i != a { + t.Fatalf("find #%d picked wrong index: want %d, got %d", iter+1, i, a) + } + if i != ^uint(0) { + b.AllocRange(a, uint(v.npages)) + } + } + want := makePallocBits(v.after) + checkPallocBits(t, b, want) + }) + } +} + +// Ensures page freeing works. 
+func TestPallocBitsFree(t *testing.T) { + tests := map[string]struct { + beforeInv []BitRange + afterInv []BitRange + frees []uint + npages uintptr + }{ + "SomeFree": { + npages: 1, + beforeInv: []BitRange{{0, 32}, {64, 32}, {100, 1}}, + frees: []uint{32}, + afterInv: []BitRange{{0, 33}, {64, 32}, {100, 1}}, + }, + "NoneFree1": { + npages: 1, + frees: []uint{0, 1, 2, 3, 4, 5}, + afterInv: []BitRange{{0, 6}}, + }, + "NoneFree2": { + npages: 2, + frees: []uint{0, 2, 4, 6, 8, 10}, + afterInv: []BitRange{{0, 12}}, + }, + "NoneFree5": { + npages: 5, + frees: []uint{0, 5, 10, 15, 20}, + afterInv: []BitRange{{0, 25}}, + }, + "NoneFree64": { + npages: 64, + frees: []uint{0, 64, 128}, + afterInv: []BitRange{{0, 192}}, + }, + "NoneFree65": { + npages: 65, + frees: []uint{0, 65, 130}, + afterInv: []BitRange{{0, 195}}, + }, + } + for name, v := range tests { + v := v + t.Run(name, func(t *testing.T) { + b := makePallocBits(v.beforeInv) + invertPallocBits(b) + for _, i := range v.frees { + b.Free(i, uint(v.npages)) + } + want := makePallocBits(v.afterInv) + invertPallocBits(want) + checkPallocBits(t, b, want) + }) + } +} + +func TestFindBitRange64(t *testing.T) { + check := func(x uint64, n uint, result uint) { + i := FindBitRange64(x, n) + if result == ^uint(0) && i < 64 { + t.Errorf("case (%016x, %d): got %d, want failure", x, n, i) + } else if result != ^uint(0) && i != result { + t.Errorf("case (%016x, %d): got %d, want %d", x, n, i, result) + } + } + for i := uint(1); i <= 64; i++ { + check(^uint64(0), i, 0) + } + for i := uint(1); i <= 64; i++ { + check(0, i, ^uint(0)) + } + check(0x8000000000000000, 1, 63) + check(0xc000010001010000, 2, 62) + check(0xc000010001030000, 2, 16) + check(0xe000030001030000, 3, 61) + check(0xe000030001070000, 3, 16) + check(0xffff03ff01070000, 16, 48) + check(0xffff03ff0107ffff, 16, 0) + check(0x0fff03ff01079fff, 16, ^uint(0)) +} + +func BenchmarkFindBitRange64(b *testing.B) { + patterns := []uint64{ + 0, + ^uint64(0), + 0xaa, + 0xaaaaaaaaaaaaaaaa, + 0x80000000aaaaaaaa, + 0xaaaaaaaa00000001, + 0xbbbbbbbbbbbbbbbb, + 0x80000000bbbbbbbb, + 0xbbbbbbbb00000001, + 0xcccccccccccccccc, + 0x4444444444444444, + 0x4040404040404040, + 0x4000400040004000, + } + sizes := []uint{ + 2, 8, 32, + } + for _, pattern := range patterns { + for _, size := range sizes { + b.Run(fmt.Sprintf("Pattern%02XSize%d", pattern, size), func(b *testing.B) { + for i := 0; i < b.N; i++ { + FindBitRange64(pattern, size) + } + }) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mprof.go b/platform/dbops/binaries/go/go/src/runtime/mprof.go new file mode 100644 index 0000000000000000000000000000000000000000..abdd2f3e8c9db859c5b302e8f061ffa642fe1792 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mprof.go @@ -0,0 +1,1517 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Malloc profiling. +// Patterned after tcmalloc's algorithms; shorter code. + +package runtime + +import ( + "internal/abi" + "runtime/internal/atomic" + "runtime/internal/sys" + "unsafe" +) + +// NOTE(rsc): Everything here could use cas if contention became an issue. 
+var ( + // profInsertLock protects changes to the start of all *bucket linked lists + profInsertLock mutex + // profBlockLock protects the contents of every blockRecord struct + profBlockLock mutex + // profMemActiveLock protects the active field of every memRecord struct + profMemActiveLock mutex + // profMemFutureLock is a set of locks that protect the respective elements + // of the future array of every memRecord struct + profMemFutureLock [len(memRecord{}.future)]mutex +) + +// All memory allocations are local and do not escape outside of the profiler. +// The profiler is forbidden from referring to garbage-collected memory. + +const ( + // profile types + memProfile bucketType = 1 + iota + blockProfile + mutexProfile + + // size of bucket hash table + buckHashSize = 179999 + + // maxStack is the max depth of stack to record in bucket. + // Note that it's only used internally as a guard against + // wildly out-of-bounds slicing of the PCs that come after + // a bucket struct, and it could increase in the future. + maxStack = 32 +) + +type bucketType int + +// A bucket holds per-call-stack profiling information. +// The representation is a bit sleazy, inherited from C. +// This struct defines the bucket header. It is followed in +// memory by the stack words and then the actual record +// data, either a memRecord or a blockRecord. +// +// Per-call-stack profiling information. +// Lookup by hashing call stack into a linked-list hash table. +// +// None of the fields in this bucket header are modified after +// creation, including its next and allnext links. +// +// No heap pointers. +type bucket struct { + _ sys.NotInHeap + next *bucket + allnext *bucket + typ bucketType // memBucket or blockBucket (includes mutexProfile) + hash uintptr + size uintptr + nstk uintptr +} + +// A memRecord is the bucket data for a bucket of type memProfile, +// part of the memory profile. +type memRecord struct { + // The following complex 3-stage scheme of stats accumulation + // is required to obtain a consistent picture of mallocs and frees + // for some point in time. + // The problem is that mallocs come in real time, while frees + // come only after a GC during concurrent sweeping. So if we would + // naively count them, we would get a skew toward mallocs. + // + // Hence, we delay information to get consistent snapshots as + // of mark termination. Allocations count toward the next mark + // termination's snapshot, while sweep frees count toward the + // previous mark termination's snapshot: + // + // MT MT MT MT + // .·| .·| .·| .·| + // .·˙ | .·˙ | .·˙ | .·˙ | + // .·˙ | .·˙ | .·˙ | .·˙ | + // .·˙ |.·˙ |.·˙ |.·˙ | + // + // alloc → ▲ ← free + // ┠┅┅┅┅┅┅┅┅┅┅┅P + // C+2 → C+1 → C + // + // alloc → ▲ ← free + // ┠┅┅┅┅┅┅┅┅┅┅┅P + // C+2 → C+1 → C + // + // Since we can't publish a consistent snapshot until all of + // the sweep frees are accounted for, we wait until the next + // mark termination ("MT" above) to publish the previous mark + // termination's snapshot ("P" above). To do this, allocation + // and free events are accounted to *future* heap profile + // cycles ("C+n" above) and we only publish a cycle once all + // of the events from that cycle must be done. Specifically: + // + // Mallocs are accounted to cycle C+2. + // Explicit frees are accounted to cycle C+2. + // GC frees (done during sweeping) are accounted to cycle C+1. + // + // After mark termination, we increment the global heap + // profile cycle counter and accumulate the stats from cycle C + // into the active profile. 
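To make the cycle indexing concrete (illustrative numbers; the arithmetic
matches mProf_Malloc, mProf_Free, and mProf_FlushLocked below): if the global
cycle counter currently reads C = 5, a malloc is recorded in
future[(5+2)%3] = future[1], a sweep free in future[(5+1)%3] = future[0], and
the next flush folds future[5%3] = future[2] into the active profile.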
+
+	// active is the currently published profile. A profiling
+	// cycle can be accumulated into active once it's complete.
+	active memRecordCycle
+
+	// future records the profile events we're counting for cycles
+	// that have not yet been published. This is a ring buffer
+	// indexed by the global heap profile cycle C and stores
+	// cycles C, C+1, and C+2. Unlike active, these counts are
+	// only for a single cycle; they are not cumulative across
+	// cycles.
+	//
+	// We store cycle C here because there's a window between when
+	// C becomes the active cycle and when we've flushed it to
+	// active.
+	future [3]memRecordCycle
+}
+
+// A memRecordCycle is the set of allocation and free counters
+// (counts and bytes) accumulated for a single heap profile cycle.
+type memRecordCycle struct {
+	allocs, frees           uintptr
+	alloc_bytes, free_bytes uintptr
+}
+
+// add accumulates b into a. It does not zero b.
+func (a *memRecordCycle) add(b *memRecordCycle) {
+	a.allocs += b.allocs
+	a.frees += b.frees
+	a.alloc_bytes += b.alloc_bytes
+	a.free_bytes += b.free_bytes
+}
+
+// A blockRecord is the bucket data for a bucket of type blockProfile,
+// which is used in blocking and mutex profiles.
+type blockRecord struct {
+	count  float64
+	cycles int64
+}
+
+var (
+	mbuckets atomic.UnsafePointer // *bucket, memory profile buckets
+	bbuckets atomic.UnsafePointer // *bucket, blocking profile buckets
+	xbuckets atomic.UnsafePointer // *bucket, mutex profile buckets
+	buckhash atomic.UnsafePointer // *buckhashArray
+
+	mProfCycle mProfCycleHolder
+)
+
+type buckhashArray [buckHashSize]atomic.UnsafePointer // *bucket
+
+const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
+
+// mProfCycleHolder holds the global heap profile cycle number (wrapped at
+// mProfCycleWrap, stored starting at bit 1), and a flag (stored at bit 0) to
+// indicate whether future[cycle] in all buckets has been queued to flush into
+// the active profile.
+type mProfCycleHolder struct {
+	value atomic.Uint32
+}
+
+// read returns the current cycle count.
+func (c *mProfCycleHolder) read() (cycle uint32) {
+	v := c.value.Load()
+	cycle = v >> 1
+	return cycle
+}
+
+// setFlushed sets the flushed flag. It returns the current cycle count and the
+// previous value of the flushed flag.
+func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool) {
+	for {
+		prev := c.value.Load()
+		cycle = prev >> 1
+		alreadyFlushed = (prev & 0x1) != 0
+		next := prev | 0x1
+		if c.value.CompareAndSwap(prev, next) {
+			return cycle, alreadyFlushed
+		}
+	}
+}
+
+// increment increases the cycle count by one, wrapping the value at
+// mProfCycleWrap. It clears the flushed flag.
+func (c *mProfCycleHolder) increment() {
+	// We explicitly wrap mProfCycle rather than depending on
+	// uint wraparound because the memRecord.future ring does not
+	// itself wrap at a power of two.
+	for {
+		prev := c.value.Load()
+		cycle := prev >> 1
+		cycle = (cycle + 1) % mProfCycleWrap
+		next := cycle << 1
+		if c.value.CompareAndSwap(prev, next) {
+			break
+		}
+	}
+}
+
+// newBucket allocates a bucket with the given type and number of stack entries.
+func newBucket(typ bucketType, nstk int) *bucket {
+	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
+	switch typ {
+	default:
+		throw("invalid profile bucket type")
+	case memProfile:
+		size += unsafe.Sizeof(memRecord{})
+	case blockProfile, mutexProfile:
+		size += unsafe.Sizeof(blockRecord{})
+	}
+
+	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
+	b.typ = typ
+	b.nstk = uintptr(nstk)
+	return b
+}
+
+// stk returns the slice in b holding the stack.
+func (b *bucket) stk() []uintptr { + stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b))) + if b.nstk > maxStack { + // prove that slicing works; otherwise a failure requires a P + throw("bad profile stack count") + } + return stk[:b.nstk:b.nstk] +} + +// mp returns the memRecord associated with the memProfile bucket b. +func (b *bucket) mp() *memRecord { + if b.typ != memProfile { + throw("bad use of bucket.mp") + } + data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0))) + return (*memRecord)(data) +} + +// bp returns the blockRecord associated with the blockProfile bucket b. +func (b *bucket) bp() *blockRecord { + if b.typ != blockProfile && b.typ != mutexProfile { + throw("bad use of bucket.bp") + } + data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0))) + return (*blockRecord)(data) +} + +// Return the bucket for stk[0:nstk], allocating new bucket if needed. +func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket { + bh := (*buckhashArray)(buckhash.Load()) + if bh == nil { + lock(&profInsertLock) + // check again under the lock + bh = (*buckhashArray)(buckhash.Load()) + if bh == nil { + bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys)) + if bh == nil { + throw("runtime: cannot allocate memory") + } + buckhash.StoreNoWB(unsafe.Pointer(bh)) + } + unlock(&profInsertLock) + } + + // Hash stack. + var h uintptr + for _, pc := range stk { + h += pc + h += h << 10 + h ^= h >> 6 + } + // hash in size + h += size + h += h << 10 + h ^= h >> 6 + // finalize + h += h << 3 + h ^= h >> 11 + + i := int(h % buckHashSize) + // first check optimistically, without the lock + for b := (*bucket)(bh[i].Load()); b != nil; b = b.next { + if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) { + return b + } + } + + if !alloc { + return nil + } + + lock(&profInsertLock) + // check again under the insertion lock + for b := (*bucket)(bh[i].Load()); b != nil; b = b.next { + if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) { + unlock(&profInsertLock) + return b + } + } + + // Create new bucket. + b := newBucket(typ, len(stk)) + copy(b.stk(), stk) + b.hash = h + b.size = size + + var allnext *atomic.UnsafePointer + if typ == memProfile { + allnext = &mbuckets + } else if typ == mutexProfile { + allnext = &xbuckets + } else { + allnext = &bbuckets + } + + b.next = (*bucket)(bh[i].Load()) + b.allnext = (*bucket)(allnext.Load()) + + bh[i].StoreNoWB(unsafe.Pointer(b)) + allnext.StoreNoWB(unsafe.Pointer(b)) + + unlock(&profInsertLock) + return b +} + +func eqslice(x, y []uintptr) bool { + if len(x) != len(y) { + return false + } + for i, xi := range x { + if xi != y[i] { + return false + } + } + return true +} + +// mProf_NextCycle publishes the next heap profile cycle and creates a +// fresh heap profile cycle. This operation is fast and can be done +// during STW. The caller must call mProf_Flush before calling +// mProf_NextCycle again. +// +// This is called by mark termination during STW so allocations and +// frees after the world is started again count towards a new heap +// profiling cycle. +func mProf_NextCycle() { + mProfCycle.increment() +} + +// mProf_Flush flushes the events from the current heap profiling +// cycle into the active profile. After this it is safe to start a new +// heap profiling cycle with mProf_NextCycle. +// +// This is called by GC after mark termination starts the world. 
In +// contrast with mProf_NextCycle, this is somewhat expensive, but safe +// to do concurrently. +func mProf_Flush() { + cycle, alreadyFlushed := mProfCycle.setFlushed() + if alreadyFlushed { + return + } + + index := cycle % uint32(len(memRecord{}.future)) + lock(&profMemActiveLock) + lock(&profMemFutureLock[index]) + mProf_FlushLocked(index) + unlock(&profMemFutureLock[index]) + unlock(&profMemActiveLock) +} + +// mProf_FlushLocked flushes the events from the heap profiling cycle at index +// into the active profile. The caller must hold the lock for the active profile +// (profMemActiveLock) and for the profiling cycle at index +// (profMemFutureLock[index]). +func mProf_FlushLocked(index uint32) { + assertLockHeld(&profMemActiveLock) + assertLockHeld(&profMemFutureLock[index]) + head := (*bucket)(mbuckets.Load()) + for b := head; b != nil; b = b.allnext { + mp := b.mp() + + // Flush cycle C into the published profile and clear + // it for reuse. + mpc := &mp.future[index] + mp.active.add(mpc) + *mpc = memRecordCycle{} + } +} + +// mProf_PostSweep records that all sweep frees for this GC cycle have +// completed. This has the effect of publishing the heap profile +// snapshot as of the last mark termination without advancing the heap +// profile cycle. +func mProf_PostSweep() { + // Flush cycle C+1 to the active profile so everything as of + // the last mark termination becomes visible. *Don't* advance + // the cycle, since we're still accumulating allocs in cycle + // C+2, which have to become C+1 in the next mark termination + // and so on. + cycle := mProfCycle.read() + 1 + + index := cycle % uint32(len(memRecord{}.future)) + lock(&profMemActiveLock) + lock(&profMemFutureLock[index]) + mProf_FlushLocked(index) + unlock(&profMemFutureLock[index]) + unlock(&profMemActiveLock) +} + +// Called by malloc to record a profiled block. +func mProf_Malloc(p unsafe.Pointer, size uintptr) { + var stk [maxStack]uintptr + nstk := callers(4, stk[:]) + + index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future)) + + b := stkbucket(memProfile, size, stk[:nstk], true) + mp := b.mp() + mpc := &mp.future[index] + + lock(&profMemFutureLock[index]) + mpc.allocs++ + mpc.alloc_bytes += size + unlock(&profMemFutureLock[index]) + + // Setprofilebucket locks a bunch of other mutexes, so we call it outside of + // the profiler locks. This reduces potential contention and chances of + // deadlocks. Since the object must be alive during the call to + // mProf_Malloc, it's fine to do this non-atomically. + systemstack(func() { + setprofilebucket(p, b) + }) +} + +// Called when freeing a profiled block. +func mProf_Free(b *bucket, size uintptr) { + index := (mProfCycle.read() + 1) % uint32(len(memRecord{}.future)) + + mp := b.mp() + mpc := &mp.future[index] + + lock(&profMemFutureLock[index]) + mpc.frees++ + mpc.free_bytes += size + unlock(&profMemFutureLock[index]) +} + +var blockprofilerate uint64 // in CPU ticks + +// SetBlockProfileRate controls the fraction of goroutine blocking events +// that are reported in the blocking profile. The profiler aims to sample +// an average of one blocking event per rate nanoseconds spent blocked. +// +// To include every blocking event in the profile, pass rate = 1. +// To turn off profiling entirely, pass rate <= 0. 
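For example (illustrative caller code; per the semantics above, a rate of
1e6 ns samples roughly one event per millisecond spent blocked):

	runtime.SetBlockProfileRate(1_000_000)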
+func SetBlockProfileRate(rate int) { + var r int64 + if rate <= 0 { + r = 0 // disable profiling + } else if rate == 1 { + r = 1 // profile everything + } else { + // convert ns to cycles, use float64 to prevent overflow during multiplication + r = int64(float64(rate) * float64(ticksPerSecond()) / (1000 * 1000 * 1000)) + if r == 0 { + r = 1 + } + } + + atomic.Store64(&blockprofilerate, uint64(r)) +} + +func blockevent(cycles int64, skip int) { + if cycles <= 0 { + cycles = 1 + } + + rate := int64(atomic.Load64(&blockprofilerate)) + if blocksampled(cycles, rate) { + saveblockevent(cycles, rate, skip+1, blockProfile) + } +} + +// blocksampled returns true for all events where cycles >= rate. Shorter +// events have a cycles/rate random chance of returning true. +func blocksampled(cycles, rate int64) bool { + if rate <= 0 || (rate > cycles && cheaprand64()%rate > cycles) { + return false + } + return true +} + +func saveblockevent(cycles, rate int64, skip int, which bucketType) { + gp := getg() + var nstk int + var stk [maxStack]uintptr + if gp.m.curg == nil || gp.m.curg == gp { + nstk = callers(skip, stk[:]) + } else { + nstk = gcallers(gp.m.curg, skip, stk[:]) + } + + saveBlockEventStack(cycles, rate, stk[:nstk], which) +} + +// lockTimer assists with profiling contention on runtime-internal locks. +// +// There are several steps between the time that an M experiences contention and +// when that contention may be added to the profile. This comes from our +// constraints: We need to keep the critical section of each lock small, +// especially when those locks are contended. The reporting code cannot acquire +// new locks until the M has released all other locks, which means no memory +// allocations and encourages use of (temporary) M-local storage. +// +// The M will have space for storing one call stack that caused contention, and +// for the magnitude of that contention. It will also have space to store the +// magnitude of additional contention the M caused, since it only has space to +// remember one call stack and might encounter several contention events before +// it releases all of its locks and is thus able to transfer the local buffer +// into the profile. +// +// The M will collect the call stack when it unlocks the contended lock. That +// minimizes the impact on the critical section of the contended lock, and +// matches the mutex profile's behavior for contention in sync.Mutex: measured +// at the Unlock method. +// +// The profile for contention on sync.Mutex blames the caller of Unlock for the +// amount of contention experienced by the callers of Lock which had to wait. +// When there are several critical sections, this allows identifying which of +// them is responsible. +// +// Matching that behavior for runtime-internal locks will require identifying +// which Ms are blocked on the mutex. The semaphore-based implementation is +// ready to allow that, but the futex-based implementation will require a bit +// more work. Until then, we report contention on runtime-internal locks with a +// call stack taken from the unlock call (like the rest of the user-space +// "mutex" profile), but assign it a duration value based on how long the +// previous lock call took (like the user-space "block" profile). +// +// Thus, reporting the call stacks of runtime-internal lock contention is +// guarded by GODEBUG for now. Set GODEBUG=runtimecontentionstacks=1 to enable. 
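For example (illustrative invocation), running a binary as
GODEBUG=runtimecontentionstacks=1 ./myprogram reports real call stacks for
this contention instead of attributing all of it to
runtime._LostContendedRuntimeLock, which is what captureStack below does when
the flag is unset.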
+// +// TODO(rhysh): plumb through the delay duration, remove GODEBUG, update comment +// +// The M will track this by storing a pointer to the lock; lock/unlock pairs for +// runtime-internal locks are always on the same M. +// +// Together, that demands several steps for recording contention. First, when +// finally acquiring a contended lock, the M decides whether it should plan to +// profile that event by storing a pointer to the lock in its "to be profiled +// upon unlock" field. If that field is already set, it uses the relative +// magnitudes to weight a random choice between itself and the other lock, with +// the loser's time being added to the "additional contention" field. Otherwise +// if the M's call stack buffer is occupied, it does the comparison against that +// sample's magnitude. +// +// Second, having unlocked a mutex the M checks to see if it should capture the +// call stack into its local buffer. Finally, when the M unlocks its last mutex, +// it transfers the local buffer into the profile. As part of that step, it also +// transfers any "additional contention" time to the profile. Any lock +// contention that it experiences while adding samples to the profile will be +// recorded later as "additional contention" and not include a call stack, to +// avoid an echo. +type lockTimer struct { + lock *mutex + timeRate int64 + timeStart int64 + tickStart int64 +} + +func (lt *lockTimer) begin() { + rate := int64(atomic.Load64(&mutexprofilerate)) + + lt.timeRate = gTrackingPeriod + if rate != 0 && rate < lt.timeRate { + lt.timeRate = rate + } + if int64(cheaprand())%lt.timeRate == 0 { + lt.timeStart = nanotime() + } + + if rate > 0 && int64(cheaprand())%rate == 0 { + lt.tickStart = cputicks() + } +} + +func (lt *lockTimer) end() { + gp := getg() + + if lt.timeStart != 0 { + nowTime := nanotime() + gp.m.mLockProfile.waitTime.Add((nowTime - lt.timeStart) * lt.timeRate) + } + + if lt.tickStart != 0 { + nowTick := cputicks() + gp.m.mLockProfile.recordLock(nowTick-lt.tickStart, lt.lock) + } +} + +type mLockProfile struct { + waitTime atomic.Int64 // total nanoseconds spent waiting in runtime.lockWithRank + stack [maxStack]uintptr // stack that experienced contention in runtime.lockWithRank + pending uintptr // *mutex that experienced contention (to be traceback-ed) + cycles int64 // cycles attributable to "pending" (if set), otherwise to "stack" + cyclesLost int64 // contention for which we weren't able to record a call stack + disabled bool // attribute all time to "lost" +} + +func (prof *mLockProfile) recordLock(cycles int64, l *mutex) { + if cycles <= 0 { + return + } + + if prof.disabled { + // We're experiencing contention while attempting to report contention. + // Make a note of its magnitude, but don't allow it to be the sole cause + // of another contention report. + prof.cyclesLost += cycles + return + } + + if uintptr(unsafe.Pointer(l)) == prof.pending { + // Optimization: we'd already planned to profile this same lock (though + // possibly from a different unlock site). + prof.cycles += cycles + return + } + + if prev := prof.cycles; prev > 0 { + // We can only store one call stack for runtime-internal lock contention + // on this M, and we've already got one. Decide which should stay, and + // add the other to the report for runtime._LostContendedRuntimeLock. 
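(Gloss on the weighted choice below, illustrative: each candidate stack draws
a uniform value below its own cycle count and the larger draw wins, making the
kept stack a random choice weighted by relative magnitude; the loser's cycles
are folded into cyclesLost rather than dropped.)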
+ prevScore := uint64(cheaprand64()) % uint64(prev) + thisScore := uint64(cheaprand64()) % uint64(cycles) + if prevScore > thisScore { + prof.cyclesLost += cycles + return + } else { + prof.cyclesLost += prev + } + } + // Saving the *mutex as a uintptr is safe because: + // - lockrank_on.go does this too, which gives it regular exercise + // - the lock would only move if it's stack allocated, which means it + // cannot experience multi-M contention + prof.pending = uintptr(unsafe.Pointer(l)) + prof.cycles = cycles +} + +// From unlock2, we might not be holding a p in this code. +// +//go:nowritebarrierrec +func (prof *mLockProfile) recordUnlock(l *mutex) { + if uintptr(unsafe.Pointer(l)) == prof.pending { + prof.captureStack() + } + if gp := getg(); gp.m.locks == 1 && gp.m.mLockProfile.cycles != 0 { + prof.store() + } +} + +func (prof *mLockProfile) captureStack() { + skip := 3 // runtime.(*mLockProfile).recordUnlock runtime.unlock2 runtime.unlockWithRank + if staticLockRanking { + // When static lock ranking is enabled, we'll always be on the system + // stack at this point. There will be a runtime.unlockWithRank.func1 + // frame, and if the call to runtime.unlock took place on a user stack + // then there'll also be a runtime.systemstack frame. To keep stack + // traces somewhat consistent whether or not static lock ranking is + // enabled, we'd like to skip those. But it's hard to tell how long + // we've been on the system stack so accept an extra frame in that case, + // with a leaf of "runtime.unlockWithRank runtime.unlock" instead of + // "runtime.unlock". + skip += 1 // runtime.unlockWithRank.func1 + } + prof.pending = 0 + + if debug.runtimeContentionStacks.Load() == 0 { + prof.stack[0] = abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum + prof.stack[1] = 0 + return + } + + var nstk int + gp := getg() + sp := getcallersp() + pc := getcallerpc() + systemstack(func() { + var u unwinder + u.initAt(pc, sp, 0, gp, unwindSilentErrors|unwindJumpStack) + nstk = tracebackPCs(&u, skip, prof.stack[:]) + }) + if nstk < len(prof.stack) { + prof.stack[nstk] = 0 + } +} + +func (prof *mLockProfile) store() { + // Report any contention we experience within this function as "lost"; it's + // important that the act of reporting a contention event not lead to a + // reportable contention event. This also means we can use prof.stack + // without copying, since it won't change during this function. + mp := acquirem() + prof.disabled = true + + nstk := maxStack + for i := 0; i < nstk; i++ { + if pc := prof.stack[i]; pc == 0 { + nstk = i + break + } + } + + cycles, lost := prof.cycles, prof.cyclesLost + prof.cycles, prof.cyclesLost = 0, 0 + + rate := int64(atomic.Load64(&mutexprofilerate)) + saveBlockEventStack(cycles, rate, prof.stack[:nstk], mutexProfile) + if lost > 0 { + lostStk := [...]uintptr{ + abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum, + } + saveBlockEventStack(lost, rate, lostStk[:], mutexProfile) + } + + prof.disabled = false + releasem(mp) +} + +func saveBlockEventStack(cycles, rate int64, stk []uintptr, which bucketType) { + b := stkbucket(which, 0, stk, true) + bp := b.bp() + + lock(&profBlockLock) + // We want to up-scale the count and cycles according to the + // probability that the event was sampled. For block profile events, + // the sample probability is 1 if cycles >= rate, and cycles / rate + // otherwise. For mutex profile events, the sample probability is 1 / rate. + // We scale the events by 1 / (probability the event was sampled). 
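(Worked example, illustrative: a block event with rate = 100 and cycles = 25
was sampled with probability 25/100, so the branch below records it as
100/25 = 4 events and rate = 100 cycles.)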
+ if which == blockProfile && cycles < rate { + // Remove sampling bias, see discussion on http://golang.org/cl/299991. + bp.count += float64(rate) / float64(cycles) + bp.cycles += rate + } else if which == mutexProfile { + bp.count += float64(rate) + bp.cycles += rate * cycles + } else { + bp.count++ + bp.cycles += cycles + } + unlock(&profBlockLock) +} + +var mutexprofilerate uint64 // fraction sampled + +// SetMutexProfileFraction controls the fraction of mutex contention events +// that are reported in the mutex profile. On average 1/rate events are +// reported. The previous rate is returned. +// +// To turn off profiling entirely, pass rate 0. +// To just read the current rate, pass rate < 0. +// (For n>1 the details of sampling may change.) +func SetMutexProfileFraction(rate int) int { + if rate < 0 { + return int(mutexprofilerate) + } + old := mutexprofilerate + atomic.Store64(&mutexprofilerate, uint64(rate)) + return int(old) +} + +//go:linkname mutexevent sync.event +func mutexevent(cycles int64, skip int) { + if cycles < 0 { + cycles = 0 + } + rate := int64(atomic.Load64(&mutexprofilerate)) + if rate > 0 && cheaprand64()%rate == 0 { + saveblockevent(cycles, rate, skip+1, mutexProfile) + } +} + +// Go interface to profile data. + +// A StackRecord describes a single execution stack. +type StackRecord struct { + Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry +} + +// Stack returns the stack trace associated with the record, +// a prefix of r.Stack0. +func (r *StackRecord) Stack() []uintptr { + for i, v := range r.Stack0 { + if v == 0 { + return r.Stack0[0:i] + } + } + return r.Stack0[0:] +} + +// MemProfileRate controls the fraction of memory allocations +// that are recorded and reported in the memory profile. +// The profiler aims to sample an average of +// one allocation per MemProfileRate bytes allocated. +// +// To include every allocated block in the profile, set MemProfileRate to 1. +// To turn off profiling entirely, set MemProfileRate to 0. +// +// The tools that process the memory profiles assume that the +// profile rate is constant across the lifetime of the program +// and equal to the current value. Programs that change the +// memory profiling rate should do so just once, as early as +// possible in the execution of the program (for example, +// at the beginning of main). +var MemProfileRate int = 512 * 1024 + +// disableMemoryProfiling is set by the linker if runtime.MemProfile +// is not used and the link type guarantees nobody else could use it +// elsewhere. +var disableMemoryProfiling bool + +// A MemProfileRecord describes the live objects allocated +// by a particular call sequence (stack trace). +type MemProfileRecord struct { + AllocBytes, FreeBytes int64 // number of bytes allocated, freed + AllocObjects, FreeObjects int64 // number of objects allocated, freed + Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry +} + +// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes). +func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes } + +// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects). +func (r *MemProfileRecord) InUseObjects() int64 { + return r.AllocObjects - r.FreeObjects +} + +// Stack returns the stack trace associated with the record, +// a prefix of r.Stack0. 
+func (r *MemProfileRecord) Stack() []uintptr { + for i, v := range r.Stack0 { + if v == 0 { + return r.Stack0[0:i] + } + } + return r.Stack0[0:] +} + +// MemProfile returns a profile of memory allocated and freed per allocation +// site. +// +// MemProfile returns n, the number of records in the current memory profile. +// If len(p) >= n, MemProfile copies the profile into p and returns n, true. +// If len(p) < n, MemProfile does not change p and returns n, false. +// +// If inuseZero is true, the profile includes allocation records +// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes. +// These are sites where memory was allocated, but it has all +// been released back to the runtime. +// +// The returned profile may be up to two garbage collection cycles old. +// This is to avoid skewing the profile toward allocations; because +// allocations happen in real time but frees are delayed until the garbage +// collector performs sweeping, the profile only accounts for allocations +// that have had a chance to be freed by the garbage collector. +// +// Most clients should use the runtime/pprof package or +// the testing package's -test.memprofile flag instead +// of calling MemProfile directly. +func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) { + cycle := mProfCycle.read() + // If we're between mProf_NextCycle and mProf_Flush, take care + // of flushing to the active profile so we only have to look + // at the active profile below. + index := cycle % uint32(len(memRecord{}.future)) + lock(&profMemActiveLock) + lock(&profMemFutureLock[index]) + mProf_FlushLocked(index) + unlock(&profMemFutureLock[index]) + clear := true + head := (*bucket)(mbuckets.Load()) + for b := head; b != nil; b = b.allnext { + mp := b.mp() + if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes { + n++ + } + if mp.active.allocs != 0 || mp.active.frees != 0 { + clear = false + } + } + if clear { + // Absolutely no data, suggesting that a garbage collection + // has not yet happened. In order to allow profiling when + // garbage collection is disabled from the beginning of execution, + // accumulate all of the cycles, and recount buckets. + n = 0 + for b := head; b != nil; b = b.allnext { + mp := b.mp() + for c := range mp.future { + lock(&profMemFutureLock[c]) + mp.active.add(&mp.future[c]) + mp.future[c] = memRecordCycle{} + unlock(&profMemFutureLock[c]) + } + if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes { + n++ + } + } + } + if n <= len(p) { + ok = true + idx := 0 + for b := head; b != nil; b = b.allnext { + mp := b.mp() + if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes { + record(&p[idx], b) + idx++ + } + } + } + unlock(&profMemActiveLock) + return +} + +// Write b's data to r. 
+func record(r *MemProfileRecord, b *bucket) { + mp := b.mp() + r.AllocBytes = int64(mp.active.alloc_bytes) + r.FreeBytes = int64(mp.active.free_bytes) + r.AllocObjects = int64(mp.active.allocs) + r.FreeObjects = int64(mp.active.frees) + if raceenabled { + racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile)) + } + if msanenabled { + msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0)) + } + if asanenabled { + asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0)) + } + copy(r.Stack0[:], b.stk()) + for i := int(b.nstk); i < len(r.Stack0); i++ { + r.Stack0[i] = 0 + } +} + +func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) { + lock(&profMemActiveLock) + head := (*bucket)(mbuckets.Load()) + for b := head; b != nil; b = b.allnext { + mp := b.mp() + fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees) + } + unlock(&profMemActiveLock) +} + +// BlockProfileRecord describes blocking events originated +// at a particular call sequence (stack trace). +type BlockProfileRecord struct { + Count int64 + Cycles int64 + StackRecord +} + +// BlockProfile returns n, the number of records in the current blocking profile. +// If len(p) >= n, BlockProfile copies the profile into p and returns n, true. +// If len(p) < n, BlockProfile does not change p and returns n, false. +// +// Most clients should use the [runtime/pprof] package or +// the [testing] package's -test.blockprofile flag instead +// of calling BlockProfile directly. +func BlockProfile(p []BlockProfileRecord) (n int, ok bool) { + lock(&profBlockLock) + head := (*bucket)(bbuckets.Load()) + for b := head; b != nil; b = b.allnext { + n++ + } + if n <= len(p) { + ok = true + for b := head; b != nil; b = b.allnext { + bp := b.bp() + r := &p[0] + r.Count = int64(bp.count) + // Prevent callers from having to worry about division by zero errors. + // See discussion on http://golang.org/cl/299991. + if r.Count == 0 { + r.Count = 1 + } + r.Cycles = bp.cycles + if raceenabled { + racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile)) + } + if msanenabled { + msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0)) + } + if asanenabled { + asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0)) + } + i := copy(r.Stack0[:], b.stk()) + for ; i < len(r.Stack0); i++ { + r.Stack0[i] = 0 + } + p = p[1:] + } + } + unlock(&profBlockLock) + return +} + +// MutexProfile returns n, the number of records in the current mutex profile. +// If len(p) >= n, MutexProfile copies the profile into p and returns n, true. +// Otherwise, MutexProfile does not change p, and returns n, false. +// +// Most clients should use the [runtime/pprof] package +// instead of calling MutexProfile directly. +func MutexProfile(p []BlockProfileRecord) (n int, ok bool) { + lock(&profBlockLock) + head := (*bucket)(xbuckets.Load()) + for b := head; b != nil; b = b.allnext { + n++ + } + if n <= len(p) { + ok = true + for b := head; b != nil; b = b.allnext { + bp := b.bp() + r := &p[0] + r.Count = int64(bp.count) + r.Cycles = bp.cycles + i := copy(r.Stack0[:], b.stk()) + for ; i < len(r.Stack0); i++ { + r.Stack0[i] = 0 + } + p = p[1:] + } + } + unlock(&profBlockLock) + return +} + +// ThreadCreateProfile returns n, the number of records in the thread creation profile. +// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true. 
+// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
+//
+// Most clients should use the runtime/pprof package instead
+// of calling ThreadCreateProfile directly.
+func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
+ first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
+ for mp := first; mp != nil; mp = mp.alllink {
+ n++
+ }
+ if n <= len(p) {
+ ok = true
+ i := 0
+ for mp := first; mp != nil; mp = mp.alllink {
+ p[i].Stack0 = mp.createstack
+ i++
+ }
+ }
+ return
+}
+
+//go:linkname runtime_goroutineProfileWithLabels runtime/pprof.runtime_goroutineProfileWithLabels
+func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+ return goroutineProfileWithLabels(p, labels)
+}
+
+// labels may be nil. If labels is non-nil, it must have the same length as p.
+func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+ if labels != nil && len(labels) != len(p) {
+ labels = nil
+ }
+
+ return goroutineProfileWithLabelsConcurrent(p, labels)
+}
+
+var goroutineProfile = struct {
+ sema uint32
+ active bool
+ offset atomic.Int64
+ records []StackRecord
+ labels []unsafe.Pointer
+}{
+ sema: 1,
+}
+
+// goroutineProfileState indicates the status of a goroutine's stack for the
+// current in-progress goroutine profile. Goroutines' stacks are initially
+// "Absent" from the profile, and end up "Satisfied" by the time the profile is
+// complete. While a goroutine's stack is being captured, its
+// goroutineProfileState will be "InProgress" and it will not be able to run
+// until the capture completes and the state moves to "Satisfied".
+//
+// Some goroutines move directly to the "Satisfied" state: the finalizer
+// goroutine (which at various times can be either a "system" or a "user"
+// goroutine), the goroutine that is coordinating the profile, and any
+// goroutines created during the profile.
+type goroutineProfileState uint32
+
+const (
+ goroutineProfileAbsent goroutineProfileState = iota
+ goroutineProfileInProgress
+ goroutineProfileSatisfied
+)
+
+type goroutineProfileStateHolder atomic.Uint32
+
+func (p *goroutineProfileStateHolder) Load() goroutineProfileState {
+ return goroutineProfileState((*atomic.Uint32)(p).Load())
+}
+
+func (p *goroutineProfileStateHolder) Store(value goroutineProfileState) {
+ (*atomic.Uint32)(p).Store(uint32(value))
+}
+
+func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileState) bool {
+ return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
+}
+
+func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+ semacquire(&goroutineProfile.sema)
+
+ ourg := getg()
+
+ stw := stopTheWorld(stwGoroutineProfile)
+ // Using gcount while the world is stopped should give us a consistent view
+ // of the number of live goroutines, minus the number of goroutines that are
+ // alive and permanently marked as "system". But to make this count agree
+ // with what we'd get from isSystemGoroutine, we need special handling for
+ // goroutines that can vary between user and system to ensure that the count
+ // doesn't change during the collection. So, check the finalizer goroutine
+ // in particular.
+ n = int(gcount())
+ if fingStatus.Load()&fingRunningFinalizer != 0 {
+ n++
+ }
+
+ if n > len(p) {
+ // There's not enough space in p to store the whole profile, so (per the
+ // contract of runtime.GoroutineProfile) we're not allowed to write to p
+ // at all and must return n, false.
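+ // Callers are expected to grow p and retry. A sketch of the usual
+ // calling pattern (runtime/pprof adds similar slack for goroutines
+ // started between attempts):
+ //
+ //	p := make([]runtime.StackRecord, 0)
+ //	for {
+ //		n, ok := runtime.GoroutineProfile(p)
+ //		if ok {
+ //			p = p[:n]
+ //			break
+ //		}
+ //		p = make([]runtime.StackRecord, n+10)
+ //	}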
+ startTheWorld(stw) + semrelease(&goroutineProfile.sema) + return n, false + } + + // Save current goroutine. + sp := getcallersp() + pc := getcallerpc() + systemstack(func() { + saveg(pc, sp, ourg, &p[0]) + }) + if labels != nil { + labels[0] = ourg.labels + } + ourg.goroutineProfiled.Store(goroutineProfileSatisfied) + goroutineProfile.offset.Store(1) + + // Prepare for all other goroutines to enter the profile. Aside from ourg, + // every goroutine struct in the allgs list has its goroutineProfiled field + // cleared. Any goroutine created from this point on (while + // goroutineProfile.active is set) will start with its goroutineProfiled + // field set to goroutineProfileSatisfied. + goroutineProfile.active = true + goroutineProfile.records = p + goroutineProfile.labels = labels + // The finalizer goroutine needs special handling because it can vary over + // time between being a user goroutine (eligible for this profile) and a + // system goroutine (to be excluded). Pick one before restarting the world. + if fing != nil { + fing.goroutineProfiled.Store(goroutineProfileSatisfied) + if readgstatus(fing) != _Gdead && !isSystemGoroutine(fing, false) { + doRecordGoroutineProfile(fing) + } + } + startTheWorld(stw) + + // Visit each goroutine that existed as of the startTheWorld call above. + // + // New goroutines may not be in this list, but we didn't want to know about + // them anyway. If they do appear in this list (via reusing a dead goroutine + // struct, or racing to launch between the world restarting and us getting + // the list), they will already have their goroutineProfiled field set to + // goroutineProfileSatisfied before their state transitions out of _Gdead. + // + // Any goroutine that the scheduler tries to execute concurrently with this + // call will start by adding itself to the profile (before the act of + // executing can cause any changes in its stack). + forEachGRace(func(gp1 *g) { + tryRecordGoroutineProfile(gp1, Gosched) + }) + + stw = stopTheWorld(stwGoroutineProfileCleanup) + endOffset := goroutineProfile.offset.Swap(0) + goroutineProfile.active = false + goroutineProfile.records = nil + goroutineProfile.labels = nil + startTheWorld(stw) + + // Restore the invariant that every goroutine struct in allgs has its + // goroutineProfiled field cleared. + forEachGRace(func(gp1 *g) { + gp1.goroutineProfiled.Store(goroutineProfileAbsent) + }) + + if raceenabled { + raceacquire(unsafe.Pointer(&labelSync)) + } + + if n != int(endOffset) { + // It's a big surprise that the number of goroutines changed while we + // were collecting the profile. But probably better to return a + // truncated profile than to crash the whole process. + // + // For instance, needm moves a goroutine out of the _Gdead state and so + // might be able to change the goroutine count without interacting with + // the scheduler. For code like that, the race windows are small and the + // combination of features is uncommon, so it's hard to be (and remain) + // sure we've caught them all. + } + + semrelease(&goroutineProfile.sema) + return n, true +} + +// tryRecordGoroutineProfileWB asserts that write barriers are allowed and calls +// tryRecordGoroutineProfile. 
+// +//go:yeswritebarrierrec +func tryRecordGoroutineProfileWB(gp1 *g) { + if getg().m.p.ptr() == nil { + throw("no P available, write barriers are forbidden") + } + tryRecordGoroutineProfile(gp1, osyield) +} + +// tryRecordGoroutineProfile ensures that gp1 has the appropriate representation +// in the current goroutine profile: either that it should not be profiled, or +// that a snapshot of its call stack and labels are now in the profile. +func tryRecordGoroutineProfile(gp1 *g, yield func()) { + if readgstatus(gp1) == _Gdead { + // Dead goroutines should not appear in the profile. Goroutines that + // start while profile collection is active will get goroutineProfiled + // set to goroutineProfileSatisfied before transitioning out of _Gdead, + // so here we check _Gdead first. + return + } + if isSystemGoroutine(gp1, true) { + // System goroutines should not appear in the profile. (The finalizer + // goroutine is marked as "already profiled".) + return + } + + for { + prev := gp1.goroutineProfiled.Load() + if prev == goroutineProfileSatisfied { + // This goroutine is already in the profile (or is new since the + // start of collection, so shouldn't appear in the profile). + break + } + if prev == goroutineProfileInProgress { + // Something else is adding gp1 to the goroutine profile right now. + // Give that a moment to finish. + yield() + continue + } + + // While we have gp1.goroutineProfiled set to + // goroutineProfileInProgress, gp1 may appear _Grunnable but will not + // actually be able to run. Disable preemption for ourselves, to make + // sure we finish profiling gp1 right away instead of leaving it stuck + // in this limbo. + mp := acquirem() + if gp1.goroutineProfiled.CompareAndSwap(goroutineProfileAbsent, goroutineProfileInProgress) { + doRecordGoroutineProfile(gp1) + gp1.goroutineProfiled.Store(goroutineProfileSatisfied) + } + releasem(mp) + } +} + +// doRecordGoroutineProfile writes gp1's call stack and labels to an in-progress +// goroutine profile. Preemption is disabled. +// +// This may be called via tryRecordGoroutineProfile in two ways: by the +// goroutine that is coordinating the goroutine profile (running on its own +// stack), or from the scheduler in preparation to execute gp1 (running on the +// system stack). +func doRecordGoroutineProfile(gp1 *g) { + if readgstatus(gp1) == _Grunning { + print("doRecordGoroutineProfile gp1=", gp1.goid, "\n") + throw("cannot read stack of running goroutine") + } + + offset := int(goroutineProfile.offset.Add(1)) - 1 + + if offset >= len(goroutineProfile.records) { + // Should be impossible, but better to return a truncated profile than + // to crash the entire process at this point. Instead, deal with it in + // goroutineProfileWithLabelsConcurrent where we have more context. + return + } + + // saveg calls gentraceback, which may call cgo traceback functions. When + // called from the scheduler, this is on the system stack already so + // traceback.go:cgoContextPCs will avoid calling back into the scheduler. + // + // When called from the goroutine coordinating the profile, we still have + // set gp1.goroutineProfiled to goroutineProfileInProgress and so are still + // preventing it from being truly _Grunnable. So we'll use the system stack + // to avoid schedule delays. 
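+ //
+ // The ^uintptr(0) sentinels passed below tell the unwinder to start
+ // from gp1's saved scheduler state (gp1.sched) rather than from live
+ // registers; gp1 is not running, so that is where its stack resumes.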
+ systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset]) })
+
+ if goroutineProfile.labels != nil {
+ goroutineProfile.labels[offset] = gp1.labels
+ }
+}
+
+func goroutineProfileWithLabelsSync(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+ gp := getg()
+
+ isOK := func(gp1 *g) bool {
+ // Checking isSystemGoroutine here makes GoroutineProfile
+ // consistent with both NumGoroutine and Stack.
+ return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
+ }
+
+ stw := stopTheWorld(stwGoroutineProfile)
+
+ // World is stopped, no locking required.
+ n = 1
+ forEachGRace(func(gp1 *g) {
+ if isOK(gp1) {
+ n++
+ }
+ })
+
+ if n <= len(p) {
+ ok = true
+ r, lbl := p, labels
+
+ // Save current goroutine.
+ sp := getcallersp()
+ pc := getcallerpc()
+ systemstack(func() {
+ saveg(pc, sp, gp, &r[0])
+ })
+ r = r[1:]
+
+ // If we have a place to put our goroutine labelmap, insert it there.
+ if labels != nil {
+ lbl[0] = gp.labels
+ lbl = lbl[1:]
+ }
+
+ // Save other goroutines.
+ forEachGRace(func(gp1 *g) {
+ if !isOK(gp1) {
+ return
+ }
+
+ if len(r) == 0 {
+ // Should be impossible, but better to return a
+ // truncated profile than to crash the entire process.
+ return
+ }
+ // saveg calls gentraceback, which may call cgo traceback functions.
+ // The world is stopped, so it cannot use cgocall (which will be
+ // blocked at exitsyscall). Do it on the system stack so it won't
+ // call into the scheduler (see traceback.go:cgoContextPCs).
+ systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0]) })
+ if labels != nil {
+ lbl[0] = gp1.labels
+ lbl = lbl[1:]
+ }
+ r = r[1:]
+ })
+ }
+
+ if raceenabled {
+ raceacquire(unsafe.Pointer(&labelSync))
+ }
+
+ startTheWorld(stw)
+ return n, ok
+}
+
+// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
+// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
+// If len(p) < n, GoroutineProfile does not change p and returns n, false.
+//
+// Most clients should use the [runtime/pprof] package instead
+// of calling GoroutineProfile directly.
+func GoroutineProfile(p []StackRecord) (n int, ok bool) {
+ return goroutineProfileWithLabels(p, nil)
+}
+
+func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
+ var u unwinder
+ u.initAt(pc, sp, 0, gp, unwindSilentErrors)
+ n := tracebackPCs(&u, 0, r.Stack0[:])
+ if n < len(r.Stack0) {
+ r.Stack0[n] = 0
+ }
+}
+
+// Stack formats a stack trace of the calling goroutine into buf
+// and returns the number of bytes written to buf.
+// If all is true, Stack formats stack traces of all other goroutines
+// into buf after the trace for the current goroutine.
+func Stack(buf []byte, all bool) int {
+ var stw worldStop
+ if all {
+ stw = stopTheWorld(stwAllGoroutinesStack)
+ }
+
+ n := 0
+ if len(buf) > 0 {
+ gp := getg()
+ sp := getcallersp()
+ pc := getcallerpc()
+ systemstack(func() {
+ g0 := getg()
+ // Force traceback=1 to override GOTRACEBACK setting,
+ // so that Stack's results are consistent.
+ // GOTRACEBACK is only about crash dumps.
+ g0.m.traceback = 1
+ g0.writebuf = buf[0:0:len(buf)]
+ goroutineheader(gp)
+ traceback(pc, sp, 0, gp)
+ if all {
+ tracebackothers(gp)
+ }
+ g0.m.traceback = 0
+ n = len(g0.writebuf)
+ g0.writebuf = nil
+ })
+ }
+
+ if all {
+ startTheWorld(stw)
+ }
+ return n
+}
+
+// Tracing of alloc/free/gc.
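+
+// A typical way to call Stack above (a sketch; the starting size is
+// arbitrary) is to grow the buffer until the trace fits, since Stack
+// truncates its output rather than reporting the size it needs:
+//
+//	buf := make([]byte, 1<<16)
+//	for {
+//		n := runtime.Stack(buf, true)
+//		if n < len(buf) {
+//			buf = buf[:n]
+//			break
+//		}
+//		buf = make([]byte, 2*len(buf))
+//	}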
+
+var tracelock mutex
+
+func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
+ lock(&tracelock)
+ gp := getg()
+ gp.m.traceback = 2
+ if typ == nil {
+ print("tracealloc(", p, ", ", hex(size), ")\n")
+ } else {
+ print("tracealloc(", p, ", ", hex(size), ", ", toRType(typ).string(), ")\n")
+ }
+ if gp.m.curg == nil || gp == gp.m.curg {
+ goroutineheader(gp)
+ pc := getcallerpc()
+ sp := getcallersp()
+ systemstack(func() {
+ traceback(pc, sp, 0, gp)
+ })
+ } else {
+ goroutineheader(gp.m.curg)
+ traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
+ }
+ print("\n")
+ gp.m.traceback = 0
+ unlock(&tracelock)
+}
+
+func tracefree(p unsafe.Pointer, size uintptr) {
+ lock(&tracelock)
+ gp := getg()
+ gp.m.traceback = 2
+ print("tracefree(", p, ", ", hex(size), ")\n")
+ goroutineheader(gp)
+ pc := getcallerpc()
+ sp := getcallersp()
+ systemstack(func() {
+ traceback(pc, sp, 0, gp)
+ })
+ print("\n")
+ gp.m.traceback = 0
+ unlock(&tracelock)
+}
+
+func tracegc() {
+ lock(&tracelock)
+ gp := getg()
+ gp.m.traceback = 2
+ print("tracegc()\n")
+ // running on m->g0 stack; show all non-g0 goroutines
+ tracebackothers(gp)
+ print("end tracegc\n")
+ print("\n")
+ gp.m.traceback = 0
+ unlock(&tracelock)
+}
diff --git a/platform/dbops/binaries/go/go/src/runtime/mranges.go b/platform/dbops/binaries/go/go/src/runtime/mranges.go
new file mode 100644
index 0000000000000000000000000000000000000000..6dd1a752473220f0221ddb74ca509aff234fab0e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/runtime/mranges.go
@@ -0,0 +1,460 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Address range data structure.
+//
+// This file contains an implementation of a data structure which
+// manages ordered address ranges.
+
+package runtime
+
+import (
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// addrRange represents a region of address space.
+//
+// An addrRange must never span a gap in the address space.
+type addrRange struct {
+ // base and limit together represent the region of address space
+ // [base, limit). That is, base is inclusive, limit is exclusive.
+ // These are addresses over an offset view of the address space on
+ // platforms with a segmented address space, that is, on platforms
+ // where arenaBaseOffset != 0.
+ base, limit offAddr
+}
+
+// makeAddrRange creates a new address range from two virtual addresses.
+//
+// Throws if the base and limit are not in the same memory segment.
+func makeAddrRange(base, limit uintptr) addrRange {
+ r := addrRange{offAddr{base}, offAddr{limit}}
+ if (base-arenaBaseOffset >= base) != (limit-arenaBaseOffset >= limit) {
+ throw("addr range base and limit are not in the same memory segment")
+ }
+ return r
+}
+
+// size returns the size of the range represented in bytes.
+func (a addrRange) size() uintptr {
+ if !a.base.lessThan(a.limit) {
+ return 0
+ }
+ // Subtraction is safe because limit and base must be in the same
+ // segment of the address space.
+ return a.limit.diff(a.base)
+}
+
+// contains returns whether or not the range contains a given address.
+func (a addrRange) contains(addr uintptr) bool {
+ return a.base.lessEqual(offAddr{addr}) && (offAddr{addr}).lessThan(a.limit)
+}
+
+// subtract removes from a any overlap with b, then returns the new range;
+// for example, subtracting [12, 20) from [8, 16) yields [8, 12). subtract
+// assumes that a and b either don't overlap at all, only overlap on one
+// side, or are equal.
+// If b is strictly contained in a, thus forcing a split, it will throw.
+func (a addrRange) subtract(b addrRange) addrRange {
+ if b.base.lessEqual(a.base) && a.limit.lessEqual(b.limit) {
+ return addrRange{}
+ } else if a.base.lessThan(b.base) && b.limit.lessThan(a.limit) {
+ throw("bad prune")
+ } else if b.limit.lessThan(a.limit) && a.base.lessThan(b.limit) {
+ a.base = b.limit
+ } else if a.base.lessThan(b.base) && b.base.lessThan(a.limit) {
+ a.limit = b.base
+ }
+ return a
+}
+
+// takeFromFront takes len bytes from the front of the address range, aligning
+// the base to align first. On success, returns the aligned start of the region
+// taken and true.
+func (a *addrRange) takeFromFront(len uintptr, align uint8) (uintptr, bool) {
+ base := alignUp(a.base.addr(), uintptr(align)) + len
+ if base > a.limit.addr() {
+ return 0, false
+ }
+ a.base = offAddr{base}
+ return base - len, true
+}
+
+// takeFromBack takes len bytes from the end of the address range, aligning
+// the limit to align after subtracting len. On success, returns the aligned
+// start of the region taken and true.
+func (a *addrRange) takeFromBack(len uintptr, align uint8) (uintptr, bool) {
+ limit := alignDown(a.limit.addr()-len, uintptr(align))
+ if a.base.addr() > limit {
+ return 0, false
+ }
+ a.limit = offAddr{limit}
+ return limit, true
+}
+
+// removeGreaterEqual removes all addresses in a greater than or equal
+// to addr and returns the new range.
+func (a addrRange) removeGreaterEqual(addr uintptr) addrRange {
+ if (offAddr{addr}).lessEqual(a.base) {
+ return addrRange{}
+ }
+ if a.limit.lessEqual(offAddr{addr}) {
+ return a
+ }
+ return makeAddrRange(a.base.addr(), addr)
+}
+
+var (
+ // minOffAddr is the minimum address in the offset space, and
+ // it corresponds to the virtual address arenaBaseOffset.
+ minOffAddr = offAddr{arenaBaseOffset}
+
+ // maxOffAddr is the maximum address in the offset address
+ // space. It corresponds to the highest virtual address representable
+ // by the page alloc chunk and heap arena maps.
+ maxOffAddr = offAddr{(((1 << heapAddrBits) - 1) + arenaBaseOffset) & uintptrMask}
+)
+
+// offAddr represents an address in a contiguous view
+// of the address space on systems where the address space is
+// segmented. On other systems, it's just a normal address.
+type offAddr struct {
+ // a is just the virtual address, but should never be used
+ // directly. Call addr() to get this value instead.
+ a uintptr
+}
+
+// add adds a uintptr offset to the offAddr.
+func (l offAddr) add(bytes uintptr) offAddr {
+ return offAddr{a: l.a + bytes}
+}
+
+// sub subtracts a uintptr offset from the offAddr.
+func (l offAddr) sub(bytes uintptr) offAddr {
+ return offAddr{a: l.a - bytes}
+}
+
+// diff returns the number of bytes between the two offAddrs.
+func (l1 offAddr) diff(l2 offAddr) uintptr {
+ return l1.a - l2.a
+}
+
+// lessThan returns true if l1 is less than l2 in the offset
+// address space.
+func (l1 offAddr) lessThan(l2 offAddr) bool {
+ return (l1.a - arenaBaseOffset) < (l2.a - arenaBaseOffset)
+}
+
+// lessEqual returns true if l1 is less than or equal to l2 in
+// the offset address space.
+func (l1 offAddr) lessEqual(l2 offAddr) bool {
+ return (l1.a - arenaBaseOffset) <= (l2.a - arenaBaseOffset)
+}
+
+// equal returns true if the two offAddr values are equal.
+func (l1 offAddr) equal(l2 offAddr) bool {
+ // No need to compare in the offset space, it
+ // means the same thing.
+ return l1 == l2
+}
+
+// addr returns the virtual address for this offset address.
+func (l offAddr) addr() uintptr {
+ return l.a
+}
+
+// atomicOffAddr is like offAddr, but operations on it are atomic.
+// It also provides operations for storing marked addresses, to ensure
+// that they're not overwritten until they've been seen.
+type atomicOffAddr struct {
+ // a contains the offset address, unlike offAddr.
+ a atomic.Int64
+}
+
+// Clear attempts to store minOffAddr in atomicOffAddr. It may fail
+// if a marked value is placed in the box in the meantime.
+func (b *atomicOffAddr) Clear() {
+ for {
+ old := b.a.Load()
+ if old < 0 {
+ return
+ }
+ if b.a.CompareAndSwap(old, int64(minOffAddr.addr()-arenaBaseOffset)) {
+ return
+ }
+ }
+}
+
+// StoreMin stores addr if it's less than the current value in the
+// offset address space and the current value is not marked.
+func (b *atomicOffAddr) StoreMin(addr uintptr) {
+ new := int64(addr - arenaBaseOffset)
+ for {
+ old := b.a.Load()
+ if old < new {
+ return
+ }
+ if b.a.CompareAndSwap(old, new) {
+ return
+ }
+ }
+}
+
+// StoreUnmark attempts to unmark the value in atomicOffAddr and
+// replace it with newAddr. markedAddr must be a marked address
+// returned by Load. This function will not store newAddr if the
+// box no longer contains markedAddr.
+func (b *atomicOffAddr) StoreUnmark(markedAddr, newAddr uintptr) {
+ b.a.CompareAndSwap(-int64(markedAddr-arenaBaseOffset), int64(newAddr-arenaBaseOffset))
+}
+
+// StoreMarked stores addr, first converting it to the offset address
+// space and then negating it.
+func (b *atomicOffAddr) StoreMarked(addr uintptr) {
+ b.a.Store(-int64(addr - arenaBaseOffset))
+}
+
+// Load returns the address in the box as a virtual address. It also
+// reports whether the value was marked.
+func (b *atomicOffAddr) Load() (uintptr, bool) {
+ v := b.a.Load()
+ wasMarked := false
+ if v < 0 {
+ wasMarked = true
+ v = -v
+ }
+ return uintptr(v) + arenaBaseOffset, wasMarked
+}
+
+// addrRanges is a data structure holding a collection of ranges of
+// address space.
+//
+// The ranges are coalesced eagerly to reduce the
+// number of ranges it holds.
+//
+// The slice backing store for the ranges field is persistentalloc'd
+// and thus there is no way to free it.
+//
+// addrRanges is not thread-safe.
+type addrRanges struct {
+ // ranges is a slice of ranges sorted by base.
+ ranges []addrRange
+
+ // totalBytes is the total amount of address space in bytes counted by
+ // this addrRanges.
+ totalBytes uintptr
+
+ // sysStat is the stat to track allocations by this type.
+ sysStat *sysMemStat
+}
+
+func (a *addrRanges) init(sysStat *sysMemStat) {
+ ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
+ ranges.len = 0
+ ranges.cap = 16
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
+ a.sysStat = sysStat
+ a.totalBytes = 0
+}
+
+// findSucc returns the first index in a such that addr is
+// less than the base of the addrRange at that index.
+func (a *addrRanges) findSucc(addr uintptr) int {
+ base := offAddr{addr}
+
+ // Narrow down the search space via a binary search
+ // for large addrRanges until we have at most iterMax
+ // candidates left.
+ const iterMax = 8
+ bot, top := 0, len(a.ranges)
+ for top-bot > iterMax {
+ i := int(uint(bot+top) >> 1)
+ if a.ranges[i].contains(base.addr()) {
+ // a.ranges[i] contains base, so
+ // its successor is the next index.
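+ // For example, with ranges [5, 10) and [12, 16), findSucc
+ // returns 1 for both addr 7 (inside the first range) and
+ // addr 11 (in the gap between them): index 1 is the first
+ // range whose base is greater than addr.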
+ return i + 1 + } + if base.lessThan(a.ranges[i].base) { + // In this case i might actually be + // the successor, but we can't be sure + // until we check the ones before it. + top = i + } else { + // In this case we know base is + // greater than or equal to a.ranges[i].limit-1, + // so i is definitely not the successor. + // We already checked i, so pick the next + // one. + bot = i + 1 + } + } + // There are top-bot candidates left, so + // iterate over them and find the first that + // base is strictly less than. + for i := bot; i < top; i++ { + if base.lessThan(a.ranges[i].base) { + return i + } + } + return top +} + +// findAddrGreaterEqual returns the smallest address represented by a +// that is >= addr. Thus, if the address is represented by a, +// then it returns addr. The second return value indicates whether +// such an address exists for addr in a. That is, if addr is larger than +// any address known to a, the second return value will be false. +func (a *addrRanges) findAddrGreaterEqual(addr uintptr) (uintptr, bool) { + i := a.findSucc(addr) + if i == 0 { + return a.ranges[0].base.addr(), true + } + if a.ranges[i-1].contains(addr) { + return addr, true + } + if i < len(a.ranges) { + return a.ranges[i].base.addr(), true + } + return 0, false +} + +// contains returns true if a covers the address addr. +func (a *addrRanges) contains(addr uintptr) bool { + i := a.findSucc(addr) + if i == 0 { + return false + } + return a.ranges[i-1].contains(addr) +} + +// add inserts a new address range to a. +// +// r must not overlap with any address range in a and r.size() must be > 0. +func (a *addrRanges) add(r addrRange) { + // The copies in this function are potentially expensive, but this data + // structure is meant to represent the Go heap. At worst, copying this + // would take ~160µs assuming a conservative copying rate of 25 GiB/s (the + // copy will almost never trigger a page fault) for a 1 TiB heap with 4 MiB + // arenas which is completely discontiguous. ~160µs is still a lot, but in + // practice most platforms have 64 MiB arenas (which cuts this by a factor + // of 16) and Go heaps are usually mostly contiguous, so the chance that + // an addrRanges even grows to that size is extremely low. + + // An empty range has no effect on the set of addresses represented + // by a, but passing a zero-sized range is almost always a bug. + if r.size() == 0 { + print("runtime: range = {", hex(r.base.addr()), ", ", hex(r.limit.addr()), "}\n") + throw("attempted to add zero-sized address range") + } + // Because we assume r is not currently represented in a, + // findSucc gives us our insertion index. + i := a.findSucc(r.base.addr()) + coalescesDown := i > 0 && a.ranges[i-1].limit.equal(r.base) + coalescesUp := i < len(a.ranges) && r.limit.equal(a.ranges[i].base) + if coalescesUp && coalescesDown { + // We have neighbors and they both border us. + // Merge a.ranges[i-1], r, and a.ranges[i] together into a.ranges[i-1]. + a.ranges[i-1].limit = a.ranges[i].limit + + // Delete a.ranges[i]. + copy(a.ranges[i:], a.ranges[i+1:]) + a.ranges = a.ranges[:len(a.ranges)-1] + } else if coalescesDown { + // We have a neighbor at a lower address only and it borders us. + // Merge the new space into a.ranges[i-1]. + a.ranges[i-1].limit = r.limit + } else if coalescesUp { + // We have a neighbor at a higher address only and it borders us. + // Merge the new space into a.ranges[i]. + a.ranges[i].base = r.base + } else { + // We may or may not have neighbors which don't border us. + // Add the new range. 
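+ // For example, adding [9000, 9100) to ranges [512, 2048) and
+ // [4096, 8192) takes this path: findSucc returns i = 2, and the
+ // copies below open a slot at that index, shifting any later
+ // ranges up by one.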
+ if len(a.ranges)+1 > cap(a.ranges) { + // Grow the array. Note that this leaks the old array, but since + // we're doubling we have at most 2x waste. For a 1 TiB heap and + // 4 MiB arenas which are all discontiguous (both very conservative + // assumptions), this would waste at most 4 MiB of memory. + oldRanges := a.ranges + ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges)) + ranges.len = len(oldRanges) + 1 + ranges.cap = cap(oldRanges) * 2 + ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat)) + + // Copy in the old array, but make space for the new range. + copy(a.ranges[:i], oldRanges[:i]) + copy(a.ranges[i+1:], oldRanges[i:]) + } else { + a.ranges = a.ranges[:len(a.ranges)+1] + copy(a.ranges[i+1:], a.ranges[i:]) + } + a.ranges[i] = r + } + a.totalBytes += r.size() +} + +// removeLast removes and returns the highest-addressed contiguous range +// of a, or the last nBytes of that range, whichever is smaller. If a is +// empty, it returns an empty range. +func (a *addrRanges) removeLast(nBytes uintptr) addrRange { + if len(a.ranges) == 0 { + return addrRange{} + } + r := a.ranges[len(a.ranges)-1] + size := r.size() + if size > nBytes { + newEnd := r.limit.sub(nBytes) + a.ranges[len(a.ranges)-1].limit = newEnd + a.totalBytes -= nBytes + return addrRange{newEnd, r.limit} + } + a.ranges = a.ranges[:len(a.ranges)-1] + a.totalBytes -= size + return r +} + +// removeGreaterEqual removes the ranges of a which are above addr, and additionally +// splits any range containing addr. +func (a *addrRanges) removeGreaterEqual(addr uintptr) { + pivot := a.findSucc(addr) + if pivot == 0 { + // addr is before all ranges in a. + a.totalBytes = 0 + a.ranges = a.ranges[:0] + return + } + removed := uintptr(0) + for _, r := range a.ranges[pivot:] { + removed += r.size() + } + if r := a.ranges[pivot-1]; r.contains(addr) { + removed += r.size() + r = r.removeGreaterEqual(addr) + if r.size() == 0 { + pivot-- + } else { + removed -= r.size() + a.ranges[pivot-1] = r + } + } + a.ranges = a.ranges[:pivot] + a.totalBytes -= removed +} + +// cloneInto makes a deep clone of a's state into b, re-using +// b's ranges if able. +func (a *addrRanges) cloneInto(b *addrRanges) { + if len(a.ranges) > cap(b.ranges) { + // Grow the array. + ranges := (*notInHeapSlice)(unsafe.Pointer(&b.ranges)) + ranges.len = 0 + ranges.cap = cap(a.ranges) + ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat)) + } + b.ranges = b.ranges[:len(a.ranges)] + b.totalBytes = a.totalBytes + copy(b.ranges, a.ranges) +} diff --git a/platform/dbops/binaries/go/go/src/runtime/mranges_test.go b/platform/dbops/binaries/go/go/src/runtime/mranges_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ed439c56c256af7700c0eeeeed1b2362df0d021f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/mranges_test.go @@ -0,0 +1,275 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + . 
"runtime" + "testing" +) + +func validateAddrRanges(t *testing.T, a *AddrRanges, want ...AddrRange) { + ranges := a.Ranges() + if len(ranges) != len(want) { + t.Errorf("want %v, got %v", want, ranges) + t.Fatal("different lengths") + } + gotTotalBytes := uintptr(0) + wantTotalBytes := uintptr(0) + for i := range ranges { + gotTotalBytes += ranges[i].Size() + wantTotalBytes += want[i].Size() + if ranges[i].Base() >= ranges[i].Limit() { + t.Error("empty range found") + } + // Ensure this is equivalent to what we want. + if !ranges[i].Equals(want[i]) { + t.Errorf("range %d: got [0x%x, 0x%x), want [0x%x, 0x%x)", i, + ranges[i].Base(), ranges[i].Limit(), + want[i].Base(), want[i].Limit(), + ) + } + if i != 0 { + // Ensure the ranges are sorted. + if ranges[i-1].Base() >= ranges[i].Base() { + t.Errorf("ranges %d and %d are out of sorted order", i-1, i) + } + // Check for a failure to coalesce. + if ranges[i-1].Limit() == ranges[i].Base() { + t.Errorf("ranges %d and %d should have coalesced", i-1, i) + } + // Check if any ranges overlap. Because the ranges are sorted + // by base, it's sufficient to just check neighbors. + if ranges[i-1].Limit() > ranges[i].Base() { + t.Errorf("ranges %d and %d overlap", i-1, i) + } + } + } + if wantTotalBytes != gotTotalBytes { + t.Errorf("expected %d total bytes, got %d", wantTotalBytes, gotTotalBytes) + } + if b := a.TotalBytes(); b != gotTotalBytes { + t.Errorf("inconsistent total bytes: want %d, got %d", gotTotalBytes, b) + } + if t.Failed() { + t.Errorf("addrRanges: %v", ranges) + t.Fatal("detected bad addrRanges") + } +} + +func TestAddrRangesAdd(t *testing.T) { + a := NewAddrRanges() + + // First range. + a.Add(MakeAddrRange(512, 1024)) + validateAddrRanges(t, &a, + MakeAddrRange(512, 1024), + ) + + // Coalesce up. + a.Add(MakeAddrRange(1024, 2048)) + validateAddrRanges(t, &a, + MakeAddrRange(512, 2048), + ) + + // Add new independent range. + a.Add(MakeAddrRange(4096, 8192)) + validateAddrRanges(t, &a, + MakeAddrRange(512, 2048), + MakeAddrRange(4096, 8192), + ) + + // Coalesce down. + a.Add(MakeAddrRange(3776, 4096)) + validateAddrRanges(t, &a, + MakeAddrRange(512, 2048), + MakeAddrRange(3776, 8192), + ) + + // Coalesce up and down. + a.Add(MakeAddrRange(2048, 3776)) + validateAddrRanges(t, &a, + MakeAddrRange(512, 8192), + ) + + // Push a bunch of independent ranges to the end to try and force growth. + expectedRanges := []AddrRange{MakeAddrRange(512, 8192)} + for i := uintptr(0); i < 64; i++ { + dRange := MakeAddrRange(8192+(i+1)*2048, 8192+(i+1)*2048+10) + a.Add(dRange) + expectedRanges = append(expectedRanges, dRange) + validateAddrRanges(t, &a, expectedRanges...) + } + + // Push a bunch of independent ranges to the beginning to try and force growth. + var bottomRanges []AddrRange + for i := uintptr(0); i < 63; i++ { + dRange := MakeAddrRange(8+i*8, 8+i*8+4) + a.Add(dRange) + bottomRanges = append(bottomRanges, dRange) + validateAddrRanges(t, &a, append(bottomRanges, expectedRanges...)...) 
+ } +} + +func TestAddrRangesFindSucc(t *testing.T) { + var large []AddrRange + for i := 0; i < 100; i++ { + large = append(large, MakeAddrRange(5+uintptr(i)*5, 5+uintptr(i)*5+3)) + } + + type testt struct { + name string + base uintptr + expect int + ranges []AddrRange + } + tests := []testt{ + { + name: "Empty", + base: 12, + expect: 0, + ranges: []AddrRange{}, + }, + { + name: "OneBefore", + base: 12, + expect: 0, + ranges: []AddrRange{ + MakeAddrRange(14, 16), + }, + }, + { + name: "OneWithin", + base: 14, + expect: 1, + ranges: []AddrRange{ + MakeAddrRange(14, 16), + }, + }, + { + name: "OneAfterLimit", + base: 16, + expect: 1, + ranges: []AddrRange{ + MakeAddrRange(14, 16), + }, + }, + { + name: "OneAfter", + base: 17, + expect: 1, + ranges: []AddrRange{ + MakeAddrRange(14, 16), + }, + }, + { + name: "ThreeBefore", + base: 3, + expect: 0, + ranges: []AddrRange{ + MakeAddrRange(6, 10), + MakeAddrRange(12, 16), + MakeAddrRange(19, 22), + }, + }, + { + name: "ThreeAfter", + base: 24, + expect: 3, + ranges: []AddrRange{ + MakeAddrRange(6, 10), + MakeAddrRange(12, 16), + MakeAddrRange(19, 22), + }, + }, + { + name: "ThreeBetween", + base: 11, + expect: 1, + ranges: []AddrRange{ + MakeAddrRange(6, 10), + MakeAddrRange(12, 16), + MakeAddrRange(19, 22), + }, + }, + { + name: "ThreeWithin", + base: 9, + expect: 1, + ranges: []AddrRange{ + MakeAddrRange(6, 10), + MakeAddrRange(12, 16), + MakeAddrRange(19, 22), + }, + }, + { + name: "Zero", + base: 0, + expect: 1, + ranges: []AddrRange{ + MakeAddrRange(0, 10), + }, + }, + { + name: "Max", + base: ^uintptr(0), + expect: 1, + ranges: []AddrRange{ + MakeAddrRange(^uintptr(0)-5, ^uintptr(0)), + }, + }, + { + name: "LargeBefore", + base: 2, + expect: 0, + ranges: large, + }, + { + name: "LargeAfter", + base: 5 + uintptr(len(large))*5 + 30, + expect: len(large), + ranges: large, + }, + { + name: "LargeBetweenLow", + base: 14, + expect: 2, + ranges: large, + }, + { + name: "LargeBetweenHigh", + base: 249, + expect: 49, + ranges: large, + }, + { + name: "LargeWithinLow", + base: 25, + expect: 5, + ranges: large, + }, + { + name: "LargeWithinHigh", + base: 396, + expect: 79, + ranges: large, + }, + { + name: "LargeWithinMiddle", + base: 250, + expect: 50, + ranges: large, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + a := MakeAddrRanges(test.ranges...) + i := a.FindSucc(test.base) + if i != test.expect { + t.Fatalf("expected %d, got %d", test.expect, i) + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/runtime/msan.go b/platform/dbops/binaries/go/go/src/runtime/msan.go new file mode 100644 index 0000000000000000000000000000000000000000..5e2aae1bd1616e312e3d61ccde0e89e28d0acf74 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/runtime/msan.go @@ -0,0 +1,62 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build msan + +package runtime + +import ( + "unsafe" +) + +// Public memory sanitizer API. + +func MSanRead(addr unsafe.Pointer, len int) { + msanread(addr, uintptr(len)) +} + +func MSanWrite(addr unsafe.Pointer, len int) { + msanwrite(addr, uintptr(len)) +} + +// Private interface for the runtime. +const msanenabled = true + +// If we are running on the system stack, the C program may have +// marked part of that stack as uninitialized. We don't instrument +// the runtime, but operations like a slice copy can call msanread +// anyhow for values on the stack. 
Just ignore msanread when running +// on the system stack. The other msan functions are fine. +// +//go:nosplit +func msanread(addr unsafe.Pointer, sz uintptr) { + gp := getg() + if gp == nil || gp.m == nil || gp == gp.m.g0 || gp == gp.m.gsignal { + return + } + domsanread(addr, sz) +} + +//go:noescape +func domsanread(addr unsafe.Pointer, sz uintptr) + +//go:noescape +func msanwrite(addr unsafe.Pointer, sz uintptr) + +//go:noescape +func msanmalloc(addr unsafe.Pointer, sz uintptr) + +//go:noescape +func msanfree(addr unsafe.Pointer, sz uintptr) + +//go:noescape +func msanmove(dst, src unsafe.Pointer, sz uintptr) + +// These are called from msan_GOARCH.s +// +//go:cgo_import_static __msan_read_go +//go:cgo_import_static __msan_write_go +//go:cgo_import_static __msan_malloc_go +//go:cgo_import_static __msan_free_go +//go:cgo_import_static __msan_memmove
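+
+// Sketch of typical use of the public API above (illustrative only; the
+// program must be built with -msan on a supported platform). After C code
+// fills a Go buffer through a path MSan did not instrument, marking the
+// region as written suppresses false "uninitialized read" reports:
+//
+//	runtime.MSanWrite(unsafe.Pointer(&buf[0]), len(buf))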